Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)

Merge remote-tracking branch 'origin/master' into a1111-meta-v2
Commit ba4a754a53
.ci/nightly/update_windows/update.py (new executable file, 65 lines)
@@ -0,0 +1,65 @@
import pygit2
from datetime import datetime
import sys

def pull(repo, remote_name='origin', branch='master'):
    for remote in repo.remotes:
        if remote.name == remote_name:
            remote.fetch()
            remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
            merge_result, _ = repo.merge_analysis(remote_master_id)
            # Up to date, do nothing
            if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
                return
            # We can just fastforward
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
                repo.checkout_tree(repo.get(remote_master_id))
                try:
                    master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
                    master_ref.set_target(remote_master_id)
                except KeyError:
                    repo.create_branch(branch, repo.get(remote_master_id))
                repo.head.set_target(remote_master_id)
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
                repo.merge(remote_master_id)

                if repo.index.conflicts is not None:
                    for conflict in repo.index.conflicts:
                        print('Conflicts found in:', conflict[0].path)
                    raise AssertionError('Conflicts, ahhhhh!!')

                user = repo.default_signature
                tree = repo.index.write_tree()
                commit = repo.create_commit('HEAD',
                                            user,
                                            user,
                                            'Merge!',
                                            tree,
                                            [repo.head.target, remote_master_id])
                # We need to do this or git CLI will think we are still merging.
                repo.state_cleanup()
            else:
                raise AssertionError('Unknown merge analysis result')

repo = pygit2.Repository(str(sys.argv[1]))
ident = pygit2.Signature('comfyui', 'comfy@ui')
try:
    print("stashing current changes")
    repo.stash(ident)
except KeyError:
    print("nothing to stash")
backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
print("creating backup branch: {}".format(backup_branch_name))
repo.branches.local.create(backup_branch_name, repo.head.peel())

print("checking out master branch")
branch = repo.lookup_branch('master')
ref = repo.lookup_reference(branch.name)
repo.checkout(ref)

print("pulling latest changes")
pull(repo)

print("Done!")
.ci/nightly/update_windows/update_comfyui.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
pause
.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat (new executable file, 3 lines)
@@ -0,0 +1,3 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2
pause
.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt (new executable file, 27 lines)
@@ -0,0 +1,27 @@
HOW TO RUN:

if you have a NVIDIA gpu:

run_nvidia_gpu.bat


To run it in slow CPU mode:

run_cpu.bat


IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints

You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt


RECOMMENDED WAY TO UPDATE:
To update the ComfyUI code: update\update_comfyui.bat


To update ComfyUI with the python dependencies:
update\update_comfyui_and_python_dependencies.bat
.ci/nightly/windows_base_files/run_cpu.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
pause
.ci/nightly/windows_base_files/run_nvidia_gpu.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention
pause
.ci/update_windows/update.py (new executable file, 65 lines)
@@ -0,0 +1,65 @@
import pygit2
from datetime import datetime
import sys

def pull(repo, remote_name='origin', branch='master'):
    for remote in repo.remotes:
        if remote.name == remote_name:
            remote.fetch()
            remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
            merge_result, _ = repo.merge_analysis(remote_master_id)
            # Up to date, do nothing
            if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
                return
            # We can just fastforward
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
                repo.checkout_tree(repo.get(remote_master_id))
                try:
                    master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
                    master_ref.set_target(remote_master_id)
                except KeyError:
                    repo.create_branch(branch, repo.get(remote_master_id))
                repo.head.set_target(remote_master_id)
            elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
                repo.merge(remote_master_id)

                if repo.index.conflicts is not None:
                    for conflict in repo.index.conflicts:
                        print('Conflicts found in:', conflict[0].path)
                    raise AssertionError('Conflicts, ahhhhh!!')

                user = repo.default_signature
                tree = repo.index.write_tree()
                commit = repo.create_commit('HEAD',
                                            user,
                                            user,
                                            'Merge!',
                                            tree,
                                            [repo.head.target, remote_master_id])
                # We need to do this or git CLI will think we are still merging.
                repo.state_cleanup()
            else:
                raise AssertionError('Unknown merge analysis result')

repo = pygit2.Repository(str(sys.argv[1]))
ident = pygit2.Signature('comfyui', 'comfy@ui')
try:
    print("stashing current changes")
    repo.stash(ident)
except KeyError:
    print("nothing to stash")
backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
print("creating backup branch: {}".format(backup_branch_name))
repo.branches.local.create(backup_branch_name, repo.head.peel())

print("checking out master branch")
branch = repo.lookup_branch('master')
ref = repo.lookup_reference(branch.name)
repo.checkout(ref)

print("pulling latest changes")
pull(repo)

print("Done!")
.ci/update_windows/update_comfyui.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
pause
.ci/update_windows/update_comfyui_and_python_dependencies.bat (new executable file, 3 lines)
@@ -0,0 +1,3 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
pause
.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat (new executable file, 4 lines)
@@ -0,0 +1,4 @@
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
echo NOTE If you get an error with pip you can ignore it, it's pip being pip as usual, your ComfyUI should have updated anyways.
pause
.ci/windows_base_files/README_VERY_IMPORTANT.txt (new executable file, 27 lines)
@@ -0,0 +1,27 @@
HOW TO RUN:

if you have a NVIDIA gpu:

run_nvidia_gpu.bat


To run it in slow CPU mode:

run_cpu.bat


IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints

You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt


RECOMMENDED WAY TO UPDATE:
To update the ComfyUI code: update\update_comfyui.bat


To update ComfyUI with the python dependencies, note that you should ONLY run this if you have issues with python dependencies.
update\update_comfyui_and_python_dependencies.bat
.ci/windows_base_files/run_cpu.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
pause
.ci/windows_base_files/run_nvidia_gpu.bat (new executable file, 2 lines)
@@ -0,0 +1,2 @@
.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build
pause
.github/workflows/windows_release.yml (new file, vendored, 64 lines)
@@ -0,0 +1,64 @@
name: "Windows Release"

on:
  workflow_dispatch:
#  push:
#    branches:
#      - master

jobs:
  build:
    permissions:
      contents: "write"
      packages: "write"
      pull-requests: "read"
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - shell: bash
        run: |
          cd ..
          cp -r ComfyUI ComfyUI_copy
          curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
          unzip python_embeded.zip -d python_embeded
          cd python_embeded
          echo 'import site' >> ./python310._pth
          curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
          ./python.exe get-pip.py
          ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
          sed -i '1i../ComfyUI' ./python310._pth
          cd ..

          mkdir ComfyUI_windows_portable
          mv python_embeded ComfyUI_windows_portable
          mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI

          cd ComfyUI_windows_portable

          mkdir update
          cp -r ComfyUI/.ci/update_windows/* ./update/
          cp -r ComfyUI/.ci/windows_base_files/* ./

          cd ..

          "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
          mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z

          cd ComfyUI_windows_portable
          python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu

          ls

      - name: Upload binaries to release
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: ComfyUI_windows_portable_nvidia_or_cpu.7z
          tag: "latest"
          overwrite: true
.github/workflows/windows_release_cu118.yml (new file, vendored, 141 lines)
@@ -0,0 +1,141 @@
name: "Windows Release cu118"

on:
  workflow_dispatch:
#  push:
#    branches:
#      - master

jobs:
  build_dependencies:
    env:
      # you need at least cuda 5.0 for some of the stuff compiled here.
      TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6 8.9"
      FORCE_CUDA: 1
      MAX_JOBS: 1 # will crash otherwise
      DISTUTILS_USE_SDK: 1 # otherwise distutils will complain on windows about multiple versions of msvc
      XFORMERS_BUILD_TYPE: "Release"
    runs-on: windows-latest
    steps:
      - name: Cache Built Dependencies
        uses: actions/cache@v3
        id: cache-cu118_python_stuff
        with:
          path: cu118_python_deps.tar
          key: ${{ runner.os }}-build-cu118

      - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
        uses: actions/checkout@v3

      - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.10.9'

      - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
        uses: comfyanonymous/cuda-toolkit@test
        id: cuda-toolkit
        with:
          cuda: '11.8.0'
      # copied from xformers github
      - name: Setup MSVC
        uses: ilammy/msvc-dev-cmd@v1
      - name: Configure Pagefile
        # windows runners will OOM with many CUDA architectures
        # we cheat here with a page file
        uses: al-cheb/configure-pagefile-action@v1.3
        with:
          minimum-size: 2GB
      # really unfortunate: https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash
      - name: Remove link.exe
        shell: bash
        run: rm /usr/bin/link

      - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
        shell: bash
        run: |
          python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
          python -m pip install --no-cache-dir ./temp_wheel_dir/*
          echo installed basic
          git clone --recurse-submodules https://github.com/facebookresearch/xformers.git
          cd xformers
          python -m pip install --no-cache-dir wheel setuptools twine
          echo building xformers
          python setup.py bdist_wheel -d ../temp_wheel_dir/
          cd ..
          rm -rf xformers
          ls -lah temp_wheel_dir
          mv temp_wheel_dir cu118_python_deps
          tar cf cu118_python_deps.tar cu118_python_deps
      - uses: actions/upload-artifact@v3
        with:
          name: cu118_python_deps
          path: cu118_python_deps.tar


  package_comfyui:
    needs: build_dependencies
    permissions:
      contents: "write"
      packages: "write"
      pull-requests: "read"
    runs-on: windows-latest
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: cu118_python_deps
      - shell: bash
        run: |
          mv cu118_python_deps.tar ../
          cd ..
          tar xf cu118_python_deps.tar
          pwd
          ls

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - shell: bash
        run: |
          cd ..
          cp -r ComfyUI ComfyUI_copy
          curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
          unzip python_embeded.zip -d python_embeded
          cd python_embeded
          echo 'import site' >> ./python310._pth
          curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
          ./python.exe get-pip.py
          ./python.exe -s -m pip install ../cu118_python_deps/*
          sed -i '1i../ComfyUI' ./python310._pth
          cd ..

          mkdir ComfyUI_windows_portable
          mv python_embeded ComfyUI_windows_portable
          mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI

          cd ComfyUI_windows_portable

          mkdir update
          cp -r ComfyUI/.ci/update_windows/* ./update/
          cp -r ComfyUI/.ci/update_windows_cu118/* ./update/
          cp -r ComfyUI/.ci/windows_base_files/* ./

          cd ..

          "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
          mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z

          cd ComfyUI_windows_portable
          python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu

          ls

      - name: Upload binaries to release
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
          tag: "latest"
          overwrite: true
.github/workflows/windows_release_nightly_pytorch.yml (new file, vendored, 66 lines)
@@ -0,0 +1,66 @@
name: "Windows Release Nightly pytorch"

on:
  workflow_dispatch:
#  push:
#    branches:
#      - master

jobs:
  build:
    permissions:
      contents: "write"
      packages: "write"
      pull-requests: "read"
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v4
        with:
          python-version: '3.10.9'
      - shell: bash
        run: |
          cd ..
          cp -r ComfyUI ComfyUI_copy
          curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
          unzip python_embeded.zip -d python_embeded
          cd python_embeded
          echo 'import site' >> ./python310._pth
          curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
          ./python.exe get-pip.py
          python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir
          ls ../temp_wheel_dir
          ./python.exe -s -m pip install --pre ../temp_wheel_dir/*
          sed -i '1i../ComfyUI' ./python310._pth
          cd ..

          mkdir ComfyUI_windows_portable_nightly_pytorch
          mv python_embeded ComfyUI_windows_portable_nightly_pytorch
          mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI

          cd ComfyUI_windows_portable_nightly_pytorch

          mkdir update
          cp -r ComfyUI/.ci/update_windows/* ./update/
          cp -r ComfyUI/.ci/windows_base_files/* ./

          cd ..

          "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch
          mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z

          cd ComfyUI_windows_portable_nightly_pytorch
          python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu

          ls

      - name: Upload binaries to release
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z
          tag: "latest"
          overwrite: true
README.md (33 changed lines)
@@ -31,6 +31,20 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git

 # Installing
+
+## Windows
+
+There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
+
+### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z)
+
+Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints
+
+## Colab Notebook
+
+To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+
+## Manual Install (Windows, Linux)

 Git clone this repo.

 Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints

@@ -39,20 +53,17 @@ Put your VAE in: models/vae

 At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10.

-### AMD
-AMD users can install rocm and pytorch with pip if you don't have it already installed:
+### AMD (Linux only)
+AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:

-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```

 ### NVIDIA

-Nvidia users should install torch using this command:
+Nvidia users should install torch and xformers using this command:

-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117```
-
-Nvidia users should also install Xformers for a speed boost but can still run the software without it.
-
-```pip install xformers```
+```pip install torch==1.13.1 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers```

 #### Troubleshooting

@@ -112,10 +123,6 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the

 ```embedding:embedding_filename.pt```

-### Colab Notebook
-
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
-
 ### Fedora

 To get python 3.10 on fedora:
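(Not part of the commit.) After running either pip command from the README hunk above, a quick sanity check that the installed torch build actually sees a GPU, on both the CUDA and ROCm wheels, is:

import torch
print(torch.__version__)
print(torch.cuda.is_available())  # True on a working NVIDIA/ROCm install, False on a CPU-only build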
@@ -59,9 +59,9 @@ class ControlNet(nn.Module):

         if context_dim is not None:
             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
+            # from omegaconf.listconfig import ListConfig
+            # if type(context_dim) == ListConfig:
+            #     context_dim = list(context_dim)

         if num_heads_upsample == -1:
             num_heads_upsample = num_heads
@@ -18,7 +18,6 @@ import itertools
 from tqdm import tqdm
 from torchvision.utils import make_grid
 # from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig

 from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
 from ldm.modules.ema import LitEma

@@ -1124,8 +1123,8 @@ class LatentDiffusion(DDPM):
     def get_unconditional_conditioning(self, batch_size, null_label=None):
         if null_label is not None:
             xc = null_label
-            if isinstance(xc, ListConfig):
-                xc = list(xc)
+            # if isinstance(xc, ListConfig):
+            #     xc = list(xc)
             if isinstance(xc, dict) or isinstance(xc, list):
                 c = self.get_learned_conditioning(xc)
             else:
|
|||||||
|
|
||||||
import model_management
|
import model_management
|
||||||
|
|
||||||
try:
|
|
||||||
|
if model_management.xformers_enabled():
|
||||||
import xformers
|
import xformers
|
||||||
import xformers.ops
|
import xformers.ops
|
||||||
XFORMERS_IS_AVAILBLE = True
|
|
||||||
except:
|
|
||||||
XFORMERS_IS_AVAILBLE = False
|
|
||||||
|
|
||||||
# CrossAttn precision handling
|
# CrossAttn precision handling
|
||||||
import os
|
import os
|
||||||
@ -481,23 +479,19 @@ class CrossAttentionPytorch(nn.Module):
|
|||||||
return self.to_out(out)
|
return self.to_out(out)
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
if XFORMERS_IS_AVAILBLE == False or "--disable-xformers" in sys.argv:
|
if model_management.xformers_enabled():
|
||||||
|
print("Using xformers cross attention")
|
||||||
|
CrossAttention = MemoryEfficientCrossAttention
|
||||||
|
elif model_management.pytorch_attention_enabled():
|
||||||
|
print("Using pytorch cross attention")
|
||||||
|
CrossAttention = CrossAttentionPytorch
|
||||||
|
else:
|
||||||
if "--use-split-cross-attention" in sys.argv:
|
if "--use-split-cross-attention" in sys.argv:
|
||||||
print("Using split optimization for cross attention")
|
print("Using split optimization for cross attention")
|
||||||
CrossAttention = CrossAttentionDoggettx
|
CrossAttention = CrossAttentionDoggettx
|
||||||
else:
|
else:
|
||||||
if "--use-pytorch-cross-attention" in sys.argv:
|
print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
|
||||||
print("Using pytorch cross attention")
|
CrossAttention = CrossAttentionBirchSan
|
||||||
torch.backends.cuda.enable_math_sdp(False)
|
|
||||||
torch.backends.cuda.enable_flash_sdp(True)
|
|
||||||
torch.backends.cuda.enable_mem_efficient_sdp(True)
|
|
||||||
CrossAttention = CrossAttentionPytorch
|
|
||||||
else:
|
|
||||||
print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
|
|
||||||
CrossAttention = CrossAttentionBirchSan
|
|
||||||
else:
|
|
||||||
print("Using xformers cross attention")
|
|
||||||
CrossAttention = MemoryEfficientCrossAttention
|
|
||||||
|
|
||||||
|
|
||||||
class BasicTransformerBlock(nn.Module):
|
class BasicTransformerBlock(nn.Module):
|
||||||
|
@@ -9,13 +9,9 @@ from typing import Optional, Any
 from ldm.modules.attention import MemoryEfficientCrossAttention
 import model_management

-try:
+if model_management.xformers_enabled():
     import xformers
     import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
-except:
-    XFORMERS_IS_AVAILBLE = False
-    print("No module 'xformers'. Proceeding without it.")

 try:
     OOM_EXCEPTION = torch.cuda.OutOfMemoryError

@@ -303,6 +299,64 @@ class MemoryEfficientAttnBlock(nn.Module):
         out = self.proj_out(out)
         return x+out

+class MemoryEfficientAttnBlockPytorch(nn.Module):
+    def __init__(self, in_channels):
+        super().__init__()
+        self.in_channels = in_channels
+
+        self.norm = Normalize(in_channels)
+        self.q = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.k = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.v = torch.nn.Conv2d(in_channels,
+                                 in_channels,
+                                 kernel_size=1,
+                                 stride=1,
+                                 padding=0)
+        self.proj_out = torch.nn.Conv2d(in_channels,
+                                        in_channels,
+                                        kernel_size=1,
+                                        stride=1,
+                                        padding=0)
+        self.attention_op: Optional[Any] = None
+
+    def forward(self, x):
+        h_ = x
+        h_ = self.norm(h_)
+        q = self.q(h_)
+        k = self.k(h_)
+        v = self.v(h_)
+
+        # compute attention
+        B, C, H, W = q.shape
+        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
+
+        q, k, v = map(
+            lambda t: t.unsqueeze(3)
+            .reshape(B, t.shape[1], 1, C)
+            .permute(0, 2, 1, 3)
+            .reshape(B * 1, t.shape[1], C)
+            .contiguous(),
+            (q, k, v),
+        )
+        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
+
+        out = (
+            out.unsqueeze(0)
+            .reshape(B, 1, out.shape[1], C)
+            .permute(0, 2, 1, 3)
+            .reshape(B, out.shape[1], C)
+        )
+        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
+        out = self.proj_out(out)
+        return x+out
+
 class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
     def forward(self, x, context=None, mask=None):

@@ -315,8 +369,10 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):

 def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
-    if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
+    if model_management.xformers_enabled() and attn_type == "vanilla":
         attn_type = "vanilla-xformers"
+    if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
+        attn_type = "vanilla-pytorch"
     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
     if attn_type == "vanilla":
         assert attn_kwargs is None

@@ -324,6 +380,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     elif attn_type == "vanilla-xformers":
         print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
         return MemoryEfficientAttnBlock(in_channels)
+    elif attn_type == "vanilla-pytorch":
+        return MemoryEfficientAttnBlockPytorch(in_channels)
     elif type == "memory-efficient-cross-attn":
         attn_kwargs["query_dim"] = in_channels
         return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
@@ -477,9 +477,9 @@ class UNetModel(nn.Module):

         if context_dim is not None:
             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
+            # from omegaconf.listconfig import ListConfig
+            # if type(context_dim) == ListConfig:
+            #     context_dim = list(context_dim)

         if num_heads_upsample == -1:
             num_heads_upsample = num_heads
@@ -31,8 +31,25 @@ try:
 except:
     pass

-if "--cpu" in sys.argv:
-    vram_state = CPU
+if "--disable-xformers" in sys.argv:
+    XFORMERS_IS_AVAILBLE = False
+else:
+    try:
+        import xformers
+        import xformers.ops
+        XFORMERS_IS_AVAILBLE = True
+    except:
+        XFORMERS_IS_AVAILBLE = False
+
+ENABLE_PYTORCH_ATTENTION = False
+if "--use-pytorch-cross-attention" in sys.argv:
+    torch.backends.cuda.enable_math_sdp(True)
+    torch.backends.cuda.enable_flash_sdp(True)
+    torch.backends.cuda.enable_mem_efficient_sdp(True)
+    ENABLE_PYTORCH_ATTENTION = True
+    XFORMERS_IS_AVAILBLE = False

 if "--lowvram" in sys.argv:
     set_vram_to = LOW_VRAM
 if "--novram" in sys.argv:

@@ -54,6 +71,8 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
     total_vram_available_mb = (total_vram - 1024) // 2
     total_vram_available_mb = int(max(256, total_vram_available_mb))

+if "--cpu" in sys.argv:
+    vram_state = CPU

 print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state])

@@ -159,6 +178,14 @@ def get_autocast_device(dev):
         return dev.type
     return "cuda"

+def xformers_enabled():
+    if vram_state == CPU:
+        return False
+    return XFORMERS_IS_AVAILBLE
+
+def pytorch_attention_enabled():
+    return ENABLE_PYTORCH_ATTENTION
+
 def get_free_memory(dev=None, torch_free_too=False):
     if dev is None:
         dev = get_torch_device()
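Taken together with the attention hunks earlier, the command-line flags above decide the cross-attention backend at startup: xformers wins unless it is disabled or the process runs on CPU, the PyTorch 2.0 scaled-dot-product path is used when --use-pytorch-cross-attention is given, and the split or sub-quadratic implementations are the fallback. The following minimal sketch (not part of the commit, and assuming it is run from a ComfyUI checkout so the model_management module added here is importable) just echoes that precedence:

import model_management

# Sketch only: mirrors the selection order from the attention.py hunk above.
if model_management.xformers_enabled():
    backend = "xformers"  # default when xformers imports cleanly and --disable-xformers / --cpu are absent
elif model_management.pytorch_attention_enabled():
    backend = "torch.nn.functional.scaled_dot_product_attention"  # set by --use-pytorch-cross-attention
else:
    backend = "split or sub-quadratic fallback"  # --use-split-cross-attention or the default fallback
print("cross attention backend:", backend)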
comfy/sd.py (20 changed lines)
@@ -6,7 +6,7 @@ import sd2_clip
 import model_management
 from .ldm.util import instantiate_from_config
 from .ldm.models.autoencoder import AutoencoderKL
-from omegaconf import OmegaConf
+import yaml
 from .cldm import cldm
 from .t2i_adapter import adapter

@@ -726,12 +726,19 @@ def load_clip(ckpt_path, embedding_directory=None):
     return clip

 def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
-    config = OmegaConf.load(config_path)
+    with open(config_path, 'r') as stream:
+        config = yaml.safe_load(stream)
     model_config_params = config['model']['params']
     clip_config = model_config_params['cond_stage_config']
     scale_factor = model_config_params['scale_factor']
     vae_config = model_config_params['first_stage_config']
+
+    fp16 = False
+    if "unet_config" in model_config_params:
+        if "params" in model_config_params["unet_config"]:
+            if "use_fp16" in model_config_params["unet_config"]["params"]:
+                fp16 = model_config_params["unet_config"]["params"]["use_fp16"]
+
     clip = None
     vae = None

@@ -750,9 +757,13 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e
         w.cond_stage_model = clip.cond_stage_model
         load_state_dict_to = [w]

-    model = instantiate_from_config(config.model)
+    model = instantiate_from_config(config["model"])
     sd = load_torch_file(ckpt_path)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)
+
+    if fp16:
+        model = model.half()
+
     return (ModelPatcher(model), clip, vae)

@@ -853,4 +864,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e
     model = instantiate_from_config(model_config)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)
+
+    if fp16:
+        model = model.half()
+
     return (ModelPatcher(model), clip, vae)
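The net effect of the sd.py change is that checkpoint configs are now read with plain PyYAML instead of OmegaConf, so the config behaves like an ordinary dict (config["model"]) rather than an attribute object (config.model), and the loader can check use_fp16 before calling model.half(). A minimal standalone sketch of that pattern (not from the commit; the file name is only illustrative):

import yaml

# "v1-inference.yaml" is an example path; any LDM-style config file works the same way.
with open("v1-inference.yaml", 'r') as stream:
    config = yaml.safe_load(stream)

model_config_params = config['model']['params']
unet_params = model_config_params.get("unet_config", {}).get("params", {})
fp16 = unet_params.get("use_fp16", False)  # same flag the new loader checks before halving the model
print("use_fp16:", fp16)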
@@ -1,7 +1,7 @@
 import os
 from comfy_extras.chainner_models import model_loading
 from comfy.sd import load_torch_file
-import comfy.model_management
+import model_management
 from nodes import filter_files_extensions, recursive_search, supported_ckpt_extensions
 import torch
 import comfy.utils

@@ -38,7 +38,7 @@ class ImageUpscaleWithModel:
     CATEGORY = "image/upscaling"

     def upscale(self, upscale_model, image):
-        device = comfy.model_management.get_torch_device()
+        device = model_management.get_torch_device()
         upscale_model.to(device)
         in_img = image.movedim(-1,-3).to(device)
         s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=128 + 64, tile_y=128 + 64, overlap = 8, upscale_amount=upscale_model.scale)
main.py (35 changed lines)
@@ -1,5 +1,6 @@
 import os
 import sys
+import shutil

 import threading
 import asyncio

@@ -8,9 +9,6 @@ if os.name == "nt":
     import logging
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

-import execution
-import server
-
 if __name__ == "__main__":
     if '--help' in sys.argv:
         print("Valid Command line Arguments:")

@@ -18,6 +16,8 @@ if __name__ == "__main__":
         print("\t--port 8188\t\t\tSet the listen port.")
         print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n")
         print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.")
+        print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.")
+        print("\t--disable-xformers\t\tdisables xformers")
         print()
         print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n")
         print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.")

@@ -31,6 +31,9 @@ if __name__ == "__main__":
         print("disabling upcasting of attention")
         os.environ['ATTN_PRECISION'] = "fp16"

+import execution
+import server
+
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
     while True:

@@ -38,8 +41,8 @@ def prompt_worker(q, server):
         e.execute(item[-2], item[-1])
         q.task_done(item_id, e.outputs)

-async def run(server, address='', port=8188, verbose=True):
-    await asyncio.gather(server.start(address, port, verbose), server.publish_loop())
+async def run(server, address='', port=8188, verbose=True, call_on_start=None):
+    await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())

 def hijack_progress(server):
     from tqdm.auto import tqdm

@@ -51,7 +54,14 @@ def hijack_progress(server):
             return v
     setattr(tqdm, "update", wrapped_func)

+def cleanup_temp():
+    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+    if os.path.exists(temp_dir):
+        shutil.rmtree(temp_dir, ignore_errors=True)
+
 if __name__ == "__main__":
+    cleanup_temp()
+
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
     server = server.PromptServer(loop)

@@ -76,11 +86,22 @@ if __name__ == "__main__":
         except:
             pass

+    if '--quick-test-for-ci' in sys.argv:
+        exit(0)
+
+    call_on_start = None
+    if "--windows-standalone-build" in sys.argv:
+        def startup_server(address, port):
+            import webbrowser
+            webbrowser.open("http://{}:{}".format(address, port))
+        call_on_start = startup_server
+
     if os.name == "nt":
         try:
-            loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print))
+            loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start))
         except KeyboardInterrupt:
             pass
     else:
-        loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print))
+        loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start))
+
+    cleanup_temp()
nodes.py (24 changed lines)
@@ -189,6 +189,7 @@ class VAEEncodeForInpaint:
         y = (pixels.shape[2] // 64) * 64
         mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]

+        pixels = pixels.clone()
         if pixels.shape[1] != x or pixels.shape[2] != y:
             pixels = pixels[:,:x,:y,:]
             mask = mask[:x,:y]

@@ -691,8 +692,8 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
         if t.shape[0] < noise.shape[0]:
             t = torch.cat([t] * noise.shape[0])
         t = t.to(device)
-        if 'control' in p[1]:
-            control_nets += [p[1]['control']]
+        if 'control' in n[1]:
+            control_nets += [n[1]['control']]
         negative_copy += [[t] + n[1:]]

     control_net_models = []

@@ -775,6 +776,7 @@ class KSamplerAdvanced:
 class SaveImage:
     def __init__(self):
         self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
+        self.url_suffix = ""

     @classmethod
     def INPUT_TYPES(s):

@@ -808,6 +810,9 @@ class SaveImage:
             os.mkdir(self.output_dir)
         counter = 1

+        if not os.path.exists(self.output_dir):
+            os.makedirs(self.output_dir)
+
         paths = list()
         for image in images:
             i = 255. * image.cpu().numpy()

@@ -820,10 +825,22 @@ class SaveImage:
                     metadata.add_text(x, json.dumps(extra_pnginfo[x]))
             file = f"{filename_prefix}_{counter:05}_.png"
             img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True)
-            paths.append(file)
+            paths.append(file + self.url_suffix)
             counter += 1
         return { "ui": { "images": paths } }

+class PreviewImage(SaveImage):
+    def __init__(self):
+        self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
+        self.url_suffix = "?type=temp"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"images": ("IMAGE", ), },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
 class LoadImage:
     input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
     @classmethod

@@ -944,6 +961,7 @@ NODE_CLASS_MAPPINGS = {
     "EmptyLatentImage": EmptyLatentImage,
     "LatentUpscale": LatentUpscale,
     "SaveImage": SaveImage,
+    "PreviewImage": PreviewImage,
     "LoadImage": LoadImage,
     "LoadImageMask": LoadImageMask,
     "ImageScale": ImageScale,
@@ -1,7 +1,6 @@
 torch
 torchdiffeq
 torchsde
-omegaconf
 einops
 open-clip-torch
 transformers

@@ -9,3 +8,4 @@ safetensors
 pytorch_lightning
 aiohttp
 accelerate
+pyyaml
@@ -121,7 +121,7 @@ class PromptServer():
         async def view_image(request):
             if "file" in request.match_info:
                 type = request.rel_url.query.get("type", "output")
-                if type != "output" and type != "input":
+                if type not in ["output", "input", "temp"]:
                     return web.Response(status=400)

                 output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type)

@@ -268,7 +268,7 @@ class PromptServer():
             msg = await self.messages.get()
             await self.send(*msg)

-    async def start(self, address, port, verbose=True):
+    async def start(self, address, port, verbose=True, call_on_start=None):
         runner = web.AppRunner(self.app)
         await runner.setup()
         site = web.TCPSite(runner, address, port)

@@ -279,3 +279,6 @@ class PromptServer():
         if verbose:
             print("Starting server\n")
             print("To see the GUI go to: http://{}:{}".format(address, port))
+        if call_on_start is not None:
+            call_on_start(address, port)
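The new call_on_start parameter closes the loop with the main.py change above: once the TCP site is listening, PromptServer.start() invokes the callback with the bound address and port, which the Windows standalone build uses to open a browser. A minimal sketch of the contract (names taken from the diff; this is an illustration, not additional code from the commit):

# Callback signature expected by PromptServer.start() after this change.
def startup_server(address, port):
    import webbrowser
    webbrowser.open("http://{}:{}".format(address, port))

# Inside the running event loop, roughly:
#   await server.start(address, port, verbose=True, call_on_start=startup_server)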
@ -142,7 +142,14 @@ class ComfyApp {
|
|||||||
if (numImages === 1 && !imageIndex) {
|
if (numImages === 1 && !imageIndex) {
|
||||||
this.imageIndex = imageIndex = 0;
|
this.imageIndex = imageIndex = 0;
|
||||||
}
|
}
|
||||||
let shiftY = this.type === "SaveImage" ? 55 : this.imageOffset || 0;
|
|
||||||
|
let shiftY;
|
||||||
|
if (this.imageOffset != null) {
|
||||||
|
shiftY = this.imageOffset;
|
||||||
|
} else {
|
||||||
|
shiftY = this.computeSize()[1];
|
||||||
|
}
|
||||||
|
|
||||||
let dw = this.size[0];
|
let dw = this.size[0];
|
||||||
let dh = this.size[1];
|
let dh = this.size[1];
|
||||||
dh -= shiftY;
|
dh -= shiftY;
|
||||||
@ -284,9 +291,47 @@ class ComfyApp {
|
|||||||
document.addEventListener("drop", async (event) => {
|
document.addEventListener("drop", async (event) => {
|
||||||
event.preventDefault();
|
event.preventDefault();
|
||||||
event.stopPropagation();
|
event.stopPropagation();
|
||||||
const file = event.dataTransfer.files[0];
|
|
||||||
await this.handleFile(file);
|
const n = this.dragOverNode;
|
||||||
|
this.dragOverNode = null;
|
||||||
|
// Node handles file drop, we dont use the built in onDropFile handler as its buggy
|
||||||
|
// If you drag multiple files it will call it multiple times with the same file
|
||||||
|
if (n && n.onDragDrop && (await n.onDragDrop(event))) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
await this.handleFile(event.dataTransfer.files[0]);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Always clear over node on drag leave
|
||||||
|
this.canvasEl.addEventListener("dragleave", async () => {
|
||||||
|
if (this.dragOverNode) {
|
||||||
|
this.dragOverNode = null;
|
||||||
|
this.graph.setDirtyCanvas(false, true);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Add handler for dropping onto a specific node
|
||||||
|
this.canvasEl.addEventListener(
|
||||||
|
"dragover",
|
||||||
|
(e) => {
|
||||||
|
this.canvas.adjustMouseEvent(e);
|
||||||
|
const node = this.graph.getNodeOnPos(e.canvasX, e.canvasY);
|
||||||
|
if (node) {
|
||||||
|
if (node.onDragOver && node.onDragOver(e)) {
|
||||||
|
this.dragOverNode = node;
|
||||||
|
|
||||||
|
// dragover event is fired very frequently, run this on an animation frame
|
||||||
|
requestAnimationFrame(() => {
|
||||||
|
this.graph.setDirtyCanvas(false, true);
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
this.dragOverNode = null;
|
||||||
|
},
|
||||||
|
false
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -314,15 +359,22 @@ class ComfyApp {
     }

     /**
-     * Draws currently executing node highlight and progress bar
+     * Draws node highlights (executing, drag drop) and progress bar
      */
-    #addDrawNodeProgressHandler() {
+    #addDrawNodeHandler() {
         const orig = LGraphCanvas.prototype.drawNodeShape;
         const self = this;
         LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) {
             const res = orig.apply(this, arguments);

-            if (node.id + "" === self.runningNodeId) {
+            let color = null;
+            if (node.id === +self.runningNodeId) {
+                color = "#0f0";
+            } else if (self.dragOverNode && node.id === self.dragOverNode.id) {
+                color = "dodgerblue";
+            }
+
+            if (color) {
                 const shape = node._shape || node.constructor.shape || LiteGraph.ROUND_SHAPE;
                 ctx.lineWidth = 1;
                 ctx.globalAlpha = 0.8;
@@ -348,7 +400,7 @@ class ComfyApp {
                     );
                 else if (shape == LiteGraph.CIRCLE_SHAPE)
                     ctx.arc(size[0] * 0.5, size[1] * 0.5, size[0] * 0.5 + 6, 0, Math.PI * 2);
-                ctx.strokeStyle = "#0f0";
+                ctx.strokeStyle = color;
                 ctx.stroke();
                 ctx.strokeStyle = fgcolor;
                 ctx.globalAlpha = 1;
@@ -398,6 +450,15 @@ class ComfyApp {
         api.init();
     }

+    #addKeyboardHandler() {
+        window.addEventListener("keydown", (e) => {
+            // Queue prompt using ctrl or command + enter
+            if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) {
+                this.queuePrompt(e.shiftKey ? -1 : 0);
+            }
+        });
+    }
+
     /**
      * Loads all extensions from the API into the window
      */
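Note (not part of the diff): with this handler, Ctrl/Cmd+Enter queues the current prompt normally (number 0), while holding Shift passes -1, the same value the "Queue Front" button uses later in this change. A hypothetical extra shortcut following the same pattern might look like:

    // Hypothetical: save the workflow to localStorage on Ctrl/Cmd+S,
    // reusing the same "workflow" key as the autosave interval in app.js.
    window.addEventListener("keydown", (e) => {
        if ((e.ctrlKey || e.metaKey) && e.key === "s") {
            e.preventDefault();
            localStorage.setItem("workflow", JSON.stringify(app.graph.serialize()));
        }
    });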
@@ -419,7 +480,7 @@ class ComfyApp {
         await this.#loadExtensions();

         // Create and mount the LiteGraph in the DOM
-        const canvasEl = Object.assign(document.createElement("canvas"), { id: "graph-canvas" });
+        const canvasEl = (this.canvasEl = Object.assign(document.createElement("canvas"), { id: "graph-canvas" }));
         document.body.prepend(canvasEl);

         this.graph = new LGraph();
@@ -460,10 +521,11 @@ class ComfyApp {
         // Save current workflow automatically
         setInterval(() => localStorage.setItem("workflow", JSON.stringify(this.graph.serialize())), 1000);

-        this.#addDrawNodeProgressHandler();
+        this.#addDrawNodeHandler();
         this.#addApiUpdateHandlers();
         this.#addDropHandler();
         this.#addPasteHandler();
+        this.#addKeyboardHandler();

         await this.#invokeExtensionsAsync("setup");
     }
@@ -497,7 +559,11 @@ class ComfyApp {

             if (Array.isArray(type)) {
                 // Enums e.g. latent rotation
-                this.addWidget("combo", inputName, type[0], () => {}, { values: type });
+                let defaultValue = type[0];
+                if (inputData[1] && inputData[1].default) {
+                    defaultValue = inputData[1].default;
+                }
+                this.addWidget("combo", inputName, defaultValue, () => {}, { values: type });
             } else if (`${type}:${inputName}` in widgets) {
                 // Support custom widgets by Type:Name
                 Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {});
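Note (illustrative, not part of the diff): inputData here appears to hold an input's type plus an optional options object (inputData[1]), so an enum input can now declare its own default choice. A hypothetical example of the shape this code handles:

    // Hypothetical enum input definition: list of choices plus an options object with a default.
    const inputData = [["enable", "disable"], { default: "disable" }];
    const type = inputData[0]; // Array.isArray(type) === true, so a combo widget is created
    // With the change above, the combo starts at "disable"; previously it always used type[0].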
@@ -641,31 +707,33 @@ class ComfyApp {
         return { workflow, output };
     }

-    async queuePrompt(number) {
-        const p = await this.graphToPrompt();
+    async queuePrompt(number, batchCount = 1) {
+        for (let i = 0; i < batchCount; i++) {
+            const p = await this.graphToPrompt();

             try {
                 await api.queuePrompt(number, p);
             } catch (error) {
                 this.ui.dialog.show(error.response || error.toString());
                 return;
             }

             for (const n of p.workflow.nodes) {
                 const node = graph.getNodeById(n.id);
                 if (node.widgets) {
                     for (const widget of node.widgets) {
                         // Allow widgets to run callbacks after a prompt has been queued
                         // e.g. random seed after every gen
                         if (widget.afterQueued) {
                             widget.afterQueued();
                         }
                     }
                 }
             }

             this.canvas.draw(true, true);
             await this.ui.queue.update();
+        }
     }

     /**
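Note (illustrative, not part of the diff): afterQueued is the per-widget hook described in the comments above; it runs once for every prompt queued, so with batchCount > 1 it now fires after each iteration of the loop. A hypothetical widget using the hook:

    // Hypothetical: a seed widget that picks a new random value after each queued prompt,
    // so every prompt in a batch gets a different seed.
    const seedWidget = node.addWidget("number", "seed", 0, () => {});
    seedWidget.afterQueued = () => {
        seedWidget.value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
    };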
@@ -231,6 +231,7 @@ export class ComfyUI {
         this.dialog = new ComfyDialog();
         this.settings = new ComfySettingsDialog();

+        this.batchCount = 1;
         this.queue = new ComfyList("Queue");
         this.history = new ComfyList("History");

@@ -254,9 +255,35 @@ export class ComfyUI {
                 $el("span", { $: (q) => (this.queueSize = q) }),
                 $el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }),
             ]),
-            $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0) }),
+            $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }),
+            $el("div", {}, [
+                $el("label", { innerHTML: "Extra options"}, [
+                    $el("input", { type: "checkbox",
+                        onchange: (i) => {
+                            document.getElementById('extraOptions').style.display = i.srcElement.checked ? "block" : "none";
+                            this.batchCount = i.srcElement.checked ? document.getElementById('batchCountInputRange').value : 1;
+                        }
+                    })
+                ])
+            ]),
+            $el("div", { id: "extraOptions", style: { width: "100%", display: "none" }}, [
+                $el("label", { innerHTML: "Batch count" }, [
+                    $el("input", { id: "batchCountInputNumber", type: "number", value: this.batchCount, min: "1", style: { width: "35%", "margin-left": "0.4em" },
+                        oninput: (i) => {
+                            this.batchCount = i.target.value;
+                            document.getElementById('batchCountInputRange').value = this.batchCount;
+                        }
+                    }),
+                    $el("input", { id: "batchCountInputRange", type: "range", min: "1", max: "100", value: this.batchCount,
+                        oninput: (i) => {
+                            this.batchCount = i.srcElement.value;
+                            document.getElementById('batchCountInputNumber').value = i.srcElement.value;
+                        }
+                    }),
+                ]),
+            ]),
             $el("div.comfy-menu-btns", [
-                $el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1) }),
+                $el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
                 $el("button", {
                     $: (b) => (this.queue.button = b),
                     textContent: "View Queue",
@@ -132,7 +132,7 @@ export const ComfyWidgets = {

         function showImage(name) {
             // Position the image somewhere sensible
-            if(!node.imageOffset) {
+            if (!node.imageOffset) {
                 node.imageOffset = uploadWidget.last_y ? uploadWidget.last_y + 25 : 75;
             }

@@ -162,6 +162,36 @@ export const ComfyWidgets = {
             }
         });

+        async function uploadFile(file, updateNode) {
+            try {
+                // Wrap file in formdata so it includes filename
+                const body = new FormData();
+                body.append("image", file);
+                const resp = await fetch("/upload/image", {
+                    method: "POST",
+                    body,
+                });
+
+                if (resp.status === 200) {
+                    const data = await resp.json();
+                    // Add the file as an option and update the widget value
+                    if (!imageWidget.options.values.includes(data.name)) {
+                        imageWidget.options.values.push(data.name);
+                    }
+
+                    if (updateNode) {
+                        showImage(data.name);
+
+                        imageWidget.value = data.name;
+                    }
+                } else {
+                    alert(resp.status + " - " + resp.statusText);
+                }
+            } catch (error) {
+                alert(error);
+            }
+        }
+
         const fileInput = document.createElement("input");
         Object.assign(fileInput, {
             type: "file",
@@ -169,30 +199,7 @@ export const ComfyWidgets = {
             style: "display: none",
             onchange: async () => {
                 if (fileInput.files.length) {
-                    try {
-                        // Wrap file in formdata so it includes filename
-                        const body = new FormData();
-                        body.append("image", fileInput.files[0]);
-                        const resp = await fetch("/upload/image", {
-                            method: "POST",
-                            body,
-                        });
-
-                        if (resp.status === 200) {
-                            const data = await resp.json();
-                            showImage(data.name);
-
-                            // Add the file as an option and update the widget value
-                            if (!imageWidget.options.values.includes(data.name)) {
-                                imageWidget.options.values.push(data.name);
-                            }
-                            imageWidget.value = data.name;
-                        } else {
-                            alert(resp.status + " - " + resp.statusText);
-                        }
-                    } catch (error) {
-                        alert(error);
-                    }
+                    await uploadFile(fileInput.files[0], true);
                 }
             },
         });
@@ -204,6 +211,30 @@ export const ComfyWidgets = {
         });
         uploadWidget.serialize = false;

+        // Add handler to check if an image is being dragged over our node
+        node.onDragOver = function (e) {
+            if (e.dataTransfer && e.dataTransfer.items) {
+                const image = [...e.dataTransfer.items].find((f) => f.kind === "file" && f.type.startsWith("image/"));
+                return !!image;
+            }
+
+            return false;
+        };
+
+        // On drop upload files
+        node.onDragDrop = function (e) {
+            console.log("onDragDrop called");
+            let handled = false;
+            for (const file of e.dataTransfer.files) {
+                if (file.type.startsWith("image/")) {
+                    uploadFile(file, !handled); // Dont await these, any order is fine, only update on first one
+                    handled = true;
+                }
+            }
+
+            return handled;
+        };
+
         return { widget: uploadWidget };
     },
 };
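Note (not part of the diff): combined with the app-level handlers earlier in this change, dragging an image over an image-upload node highlights it (onDragOver returns true for image files), and dropping uploads each file through /upload/image, adding the returned name to the image widget and showing the first one. A minimal sketch of the same upload call, assuming a running local server and an async context:

    // Illustrative upload, mirroring what uploadFile() above does.
    // `file` is assumed to be a File object, e.g. taken from an <input type="file">.
    const body = new FormData();
    body.append("image", file);
    const resp = await fetch("/upload/image", { method: "POST", body });
    if (resp.status === 200) {
        const data = await resp.json();
        console.log("stored as", data.name); // the server responds with the saved filename
    }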