From 0f3ba7482f9784b29c4f269f29b8bb8341379653 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Mar 2023 15:44:16 -0400 Subject: [PATCH 01/34] Xformers is now properly disabled when --cpu used. Added --windows-standalone-build option, currently it only opens makes the code open up comfyui in the browser. --- comfy/ldm/modules/attention.py | 5 ++--- comfy/ldm/modules/diffusionmodules/model.py | 6 ++---- comfy/model_management.py | 15 +++++++++++++++ main.py | 15 +++++++++++---- server.py | 5 ++++- 5 files changed, 34 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 692952f3..a6d40e89 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -14,9 +14,8 @@ import model_management try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True except: - XFORMERS_IS_AVAILBLE = False + pass # CrossAttn precision handling import os @@ -481,7 +480,7 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) import sys -if XFORMERS_IS_AVAILBLE == False or "--disable-xformers" in sys.argv: +if model_management.xformers_enabled() == False: if "--use-split-cross-attention" in sys.argv: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 18f7a8b0..15f35b91 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -12,10 +12,8 @@ import model_management try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True except: - XFORMERS_IS_AVAILBLE = False - print("No module 'xformers'. Proceeding without it.") + pass try: OOM_EXCEPTION = torch.cuda.OutOfMemoryError @@ -315,7 +313,7 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' - if XFORMERS_IS_AVAILBLE and attn_type == "vanilla": + if model_management.xformers_enabled() and attn_type == "vanilla": attn_type = "vanilla-xformers" print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": diff --git a/comfy/model_management.py b/comfy/model_management.py index 4b061c32..c1a8f5a2 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -31,6 +31,16 @@ try: except: pass +try: + import xformers + import xformers.ops + XFORMERS_IS_AVAILBLE = True +except: + XFORMERS_IS_AVAILBLE = False + +if "--disable-xformers" in sys.argv: + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU if "--lowvram" in sys.argv: @@ -159,6 +169,11 @@ def get_autocast_device(dev): return dev.type return "cuda" +def xformers_enabled(): + if vram_state == CPU: + return False + return XFORMERS_IS_AVAILBLE + def get_free_memory(dev=None, torch_free_too=False): if dev is None: dev = get_torch_device() diff --git a/main.py b/main.py index ca8674b5..c3d96039 100644 --- a/main.py +++ b/main.py @@ -38,8 +38,8 @@ def prompt_worker(q, server): e.execute(item[-2], item[-1]) q.task_done(item_id, e.outputs) -async def run(server, address='', port=8188, verbose=True): - await asyncio.gather(server.start(address, port, verbose), server.publish_loop()) +async def run(server, address='', port=8188, verbose=True, call_on_start=None): + await 
asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) def hijack_progress(server): from tqdm.auto import tqdm @@ -76,11 +76,18 @@ if __name__ == "__main__": except: pass + call_on_start = None + if "--windows-standalone-build" in sys.argv: + def startup_server(address, port): + import webbrowser + webbrowser.open("http://{}:{}".format(address, port)) + call_on_start = startup_server + if os.name == "nt": try: - loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print)) + loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) except KeyboardInterrupt: pass else: - loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print)) + loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) diff --git a/server.py b/server.py index 5aba5761..a29d8597 100644 --- a/server.py +++ b/server.py @@ -260,7 +260,7 @@ class PromptServer(): msg = await self.messages.get() await self.send(*msg) - async def start(self, address, port, verbose=True): + async def start(self, address, port, verbose=True, call_on_start=None): runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, address, port) @@ -271,3 +271,6 @@ class PromptServer(): if verbose: print("Starting server\n") print("To see the GUI go to: http://{}:{}".format(address, port)) + if call_on_start is not None: + call_on_start(address, port) + From f0d8fdc0d6bb664d60aff82cdc7614e828b2e9d9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 01:34:09 -0400 Subject: [PATCH 02/34] Add an automatic standalone windows release for nvidia and cpu. --- .ci/setup_windows_zip.ps1 | 25 +++++++ .ci/update_windows/update.py | 65 +++++++++++++++++++ .ci/update_windows/update_all.bat | 3 + .ci/update_windows/update_comfyui_only.bat | 2 + .../README_VERY_IMPORTANT.txt | 22 +++++++ .ci/windows_base_files/run_cpu.bat | 2 + .ci/windows_base_files/run_nvidia_gpu.bat | 2 + .github/workflows/windows_release.yml | 33 ++++++++++ 8 files changed, 154 insertions(+) create mode 100755 .ci/setup_windows_zip.ps1 create mode 100755 .ci/update_windows/update.py create mode 100755 .ci/update_windows/update_all.bat create mode 100755 .ci/update_windows/update_comfyui_only.bat create mode 100755 .ci/windows_base_files/README_VERY_IMPORTANT.txt create mode 100755 .ci/windows_base_files/run_cpu.bat create mode 100755 .ci/windows_base_files/run_nvidia_gpu.bat create mode 100644 .github/workflows/windows_release.yml diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 new file mode 100755 index 00000000..4bd2f0b4 --- /dev/null +++ b/.ci/setup_windows_zip.ps1 @@ -0,0 +1,25 @@ +Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip +Expand-Archive python_embeded.zip +cd python_embeded +Add-Content -Path .\python310._pth -Value 'import site' +Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py +.\python.exe get-pip.py +.\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth +cd .. 
+ + +mkdir ComfyUI_windows_portable +mv python_embeded ComfyUI_windows_portable +mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + +cd ComfyUI_windows_portable + +mkdir update +cp ComfyUI/.ci/update_windows/* ./update/ +cp ComfyUI/.ci/windows_base_files/* ./ + +cd .. + +& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable +mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py new file mode 100755 index 00000000..c09f29a8 --- /dev/null +++ b/.ci/update_windows/update.py @@ -0,0 +1,65 @@ +import pygit2 +from datetime import datetime +import sys + +def pull(repo, remote_name='origin', branch='master'): + for remote in repo.remotes: + if remote.name == remote_name: + remote.fetch() + remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target + merge_result, _ = repo.merge_analysis(remote_master_id) + # Up to date, do nothing + if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE: + return + # We can just fastforward + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD: + repo.checkout_tree(repo.get(remote_master_id)) + try: + master_ref = repo.lookup_reference('refs/heads/%s' % (branch)) + master_ref.set_target(remote_master_id) + except KeyError: + repo.create_branch(branch, repo.get(remote_master_id)) + repo.head.set_target(remote_master_id) + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL: + repo.merge(remote_master_id) + + if repo.index.conflicts is not None: + for conflict in repo.index.conflicts: + print('Conflicts found in:', conflict[0].path) + raise AssertionError('Conflicts, ahhhhh!!') + + user = repo.default_signature + tree = repo.index.write_tree() + commit = repo.create_commit('HEAD', + user, + user, + 'Merge!', + tree, + [repo.head.target, remote_master_id]) + # We need to do this or git CLI will think we are still merging. 
+ repo.state_cleanup() + else: + raise AssertionError('Unknown merge analysis result') + + +repo = pygit2.Repository(str(sys.argv[1])) +ident = pygit2.Signature('comfyui', 'comfy@ui') +try: + print("stashing current changes") + repo.stash(ident) +except KeyError: + print("nothing to stash") +backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) +print("creating backup branch: {}".format(backup_branch_name)) +repo.branches.local.create(backup_branch_name, repo.head.peel()) + +print("checking out master branch") +branch = repo.lookup_branch('master') +ref = repo.lookup_reference(branch.name) +repo.checkout(ref) + +print("pulling latest changes") +pull(repo) + +print("Done!") + diff --git a/.ci/update_windows/update_all.bat b/.ci/update_windows/update_all.bat new file mode 100755 index 00000000..b7308550 --- /dev/null +++ b/.ci/update_windows/update_all.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/update_windows/update_comfyui_only.bat b/.ci/update_windows/update_comfyui_only.bat new file mode 100755 index 00000000..60d1e694 --- /dev/null +++ b/.ci/update_windows/update_comfyui_only.bat @@ -0,0 +1,2 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +pause diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt new file mode 100755 index 00000000..3c73a27a --- /dev/null +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -0,0 +1,22 @@ +HOW TO RUN: + +if you have a NVIDIA gpu: + +run_nvidia_gpu.bat + + + +To run it in slow CPU mode: + +run_cpu.bat + + + +IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints + +You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt + + + +To update only the ComfyUI code: update\update_comfyui_only.bat +To update ComfyUI with the python dependencies: update\update_all.bat diff --git a/.ci/windows_base_files/run_cpu.bat b/.ci/windows_base_files/run_cpu.bat new file mode 100755 index 00000000..c3ba4172 --- /dev/null +++ b/.ci/windows_base_files/run_cpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build +pause diff --git a/.ci/windows_base_files/run_nvidia_gpu.bat b/.ci/windows_base_files/run_nvidia_gpu.bat new file mode 100755 index 00000000..274d7c94 --- /dev/null +++ b/.ci/windows_base_files/run_nvidia_gpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build +pause diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml new file mode 100644 index 00000000..bc94a079 --- /dev/null +++ b/.github/workflows/windows_release.yml @@ -0,0 +1,33 @@ +name: "Windows Release" + +on: + push: + branches: + - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. 
+ cp ComfyUI/.ci/setup_windows_zip.ps1 ./ + cp -r ComfyUI ComfyUI_copy + .\setup_windows_zip.ps1 + ls + + - uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + automatic_release_tag: "latest" + prerelease: true + title: "ComfyUI Standalone Portable Windows Build (For NVIDIA or CPU only)" + files: ComfyUI_windows_portable_nvidia_or_cpu.7z From 17636d083fd0d5fcc4a8566be4296ee3040ca4cb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 02:54:30 -0400 Subject: [PATCH 03/34] Probably safer to manually trigger builds instead of every push. --- .github/workflows/windows_release.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index bc94a079..a6cb883b 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -1,9 +1,10 @@ name: "Windows Release" on: - push: - branches: - - master + workflow_dispatch: +# push: +# branches: +# - master jobs: build: From dc24ad15545f4ea169894d01f64d497ebb267377 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:42:02 -0400 Subject: [PATCH 04/34] Add download link of portable build to README. --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 75d75ada..5870f852 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,16 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git # Installing +### Windows + +There is a portable standalone build for Windows that should work on for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). + +[Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) + +Just download, extract and run. Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints + +## Manual Install (Windows, Linux) + Git clone this repo. Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints @@ -39,7 +49,7 @@ Put your VAE in: models/vae At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10. -### AMD +### AMD (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed: ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` From 6fd8377429df5983d00147d80757974da599a793 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:43:07 -0400 Subject: [PATCH 05/34] Typo. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5870f852..c34fdf69 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git ### Windows -There is a portable standalone build for Windows that should work on for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). +There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). 
[Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) From 010bbb9c5ae1b44ef000a9fec96c01faf90cda67 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:44:29 -0400 Subject: [PATCH 06/34] Readme style adjustments. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c34fdf69..7366fa25 100644 --- a/README.md +++ b/README.md @@ -31,11 +31,11 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git # Installing -### Windows +## Windows There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). -[Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) +### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) Just download, extract and run. Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints From ebfa749b7b58767687f3106e484339e7299aeb78 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:47:12 -0400 Subject: [PATCH 07/34] Readme improvement. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7366fa25..b49d0256 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ There is a portable standalone build for Windows that should work for running on ### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) -Just download, extract and run. Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints +Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints ## Manual Install (Windows, Linux) From a256a2abdee341a15fcbd4612a8b1cf206f9195b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 11:36:48 -0400 Subject: [PATCH 08/34] --disable-xformers should not even try to import xformers. 
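For context, a minimal sketch of the gating pattern this patch moves to: the availability check lives in model_management and the per-module try/except imports become conditional imports. This is illustrative rather than the exact code; the real xformers_enabled() also returns False when running on CPU, and the variable spelling below matches the one used in comfy/model_management.py.

    import sys

    XFORMERS_IS_AVAILBLE = False   # spelling matches comfy/model_management.py
    if "--disable-xformers" not in sys.argv:
        try:
            import xformers
            import xformers.ops
            XFORMERS_IS_AVAILBLE = True
        except ImportError:
            pass  # optional dependency; the other attention implementations are used instead

    def xformers_enabled():
        # attention.py / model.py only run "import xformers" when this returns True
        return XFORMERS_IS_AVAILBLE
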
--- comfy/ldm/modules/attention.py | 5 ++--- comfy/ldm/modules/diffusionmodules/model.py | 4 +--- comfy/model_management.py | 15 ++++++++------- main.py | 7 ++++--- 4 files changed, 15 insertions(+), 16 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index a6d40e89..f78a1a6c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -11,11 +11,10 @@ from .sub_quadratic_attention import efficient_dot_product_attention import model_management -try: + +if model_management.xformers_enabled(): import xformers import xformers.ops -except: - pass # CrossAttn precision handling import os diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 15f35b91..fcbee29f 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -9,11 +9,9 @@ from typing import Optional, Any from ldm.modules.attention import MemoryEfficientCrossAttention import model_management -try: +if model_management.xformers_enabled(): import xformers import xformers.ops -except: - pass try: OOM_EXCEPTION = torch.cuda.OutOfMemoryError diff --git a/comfy/model_management.py b/comfy/model_management.py index c1a8f5a2..7365beef 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -31,15 +31,16 @@ try: except: pass -try: - import xformers - import xformers.ops - XFORMERS_IS_AVAILBLE = True -except: - XFORMERS_IS_AVAILBLE = False - if "--disable-xformers" in sys.argv: XFORMERS_IS_AVAILBLE = False +else: + try: + import xformers + import xformers.ops + XFORMERS_IS_AVAILBLE = True + except: + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU diff --git a/main.py b/main.py index c3d96039..fc37781c 100644 --- a/main.py +++ b/main.py @@ -8,9 +8,6 @@ if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -import execution -import server - if __name__ == "__main__": if '--help' in sys.argv: print("Valid Command line Arguments:") @@ -18,6 +15,7 @@ if __name__ == "__main__": print("\t--port 8188\t\t\tSet the listen port.") print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") + print("\t--disable-xformers\t\tdisables xformers") print() print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.") @@ -31,6 +29,9 @@ if __name__ == "__main__": print("disabling upcasting of attention") os.environ['ATTN_PRECISION'] = "fp16" +import execution +import server + def prompt_worker(q, server): e = execution.PromptExecutor(server) while True: From 83f23f82b8a4ac041dd8262ee12951ac6826162f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 12:25:19 -0400 Subject: [PATCH 09/34] Add pytorch attention support to VAE. 
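The core of the change is a VAE attention block that flattens the (B, C, H, W) feature map into (B, H*W, C) tokens and runs torch.nn.functional.scaled_dot_product_attention from PyTorch 2.0 instead of xformers. A stripped-down sketch of that forward pass, assuming 1x1 conv projections (q/k/v/proj_out) and a group-norm layer as in the diff below (illustrative, not the exact module):

    import torch
    import torch.nn.functional as F
    from einops import rearrange

    def sdp_vae_attention(x, norm, q_conv, k_conv, v_conv, proj_out):
        # x: (B, C, H, W) latent feature map inside the VAE
        h_ = norm(x)
        q, k, v = q_conv(h_), k_conv(h_), v_conv(h_)
        B, C, H, W = q.shape
        # flatten spatial positions into a token dimension: (B, H*W, C)
        q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
        out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
        out = rearrange(out, 'b (h w) c -> b c h w', h=H, w=W)
        # residual connection around the attention block
        return x + proj_out(out)
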
--- comfy/ldm/modules/attention.py | 22 +++----- comfy/ldm/modules/diffusionmodules/model.py | 62 +++++++++++++++++++++ comfy/model_management.py | 11 ++++ main.py | 1 + 4 files changed, 83 insertions(+), 13 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index f78a1a6c..e97badd0 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -479,23 +479,19 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) import sys -if model_management.xformers_enabled() == False: +if model_management.xformers_enabled(): + print("Using xformers cross attention") + CrossAttention = MemoryEfficientCrossAttention +elif model_management.pytorch_attention_enabled(): + print("Using pytorch cross attention") + CrossAttention = CrossAttentionPytorch +else: if "--use-split-cross-attention" in sys.argv: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx else: - if "--use-pytorch-cross-attention" in sys.argv: - print("Using pytorch cross attention") - torch.backends.cuda.enable_math_sdp(False) - torch.backends.cuda.enable_flash_sdp(True) - torch.backends.cuda.enable_mem_efficient_sdp(True) - CrossAttention = CrossAttentionPytorch - else: - print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") - CrossAttention = CrossAttentionBirchSan -else: - print("Using xformers cross attention") - CrossAttention = MemoryEfficientCrossAttention + print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") + CrossAttention = CrossAttentionBirchSan class BasicTransformerBlock(nn.Module): diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index fcbee29f..129b86a7 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -299,6 +299,64 @@ class MemoryEfficientAttnBlock(nn.Module): out = self.proj_out(out) return x+out +class MemoryEfficientAttnBlockPytorch(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.attention_op: Optional[Any] = None + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + B, C, H, W = q.shape + q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v)) + + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(B, t.shape[1], 1, C) + .permute(0, 2, 1, 3) + .reshape(B * 1, t.shape[1], C) + .contiguous(), + (q, k, v), + ) + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + + out = ( + out.unsqueeze(0) + .reshape(B, 1, out.shape[1], C) + .permute(0, 2, 1, 3) + .reshape(B, out.shape[1], C) + ) + out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C) + out = self.proj_out(out) + return x+out class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): def forward(self, x, 
context=None, mask=None): @@ -313,6 +371,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' if model_management.xformers_enabled() and attn_type == "vanilla": attn_type = "vanilla-xformers" + if model_management.pytorch_attention_enabled() and attn_type == "vanilla": + attn_type = "vanilla-pytorch" print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": assert attn_kwargs is None @@ -320,6 +380,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): elif attn_type == "vanilla-xformers": print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...") return MemoryEfficientAttnBlock(in_channels) + elif attn_type == "vanilla-pytorch": + return MemoryEfficientAttnBlockPytorch(in_channels) elif type == "memory-efficient-cross-attn": attn_kwargs["query_dim"] = in_channels return MemoryEfficientCrossAttentionWrapper(**attn_kwargs) diff --git a/comfy/model_management.py b/comfy/model_management.py index 7365beef..482b1add 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -41,6 +41,14 @@ else: except: XFORMERS_IS_AVAILBLE = False +ENABLE_PYTORCH_ATTENTION = False +if "--use-pytorch-cross-attention" in sys.argv: + torch.backends.cuda.enable_math_sdp(True) + torch.backends.cuda.enable_flash_sdp(True) + torch.backends.cuda.enable_mem_efficient_sdp(True) + ENABLE_PYTORCH_ATTENTION = True + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU @@ -175,6 +183,9 @@ def xformers_enabled(): return False return XFORMERS_IS_AVAILBLE +def pytorch_attention_enabled(): + return ENABLE_PYTORCH_ATTENTION + def get_free_memory(dev=None, torch_free_too=False): if dev is None: dev = get_torch_device() diff --git a/main.py b/main.py index fc37781c..b2b3f1c4 100644 --- a/main.py +++ b/main.py @@ -15,6 +15,7 @@ if __name__ == "__main__": print("\t--port 8188\t\t\tSet the listen port.") print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") + print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.") print("\t--disable-xformers\t\tdisables xformers") print() print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") From c672000954d3040d6c049303ed83edd8ab0a6079 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 12:47:12 -0400 Subject: [PATCH 10/34] Add github workflow for a build with pytorch nightly. 
--- .ci/nightly/update_windows/update.py | 65 +++++++++++++++++++ .ci/nightly/update_windows/update_all.bat | 3 + .../update_windows/update_comfyui_only.bat | 2 + .../README_VERY_IMPORTANT.txt | 22 +++++++ .ci/nightly/windows_base_files/run_cpu.bat | 2 + .../windows_base_files/run_nvidia_gpu.bat | 2 + .ci/setup_windows_zip_nightly_pytorch.ps1 | 26 ++++++++ .../windows_release_nightly_pytorch.yml | 34 ++++++++++ 8 files changed, 156 insertions(+) create mode 100755 .ci/nightly/update_windows/update.py create mode 100755 .ci/nightly/update_windows/update_all.bat create mode 100755 .ci/nightly/update_windows/update_comfyui_only.bat create mode 100755 .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt create mode 100755 .ci/nightly/windows_base_files/run_cpu.bat create mode 100755 .ci/nightly/windows_base_files/run_nvidia_gpu.bat create mode 100755 .ci/setup_windows_zip_nightly_pytorch.ps1 create mode 100644 .github/workflows/windows_release_nightly_pytorch.yml diff --git a/.ci/nightly/update_windows/update.py b/.ci/nightly/update_windows/update.py new file mode 100755 index 00000000..c09f29a8 --- /dev/null +++ b/.ci/nightly/update_windows/update.py @@ -0,0 +1,65 @@ +import pygit2 +from datetime import datetime +import sys + +def pull(repo, remote_name='origin', branch='master'): + for remote in repo.remotes: + if remote.name == remote_name: + remote.fetch() + remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target + merge_result, _ = repo.merge_analysis(remote_master_id) + # Up to date, do nothing + if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE: + return + # We can just fastforward + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD: + repo.checkout_tree(repo.get(remote_master_id)) + try: + master_ref = repo.lookup_reference('refs/heads/%s' % (branch)) + master_ref.set_target(remote_master_id) + except KeyError: + repo.create_branch(branch, repo.get(remote_master_id)) + repo.head.set_target(remote_master_id) + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL: + repo.merge(remote_master_id) + + if repo.index.conflicts is not None: + for conflict in repo.index.conflicts: + print('Conflicts found in:', conflict[0].path) + raise AssertionError('Conflicts, ahhhhh!!') + + user = repo.default_signature + tree = repo.index.write_tree() + commit = repo.create_commit('HEAD', + user, + user, + 'Merge!', + tree, + [repo.head.target, remote_master_id]) + # We need to do this or git CLI will think we are still merging. 
+ repo.state_cleanup() + else: + raise AssertionError('Unknown merge analysis result') + + +repo = pygit2.Repository(str(sys.argv[1])) +ident = pygit2.Signature('comfyui', 'comfy@ui') +try: + print("stashing current changes") + repo.stash(ident) +except KeyError: + print("nothing to stash") +backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) +print("creating backup branch: {}".format(backup_branch_name)) +repo.branches.local.create(backup_branch_name, repo.head.peel()) + +print("checking out master branch") +branch = repo.lookup_branch('master') +ref = repo.lookup_reference(branch.name) +repo.checkout(ref) + +print("pulling latest changes") +pull(repo) + +print("Done!") + diff --git a/.ci/nightly/update_windows/update_all.bat b/.ci/nightly/update_windows/update_all.bat new file mode 100755 index 00000000..c5e0c6be --- /dev/null +++ b/.ci/nightly/update_windows/update_all.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/update_windows/update_comfyui_only.bat b/.ci/nightly/update_windows/update_comfyui_only.bat new file mode 100755 index 00000000..60d1e694 --- /dev/null +++ b/.ci/nightly/update_windows/update_comfyui_only.bat @@ -0,0 +1,2 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +pause diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt new file mode 100755 index 00000000..3c73a27a --- /dev/null +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -0,0 +1,22 @@ +HOW TO RUN: + +if you have a NVIDIA gpu: + +run_nvidia_gpu.bat + + + +To run it in slow CPU mode: + +run_cpu.bat + + + +IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints + +You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt + + + +To update only the ComfyUI code: update\update_comfyui_only.bat +To update ComfyUI with the python dependencies: update\update_all.bat diff --git a/.ci/nightly/windows_base_files/run_cpu.bat b/.ci/nightly/windows_base_files/run_cpu.bat new file mode 100755 index 00000000..c3ba4172 --- /dev/null +++ b/.ci/nightly/windows_base_files/run_cpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build +pause diff --git a/.ci/nightly/windows_base_files/run_nvidia_gpu.bat b/.ci/nightly/windows_base_files/run_nvidia_gpu.bat new file mode 100755 index 00000000..8ee2f340 --- /dev/null +++ b/.ci/nightly/windows_base_files/run_nvidia_gpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention +pause diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 new file mode 100755 index 00000000..edc3024a --- /dev/null +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -0,0 +1,26 @@ +Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip +Expand-Archive python_embeded.zip +rm python_embeded.zip +cd python_embeded +Add-Content -Path .\python310._pth -Value 'import site' +Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py +.\python.exe get-pip.py +.\python.exe 
-s -m pip install torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 +"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth +cd .. + + +mkdir ComfyUI_windows_portable +mv python_embeded ComfyUI_windows_portable_nightly_pytorch +mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI + +cd ComfyUI_windows_portable_nightly_pytorch + +mkdir update +cp ComfyUI/.ci/nightly/update_windows/* ./update/ +cp ComfyUI/.ci/nightly/windows_base_files/* ./ + +cd .. + +& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch +mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml new file mode 100644 index 00000000..f958a4ab --- /dev/null +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -0,0 +1,34 @@ +name: "Windows Release Nightly pytorch" + +on: + workflow_dispatch: +# push: +# branches: +# - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. + cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ + cp -r ComfyUI ComfyUI_copy + .\setup_windows_zip_nightly_pytorch.ps1 + ls + + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z + tag: "latest" + overwrite: true From 80665081e0ee3a12921429a3171018ae28c85703 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 13:50:54 -0400 Subject: [PATCH 11/34] Add command to install unstable pytorch builds for ROCm. --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b49d0256..5c80b10a 100644 --- a/README.md +++ b/README.md @@ -50,10 +50,16 @@ Put your VAE in: models/vae At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10. ### AMD (Linux only) -AMD users can install rocm and pytorch with pip if you don't have it already installed: +AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` + +I highly recommend you use the nightly/unstable pytorch builds though because they work a lot better for me (run this in the ComfyUI folder so it picks up the requirements.txt): + +```pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/rocm5.4.2 -r requirements.txt``` + + ### NVIDIA Nvidia users should install torch using this command: From 54dbfaf2ec7fbc005bb52079cfdd2834f2dd8e8d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 14:49:18 -0400 Subject: [PATCH 12/34] Remove omegaconf dependency and some ci changes. 
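omegaconf was only used to read the model YAML config, so it can be replaced by plain pyyaml plus ordinary dictionary access. A short sketch of the substitution, following the keys used in the loader below (illustrative helper, not the full load_checkpoint):

    import yaml

    def load_model_config(config_path):
        # previously: config = OmegaConf.load(config_path); now the result is plain dicts/lists
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)
        model_config_params = config['model']['params']
        scale_factor = model_config_params['scale_factor']
        vae_config = model_config_params['first_stage_config']
        return config, model_config_params, scale_factor, vae_config
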
--- .ci/setup_windows_zip_nightly_pytorch.ps1 | 5 +++-- .github/workflows/windows_release.yml | 13 +++++++------ .../workflows/windows_release_nightly_pytorch.yml | 4 +++- comfy/cldm/cldm.py | 6 +++--- comfy/ldm/models/diffusion/ddpm.py | 5 ++--- comfy/ldm/modules/diffusionmodules/openaimodel.py | 6 +++--- comfy/sd.py | 7 ++++--- requirements.txt | 2 +- 8 files changed, 26 insertions(+), 22 deletions(-) diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index edc3024a..6d13bad9 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -5,11 +5,12 @@ cd python_embeded Add-Content -Path .\python310._pth -Value 'import site' Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py .\python.exe get-pip.py -.\python.exe -s -m pip install torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 +python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir +ls ../temp_wheel_dir +.\python.exe -s -m pip install --pre (get-item ..\temp_wheel_dir\*) "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth cd .. - mkdir ComfyUI_windows_portable mv python_embeded ComfyUI_windows_portable_nightly_pytorch mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index a6cb883b..be48b7ea 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -25,10 +25,11 @@ jobs: .\setup_windows_zip.ps1 ls - - uses: "marvinpinto/action-automatic-releases@latest" + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 with: - repo_token: "${{ secrets.GITHUB_TOKEN }}" - automatic_release_tag: "latest" - prerelease: true - title: "ComfyUI Standalone Portable Windows Build (For NVIDIA or CPU only)" - files: ComfyUI_windows_portable_nvidia_or_cpu.7z + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_or_cpu.7z + tag: "latest" + overwrite: true + diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index f958a4ab..1aeaef45 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -17,7 +17,9 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - + - uses: actions/setup-python@v4 + with: + python-version: '3.10.9' - run: | cd .. cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 8d14a690..c60abf80 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -59,9 +59,9 @@ class ControlNet(nn.Module): if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) + # from omegaconf.listconfig import ListConfig + # if type(context_dim) == ListConfig: + # context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads diff --git a/comfy/ldm/models/diffusion/ddpm.py b/comfy/ldm/models/diffusion/ddpm.py index 802034c7..42ed2add 100644 --- a/comfy/ldm/models/diffusion/ddpm.py +++ b/comfy/ldm/models/diffusion/ddpm.py @@ -18,7 +18,6 @@ import itertools from tqdm import tqdm from torchvision.utils import make_grid # from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma @@ -1124,8 +1123,8 @@ class LatentDiffusion(DDPM): def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) + # if isinstance(xc, ListConfig): + # xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 9a652c29..09ab1a06 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -477,9 +477,9 @@ class UNetModel(nn.Module): if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' - from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) + # from omegaconf.listconfig import ListConfig + # if type(context_dim) == ListConfig: + # context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads diff --git a/comfy/sd.py b/comfy/sd.py index fd434ba6..3f5ce24e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -6,7 +6,7 @@ import sd2_clip import model_management from .ldm.util import instantiate_from_config from .ldm.models.autoencoder import AutoencoderKL -from omegaconf import OmegaConf +import yaml from .cldm import cldm from .t2i_adapter import adapter @@ -726,7 +726,8 @@ def load_clip(ckpt_path, embedding_directory=None): return clip def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None): - config = OmegaConf.load(config_path) + with open(config_path, 'r') as stream: + config = yaml.safe_load(stream) model_config_params = config['model']['params'] clip_config = model_config_params['cond_stage_config'] scale_factor = model_config_params['scale_factor'] @@ -750,7 +751,7 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e w.cond_stage_model = clip.cond_stage_model load_state_dict_to = [w] - model = instantiate_from_config(config.model) + model = instantiate_from_config(config["model"]) sd = load_torch_file(ckpt_path) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) return (ModelPatcher(model), clip, vae) diff --git a/requirements.txt b/requirements.txt index 45f2599d..bc8b3c55 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ torch torchdiffeq torchsde -omegaconf einops open-clip-torch transformers @@ -9,3 +8,4 @@ safetensors pytorch_lightning aiohttp accelerate +pyyaml From 
a50e1118155d8057dd0364644e8259d55a84bb4a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 15:09:11 -0400 Subject: [PATCH 13/34] Fix small issue with build. --- .ci/setup_windows_zip_nightly_pytorch.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index 6d13bad9..b4d5633a 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -11,7 +11,7 @@ ls ../temp_wheel_dir "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth cd .. -mkdir ComfyUI_windows_portable +mkdir ComfyUI_windows_portable_nightly_pytorch mv python_embeded ComfyUI_windows_portable_nightly_pytorch mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI From 6db777b3489798740c0ffdc6f503fe4279f2c435 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 13 Mar 2023 19:34:05 +0000 Subject: [PATCH 14/34] Added ability to save images to temp dir --- main.py | 9 +++++++++ nodes.py | 21 ++++++++++++++++++--- server.py | 2 +- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index b2b3f1c4..889e2cef 100644 --- a/main.py +++ b/main.py @@ -1,5 +1,6 @@ import os import sys +import shutil import threading import asyncio @@ -53,7 +54,14 @@ def hijack_progress(server): return v setattr(tqdm, "update", wrapped_func) +def cleanup_temp(): + temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) + if __name__ == "__main__": + cleanup_temp() + loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) server = server.PromptServer(loop) @@ -93,3 +101,4 @@ if __name__ == "__main__": else: loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) + cleanup_temp() diff --git a/nodes.py b/nodes.py index 0a0a0a9c..b201c352 100644 --- a/nodes.py +++ b/nodes.py @@ -775,12 +775,14 @@ class KSamplerAdvanced: class SaveImage: def __init__(self): self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") + self.temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") @classmethod def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "use_temp_file": (["yes", "no"], {"default" : "no"} ),}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -791,7 +793,7 @@ class SaveImage: CATEGORY = "image" - def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + def save_images(self, images, filename_prefix="ComfyUI", use_temp_file="no", prompt=None, extra_pnginfo=None): def map_filename(filename): prefix_len = len(filename_prefix) prefix = filename[:prefix_len + 1] @@ -818,8 +820,21 @@ class SaveImage: if extra_pnginfo is not None: for x in extra_pnginfo: metadata.add_text(x, json.dumps(extra_pnginfo[x])) + file = f"{filename_prefix}_{counter:05}_.png" - img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True) + + if use_temp_file == "yes": + if not os.path.exists(self.temp_dir): + os.makedirs(self.temp_dir) + dir = self.temp_dir + else: + dir = self.output_dir + + img.save(os.path.join(dir, file), pnginfo=metadata, optimize=True) + + if use_temp_file == "yes": + file += "?type=temp" + 
paths.append(file) counter += 1 return { "ui": { "images": paths } } diff --git a/server.py b/server.py index a29d8597..eb685701 100644 --- a/server.py +++ b/server.py @@ -113,7 +113,7 @@ class PromptServer(): async def view_image(request): if "file" in request.match_info: type = request.rel_url.query.get("type", "output") - if type != "output" and type != "input": + if type not in ["output", "input", "temp"]: return web.Response(status=400) output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type) From e5318d918cde493eb2100bd308cf76425d828da1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 13 Mar 2023 19:34:29 +0000 Subject: [PATCH 15/34] Combo support detault value --- web/scripts/app.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index e70e1c15..8b832eba 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -497,7 +497,11 @@ class ComfyApp { if (Array.isArray(type)) { // Enums e.g. latent rotation - this.addWidget("combo", inputName, type[0], () => {}, { values: type }); + let defaultValue = type[0]; + if (inputData[1] && inputData[1].default) { + defaultValue = inputData[1].default; + } + this.addWidget("combo", inputName, defaultValue, () => {}, { values: type }); } else if (`${type}:${inputName}` in widgets) { // Support custom widgets by Type:Name Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {}); From dd35588b29bb1c4a8bd1fd97be78108b63ded3d5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 17:50:48 -0400 Subject: [PATCH 16/34] Move colab link to the installing section. --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5c80b10a..8877d449 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,10 @@ There is a portable standalone build for Windows that should work for running on Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints +## Colab Notebook + +To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb) + ## Manual Install (Windows, Linux) Git clone this repo. @@ -128,10 +132,6 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the ```embedding:embedding_filename.pt``` -### Colab Notebook - -To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb) - ### Fedora To get python 3.10 on fedora: From 986dd820dcae3b86a047b07a95f4d925fc625db0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 20:58:09 -0400 Subject: [PATCH 17/34] Use half() function on model when loading in fp16. 
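A short sketch of the idea: when the checkpoint is detected (or configured) as fp16, the whole model is converted with .half() after the weights are loaded, rather than leaving the cast entirely to autocast at runtime. This is a simplified helper; the real loader in comfy/sd.py also builds the CLIP and VAE objects first.

    import torch

    def finalize_loaded_model(model: torch.nn.Module, fp16: bool) -> torch.nn.Module:
        # After load_model_weights() has copied the checkpoint into the model,
        # optionally convert every parameter/buffer to torch.float16.
        if fp16:
            model = model.half()
        return model
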
--- comfy/sd.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 3f5ce24e..c7e0b073 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -854,4 +854,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e model = instantiate_from_config(model_config) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + if fp16: + model = model.half() + return (ModelPatcher(model), clip, vae) From 0e836d525e7f6a3b44704402c954141af324ea47 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 21:12:48 -0400 Subject: [PATCH 18/34] use half() on fp16 models loaded with config. --- comfy/sd.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index c7e0b073..61d1916d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -733,6 +733,12 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e scale_factor = model_config_params['scale_factor'] vae_config = model_config_params['first_stage_config'] + fp16 = False + if "unet_config" in model_config_params: + if "params" in model_config_params["unet_config"]: + if "use_fp16" in model_config_params["unet_config"]["params"]: + fp16 = model_config_params["unet_config"]["params"]["use_fp16"] + clip = None vae = None @@ -754,6 +760,10 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e model = instantiate_from_config(config["model"]) sd = load_torch_file(ckpt_path) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + + if fp16: + model = model.half() + return (ModelPatcher(model), clip, vae) From ee46bef03a98903831c01d31094a0c30ea411b28 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 21:30:01 -0400 Subject: [PATCH 19/34] Make --cpu have priority over everything else. --- comfy/model_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 482b1add..c26d682f 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -50,8 +50,6 @@ if "--use-pytorch-cross-attention" in sys.argv: XFORMERS_IS_AVAILBLE = False -if "--cpu" in sys.argv: - vram_state = CPU if "--lowvram" in sys.argv: set_vram_to = LOW_VRAM if "--novram" in sys.argv: @@ -73,6 +71,8 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: total_vram_available_mb = (total_vram - 1024) // 2 total_vram_available_mb = int(max(256, total_vram_available_mb)) +if "--cpu" in sys.argv: + vram_state = CPU print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state]) From ca25f0c0c15e0a622e508bd22aab4c0d58faeefb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 13:39:58 -0400 Subject: [PATCH 20/34] Update standalone readme with recommended way to update. 
--- .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 6 +++++- .ci/windows_base_files/README_VERY_IMPORTANT.txt | 7 +++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 3c73a27a..4c6a20a7 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -18,5 +18,9 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa +RECOMMENDED WAY TO UPDATE: To update only the ComfyUI code: update\update_comfyui_only.bat -To update ComfyUI with the python dependencies: update\update_all.bat + + + +To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 3c73a27a..69520db9 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -17,6 +17,9 @@ IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: Comfy You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt - +RECOMMENDED WAY TO UPDATE: To update only the ComfyUI code: update\update_comfyui_only.bat -To update ComfyUI with the python dependencies: update\update_all.bat + + + +To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat From 6a6256a75c39c0e945db40670e2855808c2f4663 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:28:07 +0000 Subject: [PATCH 21/34] Changed flag to new node --- nodes.py | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/nodes.py b/nodes.py index b201c352..b5aa3efe 100644 --- a/nodes.py +++ b/nodes.py @@ -775,14 +775,13 @@ class KSamplerAdvanced: class SaveImage: def __init__(self): self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") - self.temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + self.url_suffix = "" @classmethod def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "use_temp_file": (["yes", "no"], {"default" : "no"} ),}, + "filename_prefix": ("STRING", {"default": "ComfyUI"}),}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -810,6 +809,9 @@ class SaveImage: os.mkdir(self.output_dir) counter = 1 + if not os.path.exists(self.output_dir): + os.makedirs(self.output_dir) + paths = list() for image in images: i = 255. 
* image.cpu().numpy() @@ -820,25 +822,24 @@ class SaveImage: if extra_pnginfo is not None: for x in extra_pnginfo: metadata.add_text(x, json.dumps(extra_pnginfo[x])) - file = f"{filename_prefix}_{counter:05}_.png" - - if use_temp_file == "yes": - if not os.path.exists(self.temp_dir): - os.makedirs(self.temp_dir) - dir = self.temp_dir - else: - dir = self.output_dir - - img.save(os.path.join(dir, file), pnginfo=metadata, optimize=True) - - if use_temp_file == "yes": - file += "?type=temp" - - paths.append(file) + img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True) + paths.append(file + self.url_suffix) counter += 1 return { "ui": { "images": paths } } +class PreviewImage(SaveImage): + def __init__(self): + self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + self.url_suffix = "?type=temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + class LoadImage: input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod @@ -959,6 +960,7 @@ NODE_CLASS_MAPPINGS = { "EmptyLatentImage": EmptyLatentImage, "LatentUpscale": LatentUpscale, "SaveImage": SaveImage, + "PreviewImage": PreviewImage, "LoadImage": LoadImage, "LoadImageMask": LoadImageMask, "ImageScale": ImageScale, From 94a279373bf5104b9ad9ac2bc6da3e8bbe1e5141 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:39:49 +0000 Subject: [PATCH 22/34] Better auto pos of images --- web/scripts/app.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 8b832eba..445bc5d4 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -142,7 +142,14 @@ class ComfyApp { if (numImages === 1 && !imageIndex) { this.imageIndex = imageIndex = 0; } - let shiftY = this.type === "SaveImage" ? 
55 : this.imageOffset || 0; + + let shiftY; + if (this.imageOffset != null) { + shiftY = this.imageOffset; + } else { + shiftY = this.computeSize()[1]; + } + let dw = this.size[0]; let dh = this.size[1]; dh -= shiftY; From 8537ab6f45cdfe060e5fc3fb4aff87a35a0b9072 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:42:28 +0000 Subject: [PATCH 23/34] tidy --- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index b5aa3efe..650d7f65 100644 --- a/nodes.py +++ b/nodes.py @@ -781,7 +781,7 @@ class SaveImage: def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}),}, + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -792,7 +792,7 @@ class SaveImage: CATEGORY = "image" - def save_images(self, images, filename_prefix="ComfyUI", use_temp_file="no", prompt=None, extra_pnginfo=None): + def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): def map_filename(filename): prefix_len = len(filename_prefix) prefix = filename[:prefix_len + 1] From 255ff2d6ddd34b377a31e247fde9b2afa2cff730 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:29:18 +0000 Subject: [PATCH 24/34] Added ctrl+enter to queue prompt --- web/scripts/app.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index e70e1c15..6703908d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -398,6 +398,15 @@ class ComfyApp { api.init(); } + #addKeyboardHandler() { + window.addEventListener("keydown", (e) => { + // Queue prompt using ctrl or command + enter + if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) { + this.queuePrompt(0); + } + }); + } + /** * Loads all extensions from the API into the window */ @@ -464,6 +473,7 @@ class ComfyApp { this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addPasteHandler(); + this.#addKeyboardHandler(); await this.#invokeExtensionsAsync("setup"); } From 01ec3db9324a6c2a3ac2bdc6b99ed729528a10e1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:31:27 +0000 Subject: [PATCH 25/34] Add ctrl+shift+enter for queue front --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 6703908d..942fc902 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -402,7 +402,7 @@ class ComfyApp { window.addEventListener("keydown", (e) => { // Queue prompt using ctrl or command + enter if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) { - this.queuePrompt(0); + this.queuePrompt(e.shiftKey ? -1 : 0); } }); } From ff255d9dcdea17e1a017268e165268079617b46a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 18:07:09 -0400 Subject: [PATCH 26/34] Make sure windows permission issues don't mess things up. 
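A minimal sketch of what the changed cleanup_temp() path does after this patch, assuming the same temp-directory layout that main.py already uses (illustration only, not applied by the patch):

    import os
    import shutil

    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
    if os.path.exists(temp_dir):
        # ignore_errors=True suppresses removal errors (for example Windows
        # permission or file-lock problems), so files that cannot be deleted
        # are simply left behind instead of aborting startup/shutdown.
        shutil.rmtree(temp_dir, ignore_errors=True)
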
--- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 889e2cef..a7f6541f 100644 --- a/main.py +++ b/main.py @@ -57,7 +57,7 @@ def hijack_progress(server): def cleanup_temp(): temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) + shutil.rmtree(temp_dir, ignore_errors=True) if __name__ == "__main__": cleanup_temp() From 6d44cf74e39d510b22e427d42a0975d4c9f2b1de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 19:08:23 -0400 Subject: [PATCH 27/34] Make it more clear the recommended way to update the standalone build. --- .../update_comfyui_and_python_dependencies.bat | 3 +++ .ci/nightly/update_windows/update_all.bat | 3 --- .../{update_comfyui_only.bat => update_comfyui.bat} | 0 .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 5 +++-- .ci/setup_windows_zip.ps1 | 4 ++-- .ci/setup_windows_zip_nightly_pytorch.ps1 | 4 ++-- .../update_comfyui_and_python_dependencies.bat | 3 +++ .ci/update_windows/update_all.bat | 3 --- .../{update_comfyui_only.bat => update_comfyui.bat} | 0 .ci/windows_base_files/README_VERY_IMPORTANT.txt | 6 ++++-- nodes.py | 6 +++--- 11 files changed, 20 insertions(+), 17 deletions(-) create mode 100755 .ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/nightly/update_windows/update_all.bat rename .ci/nightly/update_windows/{update_comfyui_only.bat => update_comfyui.bat} (100%) create mode 100755 .ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/update_windows/update_all.bat rename .ci/update_windows/{update_comfyui_only.bat => update_comfyui.bat} (100%) diff --git a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat new file mode 100755 index 00000000..d58e3341 --- /dev/null +++ b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ +..\..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/update_windows/update_all.bat b/.ci/nightly/update_windows/update_all.bat deleted file mode 100755 index c5e0c6be..00000000 --- a/.ci/nightly/update_windows/update_all.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/nightly/update_windows/update_comfyui_only.bat b/.ci/nightly/update_windows/update_comfyui.bat similarity index 100% rename from .ci/nightly/update_windows/update_comfyui_only.bat rename to .ci/nightly/update_windows/update_comfyui.bat diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 4c6a20a7..7066f91b 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -19,8 +19,9 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa 
RECOMMENDED WAY TO UPDATE: -To update only the ComfyUI code: update\update_comfyui_only.bat +To update the ComfyUI code: update\update_comfyui.bat -To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat +To update ComfyUI with the python dependencies: +update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 index 4bd2f0b4..6b38f498 100755 --- a/.ci/setup_windows_zip.ps1 +++ b/.ci/setup_windows_zip.ps1 @@ -16,8 +16,8 @@ mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI cd ComfyUI_windows_portable mkdir update -cp ComfyUI/.ci/update_windows/* ./update/ -cp ComfyUI/.ci/windows_base_files/* ./ +cp -r ComfyUI/.ci/update_windows/* ./update/ +cp -r ComfyUI/.ci/windows_base_files/* ./ cd .. diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index b4d5633a..31721e5f 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -18,8 +18,8 @@ mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI cd ComfyUI_windows_portable_nightly_pytorch mkdir update -cp ComfyUI/.ci/nightly/update_windows/* ./update/ -cp ComfyUI/.ci/nightly/windows_base_files/* ./ +cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ +cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ cd .. diff --git a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat new file mode 100755 index 00000000..51462193 --- /dev/null +++ b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ +..\..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/update_windows/update_all.bat b/.ci/update_windows/update_all.bat deleted file mode 100755 index b7308550..00000000 --- a/.ci/update_windows/update_all.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/update_windows/update_comfyui_only.bat b/.ci/update_windows/update_comfyui.bat similarity index 100% rename from .ci/update_windows/update_comfyui_only.bat rename to .ci/update_windows/update_comfyui.bat diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 69520db9..143ee462 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -18,8 +18,10 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa RECOMMENDED WAY TO UPDATE: -To update only the ComfyUI code: update\update_comfyui_only.bat +To update the ComfyUI code: update\update_comfyui.bat -To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat +To update ComfyUI with the python dependencies: +update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat + diff --git a/nodes.py b/nodes.py index 650d7f65..f956eaa6 
100644 --- a/nodes.py +++ b/nodes.py @@ -811,7 +811,7 @@ class SaveImage: if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) - + paths = list() for image in images: i = 255. * image.cpu().numpy() @@ -835,11 +835,11 @@ class PreviewImage(SaveImage): @classmethod def INPUT_TYPES(s): - return {"required": + return {"required": {"images": ("IMAGE", ), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } - + class LoadImage: input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod From fade8a352756958d7b2bcc173dea0d449f211a8b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 21:38:59 -0400 Subject: [PATCH 28/34] Don't need to be that explicit. --- .../update_comfyui_and_python_dependencies.bat | 3 --- .../update_windows/update_comfyui_and_python_dependencies.bat | 3 +++ .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 2 +- .../update_comfyui_and_python_dependencies.bat | 3 --- .ci/update_windows/update_comfyui_and_python_dependencies.bat | 3 +++ .ci/windows_base_files/README_VERY_IMPORTANT.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) delete mode 100755 .ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat create mode 100755 .ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat create mode 100755 .ci/update_windows/update_comfyui_and_python_dependencies.bat diff --git a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat deleted file mode 100755 index d58e3341..00000000 --- a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ -..\..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat new file mode 100755 index 00000000..c5e0c6be --- /dev/null +++ b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 7066f91b..656b9db4 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -24,4 +24,4 @@ To update the ComfyUI code: update\update_comfyui.bat To update ComfyUI with the python dependencies: -update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat +update\update_comfyui_and_python_dependencies.bat diff --git a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat deleted file mode 100755 index 
51462193..00000000 --- a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ -..\..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/update_comfyui_and_python_dependencies.bat new file mode 100755 index 00000000..b7308550 --- /dev/null +++ b/.ci/update_windows/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 143ee462..a6214e73 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -23,5 +23,5 @@ To update the ComfyUI code: update\update_comfyui.bat To update ComfyUI with the python dependencies: -update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat +update\update_comfyui_and_python_dependencies.bat From 760f10d173e26fcf959accb815d6437c29453af3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 21:46:59 -0400 Subject: [PATCH 29/34] Test workflow for cu118 test build. --- .github/workflows/windows_release_cu118.yml | 61 +++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/windows_release_cu118.yml diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml new file mode 100644 index 00000000..05a81841 --- /dev/null +++ b/.github/workflows/windows_release_cu118.yml @@ -0,0 +1,61 @@ +name: "Windows Release cu118" + +on: + workflow_dispatch: +# push: +# branches: +# - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. + cp ComfyUI/.ci/setup_windows_zip.ps1 ./ + cp -r ComfyUI ComfyUI_copy + + Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + Expand-Archive python_embeded.zip + cd python_embeded + Add-Content -Path .\python310._pth -Value 'import site' + Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py + .\python.exe get-pip.py + .\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 + "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth + cd .. + + + mkdir ComfyUI_windows_portable + mv python_embeded ComfyUI_windows_portable + mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + + cd ComfyUI_windows_portable + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. 
+ + & "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + + ls + + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + tag: "latest" + overwrite: true + From b5f2bc971183c9b846151de4aa1b7a1a19465461 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 22:09:15 -0400 Subject: [PATCH 30/34] Try to make the workflow actually fail when there's a problem. --- .github/workflows/windows_release_cu118.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 05a81841..65f0d29b 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -20,7 +20,6 @@ jobs: - run: | cd .. - cp ComfyUI/.ci/setup_windows_zip.ps1 ./ cp -r ComfyUI ComfyUI_copy Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip @@ -29,8 +28,13 @@ jobs: Add-Content -Path .\python310._pth -Value 'import site' Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py .\python.exe get-pip.py - .\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth + + - shell: bash + run: | + cd .. + cd python_embeded + ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 cd .. @@ -46,7 +50,7 @@ jobs: cd .. - & "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z ls From 5ad9f86514822a0aa3d1ce113676ae1b46874681 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:02:57 -0400 Subject: [PATCH 31/34] Do a quick test on the CI to see if ComfyUI actually runs before pushing the build. --- .github/workflows/windows_release_cu118.yml | 22 ++++++++++----------- main.py | 3 +++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 65f0d29b..b757a540 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -18,23 +18,18 @@ jobs: with: fetch-depth: 0 - - run: | - cd .. - cp -r ComfyUI ComfyUI_copy - - Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip - Expand-Archive python_embeded.zip - cd python_embeded - Add-Content -Path .\python310._pth -Value 'import site' - Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py - .\python.exe get-pip.py - "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth - - shell: bash run: | cd .. 
+ cp -r ComfyUI ComfyUI_copy + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 + sed -i '1i../ComfyUI' ./python310._pth cd .. @@ -53,6 +48,9 @@ jobs: "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + cd ComfyUI_windows_portable + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls - name: Upload binaries to release diff --git a/main.py b/main.py index a7f6541f..3c03381d 100644 --- a/main.py +++ b/main.py @@ -86,6 +86,9 @@ if __name__ == "__main__": except: pass + if '--quick-test-for-ci' in sys.argv: + exit(0) + call_on_start = None if "--windows-standalone-build" in sys.argv: def startup_server(address, port): From 5bde13495527a40ea3110ca91442db14243a8dc3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:17:50 -0400 Subject: [PATCH 32/34] More proper ci workflows. --- .ci/setup_windows_zip.ps1 | 25 ------------- .ci/setup_windows_zip_nightly_pytorch.ps1 | 27 -------------- .github/workflows/windows_release.yml | 35 ++++++++++++++++-- .github/workflows/windows_release_cu118.yml | 2 +- .../windows_release_nightly_pytorch.yml | 36 +++++++++++++++++-- 5 files changed, 66 insertions(+), 59 deletions(-) delete mode 100755 .ci/setup_windows_zip.ps1 delete mode 100755 .ci/setup_windows_zip_nightly_pytorch.ps1 diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 deleted file mode 100755 index 6b38f498..00000000 --- a/.ci/setup_windows_zip.ps1 +++ /dev/null @@ -1,25 +0,0 @@ -Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip -Expand-Archive python_embeded.zip -cd python_embeded -Add-Content -Path .\python310._pth -Value 'import site' -Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py -.\python.exe get-pip.py -.\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 -"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth -cd .. - - -mkdir ComfyUI_windows_portable -mv python_embeded ComfyUI_windows_portable -mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI - -cd ComfyUI_windows_portable - -mkdir update -cp -r ComfyUI/.ci/update_windows/* ./update/ -cp -r ComfyUI/.ci/windows_base_files/* ./ - -cd .. 
- -& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable -mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 deleted file mode 100755 index 31721e5f..00000000 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ /dev/null @@ -1,27 +0,0 @@ -Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip -Expand-Archive python_embeded.zip -rm python_embeded.zip -cd python_embeded -Add-Content -Path .\python310._pth -Value 'import site' -Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py -.\python.exe get-pip.py -python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir -ls ../temp_wheel_dir -.\python.exe -s -m pip install --pre (get-item ..\temp_wheel_dir\*) -"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth -cd .. - -mkdir ComfyUI_windows_portable_nightly_pytorch -mv python_embeded ComfyUI_windows_portable_nightly_pytorch -mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI - -cd ComfyUI_windows_portable_nightly_pytorch - -mkdir update -cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ -cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ - -cd .. - -& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch -mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index be48b7ea..1b2694a3 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -18,13 +18,42 @@ jobs: with: fetch-depth: 0 - - run: | + - shell: bash + run: | cd .. - cp ComfyUI/.ci/setup_windows_zip.ps1 ./ cp -r ComfyUI ComfyUI_copy - .\setup_windows_zip.ps1 + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip -d python_embeded + cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py + ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 + sed -i '1i../ComfyUI' ./python310._pth + cd .. + + + mkdir ComfyUI_windows_portable + mv python_embeded ComfyUI_windows_portable + mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + + cd ComfyUI_windows_portable + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. 
+ + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z + + cd ComfyUI_windows_portable + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls + - name: Upload binaries to release uses: svenstaro/upload-release-action@v2 with: diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index b757a540..773483f6 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -23,7 +23,7 @@ jobs: cd .. cp -r ComfyUI ComfyUI_copy wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip - unzip python_embeded.zip + unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 1aeaef45..2679b0b6 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -20,11 +20,41 @@ jobs: - uses: actions/setup-python@v4 with: python-version: '3.10.9' - - run: | + - shell: bash + run: | cd .. - cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ cp -r ComfyUI ComfyUI_copy - .\setup_windows_zip_nightly_pytorch.ps1 + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip -d python_embeded + cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py + python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir + ls ../temp_wheel_dir + ./python.exe -s -m pip install --pre ../temp_wheel_dir/* + sed -i '1i../ComfyUI' ./python310._pth + cd .. + + + mkdir ComfyUI_windows_portable_nightly_pytorch + mv python_embeded ComfyUI_windows_portable_nightly_pytorch + mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI + + cd ComfyUI_windows_portable_nightly_pytorch + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. + + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch + mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z + + cd ComfyUI_windows_portable_nightly_pytorch + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls - name: Upload binaries to release From ed4810e6701cdea1f6bf59e667736efc87e4f600 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:22:41 -0400 Subject: [PATCH 33/34] CI fix. --- .github/workflows/windows_release.yml | 4 ++-- .github/workflows/windows_release_cu118.yml | 4 ++-- .github/workflows/windows_release_nightly_pytorch.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index 1b2694a3..3f7d4d73 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -22,11 +22,11 @@ jobs: run: | cd .. 
cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 sed -i '1i../ComfyUI' ./python310._pth diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 773483f6..cd0ca9a6 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -22,11 +22,11 @@ jobs: run: | cd .. cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 sed -i '1i../ComfyUI' ./python310._pth diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 2679b0b6..291d754e 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -24,11 +24,11 @@ jobs: run: | cd .. cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir ls ../temp_wheel_dir From 54593db6dcc9081c58fb9688a49aab49286bb741 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Mar 2023 01:58:27 -0400 Subject: [PATCH 34/34] Update install instructions for torch. 
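A quick sanity check to confirm which torch build actually got installed after following the updated command below; the printed values depend on the local environment and are only examples:

    import torch

    print(torch.__version__)          # a "+cu118" suffix indicates the CUDA 11.8 wheel
    print(torch.version.cuda)         # CUDA version the wheel targets, None on CPU-only builds
    print(torch.cuda.is_available())  # False without a working NVIDIA driver/GPU
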
--- README.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 8877d449..b5a0c48f 100644 --- a/README.md +++ b/README.md @@ -56,19 +56,14 @@ At the time of writing this pytorch has issues with python versions higher than ### AMD (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` - - -I highly recommend you use the nightly/unstable pytorch builds though because they work a lot better for me (run this in the ComfyUI folder so it picks up the requirements.txt): - -```pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/rocm5.4.2 -r requirements.txt``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` ### NVIDIA Nvidia users should install torch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118``` Nvidia users should also install Xformers for a speed boost but can still run the software without it.
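
Since xformers stays optional, a small sketch of how to check whether it can be imported for the speed boost mentioned above (illustration, not project code; ComfyUI runs either way):

    try:
        import xformers
        print("xformers is available")
    except ImportError:
        print("xformers not installed; continuing without it")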