Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-01-11 02:15:17 +00:00

Compare commits: b6e6b390a6...054b185cbb

4 commits: 054b185cbb, 2307ff6746, 88d1879d59, b62b606d30
.dockerignore (new file, 7 lines)

```
.git
__pycache__/
*.py[cod]
input
models
notebooks
output
```
.github/workflows/docker.yml (new file, 98 lines, vendored)

```yaml
name: Build and publish Docker images

on:
  push:
    branches:
      - "**"
    tags:
      - "v*.*.*"
  pull_request:
    branches:
      - "main"

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        include:
          - id: cu118
            name: CUDA 11.8
            pytorch_install_args: "--index-url https://download.pytorch.org/whl/cu118"
          - id: cu121
            name: CUDA 12.1
            pytorch_install_args: "--index-url https://download.pytorch.org/whl/cu121"
          - id: rocm6.0
            name: ROCm 6.0
            pytorch_install_args: "--index-url https://download.pytorch.org/whl/rocm6.0"
          - id: cpu
            name: CPU only
            pytorch_install_args: "--index-url https://download.pytorch.org/whl/cpu"
            extra_args: --cpu

    name: ${{ matrix.name }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check which repositories to use
        id: repositories
        run: |
          echo "GHCR_IMAGE_NAME=ghcr.io/${GITHUB_REPOSITORY_OWNER}/comfyui" >> "$GITHUB_ENV"
          if [[ -n "${DOCKERHUB_USERNAME}" ]]; then
            echo "DOCKERHUB_IMAGE_NAME=${DOCKERHUB_USERNAME}/comfyui" >> "$GITHUB_ENV"
          else
            echo "DOCKERHUB_IMAGE_NAME=" >> "$GITHUB_ENV"
            echo "No Docker Hub username set, only deploying to GitHub Container Repository"
          fi
        env:
          DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
          GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ env.DOCKERHUB_IMAGE_NAME }}
            ${{ env.GHCR_IMAGE_NAME }}
          flavor: |
            suffix=-${{ matrix.id }},onlatest=true
          # generate Docker tags based on the following events/attributes
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            # set latest tag for default branch
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        if: github.event_name != 'pull_request' && env.DOCKERHUB_IMAGE_NAME != ''
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            PYTORCH_INSTALL_ARGS=${{ matrix.pytorch_install_args }}
            EXTRA_ARGS=${{ matrix.extra_args }}
          cache-from: type=gha,scope=${{ github.ref_name }}-${{ matrix.id }}
          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.id }}
```
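With the `flavor` suffix applied on top of the tag rules, each matrix entry publishes its own tag set. A sketch of what this produces for the cu121 entry, assuming the repository owner `comfyanonymous` and a default branch named `master` (exact tags depend on the triggering ref):

```shell
# Push to the default branch (type=ref,event=branch plus the latest rule):
#   ghcr.io/comfyanonymous/comfyui:master-cu121
#   ghcr.io/comfyanonymous/comfyui:latest-cu121
# Push of a v1.2.3 tag (type=semver) additionally publishes:
#   ghcr.io/comfyanonymous/comfyui:1.2.3-cu121, :1.2-cu121, :1-cu121
docker pull ghcr.io/comfyanonymous/comfyui:latest-cu121
```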
Dockerfile (new file, 72 lines)

```dockerfile
# syntax=docker/dockerfile:1

ARG PYTHON_VERSION=3.11

FROM python:${PYTHON_VERSION}-slim

ARG PYTORCH_INSTALL_ARGS=""
ARG EXTRA_ARGS=""
ARG USERNAME=comfyui
ARG USER_UID=1000
ARG USER_GID=${USER_UID}

# Fail fast on errors or unset variables
SHELL ["/bin/bash", "-eux", "-o", "pipefail", "-c"]

RUN <<EOF
groupadd --gid ${USER_GID} ${USERNAME}
useradd --uid ${USER_UID} --gid ${USER_GID} -m ${USERNAME}
EOF

RUN <<EOF
apt-get update
apt-get install -y --no-install-recommends \
    git \
    git-lfs \
    rsync \
    fonts-recommended
EOF

# run instructions as user
USER ${USER_UID}:${USER_GID}

WORKDIR /app

ENV XDG_CACHE_HOME=/cache
ENV PIP_CACHE_DIR=/cache/pip
ENV VIRTUAL_ENV=/app/venv
ENV VIRTUAL_ENV_CUSTOM=/app/custom_venv

# create cache directory. During build we will use a cache mount,
# but later this is useful for custom node installs
RUN --mount=type=cache,target=/cache/,uid=${USER_UID},gid=${USER_GID} \
    mkdir -p ${PIP_CACHE_DIR}

# create virtual environment to manage packages
RUN python -m venv ${VIRTUAL_ENV}

# run python from venv (prefer custom_venv over baked-in one)
ENV PATH="${VIRTUAL_ENV_CUSTOM}/bin:${VIRTUAL_ENV}/bin:${PATH}"

RUN --mount=type=cache,target=/cache/,uid=${USER_UID},gid=${USER_GID} \
    pip install torch torchvision torchaudio ${PYTORCH_INSTALL_ARGS}

# copy requirements files first so packages can be cached separately
COPY --chown=${USER_UID}:${USER_GID} requirements.txt .
RUN --mount=type=cache,target=/cache/,uid=${USER_UID},gid=${USER_GID} \
    pip install -r requirements.txt

COPY --chown=${USER_UID}:${USER_GID} . .

# default environment variables
ENV COMFYUI_ADDRESS=0.0.0.0
ENV COMFYUI_PORT=8188
ENV COMFYUI_EXTRA_BUILD_ARGS="${EXTRA_ARGS}"
ENV COMFYUI_EXTRA_ARGS=""
# default start command
CMD \
    if [ -d "${VIRTUAL_ENV_CUSTOM}" ]; then \
        rsync -aP "${VIRTUAL_ENV}/" "${VIRTUAL_ENV_CUSTOM}/" ;\
        sed -i "s!${VIRTUAL_ENV}!${VIRTUAL_ENV_CUSTOM}!g" "${VIRTUAL_ENV_CUSTOM}/pyvenv.cfg" ;\
    fi ;\
    python -u main.py --listen ${COMFYUI_ADDRESS} --port ${COMFYUI_PORT} ${COMFYUI_EXTRA_BUILD_ARGS} ${COMFYUI_EXTRA_ARGS}
```
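Because the start command reads the listen address, port, and extra flags from environment variables, they can be overridden per container without rebuilding the image. A minimal sketch, assuming the prebuilt cu121 image referenced in the README below:

```shell
# Override the baked-in defaults at run time
docker run --rm --gpus all -p 8080:8080 \
  -e COMFYUI_PORT=8080 \
  -e COMFYUI_EXTRA_ARGS="--lowvram" \
  ghcr.io/comfyanonymous/comfyui:latest-cu121
```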
README.md (35 lines changed)

@@ -207,6 +207,41 @@ Install the dependencies by opening your terminal inside the ComfyUI folder and:

After this you should have everything installed and can proceed to running ComfyUI.

## Docker

There are prebuilt Docker images for AMD and NVIDIA GPUs on [GitHub Packages](https://ghcr.io/comfyanonymous/comfyui).

You can pull them to your local machine with:

```shell
# For NVIDIA GPUs
docker pull ghcr.io/comfyanonymous/comfyui:latest-cu121

# For AMD GPUs
docker pull ghcr.io/comfyanonymous/comfyui:latest-rocm6.0

# For CPU only
docker pull ghcr.io/comfyanonymous/comfyui:latest-cpu
```
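As a rough sketch of running a pulled image (assuming the NVIDIA Container Toolkit for the GPU flags; adjust the mounted paths to taste):

```shell
docker run --rm --gpus all -p 8188:8188 \
  -v "$(pwd)/models:/app/models" \
  -v "$(pwd)/output:/app/output" \
  ghcr.io/comfyanonymous/comfyui:latest-cu121
```

The web UI is then reachable at http://localhost:8188.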
### Building images manually

You can build a Docker image yourself with the Dockerfile in this repo.

Set the `PYTORCH_INSTALL_ARGS` build arg to one of the PyTorch index URLs above to build for NVIDIA or AMD GPUs:

```shell
docker build --build-arg PYTORCH_INSTALL_ARGS="--index-url https://download.pytorch.org/whl/cu121" .
docker build --build-arg PYTORCH_INSTALL_ARGS="--index-url https://download.pytorch.org/whl/rocm6.0" .
```

This Dockerfile requires BuildKit. If your Docker installation does not support the `buildx` command, you can enable BuildKit by setting the `DOCKER_BUILDKIT` environment variable:

```shell
DOCKER_BUILDKIT=1 docker build --build-arg PYTORCH_INSTALL_ARGS="--index-url https://download.pytorch.org/whl/cu121" .
```

NOTE: when building the CPU-only image, it is recommended to add the `--cpu` flag via the `EXTRA_ARGS` build arg:

```shell
docker build --build-arg PYTORCH_INSTALL_ARGS="--index-url https://download.pytorch.org/whl/cpu" --build-arg EXTRA_ARGS=--cpu .
```

### Others:

#### Apple Mac silicon
comfy/sd.py (2 hunks)

```diff
@@ -111,7 +111,7 @@ class CLIP:
         model_management.load_models_gpu([self.patcher], force_full_load=True)
         self.layer_idx = None
         self.use_clip_schedule = False
-        logging.info("CLIP model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
+        logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))

     def clone(self):
         n = CLIP(no_init=True)
@@ -898,7 +898,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
         if inital_load_device != torch.device("cpu"):
-            logging.info("loaded straight to GPU")
+            logging.info("loaded diffusion model directly to GPU")
             model_management.load_models_gpu([model_patcher], force_full_load=True)

     return (model_patcher, clip, vae, clipvision)
```
docker-compose.yaml (new file, 20 lines)

```yaml
version: "3.9"

services:
  comfyui:
    user: "1000:1000"
    build: .
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    ports:
      - "8188:8188"
    volumes:
      - "./models:/app/models"
      - "./input:/app/input"
      - "./temp:/app/output/temp"
      - "./output:/app/output"
```
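With this file in the repository root, the image can be built and the service started in one step (a sketch; Compose v2 and the NVIDIA container runtime are assumed for the GPU reservation):

```shell
# Builds the image from the local Dockerfile and serves ComfyUI on port 8188
docker compose up --build
```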
ruff.toml (1 hunk)

```diff
@@ -4,7 +4,8 @@ lint.ignore = ["ALL"]
 # Enable specific rules
 lint.select = [
   "S307", # suspicious-eval-usage
-  "T201", # print-usage
   "S102", # exec
+  "T", # print-usage
+  "W",
   # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
   # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
```
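The selected rule set can be checked locally with ruff's CLI; a minimal sketch, assuming ruff is installed and picks this configuration up from the repository root:

```shell
# Lint the repository against the configured rules (S307, S102, T, W, ...)
ruff check .
```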