Compare commits


3 Commits

Author           SHA1        Message                           Date
Zac              0b3ecc375d  Merge 49da9c325a into 2307ff6746  2025-01-09 11:21:46 +08:00
comfyanonymous   2307ff6746  Improve some logging messages.    2025-01-08 19:05:22 -05:00
ZacharyACoon     49da9c325a  .                                 2023-04-20 22:50:36 -07:00
5 changed files with 100 additions and 3 deletions

.dockerignore (new file, +6)

@@ -0,0 +1,6 @@
__pycache__/
*.py[cod]
input
models
notebooks
output

Dockerfile (new file, +70)

@@ -0,0 +1,70 @@
# 3.10.11-bullseye, has python, git, but relatively small (<250MB)
ARG BASE_IMAGE="python@sha256:88fb365ea5d52ec8f5799f40a4742b9fb3c91dac92f7048eabaae194a25ccc28"
ARG GPU_MAKE="nvidia"
ARG UID=1000
ARG GID=1000
FROM ${BASE_IMAGE}
ARG GPU_MAKE
ARG UID
ARG GID
SHELL [ "/bin/bash", "-uec"]
RUN \
--mount=target=/var/lib/apt/lists,type=cache,sharing=locked \
--mount=target=/var/cache/apt,type=cache,sharing=locked \
<<'EOF'
apt-get update
apt-get install -yq git-lfs
echo "comfyui" >> /etc/hostname
EOF
# run instructions as user
USER ${UID}:${GID}
# run python from future venv
ENV PATH="/app/venv/bin:${PATH}"
# copy context to obvious location
COPY --chown=${UID}:${GID} ./ /app
# create cache directory *with user permissions*
WORKDIR /app/.cache
# default to app directory
WORKDIR /app
# set pip cache location
ENV XDG_CACHE_HOME="/app/.cache/pip"
# run with mounted cache
RUN --mount=type=cache,target=/app/.cache,uid=${UID},gid=${GID} <<'EOF'
mkdir -p /app/.cache/transformers
# choose package index based on chosen hardware
if [ "${GPU_MAKE}" = "nvidia" ]; then
EXTRA_INDEX_URL="https://download.pytorch.org/whl/cu118"
EXTRAS="xformers"
elif [ "${GPU_MAKE}" = "amd" ]; then
EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm5.4.2"
EXTRAS=""
elif [ "${GPU_MAKE}" = "cpu" ]; then
EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
EXTRAS=""
else
echo "Unknown GPU_MAKE provided as docker build arg."
exit 2
fi
# create virtual environment to manage packages
python -m venv venv
# install framework packages
pip install \
  --extra-index-url "${EXTRA_INDEX_URL}" \
  torch \
  torchvision \
  torchaudio \
  ${EXTRAS}
pip install -r requirements.txt
EOF
# default environment variables
ENV COMFYUI_ADDRESS=0.0.0.0
ENV COMFYUI_PORT=8188
ENV COMFYUI_EXTRA_ARGS=""
ENV TRANSFORMERS_CACHE="/app/.cache/transformers"
# default start command
CMD bash -c "python -u main.py --listen ${COMFYUI_ADDRESS} --port ${COMFYUI_PORT} ${COMFYUI_EXTRA_ARGS}"
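
A minimal usage sketch for this Dockerfile, using the build args it declares (GPU_MAKE, UID, GID); the comfyui image tag is a hypothetical name, and the run line assumes the NVIDIA container toolkit is installed:

docker build --build-arg GPU_MAKE=nvidia --build-arg UID=1000 --build-arg GID=1000 -t comfyui .
docker run --rm --gpus all -p 8188:8188 comfyui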

comfy/sd.py (+2, −2)

@@ -111,7 +111,7 @@ class CLIP:
             model_management.load_models_gpu([self.patcher], force_full_load=True)
         self.layer_idx = None
         self.use_clip_schedule = False
-        logging.info("CLIP model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
+        logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))

     def clone(self):
         n = CLIP(no_init=True)
@@ -898,7 +898,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
         if inital_load_device != torch.device("cpu"):
-            logging.info("loaded straight to GPU")
+            logging.info("loaded diffusion model directly to GPU")
             model_management.load_models_gpu([model_patcher], force_full_load=True)

     return (model_patcher, clip, vae, clipvision)
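
For illustration only, the reworded messages would render along these lines at startup (the device names and dtype here are hypothetical values, not from the source):

CLIP/text encoder model load device: cuda:0, offload device: cpu, current: cpu, dtype: torch.float16
loaded diffusion model directly to GPU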

docker-compose.yaml (new file, +20)

@@ -0,0 +1,20 @@
version: "3.9"
services:
  comfyui:
    user: "1000:1000"
    build: .
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    ports:
      - "8188:8188"
    volumes:
      - "./models:/app/models"
      - "./input:/app/input"
      - "./temp:/app/output/temp"
      - "./output:/app/output"

ruff.toml (+2, −1)

@@ -4,7 +4,8 @@ lint.ignore = ["ALL"]
 # Enable specific rules
 lint.select = [
   "S307", # suspicious-eval-usage
-  "T201", # print-usage
+  "S102", # exec
+  "T", # print-usage
   "W",
   # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
   # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
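
For reference, a minimal Python sketch of code the newly selected rules would flag, assuming Ruff's published meanings for these codes (S102 = exec-builtin, the "T" series = print/pprint usage):

exec("x = 1")   # flagged by S102 (use of the exec builtin)
print("debug")  # flagged by T201 (print found), part of the "T" series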