mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-01-10 18:05:16 +00:00
Compare commits
4 Commits
3699cf3a7e
...
e88e36875c
Author | SHA1 | Date | |
---|---|---|---|
|
e88e36875c | ||
|
2307ff6746 | ||
|
76649815d3 | ||
|
22c0afc8e2 |
23
README.md
23
README.md
@ -233,10 +233,31 @@ For models compatible with Ascend Extension for PyTorch (torch_npu). To get star
|
||||
3. Next, install the necessary packages for torch-npu by adhering to the platform-specific instructions on the [Installation](https://ascend.github.io/docs/sources/pytorch/install.html#pytorch) page.
|
||||
4. Finally, adhere to the [ComfyUI manual installation](#manual-install-windows-linux) guide for Linux. Once all components are installed, you can run ComfyUI as described earlier.
|
||||
|
||||
### UV (Package Manager)
|
||||
|
||||
UV is an extremely fast Python package and project manager, written in Rust. For UV installation, check the [docs](https://docs.astral.sh/uv/). UV helps with isolation and reproducibility of the desired environment.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/comfyanonymous/ComfyUI.git # Clone this repo
|
||||
cd ComfyUI # Move to created folder
|
||||
uv init # Start uv project in current folder
|
||||
uv add --requirements requirements.txt # Add dependencies
|
||||
```
|
||||
|
||||
# Running
|
||||
|
||||
```python main.py```
|
||||
```bash
|
||||
python main.py
|
||||
```
|
||||
|
||||
If using **UV**:
|
||||
|
||||
```bash
|
||||
uv run python main.py
|
||||
# or, if the virtual environment was activated beforehand
|
||||
source .venv/bin/activate
|
||||
python main.py
|
||||
```
|
||||
|
||||
### For AMD cards not officially supported by ROCm
|
||||
|
||||
|
@ -111,7 +111,7 @@ class CLIP:
|
||||
model_management.load_models_gpu([self.patcher], force_full_load=True)
|
||||
self.layer_idx = None
|
||||
self.use_clip_schedule = False
|
||||
logging.info("CLIP model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
|
||||
logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
|
||||
|
||||
def clone(self):
|
||||
n = CLIP(no_init=True)
|
||||
@ -898,7 +898,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
|
||||
if output_model:
|
||||
model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
|
||||
if inital_load_device != torch.device("cpu"):
|
||||
logging.info("loaded straight to GPU")
|
||||
logging.info("loaded diffusion model directly to GPU")
|
||||
model_management.load_models_gpu([model_patcher], force_full_load=True)
|
||||
|
||||
return (model_patcher, clip, vae, clipvision)
|
||||
|
@ -4,7 +4,8 @@ lint.ignore = ["ALL"]
|
||||
# Enable specific rules
|
||||
lint.select = [
|
||||
"S307", # suspicious-eval-usage
|
||||
"T201", # print-usage
|
||||
"S102", # exec
|
||||
"T", # print-usage
|
||||
"W",
|
||||
# The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
|
||||
# See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
|
||||
|
Loading…
Reference in New Issue
Block a user