Replace print with logging (#6138)

* Replace print with logging

* nit

* nit

* nit

* nit

* nit

* nit
Author: Chenlei Hu
Date:   2024-12-20 13:24:55 -08:00 (committed by GitHub)
Parent: bddb02660c
Commit: d7969cb070
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)

22 changed files with 49 additions and 45 deletions
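
The pattern applied throughout the diff below: library and application modules now report through Python's standard logging module at an appropriate level, while standalone scripts and tests that print intentional output keep print and silence Ruff's new T201 check with an inline comment. A minimal sketch of the two idioms (the function name here is hypothetical, not from this commit):

import logging

def load_legacy_model(path):  # hypothetical helper, for illustration only
    # Library code: route messages through logging so the application
    # controls verbosity, formatting, and destination.
    logging.warning("load_legacy_model is deprecated, use load_model instead")
    return path

# Script/test code: the print is intentional output, so the line opts out
# of Ruff's T201 (print found) rule instead of being rewritten.
print("updating standalone package") # noqa: T201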


@@ -28,7 +28,7 @@ def pull(repo, remote_name='origin', branch='master'):
     if repo.index.conflicts is not None:
         for conflict in repo.index.conflicts:
-            print('Conflicts found in:', conflict[0].path)
+            print('Conflicts found in:', conflict[0].path) # noqa: T201
         raise AssertionError('Conflicts, ahhhhh!!')
     user = repo.default_signature
@@ -49,18 +49,18 @@ repo_path = str(sys.argv[1])
 repo = pygit2.Repository(repo_path)
 ident = pygit2.Signature('comfyui', 'comfy@ui')
 try:
-    print("stashing current changes")
+    print("stashing current changes") # noqa: T201
     repo.stash(ident)
 except KeyError:
-    print("nothing to stash")
+    print("nothing to stash") # noqa: T201
 backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
-print("creating backup branch: {}".format(backup_branch_name))
+print("creating backup branch: {}".format(backup_branch_name)) # noqa: T201
 try:
     repo.branches.local.create(backup_branch_name, repo.head.peel())
 except:
     pass
-print("checking out master branch")
+print("checking out master branch") # noqa: T201
 branch = repo.lookup_branch('master')
 if branch is None:
     ref = repo.lookup_reference('refs/remotes/origin/master')
@@ -72,7 +72,7 @@ else:
     ref = repo.lookup_reference(branch.name)
 repo.checkout(ref)
-print("pulling latest changes")
+print("pulling latest changes") # noqa: T201
 pull(repo)
 if "--stable" in sys.argv:
@@ -94,7 +94,7 @@ if "--stable" in sys.argv:
     if latest_tag is not None:
         repo.checkout(latest_tag)
-print("Done!")
+print("Done!") # noqa: T201
 self_update = True
 if len(sys.argv) > 2:


@@ -38,8 +38,8 @@ class UserManager():
         if not os.path.exists(user_directory):
             os.makedirs(user_directory, exist_ok=True)
             if not args.multi_user:
-                print("****** User settings have been changed to be stored on the server instead of browser storage. ******")
-                print("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
+                logging.warning("****** User settings have been changed to be stored on the server instead of browser storage. ******")
+                logging.warning("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
         if args.multi_user:
             if os.path.isfile(self.get_users_file()):


@@ -160,7 +160,6 @@ class ControlNet(nn.Module):
         if isinstance(self.num_classes, int):
             self.label_emb = nn.Embedding(num_classes, time_embed_dim)
         elif self.num_classes == "continuous":
-            print("setting up linear c_adm embedding layer")
             self.label_emb = nn.Linear(1, time_embed_dim)
         elif self.num_classes == "sequential":
             assert adm_in_channels is not None


@@ -2,6 +2,7 @@
 import torch
 import math
+import logging
 from tqdm.auto import trange
@@ -474,7 +475,7 @@ class UniPC:
         return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
     def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
-        print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
+        logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
         ns = self.noise_schedule
         assert order <= len(model_prev_list)
@@ -518,7 +519,6 @@
         A_p = C_inv_p
         if use_corrector:
-            print('using corrector')
             C_inv = torch.linalg.inv(C)
             A_c = C_inv


@@ -5,6 +5,7 @@ import math
 import torch
 import numpy as np
 import itertools
+import logging
 if TYPE_CHECKING:
     from comfy.model_patcher import ModelPatcher, PatcherInjection
@@ -575,7 +576,7 @@ def load_hook_lora_for_models(model: 'ModelPatcher', clip: 'CLIP', lora: dict[st
     k1 = set(k1)
     for x in loaded:
         if (x not in k) and (x not in k1):
-            print(f"NOT LOADED {x}")
+            logging.warning(f"NOT LOADED {x}")
     return (new_modelpatcher, new_clip, hook_group)
 def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):


@@ -381,7 +381,6 @@ class MMDiT(nn.Module):
         pe_new = pe_as_2d.squeeze(0).permute(1, 2, 0).flatten(0, 1)
         self.positional_encoding.data = pe_new.unsqueeze(0).contiguous()
         self.h_max, self.w_max = target_dim
-        print("PE extended to", target_dim)
     def pe_selection_index_based_on_dim(self, h, w):
         h_p, w_p = h // self.patch_size, w // self.patch_size


@@ -9,6 +9,7 @@
 import math
+import logging
 import torch
 import torch.nn as nn
 import numpy as np
@@ -130,7 +131,7 @@ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timestep
     # add one to get the final alpha values right (the ones from first scale to data during sampling)
     steps_out = ddim_timesteps + 1
     if verbose:
-        print(f'Selected timesteps for ddim sampler: {steps_out}')
+        logging.info(f'Selected timesteps for ddim sampler: {steps_out}')
     return steps_out
@@ -142,8 +143,8 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
     # according the the formula provided in https://arxiv.org/abs/2010.02502
     sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
     if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
+        logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        logging.info(f'For the chosen value of eta, which is {eta}, '
               f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
     return sigmas, alphas, alphas_prev


@@ -1,4 +1,5 @@
 import importlib
+import logging
 import torch
 from torch import optim
@@ -23,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
         try:
             draw.text((0, 0), lines, fill="black", font=font)
         except UnicodeEncodeError:
-            print("Cant encode string for logging. Skipping.")
+            logging.warning("Cant encode string for logging. Skipping.")
         txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
         txts.append(txt)
@@ -65,7 +66,7 @@ def mean_flat(tensor):
 def count_params(model, verbose=False):
     total_params = sum(p.numel() for p in model.parameters())
     if verbose:
-        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
+        logging.info(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
     return total_params


@@ -770,7 +770,6 @@ class Flux(BaseModel):
             mask = torch.ones_like(noise)[:, :1]
         mask = torch.mean(mask, dim=1, keepdim=True)
-        print(mask.shape)
         mask = utils.common_upscale(mask.to(device), noise.shape[-1] * 8, noise.shape[-2] * 8, "bilinear", "center")
         mask = mask.view(mask.shape[0], mask.shape[2] // 8, 8, mask.shape[3] // 8, 8).permute(0, 2, 4, 1, 3).reshape(mask.shape[0], -1, mask.shape[2] // 8, mask.shape[3] // 8)
         mask = utils.resize_to_batch_size(mask, noise.shape[0])


@@ -1084,7 +1084,7 @@ def unload_all_models():
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
-    print("WARNING: The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
+    logging.warning("The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
     return weight
 #TODO: might be cleaner to put this somewhere else


@@ -773,7 +773,7 @@ class ModelPatcher:
         return self.model.device
     def calculate_weight(self, patches, weight, key, intermediate_dtype=torch.float32):
-        print("WARNING the ModelPatcher.calculate_weight function is deprecated, please use: comfy.lora.calculate_weight instead")
+        logging.warning("The ModelPatcher.calculate_weight function is deprecated, please use: comfy.lora.calculate_weight instead")
         return comfy.lora.calculate_weight(patches, weight, key, intermediate_dtype=intermediate_dtype)
     def cleanup(self):
@@ -1029,7 +1029,7 @@
         if cached_weights is not None:
             for key in cached_weights:
                 if key not in model_sd_keys:
-                    print(f"WARNING cached hook could not patch. key does not exist in model: {key}")
+                    logging.warning(f"Cached hook could not patch. Key does not exist in model: {key}")
                     continue
                 self.patch_cached_hook_weights(cached_weights=cached_weights, key=key, memory_counter=memory_counter)
         else:
@@ -1039,7 +1039,7 @@
             original_weights = self.get_key_patches()
             for key in relevant_patches:
                 if key not in model_sd_keys:
-                    print(f"WARNING cached hook would not patch. key does not exist in model: {key}")
+                    logging.warning(f"Cached hook would not patch. Key does not exist in model: {key}")
                     continue
                 self.patch_hook_weight_to_device(hooks=hooks, combined_patches=relevant_patches, key=key, original_weights=original_weights,
                                                  memory_counter=memory_counter)


@@ -940,11 +940,11 @@ def load_diffusion_model(unet_path, model_options={}):
     return model
 def load_unet(unet_path, dtype=None):
-    print("WARNING: the load_unet function has been deprecated and will be removed please switch to: load_diffusion_model")
+    logging.warning("The load_unet function has been deprecated and will be removed please switch to: load_diffusion_model")
     return load_diffusion_model(unet_path, model_options={"dtype": dtype})
 def load_unet_state_dict(sd, dtype=None):
-    print("WARNING: the load_unet_state_dict function has been deprecated and will be removed please switch to: load_diffusion_model_state_dict")
+    logging.warning("The load_unet_state_dict function has been deprecated and will be removed please switch to: load_diffusion_model_state_dict")
     return load_diffusion_model_state_dict(sd, model_options={"dtype": dtype})
 def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None, extra_keys={}):


@@ -41,8 +41,7 @@ class ClipTokenWeightEncoder:
                 to_encode.append(self.gen_empty_tokens(self.special_tokens, max_token_len))
             else:
                 to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len))
-        print(to_encode)
         o = self.encode(to_encode)
         out, pooled = o[:2]


@@ -1,5 +1,6 @@
+import logging
 from spandrel import ModelLoader
 def load_state_dict(state_dict):
-    print("WARNING: comfy_extras.chainner_models is deprecated and has been replaced by the spandrel library.")
+    logging.warning("comfy_extras.chainner_models is deprecated and has been replaced by the spandrel library.")
     return ModelLoader().load_from_state_dict(state_dict).eval()


@@ -1,5 +1,6 @@
 from __future__ import annotations
 from typing import TYPE_CHECKING, Union
+import logging
 import torch
 from collections.abc import Iterable
@@ -539,7 +540,7 @@ class CreateHookKeyframesInterpolated:
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
             if print_keyframes:
-                print(f"Hook Keyframe - start_percent:{percent} = {strength}")
+                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
         return (prev_hook_kf,)
 class CreateHookKeyframesFromFloats:
@@ -588,7 +589,7 @@ class CreateHookKeyframesFromFloats:
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
             if print_keyframes:
-                print(f"Hook Keyframe - start_percent:{percent} = {strength}")
+                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
         return (prev_hook_kf,)
 #------------------------------------------
 ###########################################


@@ -63,7 +63,7 @@ def execute_prestartup_script():
             spec.loader.exec_module(module)
             return True
         except Exception as e:
-            print(f"Failed to execute startup-script: {script_path} / {e}")
+            logging.error(f"Failed to execute startup-script: {script_path} / {e}")
         return False
     if args.disable_all_custom_nodes:
@@ -85,14 +85,14 @@ def execute_prestartup_script():
             success = execute_script(script_path)
             node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
     if len(node_prestartup_times) > 0:
-        print("\nPrestartup times for custom nodes:")
+        logging.info("\nPrestartup times for custom nodes:")
         for n in sorted(node_prestartup_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (PRESTARTUP FAILED)"
-            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
-            print()
+            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+        logging.info("")
 apply_custom_paths()
 execute_prestartup_script()
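
A detail worth noting in the hunk above: print joins any number of positional arguments with spaces, while logging.info treats extra positional arguments as %-style formatting values, which is why the trailing n[1] is folded into the format string. A small sketch of the difference, using made-up values:

import logging
logging.basicConfig(level=logging.INFO)

elapsed, module_path = 1.2, "custom_nodes/example"  # illustrative values only

print("{:6.1f} seconds:".format(elapsed), module_path)            # print joins its arguments
logging.info("{:6.1f} seconds: {}".format(elapsed, module_path))  # logging wants one message
logging.info("%6.1f seconds: %s", elapsed, module_path)           # or lazy %-style arguments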


@@ -32,4 +32,4 @@ def update_windows_updater():
     except:
         pass
     shutil.copy(bat_path, dest_bat_path)
-    print("Updated the windows standalone package updater.")
+    print("Updated the windows standalone package updater.") # noqa: T201


@@ -4,7 +4,10 @@ lint.ignore = ["ALL"]
 # Enable specific rules
 lint.select = [
     "S307", # suspicious-eval-usage
+    "T201", # print-usage
     # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
     # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
     "F",
 ]
+
+exclude = ["*.ipynb"]
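
With "T201" (the flake8-print rule implemented by Ruff) added to lint.select, running ruff check from the repository root now flags every bare print call, which is what drove the changes above; intentional prints are kept by suppressing the rule inline. A sketch of the two outcomes, assuming a standard Ruff installation:

print("debug output")  # flagged by ruff check: T201 `print` found
print("intentional output")  # noqa: T201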


@@ -89,9 +89,9 @@ async def test_routes_added_to_app(aiohttp_client_factory, internal_routes):
     client = await aiohttp_client_factory()
     try:
         resp = await client.get('/files')
-        print(f"Response received: status {resp.status}")
+        print(f"Response received: status {resp.status}") # noqa: T201
     except Exception as e:
-        print(f"Exception occurred during GET request: {e}")
+        print(f"Exception occurred during GET request: {e}") # noqa: T201
         raise
     assert resp.status != 404, "Route /files does not exist"


@@ -28,7 +28,7 @@ def pytest_collection_modifyitems(items):
     last_items = []
     for test_name in LAST_TESTS:
         for item in items.copy():
-            print(item.module.__name__, item)
+            print(item.module.__name__, item) # noqa: T201
             if item.module.__name__ == test_name:
                 last_items.append(item)
                 items.remove(item)


@@ -134,7 +134,7 @@ class TestExecution:
         use_lru, lru_size = request.param
         if use_lru:
             pargs += ['--cache-lru', str(lru_size)]
-        print("Running server with args:", pargs)
+        print("Running server with args:", pargs) # noqa: T201
         p = subprocess.Popen(pargs)
         yield
         p.kill()
@@ -150,8 +150,8 @@ class TestExecution:
             try:
                 comfy_client.connect(listen=listen, port=port)
             except ConnectionRefusedError as e:
-                print(e)
-                print(f"({i+1}/{n_tries}) Retrying...")
+                print(e) # noqa: T201
+                print(f"({i+1}/{n_tries}) Retrying...") # noqa: T201
             else:
                 break
         return comfy_client


@@ -171,8 +171,8 @@ class TestInference:
             try:
                 comfy_client.connect(listen=listen, port=port)
             except ConnectionRefusedError as e:
-                print(e)
-                print(f"({i+1}/{n_tries}) Retrying...")
+                print(e) # noqa: T201
+                print(f"({i+1}/{n_tries}) Retrying...") # noqa: T201
             else:
                 break
         return comfy_client