Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-01-11 10:25:16 +00:00
Commit 844dbf97a7: This allows changing the sampling parameters of the model (eps or vpred) or setting the model to use zsnr.
81 lines · 3.2 KiB · Python

import torch
import numpy as np
from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule
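

# EPS ("epsilon" prediction): the model predicts the noise that was mixed into
# the latent. calculate_input applies the input scaling
# c_in = 1 / sqrt(sigma^2 + sigma_data^2) before the latent enters the model.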
class EPS:
    def calculate_input(self, sigma, noise):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
        return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
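
    # Reconstruct the denoised latent from an epsilon prediction: x0 = x - sigma * eps.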
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input - model_output * sigma
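

# V_PREDICTION: the model predicts v (a mix of noise and signal, as in
# progressive distillation) rather than pure noise. Only the denoised
# reconstruction differs from EPS, so calculate_input is inherited unchanged.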
class V_PREDICTION(EPS):
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
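

# ModelSamplingDiscrete turns a discrete DDPM beta schedule (1000 steps by
# default) into the continuous sigma parameterization the samplers work with.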
class ModelSamplingDiscrete(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
        beta_schedule = "linear"
        if model_config is not None:
            beta_schedule = model_config.beta_schedule
        self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3)
        self.sigma_data = 1.0
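
    # Build the beta/alpha schedule and derive the per-timestep sigmas from it.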
    def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if given_betas is not None:
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
        # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
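
        # sigma_t = sqrt((1 - alphas_cumprod_t) / alphas_cumprod_t): the noise
        # level of discrete timestep t, expressed in variance-exploding form.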
        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        self.set_sigmas(sigmas)
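
    # Register sigmas (and their logs) as buffers so they follow the module
    # across .to(device) / .to(dtype) calls.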
    def set_sigmas(self, sigmas):
        self.register_buffer('sigmas', sigmas)
        self.register_buffer('log_sigmas', sigmas.log())
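
    # sigmas are stored in ascending order, so the endpoints give the full range.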
    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]
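
    # Map a (possibly off-schedule) sigma to the nearest discrete timestep by
    # minimizing the distance in log-sigma space.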
    def timestep(self, sigma):
        log_sigma = sigma.log()
        dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        return dists.abs().argmin(dim=0).view(sigma.shape)
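
    # Map a (possibly fractional) timestep to a sigma, interpolating linearly
    # in log-sigma between the two neighboring discrete steps.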
    def sigma(self, timestep):
        t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        w = t.frac()
        log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
        return log_sigma.exp()
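
    # Convert a schedule fraction in [0, 1] to a sigma by scaling it into the
    # 0..999 timestep range.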
    def percent_to_sigma(self, percent):
        return self.sigma(torch.tensor(percent * 999.0))
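
A minimal sketch of how these pieces are typically combined, mixing the schedule class with a prediction type via multiple inheritance. It assumes this file is importable as comfy.model_sampling (the module path is not shown on this page); the ModelSamplingEPS and ModelSamplingV class names below are illustrative, not part of this file:

import torch

from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete


class ModelSamplingEPS(ModelSamplingDiscrete, EPS):
    # Discrete schedule + epsilon prediction (e.g. standard SD 1.x models).
    pass


class ModelSamplingV(ModelSamplingDiscrete, V_PREDICTION):
    # Discrete schedule + v-prediction (e.g. SD 2.x 768-v models).
    pass


ms = ModelSamplingEPS()  # default linear beta schedule, 1000 timesteps
print(ms.sigma_min, ms.sigma_max)

# Round trip: for an on-schedule sigma, timestep() recovers the index.
s = ms.sigma(torch.tensor([500.0]))
print(ms.timestep(s))  # tensor([500])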