Revert changes in comfy/ldm/modules/diffusionmodules/util.py, which is unused.

commit 2da73b7073
parent 4a0c4ce4ef
Author: Simon Lui
Date: 2023-09-02 20:07:52 -07:00

comfy/ldm/modules/diffusionmodules/util.py

@@ -15,7 +15,6 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
 
-from comfy import model_management
 from comfy.ldm.util import instantiate_from_config
 import comfy.ops
 
@@ -140,22 +139,13 @@ class CheckpointFunction(torch.autograd.Function):
     @staticmethod
     def backward(ctx, *output_grads):
         ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-        if model_management.is_nvidia():
-            with torch.enable_grad(), \
-                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-                # Fixes a bug where the first op in run_function modifies the
-                # Tensor storage in place, which is not allowed for detach()'d
-                # Tensors.
-                shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-                output_tensors = ctx.run_function(*shallow_copies)
-        elif model_management.is_intel_xpu():
-            with torch.enable_grad(), \
-                torch.xpu.amp.autocast(**ctx.gpu_autocast_kwargs):
-                # Fixes a bug where the first op in run_function modifies the
-                # Tensor storage in place, which is not allowed for detach()'d
-                # Tensors.
-                shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-                output_tensors = ctx.run_function(*shallow_copies)
+        with torch.enable_grad(), \
+                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
+            # Fixes a bug where the first op in run_function modifies the
+            # Tensor storage in place, which is not allowed for detach()'d
+            # Tensors.
+            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+            output_tensors = ctx.run_function(*shallow_copies)
         input_grads = torch.autograd.grad(
             output_tensors,
             ctx.input_tensors + ctx.input_params,
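
For context, the per-backend branching removed by this revert could also be avoided with PyTorch's device-agnostic torch.autocast context manager, which takes the device type as an argument instead of requiring torch.cuda.amp.autocast or torch.xpu.amp.autocast. The sketch below is illustrative only and not part of this commit; the ctx field names follow the diff above, and deriving device_type from the first input tensor, as well as the (None, None) prefix in the return value, are assumptions about the surrounding CheckpointFunction class.

import torch

def _checkpoint_backward(ctx, *output_grads):
    # Illustrative sketch, not the committed code: replays the autocast
    # state captured during forward, using the device-agnostic
    # torch.autocast so no NVIDIA/XPU branch is needed.
    ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
    # Assumption: infer the autocast device type from the inputs.
    device_type = ctx.input_tensors[0].device.type if ctx.input_tensors else "cuda"
    with torch.enable_grad(), \
            torch.autocast(device_type, **ctx.gpu_autocast_kwargs):
        # Same shallow-copy trick as in the diff: view_as() guards the
        # detach()'d tensors' storage against an in-place first op in
        # run_function.
        shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
        output_tensors = ctx.run_function(*shallow_copies)
    input_grads = torch.autograd.grad(
        output_tensors,
        ctx.input_tensors + ctx.input_params,
        output_grads,
        allow_unused=True,
    )
    # Assumption: forward took (run_function, length, *args), so the first
    # two positions receive no gradient.
    return (None, None) + input_grads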