Try to improve VAEEncode memory usage a bit.

Author: comfyanonymous
Date:   2023-03-22 02:33:27 -04:00
Parent: a3ba90d237
Commit: c692509c2b


@@ -616,19 +616,17 @@ class Encoder(nn.Module):
             x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
             already_padded = True
         # downsampling
-        hs = [self.conv_in(x)]
+        h = self.conv_in(x)
         for i_level in range(self.num_resolutions):
             for i_block in range(self.num_res_blocks):
-                h = self.down[i_level].block[i_block](hs[-1], temb)
+                h = self.down[i_level].block[i_block](h, temb)
                 if len(self.down[i_level].attn) > 0:
                     h = self.down[i_level].attn[i_block](h)
-                hs.append(h)
             if i_level != self.num_resolutions-1:
-                hs.append(self.down[i_level].downsample(hs[-1], already_padded))
+                h = self.down[i_level].downsample(h, already_padded)
                 already_padded = False
 
         # middle
-        h = hs[-1]
         h = self.mid.block_1(h, temb)
         h = self.mid.attn_1(h)
         h = self.mid.block_2(h, temb)
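
The change drops the hs list that accumulated every intermediate activation during downsampling. Unlike the UNet, the VAE Encoder never feeds those stored activations into skip connections; only the most recent tensor is ever read, so keeping the whole list alive just pins extra activations in memory for the length of the pass. Below is a minimal, self-contained sketch of the same pattern; TinyEncoder and its layer shapes are hypothetical stand-ins, not the ComfyUI code.

import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    """Hypothetical stand-in for the Encoder's downsampling stack."""
    def __init__(self, ch=32, num_levels=3):
        super().__init__()
        self.conv_in = nn.Conv2d(3, ch, 3, padding=1)
        self.blocks = nn.ModuleList(nn.Conv2d(ch, ch, 3, padding=1) for _ in range(num_levels))
        self.downs = nn.ModuleList(nn.Conv2d(ch, ch, 3, stride=2, padding=1) for _ in range(num_levels - 1))

    def forward_old(self, x):
        # Before: every activation is appended to hs, so all of them stay
        # referenced until the loop finishes.
        hs = [self.conv_in(x)]
        for i, block in enumerate(self.blocks):
            hs.append(block(hs[-1]))
            if i != len(self.blocks) - 1:
                hs.append(self.downs[i](hs[-1]))
        return hs[-1]

    def forward_new(self, x):
        # After: only the current activation h is kept, so earlier tensors
        # become unreferenced and can be freed once the next layer is done with them.
        h = self.conv_in(x)
        for i, block in enumerate(self.blocks):
            h = block(h)
            if i != len(self.blocks) - 1:
                h = self.downs[i](h)
        return h

# Both variants compute the same result; only the lifetime of the
# intermediate tensors differs.
enc = TinyEncoder()
x = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    assert torch.allclose(enc.forward_old(x), enc.forward_new(x))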