From 87f91307782ce0b401786d8edddd8f618b955141 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 20 May 2025 02:39:55 -0700
Subject: [PATCH] Revert "This doesn't seem to be needed on chroma. (#8209)"
 (#8210)

This reverts commit 7e84bf53737879ace37a68dc93e0df7704a53514.
---
 comfy/ldm/chroma/layers.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/comfy/ldm/chroma/layers.py b/comfy/ldm/chroma/layers.py
index 18a4a9cf..35da91ee 100644
--- a/comfy/ldm/chroma/layers.py
+++ b/comfy/ldm/chroma/layers.py
@@ -109,6 +109,9 @@ class DoubleStreamBlock(nn.Module):
         txt += txt_mod1.gate * self.txt_attn.proj(txt_attn)
         txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
 
+        if txt.dtype == torch.float16:
+            txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)
+
         return img, txt
 
 
@@ -160,6 +163,8 @@ class SingleStreamBlock(nn.Module):
         # compute activation in mlp stream, cat again and run second linear layer
         output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
         x += mod.gate * output
+        if x.dtype == torch.float16:
+            x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)
         return x
 
 
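For context, a minimal standalone sketch (not part of the patch) of the float16 overflow guard this revert restores. The helper name clamp_fp16 and the example tensor are hypothetical; the torch.nan_to_num call and the 65504 limit (the largest finite float16 value) are taken from the hunks above.

    import torch

    def clamp_fp16(t: torch.Tensor) -> torch.Tensor:
        # Only float16 needs this guard; its maximum finite value is 65504,
        # so large intermediate activations overflow to inf and later
        # propagate as NaN. Replace non-finite entries with finite values.
        if t.dtype == torch.float16:
            t = torch.nan_to_num(t, nan=0.0, posinf=65504, neginf=-65504)
        return t

    # Usage: 70000 overflows to inf in fp16 and is clamped back to 65504,
    # while the NaN entry becomes 0.
    x = torch.tensor([1.0, 70000.0, float("nan")], dtype=torch.float16)
    print(clamp_fp16(x))  # tensor([1., 65504., 0.], dtype=torch.float16)

The patch applies the same guard in-line to the txt output of DoubleStreamBlock and the x output of SingleStreamBlock rather than through a shared helper.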