Compare commits

3 Commits

Author          SHA1        Message                                                   Date
Gwangwon Jung   dd15653b2a  Merge 8dc9d9e812 into ff838657fa                          2025-01-09 09:12:29 -05:00
comfyanonymous  ff838657fa  Cleaner handling of attention mask in ltxv model code.    2025-01-09 07:12:03 -05:00
Gwangwon Jung   8dc9d9e812  Merge branch 'comfyanonymous:master' into main            2025-01-09 18:54:35 +09:00

@@ -456,9 +456,8 @@ class LTXVModel(torch.nn.Module):
         x = self.patchify_proj(x)
         timestep = timestep * 1000.0
-        attention_mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1]))
-        attention_mask = attention_mask.masked_fill(attention_mask.to(torch.bool), float("-inf"))  # not sure about this
-        # attention_mask = (context != 0).any(dim=2).to(dtype=x.dtype)
+        if attention_mask is not None and not torch.is_floating_point(attention_mask):
+            attention_mask = (attention_mask - 1).to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])) * torch.finfo(x.dtype).max
         pe = precompute_freqs_cis(indices_grid, dim=self.inner_dim, out_dtype=x.dtype)
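The new code path replaces the masked_fill / float("-inf") construction: for a non-floating-point mask, (attention_mask - 1) maps kept positions (1) to 0 and masked positions (0) to -1, and multiplying by torch.finfo(x.dtype).max turns that into an additive bias that softmax drives toward zero attention on masked keys. A minimal sketch of what the new line computes, assuming a hypothetical 0/1 padding mask of shape [batch, seq_len] and half-precision activations (values and shapes below are illustrative, not taken from the diff):

    import torch

    # Hypothetical inputs: a 0/1 padding mask and fp16 activations (assumption).
    x_dtype = torch.float16
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # 1 = attend, 0 = masked

    # Kept positions -> 0.0, masked positions -> -finfo.max, reshaped to
    # [batch, 1, 1, seq_len] so it broadcasts over heads and query positions.
    bias = (attention_mask - 1).to(x_dtype).reshape(
        (attention_mask.shape[0], 1, -1, attention_mask.shape[-1])
    ) * torch.finfo(x_dtype).max

    # The bias is added to attention scores before softmax; masked keys get ~0 weight.
    scores = torch.zeros(1, 1, 5, 5, dtype=x_dtype)  # dummy [batch, heads, q, k] scores
    probs = torch.softmax((scores + bias).float(), dim=-1)

    print(bias.squeeze())   # 0, 0, 0, -65504, -65504 in float16
    print(probs[0, 0, 0])   # weight concentrated on the three unmasked keys

Using -finfo(x.dtype).max rather than float("-inf") keeps the bias representable in the compute dtype and avoids NaNs from inf arithmetic inside attention kernels.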