Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)
Use the end token for the text encoder attention mask.
commit 56333d4850
parent 0dccb4617d
@@ -168,11 +168,11 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
         attention_mask = None
         if self.enable_attention_masks:
             attention_mask = torch.zeros_like(tokens)
-            max_token = self.transformer.get_input_embeddings().weight.shape[0] - 1
+            end_token = self.special_tokens.get("end", -1)
             for x in range(attention_mask.shape[0]):
                 for y in range(attention_mask.shape[1]):
                     attention_mask[x, y] = 1
-                    if tokens[x, y] == max_token:
+                    if tokens[x, y] == end_token:
                         break
 
         outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state)
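
The diff changes which token id terminates each row of the attention mask: instead of assuming the end-of-text token is the last id in the embedding table (max_token), it looks the id up in self.special_tokens, so the mask stays 1 up to and including the end token and 0 for whatever padding follows. Below is a minimal, self-contained sketch of that masking loop, using made-up CLIP-style token ids (49406/49407 as hypothetical start/end ids), not the model's real tokenizer values:

import torch

def build_attention_mask(tokens: torch.Tensor, end_token: int) -> torch.Tensor:
    # Mark every position up to and including the first end token with 1;
    # everything after it (padding) stays 0.
    attention_mask = torch.zeros_like(tokens)
    for x in range(attention_mask.shape[0]):      # batch dimension
        for y in range(attention_mask.shape[1]):  # sequence dimension
            attention_mask[x, y] = 1
            if tokens[x, y] == end_token:
                break
    return attention_mask

# Example: two prompts padded to the same length.
tokens = torch.tensor([
    [49406, 320, 1125, 49407, 49407, 49407],  # short prompt, padded after the end token
    [49406, 320, 1125, 539, 2368, 49407],     # full-length prompt
])
print(build_attention_mask(tokens, end_token=49407))
# tensor([[1, 1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 1, 1]])

The Python loop over positions is cheap here because the token tensors are small (batch x 77 for CLIP-style encoders); a vectorized cumulative comparison would also work, but the loop mirrors the code in the diff.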