mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)
Fix issue where autocast fp32 CLIP gave different results from regular.
This commit is contained in:
parent 7d401ed1d0
commit fb3b728203
@@ -60,6 +60,9 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
 
         if dtype is not None:
             self.transformer.to(dtype)
+            self.transformer.text_model.embeddings.token_embedding.to(torch.float32)
+            self.transformer.text_model.embeddings.position_embedding.to(torch.float32)
+
         self.max_length = max_length
         if freeze:
             self.freeze()
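The added lines keep the token and position embedding tables in full precision even when the rest of the text encoder is cast to a lower-precision dtype. Below is a minimal sketch of that selective cast using a stand-in module with illustrative sizes, not the actual ComfyUI transformer:

import torch
import torch.nn as nn

# Hypothetical stand-in for the CLIP text model; submodule names mirror the diff above.
encoder = nn.ModuleDict({
    "token_embedding": nn.Embedding(49408, 768),
    "position_embedding": nn.Embedding(77, 768),
    "final_layer_norm": nn.LayerNorm(768),
})

encoder.to(torch.float16)                        # analogous to self.transformer.to(dtype)
encoder["token_embedding"].to(torch.float32)     # embeddings go back to full precision
encoder["position_embedding"].to(torch.float32)

for name, module in encoder.items():
    print(name, next(module.parameters()).dtype)
# token_embedding torch.float32
# position_embedding torch.float32
# final_layer_norm torch.float16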
@@ -138,7 +141,7 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
         tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
         tokens = torch.LongTensor(tokens).to(device)
 
-        if backup_embeds.weight.dtype != torch.float32:
+        if self.transformer.text_model.final_layer_norm.weight.dtype != torch.float32:
             precision_scope = torch.autocast
         else:
             precision_scope = lambda a, b: contextlib.nullcontext(a)
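The changed condition picks the context manager for the encode: autocast when the transformer runs in a lower-precision dtype, a no-op context when it is fp32. The check moves to final_layer_norm because, after the change above, the backed-up token embedding weights are always fp32 and no longer reflect the transformer's dtype. A rough sketch of the pattern, with a hypothetical helper name and illustrative dtypes rather than the ComfyUI call site:

import contextlib
import torch

def choose_precision_scope(weight_dtype):
    # Mirrors the branch above: autocast only when the encoder weights are not fp32.
    if weight_dtype != torch.float32:
        return torch.autocast
    return lambda device_type, dtype: contextlib.nullcontext(device_type)

# Illustrative use of the fp32 branch: the no-op context leaves fp32 results untouched.
precision_scope = choose_precision_scope(torch.float32)
with precision_scope("cpu", torch.float32):
    pass  # the text-encoder forward pass would run here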