Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-01-11 02:15:17 +00:00
Lint and fix undefined names (3/N) (#6030)
parent d4426dce7c
commit 60749f345d
@@ -97,7 +97,7 @@ def get_activation(activation: Literal["elu", "snake", "none"], antialias=False,
         raise ValueError(f"Unknown activation {activation}")

     if antialias:
-        act = Activation1d(act)
+        act = Activation1d(act) # noqa: F821 Activation1d is not defined

     return act

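F821 is the pyflakes/ruff code for a name that is never defined or imported in its module. In the hunk above the undefined name (Activation1d) sits in an optional branch, so the commit records the problem with a per-line noqa marker rather than guessing at an import; a check along the lines of "ruff check --select F821" (assuming ruff is the linter in use) passes once the marker is in place. A minimal, self-contained sketch of the pattern, with hypothetical names rather than ComfyUI code:

import torch

def get_act(name: str, antialias: bool = False) -> torch.nn.Module:
    if name == "elu":
        act = torch.nn.ELU()
    elif name == "none":
        act = torch.nn.Identity()
    else:
        raise ValueError(f"Unknown activation {name}")
    if antialias:
        # Antialias1d is never imported, so ruff reports F821 on this line.
        # The noqa marker silences the report without changing runtime behaviour:
        # the branch still raises NameError if it is ever taken.
        act = Antialias1d(act)  # noqa: F821
    return act

print(get_act("elu"))  # the default path never touches the undefined name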
@@ -169,7 +169,7 @@ class RotaryEmbedding(nn.Module):
         if self.scale is None:
             return freqs, 1.

-        power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
+        power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base # noqa: F821 seq_len is not defined
         scale = comfy.ops.cast_to_input(self.scale, t) ** rearrange(power, 'n -> n 1')
         scale = torch.cat((scale, scale), dim = -1)

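The flagged name here is seq_len, which the added comment notes is not defined in this scope, so this scaled-rotary branch would raise NameError if it were ever reached; the noqa marker documents that without changing behaviour. For reference, a hedged sketch of the xpos-style scale the branch computes, written with every name defined and illustrative sizes (not ComfyUI code):

import torch

seq_len, dim, scale_base = 8, 16, 512
scale_vec = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)  # per-dimension base scale, shape (dim//2,)
power = (torch.arange(seq_len) - (seq_len // 2)) / scale_base    # signed position exponent, shape (seq_len,)
scale = scale_vec ** power.unsqueeze(-1)                         # (seq_len, dim//2), decaying away from the centre
scale = torch.cat((scale, scale), dim=-1)                        # (seq_len, dim), matching the two rotated halves
print(scale.shape)  # torch.Size([8, 16])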
@@ -4,7 +4,7 @@ from typing import Any, Dict, Tuple, Union

 from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution

-from comfy.ldm.util import instantiate_from_config
+from comfy.ldm.util import get_obj_from_str, instantiate_from_config
 from comfy.ldm.modules.ema import LitEma
 import comfy.ops

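In this file the undefined name has a clear fix: get_obj_from_str already exists in comfy.ldm.util, so it is added to the existing import instead of being silenced. For context, a hedged sketch of what this pair of helpers typically does in LDM-derived code bases (an illustrative reimplementation, not the comfy.ldm.util source):

import importlib

def get_obj_from_str(dotted_path: str):
    # "pkg.module.Name" -> the attribute Name of the imported module pkg.module
    module_name, obj_name = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), obj_name)

def instantiate_from_config(config: dict):
    # {"target": "pkg.module.Class", "params": {...}} -> Class(**params)
    return get_obj_from_str(config["target"])(**config.get("params", {}))

print(get_obj_from_str("collections.OrderedDict")())  # OrderedDict()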
@@ -172,7 +172,7 @@ def _get_attention_scores_no_kv_chunking(
         del attn_scores
     except model_management.OOM_EXCEPTION:
         logging.warning("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead")
-        attn_scores -= attn_scores.max(dim=-1, keepdim=True).values
+        attn_scores -= attn_scores.max(dim=-1, keepdim=True).values # noqa: F821 attn_scores is not defined
         torch.exp(attn_scores, out=attn_scores)
         summed = torch.sum(attn_scores, dim=-1, keepdim=True)
         attn_scores /= summed
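ruff flags attn_scores in the except branch because the try body deletes it with del; at runtime that del only runs after the softmax has already succeeded, so the tensor should still be alive whenever the OOM fallback executes, which is presumably why the warning is suppressed rather than the code restructured. The fallback itself is a numerically stable softmax computed in place. A self-contained sketch of that computation (not the ComfyUI function):

import torch

def softmax_inplace(scores: torch.Tensor) -> torch.Tensor:
    # Subtract the per-row max so exp() cannot overflow, then normalise in place.
    scores -= scores.max(dim=-1, keepdim=True).values
    torch.exp(scores, out=scores)
    scores /= scores.sum(dim=-1, keepdim=True)
    return scores

x = torch.randn(2, 4, 6)
assert torch.allclose(softmax_inplace(x.clone()), x.softmax(dim=-1), atol=1e-6)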