Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)
ensure backwards compat with optional args

commit da115bd78d
parent 752f7a162b
comfy/sd.py | 10
@@ -372,12 +372,16 @@ class CLIP:
     def clip_layer(self, layer_idx):
         self.layer_idx = layer_idx

-    def tokenize(self, text):
-        return self.tokenizer.tokenize_with_weights(text)
+    def tokenize(self, text, return_word_ids=False):
+        return self.tokenizer.tokenize_with_weights(text, return_word_ids)

-    def encode(self, tokens):
+    def encode(self, text, from_tokens=False):
         if self.layer_idx is not None:
             self.cond_stage_model.clip_layer(self.layer_idx)
+        if from_tokens:
+            tokens = text
+        else:
+            tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
             cond = self.cond_stage_model.encode_token_weights(tokens)
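The net effect of the CLIP changes: tokenize() gains an optional return_word_ids flag, and encode() accepts raw text again while still taking pre-computed tokens via from_tokens=True. A minimal usage sketch (the clip instance and the prompt string are placeholders, not part of this commit):

    # Old-style call sites keep working: encode() tokenizes the raw prompt itself.
    cond = clip.encode("a photo of a cat")

    # Callers that want the intermediate tokens can still compute them
    # and hand them back in explicitly.
    tokens = clip.tokenize("a photo of a cat")
    cond = clip.encode(tokens, from_tokens=True)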
@@ -240,7 +240,7 @@ class SD1Tokenizer:
         return (embed, "")


-    def tokenize_with_weights(self, text:str):
+    def tokenize_with_weights(self, text:str, return_word_ids=False):
         '''
         Takes a prompt and converts it to a list of (token, weight, word id) elements.
         Tokens can both be integer tokens and pre computed CLIP tensors.
@@ -301,6 +301,10 @@ class SD1Tokenizer:

         #add start and end tokens
         batched_tokens = [[(self.start_token, 1.0, 0)] + x + [(self.end_token, 1.0, 0)] for x in batched_tokens]
+
+        if not return_word_ids:
+            batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]
+
         return batched_tokens

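The SD1Tokenizer change keeps the original (token, weight) pair output as the default and only exposes the word ids on request, which is what makes the new argument backwards compatible. A sketch of the two output shapes (the tokenizer instance and prompt are placeholders; the word-id values shown in the comments are illustrative):

    pairs = tokenizer.tokenize_with_weights("a photo of a cat")
    # default: [[(start_token, 1.0), (tok, 1.0), ..., (end_token, 1.0)], ...]

    triples = tokenizer.tokenize_with_weights("a photo of a cat", return_word_ids=True)
    # opt-in: [[(start_token, 1.0, 0), (tok, 1.0, 1), ..., (end_token, 1.0, 0)], ...]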
nodes.py | 3
@@ -44,8 +44,7 @@ class CLIPTextEncode:
     CATEGORY = "conditioning"

     def encode(self, clip, text):
-        tokens = clip.tokenize(text)
-        return ([[clip.encode(tokens), {}]], )
+        return ([[clip.encode(text), {}]], )

 class ConditioningCombine:
     @classmethod
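With encode() accepting raw text again, CLIPTextEncode can go back to a single call and no longer needs to tokenize first. A hypothetical direct invocation, outside the graph executor, assuming clip is an already-loaded CLIP object:

    node = CLIPTextEncode()
    (conditioning,) = node.encode(clip, "a photo of a cat")
    # conditioning == [[cond_tensor, {}]], matching the node's return shape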