mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-04-19 10:53:29 +00:00
Some refactoring: from_tokens -> encode_from_tokens
This commit is contained in:
parent
719c26c3c9
commit
81d1f00df3
10
comfy/sd.py
10
comfy/sd.py
@@ -375,13 +375,9 @@ class CLIP:
|
|||||||
def tokenize(self, text, return_word_ids=False):
|
def tokenize(self, text, return_word_ids=False):
|
||||||
return self.tokenizer.tokenize_with_weights(text, return_word_ids)
|
return self.tokenizer.tokenize_with_weights(text, return_word_ids)
|
||||||
|
|
||||||
def encode(self, text, from_tokens=False):
|
def encode_from_tokens(self, tokens):
|
||||||
if self.layer_idx is not None:
|
if self.layer_idx is not None:
|
||||||
self.cond_stage_model.clip_layer(self.layer_idx)
|
self.cond_stage_model.clip_layer(self.layer_idx)
|
||||||
if from_tokens:
|
|
||||||
tokens = text
|
|
||||||
else:
|
|
||||||
tokens = self.tokenizer.tokenize_with_weights(text)
|
|
||||||
try:
|
try:
|
||||||
self.patcher.patch_model()
|
self.patcher.patch_model()
|
||||||
cond = self.cond_stage_model.encode_token_weights(tokens)
|
cond = self.cond_stage_model.encode_token_weights(tokens)
|
||||||
@@ -391,6 +387,10 @@ class CLIP:
|
|||||||
raise e
|
raise e
|
||||||
return cond
|
return cond
|
||||||
|
|
||||||
|
def encode(self, text):
|
||||||
|
tokens = self.tokenizer.tokenize_with_weights(text)
|
||||||
|
return self.encode_from_tokens(tokens)
|
||||||
|
|
||||||
class VAE:
|
class VAE:
|
||||||
def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
|
def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
|
||||||
if config is None:
|
if config is None:
|
||||||
|
@@ -315,7 +315,7 @@ class SD1Tokenizer:
|
|||||||
continue
|
continue
|
||||||
#parse word
|
#parse word
|
||||||
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
|
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
|
||||||
|
|
||||||
#reshape token array to CLIP input size
|
#reshape token array to CLIP input size
|
||||||
batched_tokens = []
|
batched_tokens = []
|
||||||
batch = [(self.start_token, 1.0, 0)]
|
batch = [(self.start_token, 1.0, 0)]
|
||||||
@@ -338,11 +338,11 @@ class SD1Tokenizer:
|
|||||||
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
|
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
|
||||||
#start new batch
|
#start new batch
|
||||||
batch = [(self.start_token, 1.0, 0)]
|
batch = [(self.start_token, 1.0, 0)]
|
||||||
batched_tokens.append(batch)
|
batched_tokens.append(batch)
|
||||||
else:
|
else:
|
||||||
batch.extend([(t,w,i+1) for t,w in t_group])
|
batch.extend([(t,w,i+1) for t,w in t_group])
|
||||||
t_group = []
|
t_group = []
|
||||||
|
|
||||||
#fill last batch
|
#fill last batch
|
||||||
batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
|
batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
|
||||||
|
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
import sd1_clip
|
from comfy import sd1_clip
|
||||||
import torch
|
import torch
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user