Mirror of https://github.com/comfyanonymous/ComfyUI.git
Fix for new transformers version.

commit 92eca60ec9
parent 0f5352d96c
@@ -1,6 +1,7 @@
 from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
 from .utils import load_torch_file, transformers_convert
 import os
+import torch

 class ClipVisionModel():
     def __init__(self, json_config):
@@ -20,7 +21,8 @@ class ClipVisionModel():
         self.model.load_state_dict(sd, strict=False)

     def encode_image(self, image):
-        inputs = self.processor(images=[image[0]], return_tensors="pt")
+        img = torch.clip((255. * image[0]), 0, 255).round().int()
+        inputs = self.processor(images=[img], return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs

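For context, a minimal sketch of the conversion this commit introduces, assuming ComfyUI-style image tensors (float values in [0, 1], channels-last); the helper name `to_uint8_range` and the dummy shapes below are illustrative, not from the source. The change presumably exists because newer transformers versions of CLIPImageProcessor rescale pixel values by 1/255 themselves, so floats already in [0, 1] must first be brought back to the 0-255 range.

import torch

def to_uint8_range(image: torch.Tensor) -> torch.Tensor:
    # Scale [0, 1] floats to [0, 255], clamp, and round to integer pixel values,
    # mirroring the expression used in the diff above.
    return torch.clip(255. * image, 0, 255).round().int()

# Usage with a dummy batch of shape (1, H, W, C), as ComfyUI images are batched:
dummy_batch = torch.rand(1, 224, 224, 3)
img = to_uint8_range(dummy_batch[0])  # int tensor in [0, 255], ready for the processor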