Fix clip vision issue with old transformers versions.

This commit is contained in:
comfyanonymous 2023-08-16 11:36:22 -04:00
parent ae270f79bc
commit 58f0c616ed

View File

@ -25,8 +25,7 @@ class ClipVisionModel():
def encode_image(self, image):
    """Encode *image* with the CLIP vision processor + model.

    Args:
        image: float tensor with pixel values in [0, 1]; presumably
            (H, W, C) for a single image or (B, H, W, C) for a batch
            -- TODO confirm against callers.

    Returns:
        Whatever ``self.model`` returns for the processed inputs
        (the raw transformers model output).
    """
    # Scale 0..1 floats to 0..255 and round to integer pixel values.
    img = torch.clip((255. * image), 0, 255).round().int()
    if len(img.shape) == 3:
        # Single unbatched image: wrap it in a list so the processor
        # accepts it (works around old transformers versions that do
        # not handle a bare 3-dim tensor).
        img = [img]
    inputs = self.processor(images=img, return_tensors="pt")
    outputs = self.model(**inputs)
    return outputs