From 17f4ca8110d16318d8e5647ca685375aafb4ef55 Mon Sep 17 00:00:00 2001
From: rromb
Date: Tue, 31 May 2022 12:36:26 +0200
Subject: [PATCH] fix clip encoder code

---
 ldm/modules/encoders/modules.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
index ecaef85..d5133ab 100644
--- a/ldm/modules/encoders/modules.py
+++ b/ldm/modules/encoders/modules.py
@@ -138,7 +138,7 @@ class FrozenT5Embedder(AbstractEncoder):
 
 class FrozenCLIPEmbedder(AbstractEncoder):
     """Uses the CLIP transformer encoder for text (from huggingface)"""
-    def __init__(self, version="clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32
+    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): # clip-vit-base-patch32
         super().__init__()
         self.tokenizer = CLIPTokenizer.from_pretrained(version)
         self.transformer = CLIPTextModel.from_pretrained(version)
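
Note: the one-line change qualifies the default model id with its Hub namespace.
HuggingFace from_pretrained() resolves either a full Hub repository id such as
"openai/clip-vit-large-patch14" or a local directory; the bare
"clip-vit-large-patch14" would only be found if a local folder of that name
exists. Below is a minimal standalone sketch of the patched default in use; it
assumes the transformers package is installed and the weights can be fetched
from the Hub, and the prompt and tokenizer arguments are illustrative only,
not part of the patch.

    from transformers import CLIPTokenizer, CLIPTextModel

    version = "openai/clip-vit-large-patch14"  # fully qualified Hub id, as in the patched default

    tokenizer = CLIPTokenizer.from_pretrained(version)
    transformer = CLIPTextModel.from_pretrained(version).eval()

    # Encode a prompt to the fixed context length used by FrozenCLIPEmbedder (max_length=77).
    batch = tokenizer(["a photograph of an astronaut riding a horse"],
                      truncation=True, max_length=77, padding="max_length",
                      return_tensors="pt")
    hidden = transformer(input_ids=batch["input_ids"]).last_hidden_state
    print(hidden.shape)  # torch.Size([1, 77, 768]) for the ViT-L/14 text encoder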