Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -90,21 +90,7 @@
 "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
 "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
 "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
-"f_add = torch.nn.quantized.FloatFunctional()\n",
 "\n",
-"\n",
-"\n",
-"\n"
-],
-"metadata": {
-"id": "TC5lMJrS1HCC"
-},
-"execution_count": null,
-"outputs": []
-},
-{
-"cell_type": "code",
-"source": [
 "index = 0\n",
 "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
 "vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
@@ -118,10 +104,11 @@
 "for key in torch.load('reference_text_and_image_encodings.pt', weights_only=False):\n",
 " index = index + 1;\n",
 "#------#\n",
-"NUM_REFERENCE_ITEMS = index"
+"NUM_REFERENCE_ITEMS = index\n",
+"\n"
 ],
 "metadata": {
-"id": "
+"id": "TC5lMJrS1HCC"
 },
 "execution_count": null,
 "outputs": []
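The loop in this cell only tallies how many entries the reference file contains; a minimal equivalent, assuming the file loads to a dict-like object as it does in the notebook:

```python
import torch

# Equivalent of the index-counting loop above: the number of reference
# items is simply the number of keys in the loaded object.
references = torch.load('reference_text_and_image_encodings.pt', weights_only=False)
NUM_REFERENCE_ITEMS = len(references)
```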
@@ -131,7 +118,7 @@
 "source": [
 "# @title \tβ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
 "# @markdown Choose a pre-encoded reference\n",
-"index = 
+"index = 213 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
 "PROMPT_INDEX = index\n",
 "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
 "url = target_urls[f'{PROMPT_INDEX}']\n",
@@ -139,24 +126,24 @@
 " image = Image.open(requests.get(url, stream=True).raw)\n",
 "#------#\n",
 "# @markdown βοΈ πΌοΈ encoding <-----?-----> π encoding </div> <br>\n",
-"C = 
-"log_strength_1 = 
-"prompt_strength = 
-"reference = torch.zeros(768)\n",
+"C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+"log_strength_1 = 2.17 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"prompt_strength = torch.tensor(math.pow(10 ,log_strength_1-1)).to(dtype = torch.float32)\n",
+"reference = torch.zeros(768).to(dtype = torch.float32)\n",
 "\n",
 "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
 "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
-"reference = torch.add(reference, prompt_strength * C * references[index][0].dequantize())\n",
-"reference = torch.add(reference, prompt_strength * (1-C) * references[index][1].dequantize())\n",
+"reference = torch.add(reference, prompt_strength * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
+"reference = torch.add(reference, prompt_strength * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
 "references = '' # Clear up memory\n",
 "# @markdown -----------\n",
 "# @markdown πβ 1st Enhance similarity to prompt(s)\n",
 "POS_2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
 "log_strength_2 = 1.03 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
-"pos_strength = 
-"for _POS in POS_2.split(','):\n",
+"pos_strength = torch.tensor(math.pow(10 ,log_strength_2-1)).to(dtype = torch.float32)\n",
+"for _POS in POS_2.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
 " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
-" text_features_POS = model.get_text_features(**inputs)\n",
+" text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
 " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
 " reference = torch.add(reference, pos_strength * text_features_POS)\n",
 "# @markdown -----------\n",
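This hunk converts the log-scale strength slider to a linear multiplier via 10^(log_strength − 1), keeps everything in float32, and blends the dequantized text and image encodings of the chosen pair with weight `C`. A minimal sketch of that blend, assuming `references[index]` holds a quantized `(text_encoding, image_encoding)` pair of 768-dimensional CLIP vectors:

```python
import math
import torch

def blend_reference(references, index, C=0.3, log_strength=2.17):
    """Blend a stored text/image CLIP encoding pair into one reference vector.

    C weights the text encoding and (1 - C) the image encoding; the log-scale
    slider value becomes a linear strength of 10**(log_strength - 1).
    """
    prompt_strength = torch.tensor(math.pow(10, log_strength - 1), dtype=torch.float32)
    text_enc = references[index][0].dequantize().to(dtype=torch.float32)
    image_enc = references[index][1].dequantize().to(dtype=torch.float32)
    reference = torch.zeros(768, dtype=torch.float32)
    reference = reference + prompt_strength * C * text_enc
    reference = reference + prompt_strength * (1 - C) * image_enc
    return reference
```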
@@ -165,10 +152,10 @@
 "# @markdown πβ 2nd Enhance similarity to prompt(s)\n",
 "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
 "log_strength_3 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
-"pos_strength = 
-"for _POS in POS.split(','):\n",
+"pos_strength = torch.tensor(math.pow(10 ,log_strength_3-1)).to(dtype = torch.float32)\n",
+"for _POS in POS.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
 " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
-" text_features_POS = model.get_text_features(**inputs)\n",
+" text_features_POS = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
 " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
 " reference = torch.add(reference, pos_strength * text_features_POS)\n",
 "# @markdown -----------\n",
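The same pattern repeats for the second positive-prompt block: the `split(',')` is widened so that fusion-style `{item1|item2}` strings and `</w>` markers are accepted, and the text features are cast to float32 before being normalized and added. A compact sketch of the parsing plus the normalize-and-add step, assuming `model` and `tokenizer` are the CLIP objects loaded earlier in the notebook:

```python
import torch

def parse_items(text: str):
    """Split a prompt string on commas, also accepting '{a|b}' fusion syntax
    and stripping '</w>' end-of-word markers (mirrors the new .replace() chain)."""
    cleaned = (text.replace('</w>', ' ')
                   .replace('{', '').replace('}', '')
                   .replace('|', ','))
    return [item.strip() for item in cleaned.split(',') if item.strip()]

def add_text_features(reference, items, strength, model, tokenizer, sign=1.0):
    """Add (sign=+1) or subtract (sign=-1) L2-normalized CLIP text features."""
    for item in items:
        inputs = tokenizer(text=item, truncation=True, padding=True, return_tensors="pt")
        feats = model.get_text_features(**inputs).to(dtype=torch.float32)
        feats = feats / feats.norm(p=2, dim=-1, keepdim=True)
        reference = reference + sign * strength * feats
    return reference
```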
@@ -176,25 +163,30 @@
 "# @markdown π« Penalize similarity to prompt(s)\n",
 "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
 "log_strength_4 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
-"neg_strength = 
-"for _NEG in NEG.split(','):\n",
+"neg_strength = torch.tensor(math.pow(10 ,log_strength_4-1)).to(dtype = torch.float32)\n",
+"for _NEG in NEG.replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').split(','):\n",
 " inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
-" text_features_NEG = model.get_text_features(**inputs)\n",
+" text_features_NEG = model.get_text_features(**inputs).to(dtype = torch.float32)\n",
 " text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
 " reference = torch.sub(reference, neg_strength * text_features_NEG)\n",
 "# @markdown -----------\n",
 "# @markdown β© Skip item(s) containing the word(s)\n",
 "SKIP = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
 "\n",
-"
-"
-"
+"min_wordcount = 0 # @param {type:\"slider\", min:0, max:20, step:1}\n",
+"\n",
+"def isBlacklisted(_txt, _blacklist):\n",
+" blacklist = _blacklist.lower().replace('</w>' , ' ').replace('{' , '').replace('}' , '').replace('|' , ',').strip()\n",
+" txt = _txt.lower().strip()\n",
+" if len(txt)<min_wordcount: return True\n",
+" if txt.isnumeric(): return True\n",
+" if blacklist == '': return False\n",
 " for item in list(blacklist.split(',')):\n",
 " if item.strip() == '' : continue\n",
 " if txt.find(item.strip())> -1 : return True\n",
 " #------#\n",
 " found = False\n",
-" alphabet = '
+" alphabet = 'abcdefghijklmnopqrstuvxyz'\n",
 " for letter in alphabet:\n",
 " found = txt.find(letter)>-1\n",
 " if found:break\n",
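Pulled out of the notebook-JSON strings, the new skip filter reads roughly as below. Note that `min_wordcount` actually compares against the character length of the item, and the alphabet string as written omits the letter 'w'. The lines after the alphabet loop fall outside this hunk, so the final `return` is an assumption:

```python
def isBlacklisted(_txt: str, _blacklist: str, min_wordcount: int = 0) -> bool:
    """Return True when a vocab item should be skipped (consolidated sketch;
    min_wordcount is a form global in the notebook, a parameter here)."""
    blacklist = (_blacklist.lower().replace('</w>', ' ')
                 .replace('{', '').replace('}', '')
                 .replace('|', ',').strip())
    txt = _txt.lower().strip()
    if len(txt) < min_wordcount:      # despite the name, this counts characters
        return True
    if txt.isnumeric():               # drop purely numeric items
        return True
    if blacklist == '':
        return False
    for item in blacklist.split(','):
        if item.strip() == '':
            continue
        if txt.find(item.strip()) > -1:   # substring match against the blacklist
            return True
    # Require at least one letter ('w' is missing from the notebook's alphabet).
    found = False
    alphabet = 'abcdefghijklmnopqrstuvxyz'
    for letter in alphabet:
        found = txt.find(letter) > -1
        if found:
            break
    return not found  # assumed ending; not shown in the hunk
```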
@@ -213,7 +205,8 @@
 "update_list = True # @param {type:\"boolean\"}\n",
 "\n",
 "calculate_variance = False # @param {type:\"boolean\"}\n",
-"
+"\n",
+"ne = update_list\n",
 "\n",
 "try: first\n",
 "except:\n",
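The `indices` consumed in the next hunk are not touched by this commit; presumably they come from ranking every entry of `vocab_encodings` by similarity to the blended `reference`. A hypothetical sketch of such a ranking step, assuming `vocab_encodings` is a `[NUM_VOCAB_ITEMS, 768]` float tensor:

```python
import torch

# Hypothetical ranking (not part of this diff): score each vocab encoding by
# cosine similarity to the reference vector and sort in descending order.
reference_n = reference / reference.norm(p=2, dim=-1, keepdim=True)
vocab_n = vocab_encodings / vocab_encodings.norm(p=2, dim=-1, keepdim=True)
sims = vocab_n @ reference_n.flatten()        # [NUM_VOCAB_ITEMS]
sorted_sims, indices = torch.sort(sims, descending=True)
```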
@@ -253,12 +246,18 @@
 "for _index in range(list_size):\n",
 " tmp = prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}']\n",
 " if isBlacklisted(tmp , SKIP): continue\n",
-"
+" tmp = fix_bad_symbols(tmp)\n",
+" if output.find(tmp)>-1:continue\n",
+" output = output + tmp + '|'\n",
 "#---------#\n",
 "output = (output + '}').replace('|}' , '} ')\n",
+"print('')\n",
+"print('')\n",
 "for iter in range(N):\n",
 " print(output)\n",
 "#-------#\n",
+"print('')\n",
+"print('')\n",
 "image or print('No image found')"
 ],
 "metadata": {
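The added loop body cleans each surviving item with `fix_bad_symbols`, drops duplicates with a substring check, and joins the rest with `|` before the string is closed with `}`. A self-contained sketch of that assembly, assuming `output` starts as `'{'` earlier in the cell (outside this hunk) and that `fix_bad_symbols` is the notebook's existing cleanup helper:

```python
def build_output(candidates, is_skipped, fix_bad_symbols=lambda s: s, n_copies=1):
    """Join surviving vocab items into a '{a|b|c}' string, mirroring the new
    loop body: skipped items are filtered out, duplicates are dropped via a
    substring check, and the result is printed n_copies times."""
    output = '{'
    for tmp in candidates:
        if is_skipped(tmp):
            continue
        tmp = fix_bad_symbols(tmp)
        if output.find(tmp) > -1:     # crude substring-based de-duplication
            continue
        output = output + tmp + '|'
    output = (output + '}').replace('|}', '} ')
    for _ in range(n_copies):
        print(output)
    return output

# Example: build_output(['red car', 'red car', 'blue sky'], is_skipped=lambda t: False)
# prints and returns '{red car|blue sky} '
```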
@@ -649,7 +648,7 @@
 "cellView": "form",
 "id": "uDzsk02CbMFc"
 },
-"execution_count": 
+"execution_count": null,
 "outputs": []
 },
 {
@@ -681,7 +680,7 @@
 "cellView": "form",
 "id": "Azz1kCza6LB3"
 },
-"execution_count": 
+"execution_count": null,
 "outputs": []
 }
 ]