rodrigomasini committed
Commit 36e5696 · verified · 1 parent: 3dad7d5

Update app.py

Files changed (1)
app.py: +47 -1
app.py CHANGED
@@ -259,7 +259,53 @@ def get_comic_classical(images,captions = None,font = None,pad_image = None):
 
     return [combine_images_vertically_with_resize(row_images)]
 
-
+class FuseModule(nn.Module):
+    def __init__(self, embed_dim):
+        super().__init__()
+        self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False)
+        self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True)
+        self.layer_norm = nn.LayerNorm(embed_dim)
+
+    def fuse_fn(self, prompt_embeds, id_embeds):
+        stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
+        stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
+        stacked_id_embeds = self.mlp2(stacked_id_embeds)
+        stacked_id_embeds = self.layer_norm(stacked_id_embeds)
+        return stacked_id_embeds
+
+    def forward(
+        self,
+        prompt_embeds,
+        id_embeds,
+        class_tokens_mask,
+    ) -> torch.Tensor:
+        # id_embeds shape: [b, max_num_inputs, 1, 2048]
+        id_embeds = id_embeds.to(prompt_embeds.dtype)
+        num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
+        batch_size, max_num_inputs = id_embeds.shape[:2]
+        # seq_length: 77
+        seq_length = prompt_embeds.shape[1]
+        # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
+        flat_id_embeds = id_embeds.view(
+            -1, id_embeds.shape[-2], id_embeds.shape[-1]
+        )
+        # valid_id_mask [b*max_num_inputs]
+        valid_id_mask = (
+            torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
+            < num_inputs[:, None]
+        )
+        valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
+
+        prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
+        class_tokens_mask = class_tokens_mask.view(-1)
+        valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
+        # slice out the image token embeddings
+        image_token_embeds = prompt_embeds[class_tokens_mask]
+        stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
+        assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
+        prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
+        updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
+        return updated_prompt_embeds
 
 def get_comic_4panel(images,captions = [],font = None,pad_image = None):
     if pad_image == None:
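
For reference, below is a minimal, hypothetical smoke test of the added module; it is not part of this commit. It assumes the FuseModule definition above is pasted into the same script, and it substitutes a stand-in MLP with the call signature MLP(in_dim, hidden_dim, out_dim, use_residual=...) used in __init__; the real MLP helper is defined elsewhere in app.py and may differ. Tensor shapes follow the comments in forward (embed_dim 2048, prompt length 77), and batch size 1 is used, which is what the scalar num_inputs computation effectively assumes at inference.

import torch
import torch.nn as nn

class MLP(nn.Module):
    # Hypothetical stand-in for the MLP helper referenced by FuseModule;
    # not the actual app.py implementation.
    def __init__(self, in_dim, hidden_dim, out_dim, use_residual=True):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.use_residual = use_residual

    def forward(self, x):
        out = self.fc2(torch.relu(self.fc1(x)))
        return out + x if self.use_residual else out

# Dummy inputs with the shapes noted in FuseModule.forward's comments.
embed_dim, seq_len, max_num_inputs = 2048, 77, 2
prompt_embeds = torch.randn(1, seq_len, embed_dim)        # [b, 77, 2048]
id_embeds = torch.randn(1, max_num_inputs, 1, embed_dim)  # [b, max_num_inputs, 1, 2048]
class_tokens_mask = torch.zeros(1, seq_len, dtype=torch.bool)
class_tokens_mask[0, 5:7] = True  # two prompt positions marked as identity ("class") tokens

fuse = FuseModule(embed_dim)  # the class added in this commit
updated = fuse(prompt_embeds, id_embeds, class_tokens_mask)
print(updated.shape)  # torch.Size([1, 77, 2048]); the masked positions now carry fused ID embeddings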