KeerthiVM committed on
Commit
bd85722
·
1 Parent(s): b98f0ab
Files changed (1) hide show
  1. app.py +21 -15
app.py CHANGED
@@ -142,6 +142,13 @@ class SkinGPT4(nn.Module):
142
  self.P = 14 # Patch size
143
  self.D = 1408 # ViT embedding dimension
144
  self.num_query_tokens = 32
 
 
 
 
 
 
 
145
  # Initialize components
146
  self.vit = self._init_vit(vit_checkpoint_path)
147
  print("Loaded ViT")
@@ -157,6 +164,8 @@ class SkinGPT4(nn.Module):
157
  self.q_former.eval()
158
  print("Loaded QFormer")
159
  self.llama = self._init_llama()
 
 
160
  self.llama_proj = nn.Linear(
161
  self.q_former.bert_config.hidden_size,
162
  self.llama.config.hidden_size
@@ -169,10 +178,7 @@ class SkinGPT4(nn.Module):
169
  torch.zeros(1, self.num_query_tokens, self.q_former.bert_config.hidden_size)
170
  )
171
  nn.init.normal_(self.query_tokens, std=0.02)
172
- self.tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf",
173
- token=token, padding_side="right")
174
 
175
- print("Loaded tokenizer")
176
  def _init_vit(self, vit_checkpoint_path):
177
  """Initialize EVA-ViT-G with paper specifications"""
178
  vit = create_eva_vit_g(
@@ -358,8 +364,8 @@ class SkinGPT4(nn.Module):
358
 
359
  # Tokenize prompt
360
 
361
- self.tokenizer.add_special_tokens({'additional_special_tokens': ['<ImageHere>']})
362
- self.llama.resize_token_embeddings(len(self.tokenizer))
363
 
364
  inputs = self.tokenizer(prompt, return_tensors="pt").to(images.device)
365
 
@@ -380,16 +386,16 @@ class SkinGPT4(nn.Module):
380
  return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
381
 
382
 
383
- def load_model(model_path):
384
- model_path = hf_hub_download(
385
- repo_id="KeerthiVM/SkinCancerDiagnosis",
386
- filename="dermnet_finetuned_version1.pth",
387
- )
388
- # model = SkinGPT4(vit_checkpoint_path="dermnet_finetuned_version1.pth")
389
- model = SkinGPT4(vit_checkpoint_path=model_path)
390
- model.to(device)
391
- model.eval()
392
- return model
393
 
394
 
395
 
 
142
  self.P = 14 # Patch size
143
  self.D = 1408 # ViT embedding dimension
144
  self.num_query_tokens = 32
145
+
146
+ self.tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf",
147
+ token=token, padding_side="right")
148
+
149
+ print("Loaded tokenizer")
150
+ self.tokenizer.add_special_tokens({'additional_special_tokens': ['<ImageHere>']})
151
+
152
  # Initialize components
153
  self.vit = self._init_vit(vit_checkpoint_path)
154
  print("Loaded ViT")
 
164
  self.q_former.eval()
165
  print("Loaded QFormer")
166
  self.llama = self._init_llama()
167
+ self.llama.resize_token_embeddings(len(self.tokenizer))
168
+
169
  self.llama_proj = nn.Linear(
170
  self.q_former.bert_config.hidden_size,
171
  self.llama.config.hidden_size
 
178
  torch.zeros(1, self.num_query_tokens, self.q_former.bert_config.hidden_size)
179
  )
180
  nn.init.normal_(self.query_tokens, std=0.02)
 
 
181
 
 
182
  def _init_vit(self, vit_checkpoint_path):
183
  """Initialize EVA-ViT-G with paper specifications"""
184
  vit = create_eva_vit_g(
 
364
 
365
  # Tokenize prompt
366
 
367
+ # self.tokenizer.add_special_tokens({'additional_special_tokens': ['<ImageHere>']})
368
+ # self.llama.resize_token_embeddings(len(self.tokenizer))
369
 
370
  inputs = self.tokenizer(prompt, return_tensors="pt").to(images.device)
371
 
 
386
  return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
387
 
388
 
389
+ # def load_model(model_path):
390
+ # model_path = hf_hub_download(
391
+ # repo_id="KeerthiVM/SkinCancerDiagnosis",
392
+ # filename="dermnet_finetuned_version1.pth",
393
+ # )
394
+ # # model = SkinGPT4(vit_checkpoint_path="dermnet_finetuned_version1.pth")
395
+ # model = SkinGPT4(vit_checkpoint_path=model_path)
396
+ # model.to(device)
397
+ # model.eval()
398
+ # return model
399
 
400
 
401