wooj0216 committed
Commit 6898ce9 · Parent: 1caec79

FIX: pretrained_models and offline

__pycache__/detection.cpython-310.pyc CHANGED
Binary files a/__pycache__/detection.cpython-310.pyc and b/__pycache__/detection.cpython-310.pyc differ
 
__pycache__/model.cpython-310.pyc CHANGED
Binary files a/__pycache__/model.cpython-310.pyc and b/__pycache__/model.cpython-310.pyc differ
 
app.py CHANGED
@@ -14,9 +14,9 @@ def load_model(detection_type):
 
     device = torch.device("cpu")
 
-    processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
-    clip_model = CLIPVisionModel.from_pretrained("openai/clip-vit-large-patch14", output_attentions=True)
-
+    processor = AutoProcessor.from_pretrained("clip-vit-large-patch14")
+    clip_model = CLIPVisionModel.from_pretrained("clip-vit-large-patch14", output_attentions=True)
+
     model_path = f"pretrained_models/{detection_type}/clip_weights.pth"
     checkpoint = torch.load(model_path, map_location="cpu")
     input_dim = checkpoint["linear.weight"].shape[1]
@@ -32,7 +32,7 @@ def process_image(image, detection_type):
 
     results = detect_image(image, processor, clip_model, detection_model)
 
-    pred_score = results["pred_score"]
+    pred_score = 1 - results["pred_score"]
     attn_map = results["attn_map"]
 
     return pred_score, attn_map
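
The first hunk is what makes the Space work offline: because a directory named clip-vit-large-patch14 now exists next to app.py (the submodule added below), transformers resolves the bare relative path locally and never contacts the Hub. The second hunk inverts the displayed score to 1 - pred_score, which only flips the class convention of the output (e.g., reporting the complementary label's probability) without touching the model. A minimal sketch of the resulting load_model, assuming the pretrained_models checkpoint is a single linear probe over CLIP features; the LinearProbe class and its sigmoid output are hypothetical, inferred only from the "linear.weight" key visible in the diff:

    import torch
    import torch.nn as nn
    from transformers import AutoProcessor, CLIPVisionModel

    class LinearProbe(nn.Module):
        # Hypothetical head: the diff reveals only a "linear.weight" key,
        # so a single one-logit nn.Linear is assumed here.
        def __init__(self, input_dim):
            super().__init__()
            self.linear = nn.Linear(input_dim, 1)

        def forward(self, feats):
            return torch.sigmoid(self.linear(feats))

    def load_model(detection_type):
        device = torch.device("cpu")

        # A bare relative path that exists as a directory is loaded
        # locally, so no Hub download is attempted (the "offline" fix).
        processor = AutoProcessor.from_pretrained("clip-vit-large-patch14")
        clip_model = CLIPVisionModel.from_pretrained(
            "clip-vit-large-patch14", output_attentions=True
        )

        model_path = f"pretrained_models/{detection_type}/clip_weights.pth"
        checkpoint = torch.load(model_path, map_location="cpu")
        input_dim = checkpoint["linear.weight"].shape[1]  # feature width

        detection_model = LinearProbe(input_dim)
        detection_model.load_state_dict(checkpoint)
        detection_model.to(device).eval()
        return processor, clip_model, detection_model
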
attn.jpg DELETED
Binary file (395 Bytes)
 
clip-vit-large-patch14 ADDED
@@ -0,0 +1 @@
+Subproject commit 32bd64288804d66eefd0ccbe215aa642df71cc41
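
The new gitlink pins the CLIP checkpoint at a fixed revision so the Space ships with its own copy instead of downloading at runtime. Assuming the submodule tracks the openai/clip-vit-large-patch14 repo on the Hub (its URL is not shown in this diff), the same directory could be materialized without submodule tooling; a sketch, not part of the commit:

    from huggingface_hub import snapshot_download

    # Fetch the pinned revision into the directory app.py expects.
    # repo_id is an assumption: the submodule URL is not in the diff.
    snapshot_download(
        repo_id="openai/clip-vit-large-patch14",
        revision="32bd64288804d66eefd0ccbe215aa642df71cc41",
        local_dir="clip-vit-large-patch14",
    )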