Spaces: Runtime error
Commit 354600e · Parent(s): fa4d0e8
Update app.py
app.py
CHANGED
@@ -4,37 +4,13 @@ from io import BytesIO
 import gradio as gr
 
 
-# Initialize CLIP model and processor
-processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
-model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
-
-def image_similarity(image: Image.Image, action_prompt: str) -> bool:
-    positive_text = f"a picture of someone {action_prompt}"
-    negative_text = f"other"
-
-    inputs = processor(
-        text=[positive_text, negative_text],
-        images=image,
-        return_tensors="pt",
-        padding=True
-    )
-
-    outputs = model(**inputs)
-    logits_per_image = outputs.logits_per_image  # image-text similarity score
-    probs = logits_per_image.softmax(dim=1)  # take the softmax to get the label probabilities
-
-    # Determine if positive prompt has a higher probability than the negative prompt
-    result = probs[0][0] > probs[0][1]
-    return result
-
-
 # Initialize CLIP model and processor
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
 model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
 
 def image_similarity(image: Image.Image, action_prompt: str):
-    positive_text = f"a picture of
-    negative_text = f"not a picture
+    positive_text = f"a picture of a person {action_prompt}"
+    negative_text = f"not a picture a person {action_prompt}"
 
     inputs = processor(
         text=[positive_text, negative_text],