Update app.py
app.py CHANGED
@@ -1,87 +1,128 @@
  import gradio as gr
- from transformers import pipeline
- from …
  import torch
- from …
- from torchvision.models import resnet50
- import torch.nn.functional as F
-
- # Load text emotion detection pipeline
- text_emotion_pipeline = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=3)

- # Load …
- …
-     'https://huggingface.co/Celal11/resnet-50-finetuned-FER2013-0.001/resolve/main/pytorch_model.bin',
-     map_location=torch.device('cpu')
- ))
- image_model.eval()

- …

- # …
- spiritual_reflections = {
-     …
  }

- # …
- def …
-     …
-     top_emotion = emotions[0]["label"]
-     reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur'an 2:153)")
-     return top_emotion, reflection
-
- # Image input handler
- def analyze_image_emotion(image):
-     img_tensor = transform(image).unsqueeze(0)
      with torch.no_grad():
-         logits = …
-     …
-     top_idx = torch.argmax(probs).item()
-     top_emotion = image_emotions[top_idx]
-     reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur'an 2:153)")
-     return top_emotion, reflection

- …
-     gr.Markdown("## 🧠 EmotionLens – AI-Powered Emotional Intelligence with Islamic Wisdom")
-     with gr.Tabs():
-         with gr.TabItem("Single Input"):
-             with gr.Row():
-                 text_input = gr.Textbox(lines=2, placeholder="Enter text here...")
-                 img_input = gr.Image(type="pil")

-             …
-             submit_btn = gr.Button("Submit")

- …

  demo.launch()
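Only fragments of the removed image loader survive above, but the checkpoint URL, the map_location argument, and the trailing image_model.eval() suggest it built a torchvision resnet50 and loaded the FER2013 weights by hand, roughly as in this sketch (the constructor call and class count are assumptions, not preserved in the diff):

import torch
from torchvision.models import resnet50

# Hypothetical reconstruction: only the URL, map_location, and eval() call
# are preserved in the removed lines above.
image_model = resnet50(num_classes=7)  # FER2013 defines 7 emotion classes (assumption)
image_model.load_state_dict(torch.hub.load_state_dict_from_url(
    'https://huggingface.co/Celal11/resnet-50-finetuned-FER2013-0.001/resolve/main/pytorch_model.bin',
    map_location=torch.device('cpu')
))
image_model.eval()

Loading a transformers-saved pytorch_model.bin into a plain torchvision model is fragile, since the state-dict keys rarely line up, which is presumably why this commit switches to AutoImageProcessor and AutoModelForImageClassification below.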
  import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
+ from torch.nn.functional import sigmoid
  import torch
+ from PIL import Image

+ # Load text emotion model
+ model_name = "SamLowe/roberta-base-go_emotions"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForSequenceClassification.from_pretrained(model_name)

+ # Load image emotion model
+ image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
+ image_processor = AutoImageProcessor.from_pretrained(image_model_name)
+ image_model = AutoModelForImageClassification.from_pretrained(image_model_name)

+ # Analyze image emotion using processor and model
+ def analyze_image_emotion(image):
+     if image is None:
+         return "No image provided."
+     inputs = image_processor(images=image, return_tensors="pt")
+     with torch.no_grad():
+         logits = image_model(**inputs).logits
+     probs = torch.nn.functional.softmax(logits, dim=1)[0]
+     pred_idx = torch.argmax(probs).item()
+     label = image_model.config.id2label[pred_idx]
+     score = probs[pred_idx].item()
+     return f"{label} ({score:.2f})"

+ # Emotion label to icon mapping (subset)
+ emotion_icons = {
+     "admiration": "😍",
+     "amusement": "😂",
+     "anger": "😡",
+     "annoyance": "😒",
+     "approval": "👍",
+     "caring": "😊",
+     "confusion": "🤔",
+     "curiosity": "😮",
+     "desire": "🤤",
+     "disappointment": "😞",
+     "disapproval": "👎",
+     "disgust": "🤮",
+     "embarrassment": "😳",
+     "excitement": "😆",
+     "fear": "😱",
+     "gratitude": "🙏",
+     "grief": "😭",
+     "joy": "😄",
+     "love": "❤️",
+     "nervousness": "🤧",
+     "optimism": "🙂",
+     "pride": "😎",
+     "realization": "🤯",
+     "relief": "😌",
+     "remorse": "😔",
+     "sadness": "😢",
+     "surprise": "😲",
+     "neutral": "😐"
  }

+ # Analyze text emotion
+ def get_emotions(text, threshold):
+     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
      with torch.no_grad():
+         logits = model(**inputs).logits
+     probs = sigmoid(logits)[0]

+     # Pair each label with its own probability so the reported score matches the label
+     pairs = [(model.config.id2label[i], p.item()) for i, p in enumerate(probs) if p > threshold]
+     icons = [f"{emotion_icons.get(label, '')} {label.capitalize()} ({score:.2f})" for label, score in pairs]

+     return ", ".join(icons) if icons else "No strong emotion detected."

+ # Combined analysis
+ def analyze_combined(text, threshold, image):
+     text_result = get_emotions(text, threshold)
+     image_result = analyze_image_emotion(image)
+     return text_result, image_result

+ # Gradio UI
+ custom_css = """
+ body {
+     background: linear-gradient(to right, #f9f9f9, #d4ecff);
+     font-family: 'Segoe UI', sans-serif;
+ }
+ .gr-button {
+     background-color: #007BFF !important;
+     color: white !important;
+     border-radius: 8px !important;
+     font-weight: bold;
+ }
+ .gr-button:hover {
+     background-color: #0056b3 !important;
+ }
+ .gr-textbox {
+     border-radius: 8px !important;
+     border: 1px solid #ccc !important;
+     padding: 10px !important;
+ }
+ .output-textbox {
+     font-size: 1.5rem;
+     font-weight: bold;
+     color: #333;
+     background-color: #f1f9ff;
+     border-radius: 8px;
+     padding: 10px;
+     border: 1px solid #007BFF;
+ }
+ """

+ demo = gr.Interface(
+     fn=analyze_combined,
+     inputs=[
+         gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
+         gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold"),
+         gr.Image(type="pil", label="Upload Face Photo")
+     ],
+     outputs=[
+         gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
+         gr.Textbox(label="Detected Photo Emotion", elem_classes=["output-textbox"])
+     ],
+     title="🥰 Multi-Modal Emotion Detector",
+     description="Analyze emotion from both text and a facial photo. Adjust the threshold for text emotion sensitivity.",
+     theme="default",
+     css=custom_css
+ )

  demo.launch()
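One behavioral change worth noting: the old code asked the text pipeline for its top 3 labels, while the new get_emotions applies an element-wise sigmoid and keeps every label whose score clears the slider threshold. That matches the multi-label design of go_emotions, where a sentence can be both grateful and surprised, whereas the FER2013 image head stays single-label via softmax and argmax. A minimal sketch of exercising the two handlers outside the UI, assuming the models have downloaded; "face.jpg" is a placeholder path:

from PIL import Image

# Text: multi-label, so any number of emotions can clear the 0.3 threshold.
print(get_emotions("I can't believe it worked, thank you so much!", 0.3))

# Image: single-label argmax over the seven FER2013 classes.
print(analyze_image_emotion(Image.open("face.jpg")))  # placeholder path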