fajarah committed
Commit 9f386ff · verified · 1 Parent(s): e204797

Update app.py

Files changed (1)
  1. app.py +112 -71
app.py CHANGED
@@ -1,87 +1,128 @@
 import gradio as gr
-from transformers import pipeline
-from PIL import Image
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
+from torch.nn.functional import sigmoid
 import torch
-from torchvision import transforms
-from torchvision.models import resnet50
-import torch.nn.functional as F
-
-# Load text emotion detection pipeline
-text_emotion_pipeline = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=3)
+from PIL import Image
 
-# Load image emotion detection model
-image_model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=False)
-image_model.fc = torch.nn.Linear(image_model.fc.in_features, 7)
-image_model.load_state_dict(torch.hub.load_state_dict_from_url(
-    'https://huggingface.co/Celal11/resnet-50-finetuned-FER2013-0.001/resolve/main/pytorch_model.bin',
-    map_location=torch.device('cpu')
-))
-image_model.eval()
+# Load text emotion model
+model_name = "SamLowe/roberta-base-go_emotions"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
 
-image_emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
+# Load image emotion model
+image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
+image_processor = AutoImageProcessor.from_pretrained(image_model_name)
+image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
 
-transform = transforms.Compose([
-    transforms.Resize((224, 224)),
-    transforms.ToTensor()
-])
+# Analyze image emotion using processor and model
+def analyze_image_emotion(image):
+    if image is None:
+        return "No image provided."
+    inputs = image_processor(images=image, return_tensors="pt")
+    with torch.no_grad():
+        logits = image_model(**inputs).logits
+    probs = torch.nn.functional.softmax(logits, dim=1)[0]
+    pred_idx = torch.argmax(probs).item()
+    label = image_model.config.id2label[pred_idx]
+    score = probs[pred_idx].item()
+    return f"{label} ({score:.2f})"
 
-# Predefined reflections (simplified mapping)
-spiritual_reflections = {
-    "Sadness": "For indeed, with hardship [will be] ease. (Qur’an 94:6)",
-    "Joy": "Say, ‘In the bounty of Allah and in His mercy – in that let them rejoice.’ (Qur’an 10:58)",
-    "Fear": "And whoever fears Allah – He will make for him a way out. (Qur’an 65:2)",
-    "Anger": "Those who restrain anger and who pardon the people – Allah loves the doers of good. (Qur’an 3:134)",
-    "Surprise": "They plan, and Allah plans. Surely, Allah is the best of planners. (Qur’an 8:30)",
-    "Disgust": "Indeed, the most noble of you in the sight of Allah is the most righteous. (Qur’an 49:13)",
-    "Neutral": "Verily, in the remembrance of Allah do hearts find rest. (Qur’an 13:28)"
+# Emotion label to icon mapping (subset)
+emotion_icons = {
+    "admiration": "😍",
+    "amusement": "😅",
+    "anger": "😡",
+    "annoyance": "😑",
+    "approval": "👍",
+    "caring": "💗",
+    "confusion": "🤔",
+    "curiosity": "😮",
+    "desire": "🤤",
+    "disappointment": "😞",
+    "disapproval": "👎",
+    "disgust": "🤮",
+    "embarrassment": "😳",
+    "excitement": "🎉",
+    "fear": "😱",
+    "gratitude": "🙏",
+    "grief": "😭",
+    "joy": "😃",
+    "love": "❤️",
+    "nervousness": "🤧",
+    "optimism": "😊",
+    "pride": "😎",
+    "realization": "🤯",
+    "relief": "😌",
+    "remorse": "😔",
+    "sadness": "😢",
+    "surprise": "😲",
+    "neutral": "😐"
 }
 
-# Text input handler
-def analyze_text_emotion(text):
-    emotions = text_emotion_pipeline(text)
-    top_emotion = emotions[0]["label"]
-    reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
-    return top_emotion, reflection
-
-# Image input handler
-def analyze_image_emotion(image):
-    img_tensor = transform(image).unsqueeze(0)
+# Analyze text emotion
+def get_emotions(text, threshold):
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
-        logits = image_model(img_tensor)
-    probs = F.softmax(logits, dim=1)[0]
-    top_idx = torch.argmax(probs).item()
-    top_emotion = image_emotions[top_idx]
-    reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
-    return top_emotion, reflection
+        logits = model(**inputs).logits
+    probs = sigmoid(logits)[0]
 
-# Gradio UI
-with gr.Blocks() as demo:
-    gr.Markdown("## 🧠 EmotionLens — AI-Powered Emotional Intelligence with Islamic Wisdom")
-    with gr.Tabs():
-        with gr.TabItem("Single Input"):
-            with gr.Row():
-                text_input = gr.Textbox(lines=2, placeholder="Enter text here...")
-                img_input = gr.Image(type="pil")
+    hits = [(model.config.id2label[i], p.item()) for i, p in enumerate(probs) if p > threshold]
+    icons = [f"{emotion_icons.get(label, '')} {label.capitalize()} ({score:.2f})" for label, score in hits]
 
-            with gr.Row():
-                submit_btn = gr.Button("Submit")
+    return ", ".join(icons) if icons else "No strong emotion detected."
 
-            with gr.Column():
-                emotion_output = gr.Textbox(label="Emotion Detection")
-                reflection_output = gr.Textbox(label="Spiritual Reflection")
+# Combined analysis
+def analyze_combined(text, threshold, image):
+    text_result = get_emotions(text, threshold)
+    image_result = analyze_image_emotion(image)
+    return text_result, image_result
 
-    def combined_handler(text, img):
-        if text:
-            return analyze_text_emotion(text)
-        elif img:
-            return analyze_image_emotion(img)
-        else:
-            return "No input", "Please provide text or image."
+# Gradio UI
+custom_css = """
+body {
+    background: linear-gradient(to right, #f9f9f9, #d4ecff);
+    font-family: 'Segoe UI', sans-serif;
+}
+.gr-button {
+    background-color: #007BFF !important;
+    color: white !important;
+    border-radius: 8px !important;
+    font-weight: bold;
+}
+.gr-button:hover {
+    background-color: #0056b3 !important;
+}
+.gr-textbox {
+    border-radius: 8px !important;
+    border: 1px solid #ccc !important;
+    padding: 10px !important;
+}
+.output-textbox {
+    font-size: 1.5rem;
+    font-weight: bold;
+    color: #333;
+    background-color: #f1f9ff;
+    border-radius: 8px;
+    padding: 10px;
+    border: 1px solid #007BFF;
+}
+"""
 
-    submit_btn.click(
-        fn=combined_handler,
-        inputs=[text_input, img_input],
-        outputs=[emotion_output, reflection_output]
-    )
+demo = gr.Interface(
+    fn=analyze_combined,
+    inputs=[
+        gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
+        gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold"),
+        gr.Image(type="pil", label="Upload Face Photo")
+    ],
+    outputs=[
+        gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
+        gr.Textbox(label="Detected Photo Emotion", elem_classes=["output-textbox"])
+    ],
+    title="🥰 Multi-Modal Emotion Detector",
+    description="Analyze emotion from both text and a facial photo. Adjust the threshold for text emotion sensitivity.",
+    theme="default",
+    css=custom_css
+)
 
 demo.launch()
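
The new text path uses sigmoid plus a per-label threshold rather than softmax because go_emotions is a multi-label dataset (28 labels, including neutral), so several emotions can fire at once. The two handlers can also be exercised directly, without launching the Gradio UI. A minimal sketch, assuming the app's dependencies are installed and the models have downloaded; "face.jpg" is a hypothetical local test image, and 0.3 mirrors the slider's default threshold:

# Hypothetical smoke test for the functions defined in the new app.py.
from PIL import Image

print(get_emotions("I can't believe I won, this is amazing!", 0.3))  # e.g. "🎉 Excitement (0.71), 😲 Surprise (0.44)"
print(analyze_image_emotion(Image.open("face.jpg")))                 # returns "Label (score)" for the top FER2013 class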