fajarah committed on
Commit
69f23c0
·
verified ·
1 Parent(s): 49238a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -11
app.py CHANGED
@@ -14,18 +14,30 @@ image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
14
  image_processor = AutoImageProcessor.from_pretrained(image_model_name)
15
  image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
16
 
 
 
 
 
 
 
 
 
 
 
 
17
  # Analyze image emotion using processor and model
18
  def analyze_image_emotion(image):
19
  if image is None:
20
- return "No image provided."
21
  inputs = image_processor(images=image, return_tensors="pt")
22
  with torch.no_grad():
23
  logits = image_model(**inputs).logits
24
  probs = torch.nn.functional.softmax(logits, dim=1)[0]
25
  pred_idx = torch.argmax(probs).item()
26
- label = image_model.config.id2label[pred_idx]
27
  score = probs[pred_idx].item()
28
- return f"{label} ({score:.2f})"
 
29
 
30
  # Emotion label to icon mapping (subset)
31
  emotion_icons = {
@@ -66,16 +78,22 @@ def get_emotions(text, threshold):
66
  logits = model(**inputs).logits
67
  probs = sigmoid(logits)[0]
68
 
 
 
 
 
 
69
  labels = [model.config.id2label[i] for i, p in enumerate(probs) if p > threshold]
70
  icons = [emotion_icons.get(label, '') + ' ' + label.capitalize() + f" ({probs[i]:.2f})" for i, label in enumerate(labels)]
71
 
72
- return ", ".join(icons) if icons else "No strong emotion detected."
 
73
 
74
  # Combined analysis
75
  def analyze_combined(text, threshold, image):
76
- text_result = get_emotions(text, threshold)
77
- image_result = analyze_image_emotion(image)
78
- return text_result, image_result
79
 
80
  # Gradio UI
81
  custom_css = """
@@ -117,12 +135,14 @@ demo = gr.Interface(
117
  ],
118
  outputs=[
119
  gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
120
- gr.Textbox(label="Detected Photo Emotion", elem_classes=["output-textbox"])
 
 
121
  ],
122
- title="🥰 Multi-Modal Emotion Detector",
123
- description="Analyze emotion from both text and a facial photo. Adjust the threshold for text emotion sensitivity.",
124
  theme="default",
125
  css=custom_css
126
  )
127
 
128
- demo.launch()
 
14
  image_processor = AutoImageProcessor.from_pretrained(image_model_name)
15
  image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
16
 
17
+ # Islamic advice mapping based on emotion
18
+ islamic_advice = {
19
+ "sadness": "\"Do not grieve; indeed Allah is with us.\"\n\n**Arabic:** لَا تَحْزَنْ إِنَّ اللَّهَ مَعَنَا\n(Qur'an, At-Tawbah 9:40)",
20
+ "joy": "\"If you are grateful, I will surely increase your favor.\"\n\n**Arabic:** لَئِن شَكَرْتُمْ لَأَزِيدَنَّكُمْ\n(Qur'an, Ibrahim 14:7)",
21
+ "fear": "\"Sufficient for us is Allah, and [He is] the best Disposer of affairs.\"\n\n**Arabic:** حَسْبُنَا اللَّهُ وَنِعْمَ الْوَكِيلُ\n(Qur'an, Al Imran 3:173)",
22
+ "anger": "\"The strong is not the one who overcomes people by his strength, but the strong is the one who controls himself while in anger.\"\n\n(Hadith, Sahih al-Bukhari 6114)",
23
+ "confusion": "\"Seek help through patience and prayer.\"\n\n**Arabic:** وَاسْتَعِينُوا بِالصَّبْرِ وَالصَّلَاةِ\n(Qur'an, Al-Baqarah 2:45)",
24
+ "love": "\"Indeed, those who have believed and done righteous deeds - the Most Merciful will appoint for them affection.\"\n\n**Arabic:** سَيَجْعَلُ لَهُمُ الرَّحْمَٰنُ وُدًّا\n(Qur'an, Maryam 19:96)",
25
+ "neutral": "May Allah always guide your heart in every situation."
26
+ }
27
+
28
  # Analyze image emotion using processor and model
29
  def analyze_image_emotion(image):
30
  if image is None:
31
+ return "No image provided.", ""
32
  inputs = image_processor(images=image, return_tensors="pt")
33
  with torch.no_grad():
34
  logits = image_model(**inputs).logits
35
  probs = torch.nn.functional.softmax(logits, dim=1)[0]
36
  pred_idx = torch.argmax(probs).item()
37
+ label = image_model.config.id2label[pred_idx].lower()
38
  score = probs[pred_idx].item()
39
+ islamic = islamic_advice.get(label, "May Allah always guide your heart.")
40
+ return f"{label.capitalize()} ({score:.2f})", islamic
41
 
42
  # Emotion label to icon mapping (subset)
43
  emotion_icons = {
 
78
  logits = model(**inputs).logits
79
  probs = sigmoid(logits)[0]
80
 
81
+ top_idx = torch.argmax(probs).item()
82
+ top_label = model.config.id2label[top_idx].lower()
83
+ top_score = probs[top_idx].item()
84
+ islamic = islamic_advice.get(top_label, "May Allah always guide your heart.")
85
+
86
  labels = [model.config.id2label[i] for i, p in enumerate(probs) if p > threshold]
87
  icons = [emotion_icons.get(label, '') + ' ' + label.capitalize() + f" ({probs[i]:.2f})" for i, label in enumerate(labels)]
88
 
89
+ result = ", ".join(icons) if icons else "No strong emotion detected."
90
+ return result, islamic
91
 
92
  # Combined analysis
93
  def analyze_combined(text, threshold, image):
94
+ text_result, text_tip = get_emotions(text, threshold)
95
+ image_result, image_tip = analyze_image_emotion(image)
96
+ return text_result, text_tip, image_result, image_tip
97
 
98
  # Gradio UI
99
  custom_css = """
 
135
  ],
136
  outputs=[
137
  gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
138
+ gr.Textbox(label="Quranic/Hadith Advice (Text)", elem_classes=["output-textbox"]),
139
+ gr.Textbox(label="Detected Photo Emotion", elem_classes=["output-textbox"]),
140
+ gr.Textbox(label="Quranic/Hadith Advice (Image)", elem_classes=["output-textbox"])
141
  ],
142
+ title="🥰 Multi-Modal Emotion Detector with Islamic Insight",
143
+ description="Analyze emotion from both text and a facial photo. Then receive inspirational Islamic advice based on your mood.",
144
  theme="default",
145
  css=custom_css
146
  )
147
 
148
+ demo.launch()