fajarah committed
Commit 1c3cba7 · verified · 1 Parent(s): 04db36e

Update app.py

Files changed (1)
  1. app.py +73 -72
app.py CHANGED
@@ -1,86 +1,87 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
- from torch.nn.functional import sigmoid
- import torch
  from PIL import Image
-
- # Load models
- text_model_name = "SamLowe/roberta-base-go_emotions"
- tokenizer = AutoTokenizer.from_pretrained(text_model_name)
- text_model = AutoModelForSequenceClassification.from_pretrained(text_model_name)
-
- image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
- image_processor = AutoImageProcessor.from_pretrained(image_model_name)
- image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
-
- # Analyze function
- def analyze(text, threshold, image):
-     result_html = ""
-
-     if text:
-         inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-         with torch.no_grad():
-             logits = text_model(**inputs).logits
-         probs = sigmoid(logits)[0]
-         top = torch.topk(probs, k=3)
-         top_emotions = [f"<li>{text_model.config.id2label[i]} ({probs[i]:.2f})</li>" for i in top.indices]
-         result_html += f"<div class='notion-card fade-in slide-up'><h3>📝 Text Emotion</h3><ul>{''.join(top_emotions)}</ul></div>"
-
-     if image:
-         inputs_image = image_processor(images=image, return_tensors="pt")
-         with torch.no_grad():
-             logits_img = image_model(**inputs_image).logits
-         probs_img = torch.nn.functional.softmax(logits_img, dim=1)[0]
-         img_idx = torch.argmax(probs_img).item()
-         img_label = image_model.config.id2label[img_idx]
-         confidence = probs_img[img_idx].item()
-         result_html += f"<div class='notion-card fade-in slide-up'><h3>🖼️ Image Emotion</h3><p>{img_label} ({confidence:.2f})</p></div>"
-
-     return result_html or "<div class='notion-card fade-in'><p>No input provided.</p></div>"
-
- # CSS
- custom_css = """
- @keyframes fadeInPop {
-     0% { opacity: 0; transform: scale(0.95); }
-     100% { opacity: 1; transform: scale(1); }
- }
- .fade-in {
-     animation: fadeInPop 0.6s ease-out both;
- }
- .slide-up {
-     animation: slideInUp 0.6s ease-out both;
- }
- @keyframes slideInUp {
-     from { transform: translateY(20px); opacity: 0; }
-     to { transform: translateY(0); opacity: 1; }
- }
- .notion-card {
-     background: white;
-     border-radius: 12px;
-     border: 1px solid #e5e7eb;
-     padding: 16px;
-     margin: 16px auto;
-     box-shadow: 0 6px 20px rgba(0,0,0,0.05);
-     max-width: 600px;
- }
- body {
-     background: #f9fafb;
-     font-family: 'Inter', sans-serif;
- }
- """
-
- # UI
- with gr.Blocks(css=custom_css) as demo:
-     gr.Markdown("# 🧠 EmotionLens")
-     gr.Markdown("Detect emotion from text and face image.")
-
-     with gr.Row():
-         text_input = gr.Textbox(label="Your Text", lines=3, placeholder="How do you feel?")
-         image_input = gr.Image(type="pil", label="Upload Face Photo")
-     threshold_slider = gr.Slider(0.1, 0.9, value=0.3, label="Threshold")
-     analyze_btn = gr.Button("Analyze")
-     output = gr.HTML()
-
-     analyze_btn.click(fn=analyze, inputs=[text_input, threshold_slider, image_input], outputs=output)
-
  demo.launch()

  import gradio as gr
+ from transformers import pipeline, AutoImageProcessor, AutoModelForImageClassification
  from PIL import Image
+ import torch
+ import torch.nn.functional as F
+
+ # Load text emotion detection pipeline
+ text_emotion_pipeline = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=3)
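+ # For a single string input, the returned shape is a nested list, e.g.
+ # (illustrative scores, not real outputs; the exact nesting can vary
+ # between transformers versions):
+ #   [[{'label': 'sadness', 'score': 0.92},
+ #     {'label': 'grief', 'score': 0.03},
+ #     {'label': 'disappointment', 'score': 0.02}]]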
 
 
+
+ # Load image emotion detection model. This FER2013 checkpoint was trained
+ # with transformers' ResNetForImageClassification, so it is loaded via the
+ # Auto classes; a plain torchvision resnet50 has different state-dict key
+ # names, and load_state_dict on the raw pytorch_model.bin would fail.
+ image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
+ image_processor = AutoImageProcessor.from_pretrained(image_model_name)
+ image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
+ image_model.eval()
+
+ # FER2013 classes: angry, disgust, fear, happy, sad, surprise, neutral.
+ # The AutoImageProcessor handles resizing and normalisation, so no manual
+ # torchvision transform is needed. Map the model's (assumed) label names
+ # onto the reflection keys below, lower-casing first since casing in the
+ # checkpoint's id2label may vary.
+ fer_to_reflection = {
+     'angry': 'Anger',
+     'disgust': 'Disgust',
+     'fear': 'Fear',
+     'happy': 'Joy',
+     'sad': 'Sadness',
+     'surprise': 'Surprise',
+     'neutral': 'Neutral',
+ }
+
+ # Predefined reflections (simplified mapping)
+ spiritual_reflections = {
+     "Sadness": "For indeed, with hardship [will be] ease. (Qur’an 94:6)",
+     "Joy": "Say, ‘In the bounty of Allah and in His mercy – in that let them rejoice.’ (Qur’an 10:58)",
+     "Fear": "And whoever fears Allah – He will make for him a way out. (Qur’an 65:2)",
+     "Anger": "Those who restrain anger and who pardon the people – Allah loves the doers of good. (Qur’an 3:134)",
+     "Surprise": "They plan, and Allah plans. Surely, Allah is the best of planners. (Qur’an 8:30)",
+     "Disgust": "Indeed, the most noble of you in the sight of Allah is the most righteous. (Qur’an 49:13)",
+     "Neutral": "Verily, in the remembrance of Allah do hearts find rest. (Qur’an 13:28)"
+ }
+
+ # Text input handler
+ def analyze_text_emotion(text):
+     emotions = text_emotion_pipeline(text)
+     # With top_k=3 the pipeline nests predictions per input; unwrap if needed.
+     if emotions and isinstance(emotions[0], list):
+         emotions = emotions[0]
+     # go_emotions labels are lower case ("sadness", "joy", ...); capitalise
+     # them to match the reflection keys above.
+     top_emotion = emotions[0]["label"].capitalize()
+     reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
+     return top_emotion, reflection
+
+ # Image input handler
+ def analyze_image_emotion(image):
+     inputs = image_processor(images=image, return_tensors="pt")
+     with torch.no_grad():
+         logits = image_model(**inputs).logits
+     probs = F.softmax(logits, dim=1)[0]
+     top_idx = torch.argmax(probs).item()
+     label = image_model.config.id2label[top_idx]
+     top_emotion = fer_to_reflection.get(label.lower(), label)
+     reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
+     return top_emotion, reflection
+
+ # Gradio UI
+ with gr.Blocks() as demo:
+     gr.Markdown("## 🧠 EmotionLens: AI-Powered Emotional Intelligence with Islamic Wisdom")
+     with gr.Tabs():
+         with gr.TabItem("Single Input"):
+             with gr.Row():
+                 text_input = gr.Textbox(lines=2, placeholder="Enter text here...")
+                 img_input = gr.Image(type="pil")
+
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+
+             with gr.Column():
+                 emotion_output = gr.Textbox(label="Emotion Detection")
+                 reflection_output = gr.Textbox(label="Spiritual Reflection")
+
+     def combined_handler(text, img):
+         if text:
+             return analyze_text_emotion(text)
+         elif img is not None:
+             return analyze_image_emotion(img)
+         else:
+             return "No input", "Please provide text or image."
+
+     submit_btn.click(
+         fn=combined_handler,
+         inputs=[text_input, img_input],
+         outputs=[emotion_output, reflection_output]
+     )
+
  demo.launch()
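
For a quick sanity check of the two handlers outside the Gradio UI (a minimal sketch: "sample.jpg" is a placeholder path, and the printed labels are illustrative, not guaranteed outputs):

    from PIL import Image

    # Text path: returns (emotion, reflection)
    emotion, reflection = analyze_text_emotion("I feel so alone today.")
    print(emotion, "->", reflection)

    # Image path: any face photo (placeholder filename)
    img = Image.open("sample.jpg")
    emotion, reflection = analyze_image_emotion(img)
    print(emotion, "->", reflection)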