import gradio as gr
from transformers import pipeline
from PIL import Image
import torch
from torchvision import transforms
from torchvision.models import resnet50
import torch.nn.functional as F

# Load text emotion detection pipeline
text_emotion_pipeline = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=3)
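# With top_k=3 the pipeline returns, for a single string, a list of
# {label, score} dicts sorted by score (illustrative shape, not real output):
#   [{'label': 'joy', 'score': 0.91}, {'label': 'optimism', 'score': 0.04}, ...]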

# Load image emotion detection model: a torchvision ResNet-50 with a 7-class
# head fine-tuned on FER2013. This assumes the .bin checkpoint is a plain
# state_dict whose keys match torchvision's ResNet-50 layout.
image_model = resnet50(weights=None)  # architecture only; weights loaded below
image_model.fc = torch.nn.Linear(image_model.fc.in_features, 7)
image_model.load_state_dict(torch.hub.load_state_dict_from_url(
    'https://huggingface.co/Celal11/resnet-50-finetuned-FER2013-0.001/resolve/main/pytorch_model.bin',
    map_location=torch.device('cpu')
))
image_model.eval()
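# Optional sanity check (a sketch; assumes the 3x224x224 input produced by the
# transform below):
# with torch.no_grad():
#     assert image_model(torch.zeros(1, 3, 224, 224)).shape == (1, 7)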

# FER2013 class order (indices 0-6); assumed to match the fine-tune's label order
image_emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])
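
# If the checkpoint was trained with ImageNet statistics (an assumption; the
# model card does not say), the preprocessing would need to match:
# transform = transforms.Compose([
#     transforms.Resize((224, 224)),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# ])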

# Predefined reflections (simplified mapping)
spiritual_reflections = {
    "Sadness": "For indeed, with hardship [will be] ease. (Qur’an 94:6)",
    "Joy": "Say, ‘In the bounty of Allah and in His mercy – in that let them rejoice.’ (Qur’an 10:58)",
    "Fear": "And whoever fears Allah – He will make for him a way out. (Qur’an 65:2)",
    "Anger": "Those who restrain anger and who pardon the people – Allah loves the doers of good. (Qur’an 3:134)",
    "Surprise": "They plan, and Allah plans. Surely, Allah is the best of planners. (Qur’an 8:30)",
    "Disgust": "Indeed, the most noble of you in the sight of Allah is the most righteous. (Qur’an 49:13)",
    "Neutral": "Verily, in the remembrance of Allah do hearts find rest. (Qur’an 13:28)"
}

# Text input handler
def analyze_text_emotion(text):
    emotions = text_emotion_pipeline(text)
    # go_emotions labels are lowercase (e.g. "sadness"); capitalize so they
    # match the reflection keys above
    top_emotion = emotions[0]["label"].capitalize()
    reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
    return top_emotion, reflection
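
# Usage sketch (the returned label depends on the model; the output shown here
# is hypothetical):
#   analyze_text_emotion("Everything feels heavy today")
#   -> ("Sadness", "For indeed, with hardship [will be] ease. (Qur’an 94:6)")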

# Image input handler
def analyze_image_emotion(image):
    img_tensor = transform(image.convert("RGB")).unsqueeze(0)  # ensure 3 channels
    with torch.no_grad():
        logits = image_model(img_tensor)
        probs = F.softmax(logits, dim=1)[0]
    top_idx = torch.argmax(probs).item()
    top_emotion = image_emotions[top_idx]
    reflection = spiritual_reflections.get(top_emotion, "Reflect with patience and prayer. (Qur’an 2:153)")
    return top_emotion, reflection
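
# Usage sketch (file path is hypothetical):
#   emotion, verse = analyze_image_emotion(Image.open("face.jpg"))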

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 EmotionLens — AI-Powered Emotional Intelligence with Islamic Wisdom")
    with gr.Tabs():
        with gr.TabItem("Single Input"):
            with gr.Row():
                text_input = gr.Textbox(lines=2, placeholder="Enter text here...", label="Text")
                img_input = gr.Image(type="pil", label="Image")

            with gr.Row():
                submit_btn = gr.Button("Submit")

            with gr.Column():
                emotion_output = gr.Textbox(label="Emotion Detection")
                reflection_output = gr.Textbox(label="Spiritual Reflection")

            def combined_handler(text, img):
                # Text takes precedence when both inputs are provided
                if text:
                    return analyze_text_emotion(text)
                elif img is not None:
                    return analyze_image_emotion(img)
                else:
                    return "No input", "Please provide text or an image."

            submit_btn.click(
                fn=combined_handler,
                inputs=[text_input, img_input],
                outputs=[emotion_output, reflection_output]
            )

demo.launch()