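"""EmotionLens: a Gradio demo that detects emotion from free text
(SamLowe/roberta-base-go_emotions, multi-label) and from a face photo
(Celal11/resnet-50-finetuned-FER2013-0.001, single-label)."""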
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
from torch.nn.functional import sigmoid
import torch

# Load models
text_model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(text_model_name)
text_model = AutoModelForSequenceClassification.from_pretrained(text_model_name)

image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
image_processor = AutoImageProcessor.from_pretrained(image_model_name)
image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
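# Note: FER2013 uses the seven basic emotion classes (angry, disgust, fear,
# happy, sad, surprise, neutral); the exact id2label mapping is read from the
# checkpoint's config below rather than hard-coded here.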

# Analyze function
def analyze(text, threshold, image):
    result_html = ""

    if text:
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = text_model(**inputs).logits
        # go_emotions is multi-label, so sigmoid gives an independent score per emotion
        probs = sigmoid(logits)[0]
        top = torch.topk(probs, k=3)
        # Keep the top-3 emotions that clear the user-set threshold;
        # fall back to the single best match if none do.
        top_emotions = [
            f"<li>{text_model.config.id2label[i.item()]} ({probs[i]:.2f})</li>"
            for i in top.indices
            if probs[i] >= threshold
        ]
        if not top_emotions:
            best = top.indices[0].item()
            top_emotions = [f"<li>{text_model.config.id2label[best]} ({probs[best]:.2f})</li>"]
        result_html += f"<div class='notion-card fade-in slide-up'><h3>📝 Text Emotion</h3><ul>{''.join(top_emotions)}</ul></div>"

    if image is not None:
        inputs_image = image_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            logits_img = image_model(**inputs_image).logits
        # FER2013 is single-label, so softmax over the class dimension yields a proper distribution
        probs_img = torch.nn.functional.softmax(logits_img, dim=1)[0]
        img_idx = torch.argmax(probs_img).item()
        img_label = image_model.config.id2label[img_idx]
        confidence = probs_img[img_idx].item()
        result_html += f"<div class='notion-card fade-in slide-up'><h3>🖼️ Image Emotion</h3><p>{img_label} ({confidence:.2f})</p></div>"

    return result_html or "<div class='notion-card fade-in'><p>No input provided.</p></div>"
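# e.g. analyze("I can't stop smiling today!", 0.3, None) returns a single HTML
# card listing the top text emotions whose scores clear the threshold.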

# CSS
custom_css = """
@keyframes fadeInPop {
  0% { opacity: 0; transform: scale(0.95); }
  100% { opacity: 1; transform: scale(1); }
}
.fade-in {
  animation: fadeInPop 0.6s ease-out both;
}
.slide-up {
  animation: slideInUp 0.6s ease-out both;
}
@keyframes slideInUp {
  from { transform: translateY(20px); opacity: 0; }
  to { transform: translateY(0); opacity: 1; }
}
.notion-card {
  background: white;
  border-radius: 12px;
  border: 1px solid #e5e7eb;
  padding: 16px;
  margin: 16px auto;
  box-shadow: 0 6px 20px rgba(0,0,0,0.05);
  max-width: 600px;
}
body {
  background: #f9fafb;
  font-family: 'Inter', sans-serif;
}
"""

# UI
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# 🧠 EmotionLens")
    gr.Markdown("Detect emotion from text and face image.")

    with gr.Row():
        text_input = gr.Textbox(label="Your Text", lines=3, placeholder="How do you feel?")
        image_input = gr.Image(type="pil", label="Upload Face Photo")
    threshold_slider = gr.Slider(0.1, 0.9, value=0.3, step=0.05, label="Confidence Threshold")
    analyze_btn = gr.Button("Analyze")
    output = gr.HTML()

    analyze_btn.click(fn=analyze, inputs=[text_input, threshold_slider, image_input], outputs=output)

demo.launch()
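# launch() serves the app on a local URL; demo.launch(share=True) would
# additionally create a temporary public link (a standard Gradio option).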