fajarah committed on
Commit 04db36e · verified · 1 Parent(s): 859f167

Update app.py

Files changed (1): app.py +73 -32
app.py CHANGED
@@ -4,42 +4,83 @@ from torch.nn.functional import sigmoid
 import torch
 from PIL import Image
 
-
-
-# Load text emotion model
-model_name = "SamLowe/roberta-base-go_emotions"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
-# Load image emotion model
+# Load models
+text_model_name = "SamLowe/roberta-base-go_emotions"
+tokenizer = AutoTokenizer.from_pretrained(text_model_name)
+text_model = AutoModelForSequenceClassification.from_pretrained(text_model_name)
+
 image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
 image_processor = AutoImageProcessor.from_pretrained(image_model_name)
 image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
 
-# Analyze image emotion using processor and model
-def analyze_image_emotion(image):
-    if image is None:
-        return "No image provided."
-    inputs = image_processor(images=image, return_tensors="pt")
-
-    with torch.no_grad():
-        logits = image_model(**inputs).logits
-    probs = torch.nn.functional.softmax(logits, dim=1)[0]
-    pred_idx = torch.argmax(probs).item()
-    label = image_model.config.id2label[pred_idx]
-    score = probs[pred_idx].item()
-    return f"{label} ({score:.2f})"
-
-# Emotion label to icon mapping (subset)
-emotion_icons = {
-inputs=[
-    gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
-    gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold"),
-    gr.Image(type="pil", label="Upload Face Photo")
-],
-outputs=[
-    gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
-css=custom_css
-}
-
-demo.launch()
+# Analyze function
+def analyze(text, threshold, image):
+    result_html = ""
+
+    if text:
+        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+        with torch.no_grad():
+            logits = text_model(**inputs).logits
+        probs = sigmoid(logits)[0]
+        top = torch.topk(probs, k=3)
+        top_emotions = [f"<li>{text_model.config.id2label[i.item()]} ({probs[i]:.2f})</li>" for i in top.indices]
+        result_html += f"<div class='notion-card fade-in slide-up'><h3>📝 Text Emotion</h3><ul>{''.join(top_emotions)}</ul></div>"
+
+    if image:
+        inputs_image = image_processor(images=image, return_tensors="pt")
+        with torch.no_grad():
+            logits_img = image_model(**inputs_image).logits
+        probs_img = torch.nn.functional.softmax(logits_img, dim=1)[0]
+        img_idx = torch.argmax(probs_img).item()
+        img_label = image_model.config.id2label[img_idx]
+        confidence = probs_img[img_idx].item()
+        result_html += f"<div class='notion-card fade-in slide-up'><h3>🖼️ Image Emotion</h3><p>{img_label} ({confidence:.2f})</p></div>"
+
+    return result_html or "<div class='notion-card fade-in'><p>No input provided.</p></div>"
+
+# CSS
+custom_css = """
+@keyframes fadeInPop {
+    0% { opacity: 0; transform: scale(0.95); }
+    100% { opacity: 1; transform: scale(1); }
+}
+.fade-in {
+    animation: fadeInPop 0.6s ease-out both;
+}
+.slide-up {
+    animation: slideInUp 0.6s ease-out both;
+}
+@keyframes slideInUp {
+    from { transform: translateY(20px); opacity: 0; }
+    to { transform: translateY(0); opacity: 1; }
+}
+.notion-card {
+    background: white;
+    border-radius: 12px;
+    border: 1px solid #e5e7eb;
+    padding: 16px;
+    margin: 16px auto;
+    box-shadow: 0 6px 20px rgba(0,0,0,0.05);
+    max-width: 600px;
+}
+body {
+    background: #f9fafb;
+    font-family: 'Inter', sans-serif;
+}
+"""
+
+# UI
+with gr.Blocks(css=custom_css) as demo:
+    gr.Markdown("# 🧠 EmotionLens")
+    gr.Markdown("Detect emotion from text and face image.")
+
+    with gr.Row():
+        text_input = gr.Textbox(label="Your Text", lines=3, placeholder="How do you feel?")
+        image_input = gr.Image(type="pil", label="Upload Face Photo")
+    threshold_slider = gr.Slider(0.1, 0.9, value=0.3, label="Threshold")
+    analyze_btn = gr.Button("Analyze")
+    output = gr.HTML()
+
+    analyze_btn.click(fn=analyze, inputs=[text_input, threshold_slider, image_input], outputs=output)
+
+demo.launch()
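
A quick way to exercise the new analyze function is to call it directly, without launching the Gradio UI. The snippet below is a minimal sketch, assuming app.py's top-level imports have already run; the path face.jpg is a hypothetical sample photo, not part of the commit:

# Minimal smoke test for analyze(); "face.jpg" is a hypothetical sample photo.
from PIL import Image

print(analyze("I can't believe how well this worked!", 0.3, None))  # text-only card
print(analyze("", 0.3, Image.open("face.jpg")))                     # image-only card
print(analyze("", 0.3, None))                                       # fallback "No input provided." card

Each call returns an HTML string (one notion-card div per modality), which is what the gr.HTML output component renders. Note that the two heads are scored differently: go_emotions is a multi-label model, so each logit is passed through sigmoid independently and the top three scores are listed, while the FER2013 classifier is single-label, so its logits go through softmax and only the argmax class is reported.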