Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,16 @@
|
|
1 |
import gradio as gr
|
2 |
-
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
3 |
from torch.nn.functional import sigmoid
|
4 |
import torch
|
5 |
|
6 |
-
# Load
|
7 |
model_name = "SamLowe/roberta-base-go_emotions"
|
8 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
9 |
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
10 |
|
|
|
|
|
|
|
11 |
# Emotion label to icon mapping (subset)
|
12 |
emotion_icons = {
|
13 |
"admiration": "π",
|
@@ -40,7 +43,7 @@ emotion_icons = {
|
|
40 |
"neutral": "π"
|
41 |
}
|
42 |
|
43 |
-
#
|
44 |
def get_emotions(text, threshold):
|
45 |
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
|
46 |
with torch.no_grad():
|
@@ -52,6 +55,20 @@ def get_emotions(text, threshold):
|
|
52 |
|
53 |
return ", ".join(icons) if icons else "No strong emotion detected."
|
54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
# Gradio UI
|
56 |
custom_css = """
|
57 |
body {
|
@@ -84,14 +101,18 @@ body {
|
|
84 |
"""
|
85 |
|
86 |
demo = gr.Interface(
|
87 |
-
fn=
|
88 |
inputs=[
|
89 |
gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
|
90 |
-
gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold")
|
|
|
|
|
|
|
|
|
|
|
91 |
],
|
92 |
-
|
93 |
-
|
94 |
-
description="Enter a sentence or paragraph to detect multiple emotions present in the text. Adjust the threshold to be more or less sensitive.",
|
95 |
theme="default",
|
96 |
css=custom_css
|
97 |
)
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
|
3 |
from torch.nn.functional import sigmoid
|
4 |
import torch
|
5 |
|
6 |
+
# Load text emotion model (multi-label GoEmotions classifier).
# NOTE(review): both models download at import time — presumably fine for a
# Gradio Space, but confirm if this module is ever imported elsewhere.
model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Load image emotion classification pipeline (FER+ facial-expression model),
# used by analyze_image_emotion() below.
image_emotion_pipeline = pipeline("image-classification", model="nateraw/ferplus-emo-resnet34")
|
13 |
+
|
14 |
# Emotion label to icon mapping (subset)
|
15 |
emotion_icons = {
|
16 |
"admiration": "π",
|
|
|
43 |
"neutral": "π"
|
44 |
}
|
45 |
|
46 |
+
# Analyze text emotion
|
47 |
def get_emotions(text, threshold):
|
48 |
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
|
49 |
with torch.no_grad():
|
|
|
55 |
|
56 |
return ", ".join(icons) if icons else "No strong emotion detected."
|
57 |
|
58 |
+
def analyze_image_emotion(image):
    """Classify the dominant facial emotion in *image*.

    Returns a display string "<label> (<score>)" for the pipeline's
    top-ranked prediction, or a fallback message when no image was given.
    """
    # Guard clause: the Gradio Image input is optional.
    if image is None:
        return "No image provided."
    # The pipeline returns predictions ordered by confidence; keep the best.
    best = image_emotion_pipeline(image)[0]
    return f"{best['label']} ({best['score']:.2f})"
|
65 |
+
|
66 |
+
def analyze_combined(text, threshold, image):
    """Run text and image emotion analysis for one submission.

    Returns a ``(text_emotions, image_emotion)`` pair of display strings,
    matching the two Gradio output textboxes in order.
    """
    return (
        get_emotions(text, threshold),
        analyze_image_emotion(image),
    )
|
71 |
+
|
72 |
# Gradio UI
|
73 |
custom_css = """
|
74 |
body {
|
|
|
101 |
"""
|
102 |
|
103 |
# Gradio UI: one Interface wiring the combined analyzer to three inputs
# (free text, sensitivity slider, face photo) and two output textboxes.
demo = gr.Interface(
    # analyze_combined returns (text_result, image_result), matching `outputs`.
    fn=analyze_combined,
    inputs=[
        gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
        # Passed through to get_emotions() as its `threshold` argument.
        gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold"),
        # type="filepath" delivers the upload as a path string to the pipeline.
        gr.Image(type="filepath", label="Upload Face Photo")
    ],
    outputs=[
        gr.Textbox(label="Detected Text Emotions", elem_classes=["output-textbox"]),
        gr.Textbox(label="Detected Photo Emotion", elem_classes=["output-textbox"])
    ],
    # NOTE(review): title prefix looks mojibake-garbled (likely an emoji) —
    # confirm the intended glyph before changing this runtime string.
    title="π₯° Multi-Modal Emotion Detector",
    description="Analyze emotion from both text and a facial photo. Adjust the threshold for text emotion sensitivity.",
    theme="default",
    # custom_css is the page-level stylesheet defined above (truncated in this view).
    css=custom_css
)
|