import gradio as gr
import torch
from PIL import Image
from torch.nn.functional import sigmoid
from transformers import (
    AutoImageProcessor,
    AutoModelForImageClassification,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)
# Load models
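# SamLowe/roberta-base-go_emotions is a multi-label classifier over the 28
# GoEmotions labels, so per-label scores are read with sigmoid (not softmax).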
text_model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(text_model_name)
text_model = AutoModelForSequenceClassification.from_pretrained(text_model_name)
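# ResNet-50 fine-tuned on FER2013, a single-label facial-expression dataset
# with 7 classes (angry, disgust, fear, happy, neutral, sad, surprise).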
image_model_name = "Celal11/resnet-50-finetuned-FER2013-0.001"
image_processor = AutoImageProcessor.from_pretrained(image_model_name)
image_model = AutoModelForImageClassification.from_pretrained(image_model_name)
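# from_pretrained() returns models already in eval mode; inference below still
# runs under torch.no_grad() to avoid building autograd graphs.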
# Analyze function
def analyze(text: str, threshold: float, image: Image.Image | None):
    result_html = ""
    if text:
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = text_model(**inputs).logits
        # Independent sigmoid score per emotion (multi-label).
        probs = sigmoid(logits)[0]
        # Keep every emotion that clears the user-set threshold; fall back to
        # the top 3 so the card is never empty.
        selected = [i for i in range(len(probs)) if probs[i].item() >= threshold]
        if not selected:
            selected = torch.topk(probs, k=3).indices.tolist()
        selected.sort(key=lambda i: probs[i].item(), reverse=True)
        top_emotions = [f"<li>{text_model.config.id2label[i]} ({probs[i]:.2f})</li>" for i in selected]
        result_html += f"<div class='notion-card fade-in slide-up'><h3>📝 Text Emotion</h3><ul>{''.join(top_emotions)}</ul></div>"
    if image is not None:
        inputs_image = image_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            logits_img = image_model(**inputs_image).logits
        # Single-label task: softmax over the 7 FER2013 classes, report the argmax.
        probs_img = torch.nn.functional.softmax(logits_img, dim=1)[0]
        img_idx = torch.argmax(probs_img).item()
        img_label = image_model.config.id2label[img_idx]
        confidence = probs_img[img_idx].item()
        result_html += f"<div class='notion-card fade-in slide-up'><h3>🖼️ Image Emotion</h3><p>{img_label} ({confidence:.2f})</p></div>"
    return result_html or "<div class='notion-card fade-in'><p>No input provided.</p></div>"
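# Minimal smoke test of the pipeline without the UI (the sample sentence is
# illustrative only; uncomment to check that both models load and score):
# print(analyze("I'm thrilled with how this turned out!", threshold=0.3, image=None))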
# CSS
custom_css = """
@keyframes fadeInPop {
  0% { opacity: 0; transform: scale(0.95); }
  100% { opacity: 1; transform: scale(1); }
}
@keyframes slideInUp {
  from { transform: translateY(20px); opacity: 0; }
  to { transform: translateY(0); opacity: 1; }
}
.fade-in {
  animation: fadeInPop 0.6s ease-out both;
}
.slide-up {
  animation: slideInUp 0.6s ease-out both;
}
.notion-card {
  background: white;
  border-radius: 12px;
  border: 1px solid #e5e7eb;
  padding: 16px;
  margin: 16px auto;
  box-shadow: 0 6px 20px rgba(0,0,0,0.05);
  max-width: 600px;
}
body {
  background: #f9fafb;
  font-family: 'Inter', sans-serif;
}
"""
# UI
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# 🧠 EmotionLens")
    gr.Markdown("Detect emotion from text and a face photo.")
    with gr.Row():
        text_input = gr.Textbox(label="Your Text", lines=3, placeholder="How do you feel?")
        image_input = gr.Image(type="pil", label="Upload Face Photo")
    threshold_slider = gr.Slider(0.1, 0.9, value=0.3, label="Text Emotion Threshold")
    analyze_btn = gr.Button("Analyze")
    output = gr.HTML()
    analyze_btn.click(fn=analyze, inputs=[text_input, threshold_slider, image_input], outputs=output)

demo.launch()
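# On Hugging Face Spaces this file is executed as-is; when running locally,
# demo.launch(share=True) would additionally expose a temporary public URL.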