import gradio as gr
from transformers import pipeline
from deepface import DeepFace
import cv2
import moviepy.editor as mp  # moviepy < 2.0; moviepy 2.x removed the editor module

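# Assumed dependencies (my reconstruction, not the Space's actual
# requirements.txt): gradio, transformers, torch, deepface,
# opencv-python-headless, moviepy<2.0. Pinning moviepy below 2.0 matters
# because moviepy 2.x dropped `moviepy.editor`, a common cause of Space
# build errors.
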
# Load Text Sentiment Model
sentiment_pipeline = pipeline("sentiment-analysis")
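# Note: with no model argument, the pipeline pulls a default English checkpoint
# (distilbert-base-uncased-finetuned-sst-2-english at the time of writing).
# Pinning it explicitly keeps rebuilds reproducible, e.g.:
# sentiment_pipeline = pipeline(
#     "sentiment-analysis",
#     model="distilbert-base-uncased-finetuned-sst-2-english",
# )
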
# 1. Text Sentiment Analysis
def analyze_text(text):
    result = sentiment_pipeline(text)[0]
    return f"{result['label']} ({result['score']*100:.2f}%)"

# 2. Face Emotion Detection
def analyze_face(image):
    try:
        # Gradio delivers RGB arrays; DeepFace (OpenCV-based) expects BGR
        image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # analyze() returns a list with one dict per detected face;
        # enforce_detection=False keeps it from raising when no face is found
        analysis = DeepFace.analyze(image_bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

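# Optional sketch, not in the original app: gr.Label can render the whole
# score distribution if the handler returns a dict of label -> probability.
# The helper name analyze_face_scores is mine.
def analyze_face_scores(image):
    image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    analysis = DeepFace.analyze(image_bgr, actions=['emotion'], enforce_detection=False)
    scores = analysis[0]['emotion']  # e.g. {'happy': 93.1, 'sad': 0.4, ...}
    return {k: v / 100 for k, v in scores.items()}  # gr.Label wants 0-1 floats
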
# 3. Video Emotion Detection
def analyze_video(video_file):
    # gr.File hands the handler a path to the uploaded file (a str in
    # Gradio 4.x, a tempfile wrapper with a .name attribute in 3.x)
    video_path = video_file if isinstance(video_file, str) else video_file.name
    clip = mp.VideoFileClip(video_path)
    frame = clip.get_frame(clip.duration / 2)  # sample the middle frame (RGB)
    clip.close()
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # DeepFace expects BGR
    try:
        analysis = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion in Video: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

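# Optional sketch, not in the original app: a single middle frame can miss the
# expressive part of a clip. This variant samples a few evenly spaced frames
# and majority-votes the emotion; the helper name analyze_video_voting is mine.
from collections import Counter

def analyze_video_voting(video_path, samples=5):
    clip = mp.VideoFileClip(video_path)
    votes = []
    for k in range(1, samples + 1):
        frame = clip.get_frame(clip.duration * k / (samples + 1))
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        result = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
        votes.append(result[0]['dominant_emotion'])
    clip.close()
    return Counter(votes).most_common(1)[0][0]
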
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🎯 Deep Learning Sentiment & Emotion Analyzer")
    gr.Markdown("Analyze **Text**, **Face Image**, or **Video**!")

    with gr.Tabs():
        with gr.TabItem("Text Sentiment"):
            text_input = gr.Textbox(label="Enter Text")
            text_output = gr.Label()
            text_button = gr.Button("Analyze Text")
            text_button.click(analyze_text, inputs=text_input, outputs=text_output)

        with gr.TabItem("Face Emotion (Image)"):
            image_input = gr.Image(type="numpy", label="Upload Face Image")
            image_output = gr.Label()
            image_button = gr.Button("Analyze Face Emotion")
            image_button.click(analyze_face, inputs=image_input, outputs=image_output)

        with gr.TabItem("Video Emotion"):
            video_input = gr.File(label="Upload Video (.mp4)")
            video_output = gr.Label()
            video_button = gr.Button("Analyze Video Emotion")
            video_button.click(analyze_video, inputs=video_input, outputs=video_output)

demo.launch()
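# On Hugging Face Spaces, Gradio serves the app automatically; run locally,
# launch() listens on http://127.0.0.1:7860 by default (share=True would add
# a temporary public link).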