import gradio as gr
from transformers import pipeline
from deepface import DeepFace
import cv2
import moviepy.editor as mp  # the editor module exists in moviepy < 2.0
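# Assumed setup (the original pins nothing; versions are a guess):
#   pip install gradio transformers deepface opencv-python "moviepy<2.0"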

# Load Text Sentiment Model
sentiment_pipeline = pipeline("sentiment-analysis")
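# The bare "sentiment-analysis" task pulls Hugging Face's default checkpoint
# (distilbert-base-uncased-finetuned-sst-2-english at the time of writing);
# pinning it explicitly keeps results reproducible:
# sentiment_pipeline = pipeline(
#     "sentiment-analysis",
#     model="distilbert-base-uncased-finetuned-sst-2-english",
# )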

# 1. Text Sentiment Analysis
def analyze_text(text):
    result = sentiment_pipeline(text)[0]
    return f"{result['label']} ({result['score']*100:.2f}%)"

# 2. Face Emotion Detection
def analyze_face(image):
    try:
        # gr.Image(type="numpy") delivers RGB; DeepFace expects BGR (OpenCV order)
        image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        analysis = DeepFace.analyze(image_bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# 3. Video Emotion Detection
def analyze_video(video_file):
    # gr.File hands over the upload already saved to disk; depending on the
    # Gradio version the argument is a path string or a tempfile wrapper,
    # so take the path directly instead of calling .read() on it.
    video_path = video_file if isinstance(video_file, str) else video_file.name

    clip = mp.VideoFileClip(video_path)
    frame = clip.get_frame(clip.duration / 2)  # sample the middle frame (RGB)
    clip.close()
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # DeepFace expects BGR

    try:
        analysis = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion in Video: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🎯 Deep Learning Sentiment & Emotion Analyzer")
    gr.Markdown("Analyze **Text**, **Face Image**, or **Video**!")

    with gr.Tabs():
        with gr.TabItem("Text Sentiment"):
            text_input = gr.Textbox(label="Enter Text")
            text_output = gr.Label()
            text_button = gr.Button("Analyze Text")
            text_button.click(analyze_text, inputs=text_input, outputs=text_output)

        with gr.TabItem("Face Emotion (Image)"):
            image_input = gr.Image(type="numpy", label="Upload Face Image")
            image_output = gr.Label()
            image_button = gr.Button("Analyze Face Emotion")
            image_button.click(analyze_face, inputs=image_input, outputs=image_output)

        with gr.TabItem("Video Emotion"):
            video_input = gr.File(label="Upload Video (.mp4)")
            video_output = gr.Label()
            video_button = gr.Button("Analyze Video Emotion")
            video_button.click(analyze_video, inputs=video_input, outputs=video_output)

demo.launch()
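# launch() serves locally on http://127.0.0.1:7860 by default; share=True is
# the standard option for a temporary public link if remote access is needed.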