File size: 2,181 Bytes
c00e8ff
 
d1d98e8
 
 
 
 
c00e8ff
d1d98e8
c00e8ff
 
 
8505c64
 
d1d98e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import gradio as gr
from textblob import TextBlob
from deepface import DeepFace
import moviepy.editor as mp
import cv2
import tempfile
import os

def analyze_text(text):
    """Classify the sentiment of *text* using TextBlob polarity.

    Returns a human-readable string with the sentiment label
    (Positive / Negative / Neutral) and the raw polarity score.
    """
    blob = TextBlob(text)
    polarity = blob.sentiment.polarity
    # Polarity is in [-1.0, 1.0]; sign decides the label, 0 is Neutral.
    sentiment = "Positive" if polarity > 0 else "Negative" if polarity < 0 else "Neutral"
    # BUG FIX: original f-string was unterminated (missing `)"` at the end),
    # which made the whole module a SyntaxError.
    return f"Text Sentiment: {sentiment} (Polarity: {polarity:.2f})"

def analyze_image(image):
    """Report the dominant facial emotion detected in *image* via DeepFace.

    On any analysis failure, returns an "Error: ..." string instead of
    raising, so the UI always gets something to display.
    """
    try:
        # enforce_detection=False keeps DeepFace from raising when no
        # face is found; it analyzes the whole image instead.
        analysis = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
        return f"Detected Emotion: {analysis[0]['dominant_emotion']}"
    except Exception as e:
        return f"Error: {str(e)}"
def analyze_video(video_file):
    """Report the dominant facial emotion in the midpoint frame of *video_file*.

    Extracts a single frame from the middle of the clip, writes it to a
    temporary file, and runs DeepFace emotion analysis on it. Returns an
    "Error: ..." string on failure instead of raising.
    """
    try:
        # BUG FIX: original used tempfile.mkdtemp(), which is never cleaned
        # up; TemporaryDirectory removes the directory (and frame) on exit.
        with tempfile.TemporaryDirectory() as tmpdir:
            clip = mp.VideoFileClip(video_file)
            try:
                # Sample one representative frame from the clip's midpoint.
                frame = clip.get_frame(clip.duration / 2)
            finally:
                # BUG FIX: close the clip to release its file/reader handles
                # (original leaked them on every call).
                clip.close()
            frame_path = os.path.join(tmpdir, "frame.jpg")
            # moviepy yields RGB frames; OpenCV expects BGR when writing.
            cv2.imwrite(frame_path, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            result = DeepFace.analyze(frame_path, actions=['emotion'], enforce_detection=False)
            dominant_emotion = result[0]['dominant_emotion']
            return f"Video Emotion: {dominant_emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# Three-tab Gradio UI: text sentiment, image emotion, and video emotion,
# each wired to its analyzer function above.
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 Emotion and Sentiment Analyzer")

    with gr.Tab("Text Analysis"):
        sentiment_in = gr.Textbox(label="Enter Text")
        sentiment_out = gr.Textbox(label="Sentiment Result")
        gr.Button("Analyze Text").click(
            analyze_text, inputs=sentiment_in, outputs=sentiment_out
        )

    with gr.Tab("Image Analysis"):
        face_image_in = gr.Image(type="filepath", label="Upload Face Image")
        image_emotion_out = gr.Textbox(label="Emotion Result")
        gr.Button("Analyze Image").click(
            analyze_image, inputs=face_image_in, outputs=image_emotion_out
        )

    with gr.Tab("Video Analysis"):
        face_video_in = gr.Video(label="Upload Face Video")
        video_emotion_out = gr.Textbox(label="Emotion Result")
        gr.Button("Analyze Video").click(
            analyze_video, inputs=face_video_in, outputs=video_emotion_out
        )

demo.launch()