import gradio as gr
import datetime
import Implementation as imp
import time
import os
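
# `Implementation` is the project-local module that provides the detection
# backends called below for video and audio.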

# ========== Backend functions ==========

def detect_hate_speech(video_path, mode):
    # Map the UI carbon-footprint mode to the label expected by the backend.
    if mode == "Low 🌱":
        Co2_release = "low"
    elif mode == "Medium ♨️":
        Co2_release = "medium"
    else:
        Co2_release = "high"
    hate_speech_time, C02_emissions = imp.detectHateSpeechSmartFilter(video_path, Co2_release)
    return hate_speech_time, C02_emissions

def detect_hate_speech_audio(audio_path, mode):
    if mode == "Low 🌱":
        Co2_release = "low"
    elif mode == "Medium ♨️":
        Co2_release = "medium"
    else:
        Co2_release = "high"
    hate_speech_time, C02_emissions = imp.Detect_hate_speech_emo_hate_bert(audio_path, Co2_release)
    return hate_speech_time, C02_emissions

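# Converts an "HH:MM:SS" timestamp to seconds, e.g. "00:01:30" -> 90.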
def convertir_timestamp_en_secondes(timestamp):
    h, m, s = map(int, timestamp.split(":"))
    return h * 3600 + m * 60 + s

def analyser_media(fichier, mode, is_audio=False):
    if not fichier:
        return "<p style='color:red;'>❌ No files uploaded. Please add video or audio.</p>", ""

    # Run the appropriate backend and collect (start, end) timestamps plus the carbon estimate.
    if is_audio:
        timestamps, carbone = detect_hate_speech_audio(fichier, mode)
    else:
        timestamps, carbone = detect_hate_speech(fichier, mode)

    # Build one clickable button per detected segment; clicking seeks the
    # media element inside the component with elem_id "video-player".
    liens = "<div style='line-height: 2; font-size: 16px;'>"
    total_seconds = 0
    for idx, (start_time, end_time) in enumerate(timestamps, 1):
        secondes = convertir_timestamp_en_secondes(start_time)
        liens += f'<button style="margin:5px; padding:8px 12px; border:none; border-radius:8px; background:#2d2d2d; color:white; cursor:pointer;" onclick="var player=document.getElementById(\'video-player\').querySelector(\'video, audio\'); if(player){{player.currentTime={secondes}; player.play();}}">🕑 Segment {idx} : {start_time} ➔ {end_time}</button><br>'
        duree_segment = convertir_timestamp_en_secondes(end_time) - secondes
        total_seconds += duree_segment
    liens += "</div>"

    nb_segments = len(timestamps)
    duree_totale = str(datetime.timedelta(seconds=total_seconds))
    resume = f"<div style='margin-top:20px; font-size:18px;'>🧮 <b>Segments detected</b>: {nb_segments}<br>⏳ <b>Total Hate Speech Duration</b>: {duree_totale} <br>♻️ <b>Carbon Footprint</b>: {carbone}</div>"
    return liens, resume

def afficher_pipeline(show):
    return gr.update(visible=show)

def show_loader():
    return gr.update(visible=True), "", ""

def analyser_avec_loading(video, mode):
    liens, resume = analyser_media(video, mode, is_audio=False)
    return gr.update(visible=False), liens, resume

def analyser_audio_avec_loading(audio, mode):
    liens, resume = analyser_media(audio, mode, is_audio=True)
    return gr.update(visible=False), liens, resume

# ========== Gradio interface ==========

with gr.Blocks(theme=gr.themes.Monochrome(), css="body {background-color: #121212; color: white;}") as demo:
    # Header
    gr.HTML("""
    <div style='text-align: center; margin-bottom: 20px;'>
        <h1 style='color: #00BFFF;'>🎓 EPFL Project – Emotion & Eco-Aware Hate Speech Detection in Video & Audio</h1>
        <h3 style='color: #AAAAAA;'>Participants: Loris Alan Fabbro, Mohammed Al-Hussini, Loic Misenta</h3>
        <a href='https://github.com/loris-fab/Deep_learning/blob/main/README.md'
           target='_blank' style='color:#00BFFF; font-weight:bold;'>🔗 View on GitHub</a>
    </div>
    """)

    # Pipeline overview (shown when the checkbox is ticked)
    with gr.Row():
        show_pipeline = gr.Checkbox(label="👀 Show Pipeline Overview", value=False)
        pipeline_image = gr.Image(
            value="pipeline.png",
            label="Pipeline Overview",
            show_label=True,
            visible=False
        )
    show_pipeline.change(
        afficher_pipeline,
        inputs=[show_pipeline],
        outputs=[pipeline_image]
    )

    gr.Markdown("# 🎥 Hate Speech Detector in Your Videos or Audio", elem_id="titre")

    with gr.Row():
        video_input = gr.Video(label="Upload your video", elem_id="video-player")
    with gr.Row():
        audio_input = gr.Audio(label="Upload your audio", type="filepath")
    with gr.Row():
        mode_selection = gr.Radio(["Low 🌱", "Medium ♨️", "High Consumption ⚠️"], label="Carbon Footprint Mode")
        bouton_analyse_video = gr.Button("Detect Hate Speech in Video 🔥")
        bouton_analyse_audio = gr.Button("Detect Hate Speech in Audio 🎧")

    with gr.Column() as resultats:
        loading_gif = gr.Image(
            value="loading.gif",
            visible=False,
            show_label=False
        )
        liens_resultats = gr.HTML()
        resume_resultats = gr.HTML()
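
    # Each analysis button registers two click handlers: the first immediately
    # shows the loading GIF and clears any previous results, the second runs the
    # detection and hides the loader once the results are ready.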
    bouton_analyse_video.click(
        fn=show_loader,
        inputs=[],
        outputs=[loading_gif, liens_resultats, resume_resultats],
        show_progress=False
    )
    bouton_analyse_video.click(
        fn=analyser_avec_loading,
        inputs=[video_input, mode_selection],
        outputs=[loading_gif, liens_resultats, resume_resultats],
        show_progress=True
    )
    bouton_analyse_audio.click(
        fn=show_loader,
        inputs=[],
        outputs=[loading_gif, liens_resultats, resume_resultats],
        show_progress=False
    )
    bouton_analyse_audio.click(
        fn=analyser_audio_avec_loading,
        inputs=[audio_input, mode_selection],
        outputs=[loading_gif, liens_resultats, resume_resultats],
        show_progress=True
    )
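
# share=True additionally serves the app through a temporary public Gradio link.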
demo.launch(share=True)