ameliabb0913 committed on
Commit bcf2d8f · verified · 1 Parent(s): aba0045

Update app.py

Files changed (1)
  1. app.py +25 -11
app.py CHANGED
@@ -58,15 +58,29 @@ def classify_emotion(audio_file):
     return f"Predicted Emotion: {predicted_emotion} {emoji}"
 
 
-# Gradio Interface
-interface = gr.Interface(
-    fn=classify_emotion,
-    inputs=gr.Audio(type="filepath"),
-    outputs="text",
-    title="Speech Emotion Classifier 🎭",
-    description="Upload an audio file and the model will classify its emotion (Neutral, Happy, Sad, Angry, Fearful, Disgust, Surprised)."
-)
-
-# Launch the app
+# ===== Redesigned Gradio UI with Blocks =====
+with gr.Blocks() as demo:
+    gr.Markdown("""
+    # 🎧 Speak Your Emotion | AI Emotion Detector
+
+    Upload a voice recording or speak directly into your mic.
+    This AI model will guess what **emotion** you're expressing.
+
+    ✅ Great for:
+    - Practicing emotional tone in speech
+    - Testing AI capabilities
+    - Language and drama learners
+
+    _Emotions detected_: 😐 Neutral, 😊 Happy, 😒 Sad, 😠 Angry, 😨 Fearful, 🤢 Disgusted, 😲 Surprised
+    """)
+
+    with gr.Row():
+        audio_input = gr.Audio(source="microphone", type="filepath", label="🎤 Upload or Speak")
+        result = gr.Textbox(label="🧠 AI Detected Emotion")
+
+    submit_btn = gr.Button("🎯 Analyze Emotion")
+    submit_btn.click(fn=classify_emotion, inputs=[audio_input], outputs=[result])
+
+# Launch!
 if __name__ == "__main__":
-    interface.launch()
+    demo.launch()
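
Note on the Gradio API: `gr.Audio(source="microphone", type="filepath", ...)` in the added code follows the Gradio 3.x signature. If the Space were moved to Gradio 4.x (the pinned version is not part of this commit), the single `source` argument becomes a plural `sources` list. A minimal sketch of the 4.x-style input, with the rest of the Blocks layout unchanged:

    # Gradio 4.x variant of the audio input above (assumption: gradio>=4.0 installed;
    # this commit does not show which version the Space pins).
    audio_input = gr.Audio(
        sources=["microphone", "upload"],  # 4.x replaces `source=` with a list of allowed sources
        type="filepath",                   # still hands classify_emotion a temp file path
        label="🎤 Upload or Speak",
    )

For context, `classify_emotion` itself is defined above the hunk shown here, so only its return line is visible. Below is a purely illustrative sketch of the kind of function the `submit_btn.click(...)` wiring expects, assuming a Hugging Face `audio-classification` pipeline; the model ID and emoji map are placeholders, not taken from app.py:

    from transformers import pipeline

    # Hypothetical stand-in for the real classifier in app.py. The checkpoint below is a
    # 4-class SUPERB emotion model, so it covers only part of the seven emotions the UI lists.
    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
    emoji_map = {"neu": "😐", "hap": "😊", "sad": "😢", "ang": "😠"}  # assumed mapping

    def classify_emotion(audio_file):
        # gr.Audio(type="filepath") passes the recording as a path to a temp audio file
        top = classifier(audio_file)[0]  # highest-scoring label
        predicted_emotion = top["label"]
        emoji = emoji_map.get(predicted_emotion, "")
        return f"Predicted Emotion: {predicted_emotion} {emoji}"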