Update app.py
--- a/app.py
+++ b/app.py
@@ -58,15 +58,29 @@ def classify_emotion(audio_file):
     return f"Predicted Emotion: {predicted_emotion} {emoji}"
 
 
-# Gradio
-
-
-
-
-
-
-
-
+# ===== Redesigned Gradio UI with Blocks =====
+with gr.Blocks() as demo:
+    gr.Markdown("""
+    # 🧠 Speak Your Emotion | AI Emotion Detector
+
+    Upload a voice recording or speak directly into your mic.
+    This AI model will guess what **emotion** you're expressing.
+
+    ✅ Great for:
+    - Practicing emotional tone in speech
+    - Testing AI capabilities
+    - Language and drama learners
+
+    _Emotions detected_: 😐 Neutral, 😊 Happy, 😢 Sad, 😠 Angry, 😨 Fearful, 🤢 Disgusted, 😲 Surprised
+    """)
+
+    with gr.Row():
+        audio_input = gr.Audio(source="microphone", type="filepath", label="🎤 Upload or Speak")
+        result = gr.Textbox(label="🧠 AI Detected Emotion")
+
+    submit_btn = gr.Button("🎯 Analyze Emotion")
+    submit_btn.click(fn=classify_emotion, inputs=[audio_input], outputs=[result])
+
+# Launch!
 if __name__ == "__main__":
-
+    demo.launch()
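Note that `gr.Audio(source="microphone", ...)` is the Gradio 3.x signature; in Gradio 4.x the parameter was renamed to `sources` and takes a list. A minimal sketch of the same Blocks wiring against the 4.x API (the `from app import classify_emotion` import path is an assumption for illustration, not part of this commit):

```python
# Sketch of the equivalent UI for Gradio 4.x, where gr.Audio's `source`
# parameter became `sources` (a list of allowed input methods).
import gradio as gr

from app import classify_emotion  # hypothetical import path, adjust to your layout

with gr.Blocks() as demo:
    with gr.Row():
        # Gradio 4.x: allow both mic recording and file upload,
        # matching the "Upload or Speak" label above
        audio_input = gr.Audio(
            sources=["microphone", "upload"],
            type="filepath",
            label="🎤 Upload or Speak",
        )
        result = gr.Textbox(label="🧠 AI Detected Emotion")

    submit_btn = gr.Button("🎯 Analyze Emotion")
    submit_btn.click(fn=classify_emotion, inputs=[audio_input], outputs=[result])

if __name__ == "__main__":
    demo.launch()
```

Listing both "microphone" and "upload" in `sources` also keeps the component consistent with the label's promise that users can either upload a recording or speak live.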