Spaces:
Running
Running
Update requirements.txt
Browse files- requirements.txt +139 -12
requirements.txt
CHANGED
@@ -1,12 +1,139 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import io
import threading
from time import sleep, time

import gradio as gr
import speech_recognition as sr
from pydub import AudioSegment
from pydub.generators import Sine
from pydub.playback import play
# ---------------------------------------------------------------------------
# Global state
# ---------------------------------------------------------------------------
# True while a recording session is in progress; written by start_recording()
# (worker thread) and polled by transcribe_audio() (UI generator).
is_recording = False

# Audible cues played when recording starts/stops.
# BUG FIX: the original built these from AudioSegment.from_wav(io.BytesIO(b'')),
# which raises on empty bytes, and had unbalanced parentheses (a SyntaxError).
# Generate short sine-wave beeps instead: higher pitch for "start", lower for
# "end".
start_beep = Sine(880).to_audio_segment(duration=200)
end_beep = Sine(440).to_audio_segment(duration=200)
14 |
+
def play_start_sound():
    """Play the "recording started" beep; never raise into the caller.

    Audio output is best-effort: on a machine with no playback device
    (e.g. a headless server) the beep is silently skipped.
    """
    try:
        play(start_beep)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; audio failure remains non-fatal by design.
        pass
20 |
+
def play_end_sound():
    """Play the "recording finished" beep; never raise into the caller.

    Best-effort, mirroring play_start_sound(): playback problems are
    ignored so transcription is never blocked by audio-output issues.
    """
    try:
        play(end_beep)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; audio failure remains non-fatal by design.
        pass
26 |
+
def start_recording(audio_time_limit):
    """Capture speech from the default microphone and return its transcript.

    Raises the module-level ``is_recording`` flag for the duration of the
    session, plays the start/end audio cues, and sends the captured audio
    to Google's recognizer.

    Args:
        audio_time_limit: maximum length of the spoken phrase, in seconds.

    Returns:
        The recognized text, or "" when nothing was said in time, the
        speech was unintelligible, or any other error occurred.
    """
    global is_recording
    is_recording = True

    rec = sr.Recognizer()
    mic = sr.Microphone()

    play_start_sound()

    with mic as source:
        # Calibrate the energy threshold against background noise first.
        rec.adjust_for_ambient_noise(source, duration=0.5)
        try:
            captured = rec.listen(source, timeout=3, phrase_time_limit=audio_time_limit)
            return rec.recognize_google(captured)
        except (sr.WaitTimeoutError, sr.UnknownValueError):
            # No speech started in time, or it could not be understood.
            return ""
        except Exception as e:
            print(f"Error: {str(e)}")
            return ""
        finally:
            # Always signal end-of-session, even on the early returns above.
            play_end_sound()
            is_recording = False
def transcribe_audio(audio_time_limit=10):
    """Generator wired to the mic button: record speech, stream UI updates.

    Runs start_recording() on a worker thread while periodically yielding a
    countdown status, then yields the finished transcript.

    Args:
        audio_time_limit: maximum recording duration in seconds.

    Yields:
        ``(status_update, textbox_update)`` pairs in Gradio's
        ``{"__type__": "update", ...}`` dict form, for the status display
        and the text input respectively.
    """
    def execute_recording():
        # Worker-thread target: publish the transcript to the outer scope.
        nonlocal result
        result = start_recording(audio_time_limit)

    result = ""
    recording_thread = threading.Thread(target=execute_recording)
    recording_thread.start()

    start_time = time()
    while is_recording and (time() - start_time) < audio_time_limit:
        time_left = max(0, audio_time_limit - (time() - start_time))
        yield {"__type__": "update", "value": f"🎤 Recording... {time_left:.1f}s left", "visible": True}, {"__type__": "update", "value": "", "visible": True}
        # BUG FIX: the original called gr.sleep(0.1), which does not exist in
        # Gradio and raises AttributeError at runtime; use time.sleep instead.
        sleep(0.1)

    # Wait for the recognizer to finish before reporting the result.
    recording_thread.join()
    yield {"__type__": "update", "value": "✅ Done!", "visible": True}, {"__type__": "update", "value": result, "visible": True}
def create_ui():
    """Build and return the Gradio Blocks app for the speech-to-text demo.

    Layout: a text box with an adjacent round mic button, a (initially
    hidden) status line, a recording-time-limit slider, and example inputs.
    The mic button streams updates from transcribe_audio() into the status
    display and the text box.

    Returns:
        The assembled (unlaunched) gr.Blocks instance.
    """
    css = """
    .mic-button {
        background: linear-gradient(45deg, #FF3366, #BA265D) !important;
        border: none !important;
        color: white !important;
        padding: 12px !important;
        border-radius: 50% !important;
        height: 50px !important;
        width: 50px !important;
        margin-left: 10px !important;
    }
    .mic-button:hover {
        transform: scale(1.05) !important;
    }
    .input-with-mic {
        display: flex !important;
        align-items: center !important;
        gap: 10px !important;
    }
    .status-message {
        font-style: italic;
        color: #666;
        margin-top: 5px;
    }
    """

    with gr.Blocks(css=css) as demo:
        gr.Markdown("## 🎤 Speech to Text Converter")

        with gr.Group():
            with gr.Row():
                text_input = gr.Textbox(
                    label="Your Input",
                    placeholder="Click the mic button and speak...",
                    elem_classes=["input-box"],
                    scale=9
                )
                mic_button = gr.Button(
                    "🎤",
                    elem_classes=["mic-button"],
                    scale=1
                )

            status_display = gr.Textbox(
                label="Status",
                visible=False,
                interactive=False,
                elem_classes=["status-message"]
            )

        # FIX: this slider was previously created inline inside `inputs=[...]`,
        # which still renders it into the layout but leaves it unnamed and
        # impossible to reference elsewhere; define it explicitly instead.
        time_limit_slider = gr.Slider(
            5, 30, value=10, label="Recording time limit (seconds)"
        )

        mic_button.click(
            fn=transcribe_audio,
            inputs=[time_limit_slider],
            outputs=[status_display, text_input],
            show_progress="hidden"
        )

        gr.Examples(
            examples=["Hello world", "How are you today?", "Please convert my speech to text"],
            inputs=text_input,
            label="Try these examples:"
        )

    return demo
# Script entry point: build the UI and serve it with debug logging on.
if __name__ == "__main__":
    app = create_ui()
    app.launch(debug=True)