Add simple demo
Browse files- app.py +22 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
import onnx_asr
|
5 |
+
|
# Load both Vosk Russian models (full + small) once at startup so each
# request only runs inference. Model names double as row labels in the UI.
# NOTE: load_model downloads weights from the HF hub on first run (hub extra).
models = {name: onnx_asr.load_model(name) for name in ["alphacep/vosk-model-ru", "alphacep/vosk-model-small-ru"]}
|
7 |
+
|
8 |
+
|
9 |
+
def recoginize(audio: tuple[int, np.ndarray]):
    """Transcribe one recording with every loaded ASR model.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        ``(sample_rate, waveform)`` as produced by ``gr.Audio`` in numpy
        mode. The waveform may be integer PCM (e.g. int16) or already
        float, mono ``(n,)`` or multi-channel ``(n, channels)``.

    Returns
    -------
    list[list]
        One ``[model_name, transcript]`` row per model, matching the
        ``gr.Dataframe`` output headers.
    """
    # NOTE(review): name misspells "recognize"; kept as-is because
    # gr.Interface(fn=recoginize) references this exact name.
    sample_rate, waveform = audio
    if np.issubdtype(waveform.dtype, np.floating):
        # Already float samples — only ensure float32; rescaling by
        # 2**(8*itemsize-1) here would divide by 2^31/2^63 and silence the signal.
        waveform = waveform.astype(np.float32)
    else:
        # Integer PCM: normalize by the signed full-scale value
        # (2^15 for int16, 2^31 for int32) to get floats in [-1, 1).
        waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
    if waveform.ndim > 1:
        # Downmix multi-channel audio to mono — presumably the models
        # expect a 1-D signal; TODO confirm against onnx_asr docs.
        waveform = waveform.mean(axis=1)
    return [[name, model.recognize(waveform, sample_rate=sample_rate)] for name, model in models.items()]
|
13 |
+
|
14 |
+
|
15 |
+
# UI wiring: a single recorder (clips must be 1-10 s) feeding a comparison
# table with one row per model. Flagging is disabled — this is a read-only demo.
mic_input = gr.Audio(min_length=1, max_length=10)
comparison_table = gr.Dataframe(
    headers=["Model", "result"],
    wrap=True,
    show_fullscreen_button=True,
)

demo = gr.Interface(
    fn=recoginize,
    inputs=[mic_input],
    outputs=[comparison_table],
    flagging_mode="never",
)

demo.launch()
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
onnx-asr[cpu,hub]>=0.4.0
|