Add models and description
app.py CHANGED:

```diff
@@ -3,17 +3,39 @@ import numpy as np
 
 import onnx_asr
 
-models = {
+models = {
+    name: onnx_asr.load_model(name)
+    for name in [
+        "gigaam-v2-ctc",
+        "gigaam-v2-rnnt",
+        "nemo-fastconformer-ru-ctc",
+        "nemo-fastconformer-ru-rnnt",
+        "alphacep/vosk-model-ru",
+        "alphacep/vosk-model-small-ru",
+        "whisper-base",
+    ]
+}
 
 
 def recoginize(audio: tuple[int, np.ndarray]):
     sample_rate, waveform = audio
     waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
-    return [[name, model.recognize(waveform, sample_rate=sample_rate)] for name, model in models.items()]
+    return [[name, model.recognize(waveform, sample_rate=sample_rate, language="ru")] for name, model in models.items()]
 
 
 demo = gr.Interface(
     fn=recoginize,
+    title="ASR demo using onnx-asr (Russian models)",
+    description="""# Automatic Speech Recognition in Python using ONNX models - [onnx-asr](https://github.com/istupakov/onnx-asr)
+## Models used in demo:
+* `gigaam-v2-ctc` - Sber GigaAM v2 CTC ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
+* `gigaam-v2-rnnt` - Sber GigaAM v2 RNN-T ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
+* `nemo-fastconformer-ru-ctc` - Nvidia FastConformer-Hybrid Large (ru) with CTC decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
+* `nemo-fastconformer-ru-rnnt` - Nvidia FastConformer-Hybrid Large (ru) with RNN-T decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
+* `alphacep/vosk-model-ru` - Alpha Cephei Vosk 0.54-ru ([origin](https://huggingface.co/alphacep/vosk-model-ru))
+* `alphacep/vosk-model-small-ru` - Alpha Cephei Vosk 0.52-small-ru ([origin](https://huggingface.co/alphacep/vosk-model-small-ru))
+* `whisper-base` - OpenAI Whisper Base exported with onnxruntime ([origin](https://huggingface.co/openai/whisper-base), [onnx](https://huggingface.co/istupakov/whisper-base-onnx))
+""",
     inputs=[gr.Audio(min_length=1, max_length=10)],
     outputs=[gr.Dataframe(headers=["Model", "result"], wrap=True, show_fullscreen_button=True)],
     flagging_mode="never",
```
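A note on the unchanged scaling line in `recoginize`: `gr.Audio` delivers signed-integer PCM, and `waveform.itemsize` is the sample width in bytes, so `2 ** (8 * waveform.itemsize - 1)` is the magnitude of the type's most negative value (32768 for int16). Dividing by it maps samples into roughly [-1.0, 1.0]. A minimal standalone check of that arithmetic, using plain NumPy only:

```python
import numpy as np

def pcm_to_float(waveform: np.ndarray) -> np.ndarray:
    """Scale signed-integer PCM samples into roughly [-1.0, 1.0].

    For int16, itemsize is 2, so the divisor is 2**15 = 32768.
    """
    return waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)

# int16 extremes map to -1.0 and just under 1.0.
samples = np.array([-32768, 0, 32767], dtype=np.int16)
print(pcm_to_float(samples))  # [-1.  0.  0.9999695]
```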
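The model table is built once at import time, so all seven models stay resident for the life of the Space and each request fans the same waveform out to every one of them. The same load/recognize pair can be exercised outside Gradio; a hedged sketch reusing only the calls visible in this diff (the silent 16 kHz buffer is a placeholder, not real test audio, and model weights are assumed to be fetched on first load):

```python
import numpy as np
import onnx_asr

# Load one model from the demo's list.
model = onnx_asr.load_model("alphacep/vosk-model-small-ru")

# Placeholder input: one second of silence, mono float32 at 16 kHz.
sample_rate = 16_000
waveform = np.zeros(sample_rate, dtype=np.float32)

# Same call shape as app.py's recoginize(); expect empty text for silence.
print(model.recognize(waveform, sample_rate=sample_rate))
```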
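On the UI side, `gr.Audio(min_length=1, max_length=10)` bounds recordings to 1-10 seconds, which keeps seven sequential `recognize` calls per request tolerable. The hunk ends inside the `gr.Interface(...)` call, so the closing lines of `app.py` are not shown; for anyone reproducing the Space locally, a conventional Gradio tail would look like this (hypothetical, not part of this diff):

```python
# Hypothetical tail (not shown in the hunk): after gr.Interface(...) closes,
# launch the app when the script is run directly.
if __name__ == "__main__":
    demo.launch()
```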