import logging
from importlib.metadata import version
from timeit import default_timer as timer

import gradio as gr
import numpy as np

import onnx_asr

# Keep third-party logging at WARNING, but log this app's own messages at DEBUG.
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.info("onnx_asr version: %s", version("onnx_asr"))

# Load every model once at startup; each request below reuses these in-memory instances.
models = {
    name: onnx_asr.load_model(name)
    for name in [
        "gigaam-v2-ctc",
        "gigaam-v2-rnnt",
        "nemo-fastconformer-ru-ctc",
        "nemo-fastconformer-ru-rnnt",
        "alphacep/vosk-model-ru",
        "alphacep/vosk-model-small-ru",
        "whisper-base",
    ]
}


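# Gradio delivers the recorded audio as a (sample_rate, samples) tuple, where samples is an
# integer numpy array (stereo clips arrive with shape (n_samples, 2)).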
def recognize(audio: tuple[int, np.ndarray]):
    """Run the recording through every loaded model and return a [name, result, time] row per model."""
    if audio is None:
        return None

    sample_rate, waveform = audio
    logger.debug("recognize: sample_rate %s, waveform.shape %s.", sample_rate, waveform.shape)
    try:
        # Scale integer PCM samples to float32 in [-1, 1) (for int16 the divisor is 2**15 = 32768).
        waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
        # Downmix stereo recordings to mono.
        if waveform.ndim == 2:
            waveform = waveform.mean(axis=1)

        results = []
        for name, model in models.items():
            start = timer()
            result = model.recognize(waveform, sample_rate=sample_rate, language="ru")
            time = timer() - start
            logger.debug("recognized by %s: result '%s', time %.3f s.", name, result, time)
            results.append([name, result, f"{time:.3f} s."])
    except Exception as e:
        raise gr.Error(f"{e} Audio: sample_rate: {sample_rate}, waveform.shape: {waveform.shape}.") from e
    else:
        return results


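# Gradio UI: an audio input, Clear/Recognize buttons, and a results table with one row per model.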
with gr.Blocks() as demo:
    gr.Markdown("""
# ASR demo using onnx-asr (Russian models)

**[onnx-asr](https://github.com/istupakov/onnx-asr)** is a Python package for Automatic Speech Recognition using ONNX models.
The package is written in pure Python with minimal dependencies (no `pytorch` or `transformers`).
""")
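    # Minimal standalone usage sketch for onnx-asr (not used by this app); the call below follows
    # the project README, but treat it as illustrative rather than exact:
    #     onnx_asr.load_model("gigaam-v2-ctc").recognize("audio.wav")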
    input = gr.Audio(min_length=1, max_length=20)
    with gr.Row():
        gr.ClearButton(input)
        btn = gr.Button("Recognize", variant="primary")
    output = gr.Dataframe(headers=["model", "result", "time"], wrap=True)
    btn.click(fn=recognize, inputs=input, outputs=output)
    with gr.Accordion("ASR models used in this demo", open=False):
        gr.Markdown("""
* `gigaam-v2-ctc` - Sber GigaAM v2 CTC ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
* `gigaam-v2-rnnt` - Sber GigaAM v2 RNN-T ([origin](https://github.com/salute-developers/GigaAM), [onnx](https://huggingface.co/istupakov/gigaam-v2-onnx))
* `nemo-fastconformer-ru-ctc` - Nvidia FastConformer-Hybrid Large (ru) with CTC decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
* `nemo-fastconformer-ru-rnnt` - Nvidia FastConformer-Hybrid Large (ru) with RNN-T decoder ([origin](https://huggingface.co/nvidia/stt_ru_fastconformer_hybrid_large_pc), [onnx](https://huggingface.co/istupakov/stt_ru_fastconformer_hybrid_large_pc_onnx))
* `alphacep/vosk-model-ru` - Alpha Cephei Vosk 0.54-ru ([origin](https://huggingface.co/alphacep/vosk-model-ru))
* `alphacep/vosk-model-small-ru` - Alpha Cephei Vosk 0.52-small-ru ([origin](https://huggingface.co/alphacep/vosk-model-small-ru))
* `whisper-base` - OpenAI Whisper Base exported with onnxruntime ([origin](https://huggingface.co/openai/whisper-base), [onnx](https://huggingface.co/istupakov/whisper-base-onnx))
""")

demo.launch()
|