Update app.py
app.py CHANGED
@@ -2,29 +2,22 @@ import gradio as gr
 from transformers import AutoProcessor, AutoModelForCTC
 import torch
 import soundfile as sf
-
-
-
-model =
+import nemo.collections.asr as nemo_asr
+import gradio as gr
+# Load the model
+model = nemo_asr.models.ASRModel.from_pretrained("nvidia/stt_en_fastconformer_hybrid_large_pc")
 
 # Function to transcribe audio
 def transcribe_audio(audio_file):
-
-
-    with torch.no_grad():
-        logits = model(**inputs).logits
-        predicted_ids = torch.argmax(logits, dim=-1)
-        transcription = processor.batch_decode(predicted_ids)[0]
-    return transcription
+    transcription = model.transcribe([audio_file])
+    return transcription[0]
 
-#
+# Gradio interface
 iface = gr.Interface(
     fn=transcribe_audio,
     inputs=gr.Audio(type="filepath"),
     outputs="text",
-    title="Real-Time Transcription with FastConformer"
-    description="Upload an audio file to transcribe it using NVIDIA FastConformer."
+    title="Real-Time Transcription with FastConformer"
 )
 
-# Launch the app
 iface.launch()
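In short, the commit replaces the transformers AutoModelForCTC inference path with NVIDIA NeMo's FastConformer hybrid model and drops the description kwarg from the Gradio interface. For reference, here is a sketch of app.py as it stands after this commit, assembled from the added and unchanged lines above; the 4-space indentation is inferred from the diff, the comments are mine, and the transformers/torch/soundfile imports stay only because the hunk keeps them as unchanged context.

# app.py after this commit (sketch)
import gradio as gr
from transformers import AutoProcessor, AutoModelForCTC  # no longer used by the new code path
import torch  # no longer used by the new code path
import soundfile as sf  # no longer used by the new code path
import nemo.collections.asr as nemo_asr
import gradio as gr  # duplicate import added by the hunk; harmless

# Load the model
model = nemo_asr.models.ASRModel.from_pretrained("nvidia/stt_en_fastconformer_hybrid_large_pc")

# Function to transcribe audio
def transcribe_audio(audio_file):
    # transcribe() takes a list of audio file paths and returns one result per file;
    # depending on the NeMo version, each element may be a plain string or a Hypothesis object.
    transcription = model.transcribe([audio_file])
    return transcription[0]

# Gradio interface
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Real-Time Transcription with FastConformer"
)

iface.launch()

For this to run on a Space, the environment also needs the NeMo toolkit installed (typically nemo_toolkit[asr] in requirements.txt); that is an assumption about the Space's setup and not part of this commit.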