reallynicejam committed
Commit 5c2580e · verified · 1 Parent(s): fa084e8

Update app.py

Files changed (1)
  1. app.py +22 -18
app.py CHANGED
@@ -1,22 +1,26 @@
  import gradio as gr
- from transformers import pipeline, Wav2Vec2ForCTC, Wav2Vec2Processor
+ from transformers import pipeline
+ import numpy as np

- # Load the ASR model and processor
- model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
- processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+ transcriber = pipeline("automatic-speech-recognition", model="facebook/xm_transformer_s2ut_hk-en")

- def transcribe(audio):
-     # Process the audio and transcribe using the Hugging Face ASR model
-     input_values = processor(audio, return_tensors="pt", sampling_rate=16000).input_values
-     logits = model(input_values).logits
-     predicted_ids = torch.argmax(logits, dim=-1)
-     transcription = processor.batch_decode(predicted_ids)
-
-     return transcription[0]
+ def transcribe(stream, new_chunk):
+     sr, y = new_chunk
+     y = y.astype(np.float32)
+     y /= np.max(np.abs(y))

- gr.Interface(
-     fn=transcribe,
-     inputs=gr.Audio(source="microphone", type="filepath"),
-     outputs="text",
-     live=True
- ).launch()
+     if stream is not None:
+         stream = np.concatenate([stream, y])
+     else:
+         stream = y
+     return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]
+
+
+ demo = gr.Interface(
+     transcribe,
+     ["state", gr.Audio(sources=["microphone"], streaming=True)],
+     ["state", "text"],
+     live=True,
+ )
+
+ demo.launch()
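For reference, a minimal self-contained sketch (not part of the commit) of how the new streaming callback accumulates audio across chunks. The transformers pipeline is swapped for a hypothetical fake_transcriber stub so the state handling can be exercised without downloading a model; the chunk format mirrors what gr.Audio(streaming=True) passes to the callback, a (sample_rate, int16 array) tuple.

import numpy as np

def fake_transcriber(inputs):
    # Stand-in for the transformers ASR pipeline: report how much audio it received.
    seconds = len(inputs["raw"]) / inputs["sampling_rate"]
    return {"text": f"<{seconds:.1f}s of audio>"}

def transcribe(stream, new_chunk):
    # Same accumulation logic as the committed app.py, with the model call stubbed out.
    sr, y = new_chunk
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))
    stream = y if stream is None else np.concatenate([stream, y])
    return stream, fake_transcriber({"sampling_rate": sr, "raw": stream})["text"]

# Feed two half-second noise chunks, as the microphone stream would deliver them.
sr = 16000
stream = None
for _ in range(2):
    chunk = (sr, (np.random.randn(sr // 2) * 1000).astype(np.int16))
    stream, text = transcribe(stream, chunk)
print(text)  # -> "<1.0s of audio>"

After two half-second chunks the stub reports one second of accumulated audio, which is the point of keeping the growing array in the "state" input/output pair.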