KDM999 committed on
Commit da0f868 · verified · Parent: a611f4b

Update app.py

Files changed (1)
  1. app.py +10 -11
app.py CHANGED
@@ -21,17 +21,16 @@ genders = sorted(set(entry["gender"] for entry in data))
 accents = sorted(set(entry["accent"] for entry in data))
 
 # Load ASR pipelines
-device = 0 if torch.cuda.is_available() else -1
-pipe_whisper_tiny = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=device)
-pipe_whisper_tiny_en = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en", device=device)
-pipe_whisper_base = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
-pipe_whisper_base_en = pipeline("automatic-speech-recognition", model="openai/whisper-base.en", device=device)
-pipe_whisper_medium = pipeline("automatic-speech-recognition", model="openai/whisper-medium", device=device)
-pipe_whisper_medium_en = pipeline("automatic-speech-recognition", model="openai/whisper-medium.en", device=device)
-pipe_distil_whisper_large = pipeline("automatic-speech-recognition", model="distil-whisper/distil-large-v3.5", device=device)
-pipe_wav2vec2_base_960h = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=device)
-pipe_wav2vec2_large_960h = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-960h", device=device)
-pipe_hubert_large_ls960_ft = pipeline("automatic-speech-recognition", model="facebook/hubert-large-ls960-ft", device=device)
+pipe_whisper_tiny = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
+pipe_whisper_tiny_en = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en")
+pipe_whisper_base = pipeline("automatic-speech-recognition", model="openai/whisper-base")
+pipe_whisper_base_en = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+pipe_whisper_medium = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
+pipe_whisper_medium_en = pipeline("automatic-speech-recognition", model="openai/whisper-medium.en")
+pipe_distil_whisper_large = pipeline("automatic-speech-recognition", model="distil-whisper/distil-large-v3.5")
+pipe_wav2vec2_base_960h = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
+pipe_wav2vec2_large_960h = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-960h")
+pipe_hubert_large_ls960_ft = pipeline("automatic-speech-recognition", model="facebook/hubert-large-ls960-ft")
 
 # Functions
 def convert_to_wav(file_path):
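
Note (not part of the diff): the net effect of this change is that the pipelines are no longer placed on a device explicitly; without a device argument, a transformers pipeline typically runs on CPU. A minimal sketch of the two behaviours, using whisper-tiny as one example and a hypothetical audio file name:

import torch
from transformers import pipeline

# Pre-commit behaviour: GPU index 0 if CUDA is available, otherwise -1 (CPU).
device = 0 if torch.cuda.is_available() else -1
pipe_explicit = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=device)

# Post-commit behaviour: no device argument, so the pipeline defaults to CPU.
pipe_default = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

# Either pipeline transcribes an audio file path ("sample.wav" is hypothetical)
# and returns a dict with the recognised text under the "text" key.
print(pipe_default("sample.wav")["text"])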