CJJ-on-HF committed
Commit ae62a6e · verified · 1 parent: 378e14d

Update app.py


Import asyncio and use async/await so Gradio no longer shows "Error" before a long-running process finishes.
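The fix leans on Gradio accepting async callables as event handlers and awaiting them, so a long-running handler can keep reporting progress instead of the UI flashing "Error". A minimal, self-contained sketch of that pattern, separate from this commit (slow_job and demo are illustrative names only):

import asyncio
import gradio as gr

async def slow_job(text: str, progress=gr.Progress()) -> str:
    # Gradio awaits async handlers; progress updates keep the UI responsive
    # while the work runs instead of the call appearing to fail early.
    for _ in progress.tqdm(range(5)):
        await asyncio.sleep(1)
    return f"done: {text}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(slow_job, inputs=inp, outputs=out)

if __name__ == "__main__":
    demo.launch()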

Files changed (1): app.py (+9 -7)
app.py CHANGED
@@ -38,6 +38,8 @@ from src.vad import AbstractTranscription, NonSpeechStrategy, PeriodicTranscript
 from src.whisper.abstractWhisperContainer import AbstractWhisperContainer
 from src.whisper.whisperFactory import create_whisper_container
 
+import asyncio
+
 # Configure more application defaults in config.json5
 
 # Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself
@@ -116,7 +118,7 @@ class WhisperTranscriber:
                                              diarization, diarization_speakers)
 
     # Entry function for the simple tab progress
-    def transcribe_webui_simple_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
+    async def transcribe_webui_simple_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
                                          vad, vadMergeWindow, vadMaxMergeSize,
                                          word_timestamps: bool = False, highlight_words: bool = False,
                                          diarization: bool = False, diarization_speakers: int = 2,
@@ -129,7 +131,7 @@ class WhisperTranscriber:
         else:
             self.unset_diarization()
 
-        return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions,
+        return await self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions,
                                      word_timestamps=word_timestamps, highlight_words=highlight_words, progress=progress)
 
     # Entry function for the full tab
@@ -153,7 +155,7 @@ class WhisperTranscriber:
                                        diarization_min_speakers, diarization_max_speakers)
 
     # Entry function for the full tab with progress
-    def transcribe_webui_full_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
+    async def transcribe_webui_full_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
                                        vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode,
                                        # Word timestamps
                                        word_timestamps: bool, highlight_words: bool, prepend_punctuations: str, append_punctuations: str,
@@ -179,7 +181,7 @@ class WhisperTranscriber:
         else:
             self.unset_diarization()
 
-        return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions,
+        return await self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions,
                                      initial_prompt=initial_prompt, temperature=temperature, best_of=best_of, beam_size=beam_size, patience=patience, length_penalty=length_penalty, suppress_tokens=suppress_tokens,
                                      condition_on_previous_text=condition_on_previous_text, fp16=fp16,
                                      compression_ratio_threshold=compression_ratio_threshold, logprob_threshold=logprob_threshold, no_speech_threshold=no_speech_threshold,
@@ -187,7 +189,7 @@ class WhisperTranscriber:
                                      progress=progress)
 
     # Perform diarization given a specific input audio file and whisper file
-    def perform_extra(self, languageName, urlData, singleFile, whisper_file: str,
+    async def perform_extra(self, languageName, urlData, singleFile, whisper_file: str,
                       highlight_words: bool = False,
                       diarization: bool = False, diarization_speakers: int = 2, diarization_min_speakers = 1, diarization_max_speakers = 5, progress=gr.Progress()):
 
@@ -215,11 +217,11 @@ class WhisperTranscriber:
         multipleFiles = [singleFile] if singleFile else None
 
         # Will return download, text, vtt
-        return self.transcribe_webui("base", "", urlData, multipleFiles, None, None, None,
+        return await self.transcribe_webui("base", "", urlData, multipleFiles, None, None, None,
                                      progress=progress,highlight_words=highlight_words,
                                      override_transcribe_file=custom_transcribe_file, override_max_sources=1)
 
-    def transcribe_webui(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
+    async def transcribe_webui(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
                          vadOptions: VadOptions, progress: gr.Progress = None, highlight_words: bool = False,
                          override_transcribe_file: Callable[[AudioSource], dict] = None, override_max_sources = None,
                          **decodeOptions: dict):
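The hunks above add the asyncio import at module level but do not show where it is used inside transcribe_webui. One plausible use, stated purely as an assumption since the body of transcribe_webui is not part of this diff, is offloading the blocking Whisper call to a worker thread so the awaited coroutine does not stall the event loop; run_blocking_transcription below is a hypothetical stand-in for that synchronous work:

import asyncio

def run_blocking_transcription(source: str) -> dict:
    # Hypothetical placeholder for the synchronous, CPU/GPU-bound Whisper call.
    return {"text": ""}

async def transcribe_async(source: str) -> dict:
    # asyncio.to_thread (Python 3.9+) runs the blocking call in a worker thread
    # and yields control back to the event loop until it completes.
    return await asyncio.to_thread(run_blocking_transcription, source)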