abreza committed on
Commit
5f64318
·
1 Parent(s): ee46811

Refactor app.py by removing example section

Browse files
Files changed (1) hide show
  1. app.py +0 -16
app.py CHANGED
@@ -25,7 +25,6 @@ for lang_region, names in LANGUAGE_REGION_CODES.items():
25
  language_to_regions[lang].append((f"{region}: {names[0]}", region))
26
 
27
 
28
-
29
  def update_regions(language):
30
  if language and language in language_to_regions:
31
  regions = language_to_regions[language]
@@ -34,7 +33,6 @@ def update_regions(language):
34
  return gr.Dropdown.update(choices=[], value=None, visible=False)
35
 
36
 
37
-
38
  @spaces.GPU
39
  def transcribe_audio(audio_file, model_name, language, region, predict_timestamps, padding_speech):
40
  model_key = MODELS[model_name]
@@ -133,20 +131,6 @@ with gr.Blocks(title="Dolphin Speech Recognition") as demo:
133
  outputs=[output_text, language_info]
134
  )
135
 
136
- gr.Examples(
137
- inputs=[
138
- audio_input,
139
- model_dropdown,
140
- language_dropdown,
141
- region_dropdown,
142
- timestamp_checkbox,
143
- padding_checkbox
144
- ],
145
- outputs=[output_text, language_info],
146
- fn=transcribe_audio,
147
- cache_examples=True,
148
- )
149
-
150
  gr.Markdown("""
151
 
152
  - The model supports 40 Eastern languages and 22 Chinese dialects
 
25
  language_to_regions[lang].append((f"{region}: {names[0]}", region))
26
 
27
 
 
28
  def update_regions(language):
29
  if language and language in language_to_regions:
30
  regions = language_to_regions[language]
 
33
  return gr.Dropdown.update(choices=[], value=None, visible=False)
34
 
35
 
 
36
  @spaces.GPU
37
  def transcribe_audio(audio_file, model_name, language, region, predict_timestamps, padding_speech):
38
  model_key = MODELS[model_name]
 
131
  outputs=[output_text, language_info]
132
  )
133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  gr.Markdown("""
135
 
136
  - The model supports 40 Eastern languages and 22 Chinese dialects