RSHVR committed on
Commit
e562b67
·
verified ·
1 Parent(s): 6745b37

Update stt.py

Browse files
Files changed (1) hide show
  1. stt.py +10 -4
stt.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import torch
3
  import torchaudio
4
- import spaces # Import spaces module for Zero-GPU
5
  from transformers import WhisperProcessor, WhisperForConditionalGeneration
6
 
7
  # Create directories
@@ -20,8 +20,9 @@ WHISPER_MODEL_SIZES = {
20
  'large': 'openai/whisper-large-v3',
21
  }
22
 
23
- @spaces.GPU # Add spaces.GPU decorator for Zero-GPU support
24
- async def transcribe_audio(audio_file_path, model_size="base", language="en"):
 
25
  global whisper_model, whisper_processor
26
 
27
  try:
@@ -73,4 +74,9 @@ async def transcribe_audio(audio_file_path, model_size="base", language="en"):
73
 
74
  except Exception as e:
75
  print(f"Error during transcription: {str(e)}")
76
- return ""
 
 
 
 
 
 
1
  import os
2
  import torch
3
  import torchaudio
4
+ import spaces
5
  from transformers import WhisperProcessor, WhisperForConditionalGeneration
6
 
7
  # Create directories
 
20
  'large': 'openai/whisper-large-v3',
21
  }
22
 
23
+ # Synchronous function with GPU decorator
24
+ @spaces.GPU
25
+ def _transcribe_audio_gpu(audio_file_path, model_size="base", language="en"):
26
  global whisper_model, whisper_processor
27
 
28
  try:
 
74
 
75
  except Exception as e:
76
  print(f"Error during transcription: {str(e)}")
77
+ return ""
78
+
79
# Async wrapper that calls the GPU function
async def transcribe_audio(audio_file_path, model_size="base", language="en"):
    """Asynchronously transcribe an audio file via the GPU-decorated helper.

    Args:
        audio_file_path: Path to the audio file to transcribe.
        model_size: Whisper model size key (presumably a key of
            WHISPER_MODEL_SIZES, e.g. "base" or "large" — confirm against
            the helper's lookup).
        language: Language code forwarded to the model (default "en").

    Returns:
        The transcription text produced by `_transcribe_audio_gpu`
        ("" when the helper swallows an error, per its except branch).
    """
    import asyncio  # local import keeps this change self-contained

    # The original body called the synchronous `_transcribe_audio_gpu`
    # directly, which blocks the event loop for the whole transcription
    # despite this coroutine being `async`. Running it in a worker thread
    # keeps the loop responsive while preserving the return value.
    # NOTE(review): assumes the `@spaces.GPU` decorator tolerates being
    # invoked off the main thread — verify against the Spaces runtime.
    return await asyncio.to_thread(
        _transcribe_audio_gpu, audio_file_path, model_size, language
    )