# Run with: uvicorn main:app --host 0.0.0.0 --port 8000 --reload


from fastapi import FastAPI
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import librosa
import uvicorn

app = FastAPI()

# Load the Whisper-small processor and model once at startup so every request reuses them.
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# Clear any forced decoder ids so Whisper auto-detects language and task.
model.config.forced_decoder_ids = None

# Demo clip transcribed by the endpoint below; loaded once at 16 kHz, the sample rate Whisper expects.
audio_file_path = "output.mp3"

audio_data, _ = librosa.load(audio_file_path, sr=16000)

@app.get("/")
def transcribe_audio():
    # Convert the raw waveform into log-mel input features; passing sampling_rate
    # keeps the feature extraction consistent with the 16 kHz audio loaded above.
    input_features = processor(audio_data, sampling_rate=16000, return_tensors="pt").input_features

    # Generate token ids and decode them into text, dropping special tokens.
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

    return {"transcription": transcription[0]}


if __name__ == "__main__":
    # Start the development server; host/port match the command in the comment at the top of the file.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
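
# Example request once the server is running (assuming it is reachable on localhost:8000):
#   curl http://localhost:8000/
# Expected shape of the response:
#   {"transcription": "..."}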