# NOTE(review): removed scraper residue that preceded the code
# (a "File size" banner — which is not valid Python and broke parsing —
# a stray commit hash, and a pasted line-number gutter 1-54).
#uvicorn app:app --host 0.0.0.0 --port 8000 --reload

# from fastapi import FastAPI
# from transformers import pipeline

# pipe = pipeline("automatic-speech-recognition", model="Pranjal12345/whisper-small-ne-pranjal")

# audio_path = "/home/pranjal/Downloads/chinese_audio.mp3"

# with open("/home/pranjal/Downloads/chinese_audio.mp3", "rb") as audio_file:
#     audio_data = audio_file.read()

# app = FastAPI()

# @app.get("/")
# def hello():
#     output = pipe(input)
#     return {"Output": output}






from fastapi import FastAPI
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import librosa

app = FastAPI()

# Load model and processor once at import time so every request reuses them.
# WhisperProcessor bundles the feature extractor (log-mel spectrogram) and tokenizer.
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# Clear forced decoder ids so generate() is not pinned to a preset language/task.
model.config.forced_decoder_ids = None

# Path to your audio file
# NOTE(review): hard-coded local path — the file must exist at import time
# or librosa.load below raises and the app fails to start.
audio_file_path = "/home/pranjal/Downloads/output.mp3"

# Read the audio file, resampled to 16 kHz — the rate Whisper's
# feature extractor expects. Loaded once at startup; every request
# transcribes this same clip.
audio_data, _ = librosa.load(audio_file_path, sr=16000)

@app.get("/")
def transcribe_audio():
    """Transcribe the pre-loaded audio clip with Whisper.

    Returns:
        dict: ``{"transcription": <str>}`` — the decoded text for the
        module-level ``audio_data`` loaded at startup.
    """
    # Pass the numpy array directly (no .tolist() round-trip) and state the
    # sampling rate explicitly: audio_data was resampled to 16 kHz above, and
    # without sampling_rate the feature extractor cannot verify the input rate.
    input_features = processor(
        audio_data, sampling_rate=16000, return_tensors="pt"
    ).input_features

    # Generate token ids, then decode them to text (one entry per batch item).
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

    return {"transcription": transcription[0]}