# medimage / app.py
import torch
# from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from transformers import pipeline
import gradio as gr
import datetime
"""
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model_id = "distil-whisper/distil-small.en"
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id, torch_dtype=torch_dtype, use_safetensors=True
)
model.to(device)
processor = AutoProcessor.from_pretrained(model_id)
pipe = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
max_new_tokens=128,
torch_dtype=torch_dtype,
device=device,
)
"""
# TODO: call a text generation model to surface the audio content once the
# given word(s) have been identified in the transcribed text
# from datasets import load_dataset  # only needed for the commented example below
device = "cuda:0" if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    "automatic-speech-recognition",
    # "microsoft/whisper-base-webnn" is a WebNN export and does not appear to be
    # loadable through this pipeline, so fall back to the standard checkpoint
    model="openai/whisper-base",
    chunk_length_s=30,
    device=device,
)
# ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# sample = ds[0]["audio"]
# prediction = pipe(sample.copy(), batch_size=8)["text"]
# we can also return timestamps for the predictions
#prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"]
def audio2text(audio_file, prompt: str | list):
    # return_timestamps=True returns {"text": ..., "chunks": [...]}; indexing
    # ["chunks"] first made `prediction` a list, so prediction["text"] raised
    # a TypeError. Keep the full dict and return the plain transcription.
    prediction = pipe(audio_file, batch_size=8, return_timestamps=True)
    # NOTE: the search prompt from the UI is collected but not applied yet
    return prediction["text"]
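
# The TODO above mentions identifying the given word(s) in the transcription.
# Below is a minimal, hypothetical sketch of how the currently unused prompt
# could be applied: a simple substring search over the timestamped chunks
# stands in for the text-generation step mentioned in the TODO. The name
# `find_word_timestamps` and its wiring are assumptions, not part of the
# original app; e.g. audio2text could `return find_word_timestamps(audio_file,
# prompt)` instead of the full text.
def find_word_timestamps(audio_file, prompt: str | list) -> str:
    words = [prompt] if isinstance(prompt, str) else list(prompt)
    chunks = pipe(audio_file, batch_size=8, return_timestamps=True)["chunks"]
    hits = []
    for chunk in chunks:
        text = chunk["text"]
        start, end = chunk["timestamp"]  # (start_seconds, end_seconds)
        for word in words:
            if word and word.lower() in text.lower():
                hits.append(f"'{word}' found at {start}-{end}s: {text.strip()}")
    return "\n".join(hits) if hits else "no match found"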
gr.Interface(
    fn=audio2text,
    inputs=[
        gr.Audio(label="upload your audio file", sources="upload", type="filepath"),
        gr.Textbox(label="provide word(s) to search for"),
    ],
    outputs=[gr.Textbox(label="transcription")],
).launch()