import torch
from transformers import pipeline
import gradio as gr


def transcript_audio(audio_file):
    # Initialize the speech recognition pipeline
    pipe = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-tiny.en",
        chunk_length_s=30,
    )
    # Transcribe the audio file and return the text
    result = pipe(audio_file, batch_size=8)["text"]
    return result


audio_input = gr.Audio(sources="upload", type="filepath")  # Audio input
output_text = gr.Textbox()  # Text output

iface = gr.Interface(
    fn=transcript_audio,
    inputs=audio_input,
    outputs=output_text,
    title="Audio Transcription App: Transcribe your audio - Created by Nabeel",
    description="Upload an audio file to transcribe it",
)

iface.launch(server_name="0.0.0.0", server_port=7860, share=True)