# Hugging Face Space: audio transcription + summarization demo
# (page-status residue from the Spaces UI removed so the file parses)
import gradio as gr
import torch
from transformers import pipeline
# Load models once at startup (module scope) so every request reuses them.
# Whisper runs on GPU 0 when CUDA is available, otherwise on CPU (-1).
transcriber = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
    device=0 if torch.cuda.is_available() else -1,
)
# NOTE(review): the summarizer is left on the pipeline default device (CPU);
# pass the same `device=` argument if GPU summarization is desired.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
# Function to process audio
def process_audio(audio_file):
    """Transcribe an audio file and summarize the transcription.

    Args:
        audio_file: Filesystem path to the uploaded audio file
            (Gradio passes a path string because the Audio component
            uses type="filepath").

    Returns:
        tuple[str, str]: (full transcription, short summary).
    """
    # Step 1: Transcribe audio with Whisper.
    transcription = transcriber(audio_file)["text"]
    # Step 2: Summarize the transcription deterministically (no sampling);
    # bounds of 10-50 tokens keep the summary short.
    summary = summarizer(
        transcription, max_length=50, min_length=10, do_sample=False
    )[0]["summary_text"]
    return transcription, summary
# Gradio Interface with Horizontal Layout
with gr.Blocks() as interface:
    with gr.Row():
        # Upload button on the left
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            process_button = gr.Button("Process Audio")
        # Output text boxes on the right
        with gr.Column():
            transcription_output = gr.Textbox(label="Full Transcription", lines=10)
            summary_output = gr.Textbox(label="Summary", lines=5)
    # Link the button to the processing function.
    process_button.click(
        process_audio,
        inputs=[audio_input],
        outputs=[transcription_output, summary_output],
    )

# Launch the interface with SSR disabled.
# BUG FIX: the original had `interface.launch` (a bare attribute reference,
# never invoked), so the app never actually started. Call it, and pass
# ssr_mode=False to match the stated intent; add share=True here for an
# optional public link.
interface.launch(ssr_mode=False)