import torch
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

# Load the processor (feature extractor + tokenizer) and the seq2seq ASR checkpoint
processor = AutoProcessor.from_pretrained("AqeelShafy7/AudioSangraha-Audio_to_Text")
model = AutoModelForSpeechSeq2Seq.from_pretrained("AqeelShafy7/AudioSangraha-Audio_to_Text")

# Move the model to a GPU when one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print(f"Model loaded on {device}")

from datasets import load_dataset
import torchaudio
import fsspec

# Raise the fsspec download timeout: the LibriSpeech archives are large and the
# default timeout can abort the download
fsspec.config.conf["timeout"] = 20000

# The "clean" config of librispeech_asr ships "train.100" and "train.360" splits
# rather than a single "train" split
dataset = load_dataset("librispeech_asr", "clean", split="train.100", trust_remote_code=True)
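
# Downloading and decoding the full train.100 split is heavy. For a quick
# experiment, the datasets library also accepts a sliced split; the 1% slice
# below is an arbitrary illustrative choice.
small_dataset = load_dataset(
    "librispeech_asr", "clean", split="train.100[:1%]", trust_remote_code=True
)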

# Preprocessing: resample every clip to 16 kHz and turn audio and transcript
# into the tensors the model expects
def preprocess_audio(batch):
    audio = batch["audio"]
    waveform, sample_rate = torchaudio.load(audio["path"])

    # Resample to the 16 kHz rate the feature extractor was trained with
    if sample_rate != 16000:
        waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)

    # Seq2seq speech models (e.g. Whisper) take log-mel "input_features";
    # the tokenized transcript becomes the decoder labels
    batch["input_features"] = processor(waveform.squeeze().numpy(), sampling_rate=16000).input_features[0]
    batch["labels"] = processor.tokenizer(batch["text"]).input_ids
    return batch


dataset = dataset.map(preprocess_audio, remove_columns=["audio"])
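
# The Trainer below is configured without an evaluation set. To evaluate once
# per epoch instead, a small validation split can be carved out first; the 1%
# test_size and the seed are illustrative choices.
splits = dataset.train_test_split(test_size=0.01, seed=42)
train_dataset, eval_dataset = splits["train"], splits["test"]
# These could then be passed as train_dataset / eval_dataset to the Trainer below.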

from transformers import TrainingArguments, Trainer, DataCollatorForSeq2Seq

training_args = TrainingArguments(
    output_dir="./asr_model_finetuned",
    # No eval_dataset is passed to the Trainer below, so evaluation is disabled;
    # switch to "epoch" once a validation split is provided
    evaluation_strategy="no",
    save_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=500,
    save_total_limit=2,
    push_to_hub=True,  # requires `huggingface-cli login` (or a configured HF token)
)

# DataCollatorForSeq2Seq pads the tokenized labels to a common length
data_collator = DataCollatorForSeq2Seq(processor.tokenizer, model=model)
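
# DataCollatorForSeq2Seq only pads token ids, not audio feature arrays, so
# speech seq2seq pipelines typically define a small custom collator that pads
# input_features with the feature extractor and labels with the tokenizer,
# masking padded label positions with -100. The sketch below follows that
# common pattern and assumes the `input_features` column produced above.
from dataclasses import dataclass
from typing import Any, Dict, List

@dataclass
class SpeechSeq2SeqCollator:
    processor: Any

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # Pad the audio features with the feature extractor...
        input_features = [{"input_features": f["input_features"]} for f in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        # ...and the transcripts with the tokenizer, ignoring padding in the loss
        label_features = [{"input_ids": f["labels"]} for f in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
        batch["labels"] = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        return batch

# This could be passed to the Trainer below in place of data_collator.
speech_data_collator = SpeechSeq2SeqCollator(processor)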

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    eval_dataset=None,
    tokenizer=processor.feature_extractor,
    data_collator=data_collator,
)

trainer.train()
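
# Saving the processor next to the fine-tuned weights keeps the checkpoint
# reloadable later with AutoProcessor / AutoModelForSpeechSeq2Seq
trainer.save_model("./asr_model_finetuned")
processor.save_pretrained("./asr_model_finetuned")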

# Streamlit demo: upload a clip, transcribe it, and optionally fine-tune on a correction
import streamlit as st
import soundfile as sf
import numpy as np

st.title("🎙️ Automatic Speech Recognition with Fine-Tuning 🎶")

audio_file = st.file_uploader("Upload an audio file", type=["wav", "mp3", "flac"])

if audio_file:
    # Persist the upload to disk so torchaudio can read it
    with open("temp_audio.wav", "wb") as f:
        f.write(audio_file.read())

    waveform, sample_rate = torchaudio.load("temp_audio.wav")

    # Resample to the 16 kHz rate the model expects
    if sample_rate != 16000:
        waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)

    input_features = processor(waveform.squeeze().numpy(), sampling_rate=16000).input_features[0]

    # Seq2seq ASR models are decoded with generate() rather than an argmax over logits
    with torch.no_grad():
        input_tensor = torch.tensor([input_features]).to(device)
        predicted_ids = model.generate(input_tensor)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

    st.success("Transcription:")
    st.write(transcription)

    user_correction = st.text_area("Correct the transcription (if needed):")

    if st.button("Fine-Tune Model"):
        if user_correction:
            # Tokenize the corrected transcript as the new target labels
            corrected_labels = processor.tokenizer(user_correction).input_ids

            # Append the corrected example and point the trainer at the updated dataset
            dataset = dataset.add_item({"input_features": input_features, "labels": corrected_labels})
            trainer.train_dataset = dataset

            trainer.train()

            st.success("Model fine-tuned successfully! Try another audio file.")