import os
import tarfile

import torch
import torchaudio
import streamlit as st
from transformers import (
    AutoProcessor,
    AutoModelForSpeechSeq2Seq,
    TrainingArguments,
    Trainer,
)

# --- Model setup ---
MODEL_NAME = "AqeelShafy7/AudioSangraha-Audio_to_Text"

processor = AutoProcessor.from_pretrained(MODEL_NAME)
model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME)

# Run on the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print(f"✅ Model loaded on {device}")
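# NOTE: Streamlit re-runs this entire script on every widget interaction, so
# the loads above repeat on each rerun. Wrapping them in a cached loader
# avoids that; a minimal sketch using Streamlit's st.cache_resource (not part
# of the original script):
#
#     @st.cache_resource
#     def load_model():
#         processor = AutoProcessor.from_pretrained(MODEL_NAME)
#         model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME)
#         return processor, model
#
#     processor, model = load_model()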
# --- Dataset extraction ---
DATASET_TAR_PATH = "dev-clean.tar.gz"
EXTRACT_PATH = "./librispeech_dev_clean"

if not os.path.exists(EXTRACT_PATH):
    print("Extracting dataset...")
    with tarfile.open(DATASET_TAR_PATH, "r:gz") as tar:
        tar.extractall(EXTRACT_PATH)
    print("✅ Extraction complete.")
else:
    print("✅ Dataset already extracted.")

AUDIO_FOLDER = os.path.join(EXTRACT_PATH, "LibriSpeech", "dev-clean")


def find_audio_files(base_folder):
    """Recursively search for all .flac files in subdirectories."""
    audio_files = []
    for root, _, files in os.walk(base_folder):
        for file in files:
            if file.endswith(".flac"):
                audio_files.append(os.path.join(root, file))
    return audio_files


audio_files = find_audio_files(AUDIO_FOLDER)

if not audio_files:
    raise FileNotFoundError(f"❌ No .flac files found in {AUDIO_FOLDER}. Check dataset structure!")

print(f"✅ Found {len(audio_files)} audio files in dataset!")
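# LibriSpeech stores reference text next to the audio: each chapter folder
# contains a "<speaker>-<chapter>.trans.txt" file with one
# "<utterance-id> TRANSCRIPT" pair per line. Fine-tuning needs those
# transcripts as labels, so the helpers below collect them. This is a sketch
# that assumes the standard LibriSpeech directory layout.
def load_transcripts(base_folder):
    """Map utterance id (e.g. '84-121123-0000') to its reference transcript."""
    transcripts = {}
    for root, _, files in os.walk(base_folder):
        for file in files:
            if file.endswith(".trans.txt"):
                with open(os.path.join(root, file), encoding="utf-8") as f:
                    for line in f:
                        utterance_id, text = line.strip().split(" ", 1)
                        transcripts[utterance_id] = text
    return transcripts


def utterance_id(audio_path):
    """'.../84-121123-0000.flac' -> '84-121123-0000'."""
    return os.path.splitext(os.path.basename(audio_path))[0]


transcripts = load_transcripts(AUDIO_FOLDER)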
# --- Feature extraction ---
def load_and_process_audio(audio_path):
    """Loads and processes a single audio file into model input features."""
    waveform, sample_rate = torchaudio.load(audio_path)

    # The model expects 16 kHz audio.
    waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)

    input_features = processor(
        waveform.squeeze().numpy(), sampling_rate=16000, return_tensors="pt"
    ).input_features[0]
    return input_features


# Pair each clip with its tokenized transcript as labels (training on empty
# labels would fail). Only the first 100 files are used to keep the demo fast.
dataset = [
    {
        "input_features": load_and_process_audio(f),
        "labels": processor.tokenizer(transcripts[utterance_id(f)]).input_ids,
    }
    for f in audio_files[:100]
]

# 90/10 train/eval split.
train_size = int(0.9 * len(dataset))
train_dataset = dataset[:train_size]
eval_dataset = dataset[train_size:]

print(f"✅ Dataset loaded! Training: {len(train_dataset)}, Evaluation: {len(eval_dataset)}")
# --- Training setup ---
training_args = TrainingArguments(
    output_dir="./asr_model_finetuned",
    eval_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=500,
    save_total_limit=2,
    push_to_hub=True,  # requires a Hugging Face Hub login
)
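# DataCollatorForSeq2Seq pads token ids through the tokenizer and does not
# know how to batch log-mel "input_features", so a small custom collator is
# used instead. This follows the widely used speech seq2seq (Whisper-style)
# fine-tuning recipe; treat it as a sketch to adapt rather than part of the
# original script.
from dataclasses import dataclass
from typing import Any, Dict, List


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # Pad the audio features and the label token ids separately.
        input_features = [{"input_features": f["input_features"]} for f in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        label_features = [{"input_ids": f["labels"]} for f in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # Mask padding with -100 so it is ignored by the loss.
        labels = labels_batch["input_ids"].masked_fill(
            labels_batch.attention_mask.ne(1), -100
        )

        # Cut a leading BOS token if the tokenizer already added one; the
        # model re-adds it when shifting labels to decoder inputs.
        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels
        return batch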
data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor)

# `processing_class` supersedes the deprecated `tokenizer` argument in recent
# transformers releases.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=processor,
    data_collator=data_collator,
)
# --- Streamlit UI ---
if st.button("Start Fine-Tuning"):
    with st.spinner("Fine-tuning in progress... Please wait!"):
        trainer.train()
    st.success("✅ Fine-Tuning Completed! Model updated.")

st.title("🎙️ Speech-to-Text ASR with Fine-Tuning 🎶")

audio_file = st.file_uploader("Upload an audio file", type=["wav", "mp3", "flac"])
if audio_file:
    # Persist the upload so torchaudio can read it from disk.
    audio_path = "temp_audio.wav"
    with open(audio_path, "wb") as f:
        f.write(audio_file.read())

    waveform, sample_rate = torchaudio.load(audio_path)
    waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)

    input_features = processor(
        waveform.squeeze().numpy(), sampling_rate=16000, return_tensors="pt"
    ).input_features[0]

    # Seq2seq ASR models decode autoregressively, so transcribe with
    # generate() rather than an argmax over a single forward pass.
    with torch.no_grad():
        input_tensor = input_features.unsqueeze(0).to(device)
        predicted_ids = model.generate(input_tensor)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

    st.success("📝 Transcription:")
    st.write(transcription)
    # --- Human-in-the-loop correction ---
    user_correction = st.text_area("🔧 Correct the transcription (if needed):", transcription)

    if st.button("Fine-Tune with Correction"):
        if user_correction:
            corrected_input = processor.tokenizer(user_correction).input_ids

            # Append to the Trainer's live training list so the new example is
            # actually used (appending to `dataset` would not update it).
            train_dataset.append({"input_features": input_features, "labels": corrected_input})

            # One quick pass over the updated data.
            trainer.args.num_train_epochs = 1
            trainer.train()

            st.success("✅ Model fine-tuned with new correction! Try another audio file.")