import json
import os
import random
import string
from difflib import SequenceMatcher

import gradio as gr
import torch
import torchaudio
from jiwer import wer
from transformers import pipeline

# Load metadata
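# Each entry provides at least: path, sentence, age, gender, accent.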
with open("common_voice_en_validated_249_hf_ready.json") as f:
    data = json.load(f)

# Prepare dropdown options
ages = sorted(set(entry["age"] for entry in data))
genders = sorted(set(entry["gender"] for entry in data))
accents = sorted(set(entry["accent"] for entry in data))

# Load ASR pipelines (GPU if available); keyed by model name so the
# transcription loop and the Gradio outputs stay in one consistent order
device = 0 if torch.cuda.is_available() else -1
MODEL_NAMES = [
    "openai/whisper-tiny",
    "openai/whisper-tiny.en",
    "openai/whisper-base",
    "openai/whisper-base.en",
    "openai/whisper-medium",
    "openai/whisper-medium.en",
    "distil-whisper/distil-large-v3.5",
    "facebook/wav2vec2-base-960h",
    "facebook/wav2vec2-large-960h",
    "facebook/hubert-large-ls960-ft",
]
pipes = {
    name: pipeline("automatic-speech-recognition", model=name, device=device)
    for name in MODEL_NAMES
}
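# Note: checkpoints are fetched from the Hugging Face Hub on first launch;
# the whisper-medium variants are on the order of 3 GB each.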

# Functions
def convert_to_wav(file_path):
    """Convert an audio file to mono WAV once, caching the result next to it."""
    wav_path = os.path.splitext(file_path)[0] + ".wav"  # robust even if ".mp3" appears mid-path
    if not os.path.exists(wav_path):
        waveform, sample_rate = torchaudio.load(file_path)
        waveform = waveform.mean(dim=0, keepdim=True)  # downmix to mono
        torchaudio.save(wav_path, waveform, sample_rate)
    return wav_path

def transcribe(pipe, file_path):
    """Run one ASR pipeline on a file and return its lowercased transcript."""
    result = pipe(file_path)
    return result["text"].strip().lower()

def highlight_differences(ref, hyp):
    """Return the hypothesis as HTML, with words that differ from the reference in red."""
    ref_words, hyp_words = ref.split(), hyp.split()
    sm = SequenceMatcher(None, ref_words, hyp_words)
    result = []
    for opcode, i1, i2, j1, j2 in sm.get_opcodes():
        if opcode == "equal":
            result.extend(hyp_words[j1:j2])
        else:
            result.extend(f"<span style='color:red'>{w}</span>" for w in hyp_words[j1:j2])
    return " ".join(result)

def normalize(text):
    """Lowercase, strip punctuation, and trim whitespace before WER scoring."""
    text = text.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))
    return text.strip()
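# e.g. normalize("Hello, World!") -> "hello world"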

# Generate Audio
def generate_audio(age, gender, accent):
    """Pick a random sample matching the selected demographics and return its WAV path."""
    filtered = [
        entry for entry in data
        if entry["age"] == age and entry["gender"] == gender and entry["accent"] == accent
    ]
    if not filtered:
        # Empty path (rather than a message in the hidden textbox) so a later
        # transcription attempt cleanly reports "No file selected."
        return None, ""
    sample = random.choice(filtered)
    file_path = os.path.join("common_voice_en_validated_249", sample["path"])
    wav_file_path = convert_to_wav(file_path)
    # Same path feeds both the audio player and the hidden textbox used for transcription
    return wav_file_path, wav_file_path

# Transcribe & Compare
def transcribe_audio(file_path):
    if not file_path:
        return "No file selected.", "", "", "", "", "", ""

    filename_mp3 = os.path.basename(file_path).replace(".wav", ".mp3")
    gold = ""
    for entry in data:
        if entry["path"].endswith(filename_mp3):
            gold = normalize(entry["sentence"])
            break
    if not gold:
        return "Reference not found.", "", "", "", "", "", ""

    outputs = {}
    for name, pipe in pipes.items():
        text = transcribe(pipe, file_path)
        clean = normalize(text)
        wer_score = wer(gold, clean)  # jiwer signature: wer(reference, hypothesis)
        outputs[name] = f"<b>{name} (WER: {wer_score:.2f}):</b><br>{highlight_differences(gold, clean)}"

    # dicts preserve insertion order, so this matches the Gradio outputs list below
    return (gold, *outputs.values())

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# Comparing ASR Models on Diverse English Speech Samples")
    gr.Markdown("""
        This demo compares the transcription performance of several automatic speech recognition (ASR) models.
        Users can select age, gender, and accent to generate diverse English audio samples.
        The models are evaluated on their ability to transcribe those samples.
        Data is sourced from 249 validated entries in the Common Voice English Delta Segment 21.0 release.
        """)

    with gr.Row():
        age = gr.Dropdown(choices=ages, label="Age")
        gender = gr.Dropdown(choices=genders, label="Gender")
        accent = gr.Dropdown(choices=accents, label="Accent")

    generate_btn = gr.Button("Get Audio")
    audio_output = gr.Audio(label="Audio", type="filepath", interactive=False)
    file_path_output = gr.Textbox(label="Audio File Path", visible=False)

    generate_btn.click(generate_audio, [age, gender, accent], [audio_output, file_path_output])

    transcribe_btn = gr.Button("Transcribe with All Models")
    gold_text = gr.Textbox(label="Reference (Gold Standard)")
    
    whisper_tiny_html = gr.HTML(label="Whisper Tiny")
    whisper_tiny_en_html = gr.HTML(label="Whisper Tiny English")
    whisper_base_html = gr.HTML(label="Whisper Base")
    whisper_base_en_html = gr.HTML(label="Whisper Base English")
    whisper_medium_html = gr.HTML(label="Whisper Medium")
    whisper_medium_en_html = gr.HTML(label="Whisper Medium English")
    distil_html = gr.HTML(label="Distil-Whisper Large")
    wav2vec_base_html = gr.HTML(label="Wav2Vec2 Base")
    wav2vec_large_html = gr.HTML(label="Wav2Vec2 Large")
    hubert_html = gr.HTML(label="HuBERT Large")

    transcribe_btn.click(
        transcribe_audio,
        inputs=[file_path_output],
        outputs=[
            gold_text,
            whisper_tiny_html,
            whisper_tiny_en_html,
            whisper_base_html,
            whisper_base_en_html,
            whisper_medium_html,
            whisper_medium_en_html,
            distil_html,
            wav2vec_base_html,
            wav2vec_large_html,
            hubert_html,
        ],
    )

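# Tip: for the slower medium-size models, calling demo.queue() before launch()
# can help avoid request timeouts on long transcriptions.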
demo.launch()