import gradio as gr
import numpy as np
import librosa
import librosa.display  # imported explicitly so librosa.display.specshow is available
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model

# Constants
MAX_TIME_STEPS = 109   # fixed spectrogram width (time frames) the model expects
SAMPLE_RATE = 16000    # Hz; sampling rate the model was trained on
DURATION = 5           # seconds; nominal clip length (not used below)
N_MELS = 128           # number of Mel frequency bands
MODEL_PATH = "audio_classifier.h5"  # replace with the actual path to your saved model

# Load the pre-trained model, skipping the saved optimizer state, then recompile
model = load_model(MODEL_PATH, compile=False)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
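
# Shape sanity check (a sketch; assumes the model takes (N_MELS, MAX_TIME_STEPS, 1)
# inputs and has a 2-class softmax head, which this script does not itself confirm):
# assert model.input_shape == (None, N_MELS, MAX_TIME_STEPS, 1)
# assert model.output_shape == (None, 2)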

def visualize(mel_spectrogram):
    """Render the Mel spectrogram and save it as mel_spectrogram.png."""
    median_decibels = np.median(mel_spectrogram)
    median_human_voice_range = -65  # dB; reference median level for human speech
    diff_decibels = abs(median_decibels - median_human_voice_range)
    plt.figure(figsize=(12, 6))
    librosa.display.specshow(mel_spectrogram, sr=SAMPLE_RATE, x_axis='time', y_axis='mel')
    plt.colorbar(format='%+2.0f dB')
    plt.title(f'Difference from Median Human Voice Range: {diff_decibels:.2f} dB')
    plt.savefig("mel_spectrogram.png")  # saved image is returned by the Gradio app
    plt.close()

def classify_audio(audio):
    # Gradio supplies audio as a (sample_rate, numpy array) tuple
    rate, ar = audio
    arone = ar.astype(np.float32)
    # Collapse multi-channel audio to mono by averaging channels
    if arone.ndim > 1:
        arone = arone.mean(axis=1)
    # Normalize integer PCM amplitudes into [-1, 1] for librosa
    peak = np.max(np.abs(arone))
    if peak > 0:
        arone /= peak
    # Resample to the rate the model was trained on
    if rate != SAMPLE_RATE:
        arone = librosa.resample(arone, orig_sr=rate, target_sr=SAMPLE_RATE)
    mel_spectrogram = librosa.feature.melspectrogram(y=arone, sr=SAMPLE_RATE, n_mels=N_MELS)
    mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)

    # Ensure all spectrograms have the same width (time steps)
    if mel_spectrogram.shape[1] < MAX_TIME_STEPS:
        mel_spectrogram = np.pad(mel_spectrogram, ((0, 0), (0, MAX_TIME_STEPS - mel_spectrogram.shape[1])), mode='constant')
    else:
        mel_spectrogram = mel_spectrogram[:, :MAX_TIME_STEPS]

    # Reshape for the model
    X_test = np.expand_dims(mel_spectrogram, axis=-1)
    X_test = np.expand_dims(X_test, axis=0)

    # Predict using the loaded model
    y_pred = model.predict(X_test)

    # Convert probabilities to predicted classes
    y_pred_classes = np.argmax(y_pred, axis=1)

    if y_pred_classes[0] == 1:
        prediction = "Not Spoof : High chances of original voice"
    else:
        prediction = "Spoof : Possible voice cloning"
    
    # Heuristic override: if the median spectrogram level is within 3 dB of
    # the human-voice reference, treat a "Spoof" prediction as genuine
    median_decibels = np.median(mel_spectrogram)
    median_human_voice_range = -65
    diff_decibels = abs(median_decibels - median_human_voice_range)
    if diff_decibels < 3 and y_pred_classes[0] == 0:
        prediction = "Not Spoof : High chances of original voice"

    visualize(mel_spectrogram)
    return prediction, "mel_spectrogram.png"
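
# Quick offline test, a sketch assuming a local WAV file named "sample.wav"
# (uses the soundfile package, which is not a dependency of this app itself):
# import soundfile as sf
# data, sr = sf.read("sample.wav", dtype="float32")
# print(classify_audio((sr, data)))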
    
title = "Group-2 Audio Spoof Detection using CNN"
description = ("The model was trained on the ASVspoof 2019 dataset to detect spoofed audio "
               "through deep learning. To use it, please upload an audio file of suitable length. "
               "The Mel spectrogram used for inference is also shown, so the user can understand "
               "the classification and compare it with the median human voice decibel range.")
 
iface = gr.Interface(
    fn=classify_audio,
    inputs=["audio"],
    outputs=["text", "image"],
    title=title,
    description=description,
)
iface.launch()
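
# Running this script starts a local Gradio server and prints its URL;
# passing share=True to iface.launch() creates a temporary public link.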