import gradio as gr
import cv2
import numpy as np
import tensorflow as tf
import os
import pennylane as qml
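
# Gradio demo: compare a classical CNN with a quanvolutional neural network
# (QCNN) for frame-level deepfake detection on short videos.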

# Load the trained classical CNN and quanvolutional (QCNN) classifiers
cnn_model = tf.keras.models.load_model('noq_c_model.h5')
qcnn_model = tf.keras.models.load_model('q_model.h5')

# Define the quanvolutional layer: a small 2-qubit circuit applied patch-wise
n_layers = 3  # Number of random quantum layers
dev = qml.device("default.qubit", wires=2)

# Fix the random seed so the (fixed, untrained) quanvolution weights are reproducible
np.random.seed(0)
rand_params = np.random.uniform(high=2 * np.pi, size=(n_layers, 2, 2))

@qml.qnode(dev)
def circuit(phi):
    # Encode each input pixel value as a rotation angle
    for j in range(2):
        qml.RY(np.pi * phi[j], wires=j)
    # Apply the fixed random layers
    for layer in range(n_layers):
        qml.RandomLayers(weights=rand_params[layer], wires=list(range(2)))
    # Measure each qubit in the Z basis
    return [qml.expval(qml.PauliZ(j)) for j in range(2)]
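
# Note: with random, fixed parameters, the circuit acts as an untrained feature
# map; e.g. circuit([0.0, 1.0]) returns two PauliZ expectation values in [-1, 1].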

def quanv(image):
    """Convolve the 28x28 image with the 2-qubit circuit (stride 2) -> 14x14x2."""
    out = np.zeros((14, 14, 2))
    for j in range(0, 28, 2):
        for k in range(0, 28, 2):
            # Feed the two top-row pixels of each 2x2 patch into the circuit
            q_results = circuit([
                image[j, k, 0],
                image[j, k + 1, 0]
            ])
            # Each expectation value becomes one output channel
            for c in range(2):
                out[j // 2, k // 2, c] = q_results[c]
    return out
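
# Shape check (sketch): quanv(np.zeros((28, 28, 1))) returns a (14, 14, 2)
# array, i.e. one 2-channel output pixel per 2x2 input patch.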

# Directories containing example videos
examples_dir = 'examples'
original_dir = os.path.join(examples_dir, 'Original')
deepfake_roop_dir = os.path.join(examples_dir, 'DeepfakeRoop')
deepfake_web_dir = os.path.join(examples_dir, 'DeepfakeWeb')

# Function to get video paths from a directory
def get_video_paths(directory):
    videos = []
    for vid in os.listdir(directory):
        if vid.endswith('.mp4'):
            videos.append(os.path.join(directory, vid))
    return videos

examples_original = get_video_paths(original_dir)
examples_deepfake_roop = get_video_paths(deepfake_roop_dir)
examples_deepfake_web = get_video_paths(deepfake_web_dir)

# Map from example video path to label
example_videos_dict = {}
for vid in examples_original:
    example_videos_dict[vid] = 'Original'
for vid in examples_deepfake_roop:
    example_videos_dict[vid] = 'DeepfakeRoop'
for vid in examples_deepfake_web:
    example_videos_dict[vid] = 'DeepfakeWeb'

def process_frame(frame):
    # Convert to grayscale
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Resize to 28x28
    resized_frame = cv2.resize(gray_frame, (28, 28))
    # Normalize pixel values
    normalized_frame = resized_frame / 255.0
    # Add channel dimension
    normalized_frame = np.expand_dims(normalized_frame, axis=-1)
    # Apply quantum convolution
    q_frame = quanv(normalized_frame)
    # Reshape for model prediction
    q_frame = np.expand_dims(q_frame, axis=0)
    return q_frame
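
# process_frame therefore yields a (1, 14, 14, 2) batch, matching the input
# shape qcnn_model.predict expects below.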

def process_video(video_path, true_label=None):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps == 0 or np.isnan(fps):
        fps = 30  # Fall back to a default when FPS metadata is missing
    # Sample about one frame per 1/30 s of video (capped at 30 frames below)
    frame_interval = max(int(round(fps / 30)), 1)
    frame_count = 0
    total_frames = 0
    cnn_correct = 0
    qcnn_correct = 0
    cnn_class0 = 0
    cnn_class1 = 0
    qcnn_class0 = 0
    qcnn_class1 = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or total_frames >= 30:
            break
        if frame_count % frame_interval == 0:
            # Process frame for cnn_model
            cnn_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            cnn_frame = cv2.resize(cnn_frame, (28, 28))
            cnn_frame = cnn_frame / 255.0
            cnn_frame = cnn_frame.reshape(1, 28, 28, 1)
            # Predict with cnn_model (sigmoid output: 0 = original, 1 = fake)
            cnn_pred = cnn_model.predict(cnn_frame)
            cnn_label = int(cnn_pred[0][0] > 0.5)
            if cnn_label == 0:
                cnn_class0 += 1
            else:
                cnn_class1 += 1
            if true_label is not None and cnn_label == true_label:
                cnn_correct += 1

            # Process frame for qcnn_model
            q_frame = process_frame(frame)
            # Predict with qcnn_model (sigmoid output: 0 = original, 1 = fake)
            qcnn_pred = qcnn_model.predict(q_frame)
            qcnn_label = int(qcnn_pred[0][0] > 0.5)
            if qcnn_label == 0:
                qcnn_class0 += 1
            else:
                qcnn_class1 += 1
            if true_label is not None and qcnn_label == true_label:
                qcnn_correct += 1

            total_frames += 1
        frame_count += 1
    cap.release()

    if total_frames > 0:
        cnn_class0_percent = (cnn_class0 / total_frames) * 100
        cnn_class1_percent = (cnn_class1 / total_frames) * 100
        qcnn_class0_percent = (qcnn_class0 / total_frames) * 100
        qcnn_class1_percent = (qcnn_class1 / total_frames) * 100
    else:
        cnn_class0_percent = cnn_class1_percent = qcnn_class0_percent = qcnn_class1_percent = 0

    result = ""
    if true_label is not None:
        cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
        qcnn_accuracy = (qcnn_correct / total_frames) * 100 if total_frames > 0 else 0
        result += f"CNN Model Accuracy: {cnn_accuracy:.2f}%\n"
        result += f"QCNN Model Accuracy: {qcnn_accuracy:.2f}%"
    
    result += f"CNN Model Predictions:\nOriginal: {cnn_class0_percent:.2f}%\nFake: {cnn_class1_percent:.2f}%\n"
    result += f"QCNN Model Predictions:\nOriginal: {qcnn_class0_percent:.2f}%\nFake: {qcnn_class1_percent:.2f}%"
    return result

def predict(video_input):
    if video_input is None:
        return "Please upload a video or select an example."
    # Newer Gradio versions pass a filepath string; older ones may pass a dict
    if isinstance(video_input, dict):
        video_path = video_input['name']
    elif isinstance(video_input, str):
        video_path = video_input
    else:
        return "Invalid video input."

    # Check if video is an example
    true_label = None
    if video_path in example_videos_dict:
        label = example_videos_dict[video_path]
        if label == 'Original':
            true_label = 0
        else:
            true_label = 1

    result = process_video(video_path, true_label=true_label)
    return result

with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
    gr.HTML("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")

    with gr.Row():
        with gr.Column():
            video_input = gr.Video(label="Upload Video")
            gr.Examples(
                label="Training: Original Videos",
                inputs=video_input,
                examples=examples_original,
            )
            gr.Examples(
                label="Training: Deepfake Videos Generated Using Deepfakesweb",
                inputs=video_input,
                examples=examples_deepfake_web,
            )
            gr.Examples(
                label="Testing: Deepfake Videos Generated Using Roop",
                inputs=video_input,
                examples=examples_deepfake_roop,
            )
        with gr.Column():
            output = gr.Textbox(label="Result")
            predict_button = gr.Button("Predict")

    predict_button.click(fn=predict, inputs=video_input, outputs=output)

demo.launch()