Update app.py
app.py
CHANGED
@@ -3,11 +3,37 @@ import cv2
 import numpy as np
 import tensorflow as tf
 import os
+import pennylane as qml
 
 # Load the trained models
 cnn_model = tf.keras.models.load_model('noq_c_model.h5')
 qcnn_model = tf.keras.models.load_model('q_model.h5')
 
+# Define the quanvolutional layer as per CNN_QCNN.ipynb
+n_layers = 3  # Number of quantum layers
+dev = qml.device("default.qubit", wires=2)
+rand_params = np.random.uniform(high=2 * np.pi, size=(n_layers, 2, 2))
+
+@qml.qnode(dev)
+def circuit(phi):
+    for j in range(2):
+        qml.RY(np.pi * phi[j], wires=j)
+    for layer in range(n_layers):
+        qml.templates.layers.RandomLayers(weights=rand_params[layer], wires=list(range(2)))
+    return [qml.expval(qml.PauliZ(j)) for j in range(2)]
+
+def quanv(image):
+    out = np.zeros((14, 14, 2))
+    for j in range(0, 28, 2):
+        for k in range(0, 28, 2):
+            q_results = circuit([
+                image[j, k, 0],
+                image[j, k + 1, 0]
+            ])
+            for c in range(2):
+                out[j // 2, k // 2, c] = q_results[c]
+    return out
+
 # Directories containing example videos
 examples_dir = 'examples'
 original_dir = os.path.join(examples_dir, 'Original')
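The added quanvolutional block angle-encodes two pixels from each 2x2 patch as RY rotations on two qubits, applies n_layers random layers, and measures one Pauli-Z expectation per wire, so a 28x28 grayscale image becomes a 14x14x2 feature map with values in [-1, 1]. A minimal shape check (a sketch, assuming PennyLane is installed and the definitions above are in scope; not part of the committed file):

# Sanity-check sketch for quanv(); assumes circuit() and quanv() from above.
import numpy as np

dummy = np.random.rand(28, 28, 1)       # normalized grayscale frame
features = quanv(dummy)                  # runs circuit() on every 2x2 patch
print(features.shape)                    # (14, 14, 2): one channel per qubit
print(features.min(), features.max())    # Pauli-Z expectations lie in [-1, 1]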
@@ -22,7 +48,6 @@ def get_video_paths(directory):
         videos.append(os.path.join(directory, vid))
     return videos
 
-# Get example videos for each category
 examples_original = get_video_paths(original_dir)
 examples_deepfake_roop = get_video_paths(deepfake_roop_dir)
 examples_deepfake_web = get_video_paths(deepfake_web_dir)
@@ -36,6 +61,21 @@ for vid in examples_deepfake_roop:
 for vid in examples_deepfake_web:
     example_videos_dict[vid] = 'DeepfakeWeb'
 
+def process_frame(frame):
+    # Convert to grayscale
+    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    # Resize to 28x28
+    resized_frame = cv2.resize(gray_frame, (28, 28))
+    # Normalize pixel values
+    normalized_frame = resized_frame / 255.0
+    # Add channel dimension
+    normalized_frame = np.expand_dims(normalized_frame, axis=-1)
+    # Apply quantum convolution
+    q_frame = quanv(normalized_frame)
+    # Reshape for model prediction
+    q_frame = np.expand_dims(q_frame, axis=0)
+    return q_frame
+
 def process_video(video_path, true_label=None):
     cap = cv2.VideoCapture(video_path)
     fps = cap.get(cv2.CAP_PROP_FPS)
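process_frame is the QCNN's input path: grayscale, resize to 28x28, scale to [0, 1], add a channel axis, quanvolve, and prepend a batch axis, so qcnn_model consumes (1, 14, 14, 2) arrays rather than raw frames. A quick sketch with a synthetic stand-in frame (assuming the functions above are in scope):

# Shape sketch for the QCNN input; the frame here is synthetic, not from a video.
import numpy as np

frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # stand-in BGR frame
q_frame = process_frame(frame)
print(q_frame.shape)  # (1, 14, 14, 2)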
@@ -43,47 +83,50 @@ def process_video(video_path, true_label=None):
         fps = 30
     frame_interval = max(int(round(fps / 30)), 1)
     frame_count = 0
-
+    total_frames = 0
+    cnn_correct = 0
+    qcnn_correct = 0
+    cnn_class0 = 0
+    cnn_class1 = 0
+    qcnn_class0 = 0
+    qcnn_class1 = 0
 
     while cap.isOpened():
         ret, frame = cap.read()
-        if not ret:
+        if not ret or total_frames >= 30:
             break
         if frame_count % frame_interval == 0:
-
-
-
-
-
+            # Process frame for cnn_model
+            cnn_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            cnn_frame = cv2.resize(cnn_frame, (28, 28))
+            cnn_frame = cnn_frame / 255.0
+            cnn_frame = cnn_frame.reshape(1, 28, 28, 1)
+            # Predict with cnn_model
+            cnn_pred = cnn_model.predict(cnn_frame)
+            cnn_label = np.argmax(cnn_pred)
+            if cnn_label == 0:
+                cnn_class0 += 1
+            else:
+                cnn_class1 += 1
+            if true_label is not None and cnn_label == true_label:
+                cnn_correct += 1
+
+            # Process frame for qcnn_model
+            q_frame = process_frame(frame)
+            # Predict with qcnn_model
+            qcnn_pred = qcnn_model.predict(q_frame)
+            qcnn_label = np.argmax(qcnn_pred)
+            if qcnn_label == 0:
+                qcnn_class0 += 1
+            else:
+                qcnn_class1 += 1
+            if true_label is not None and qcnn_label == true_label:
+                qcnn_correct += 1
+
+            total_frames += 1
         frame_count += 1
     cap.release()
 
-    cnn_correct = 0
-    qcnn_correct = 0
-    cnn_class0 = 0
-    cnn_class1 = 0
-    qcnn_class0 = 0
-    qcnn_class1 = 0
-    total_frames = len(sampled_frames)
-    for frame in sampled_frames:
-        cnn_pred = cnn_model.predict(frame)
-        cnn_label = np.argmax(cnn_pred)
-        if cnn_label == 0:
-            cnn_class0 += 1
-        else:
-            cnn_class1 += 1
-        if true_label is not None and cnn_label == true_label:
-            cnn_correct += 1
-
-        qcnn_pred = qcnn_model.predict(frame)
-        qcnn_label = np.argmax(qcnn_pred)
-        if qcnn_label == 0:
-            qcnn_class0 += 1
-        else:
-            qcnn_class1 += 1
-        if true_label is not None and qcnn_label == true_label:
-            qcnn_correct += 1
-
     if total_frames > 0:
         cnn_class0_percent = (cnn_class0 / total_frames) * 100
         cnn_class1_percent = (cnn_class1 / total_frames) * 100
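The rewritten loop classifies at most 30 sampled frames per video and calls predict() once per frame for each model. If throughput matters, the classical frames could instead be collected and predicted in one batch; a hedged sketch of that alternative (cnn_frames is a hypothetical list of the (1, 28, 28, 1) arrays built in the loop, not a variable in this commit):

# Alternative sketch: batch the CNN predictions rather than calling per frame.
import numpy as np

cnn_batch = np.concatenate(cnn_frames, axis=0)               # (n_frames, 28, 28, 1)
cnn_labels = np.argmax(cnn_model.predict(cnn_batch), axis=1)
cnn_class0 = int(np.sum(cnn_labels == 0))
cnn_class1 = int(np.sum(cnn_labels == 1))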
@@ -93,13 +136,11 @@ def process_video(video_path, true_label=None):
         cnn_class0_percent = cnn_class1_percent = qcnn_class0_percent = qcnn_class1_percent = 0
 
     if true_label is not None:
-        # Calculate accuracy if true_label is provided (example video)
         cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
         qcnn_accuracy = (qcnn_correct / total_frames) * 100 if total_frames > 0 else 0
         result = f"CNN Model Accuracy: {cnn_accuracy:.2f}%\n"
         result += f"QCNN Model Accuracy: {qcnn_accuracy:.2f}%"
     else:
-        # Display percent of frames classified from each class
         result = f"CNN Model Predictions:\nClass 0: {cnn_class0_percent:.2f}%\nClass 1: {cnn_class1_percent:.2f}%\n"
         result += f"QCNN Model Predictions:\nClass 0: {qcnn_class0_percent:.2f}%\nClass 1: {qcnn_class1_percent:.2f}%"
     return result
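With a true_label the function reports per-model accuracy over the sampled frames; without one it reports how the frames split across the two classes, e.g. (values illustrative):

CNN Model Predictions:
Class 0: 73.33%
Class 1: 26.67%
QCNN Model Predictions:
Class 0: 60.00%
Class 1: 40.00%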
@@ -128,14 +169,12 @@ def predict(video_input):
     return result
 
 with gr.Blocks() as demo:
-    gr.
-    gr.
+    gr.HTML("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
+    gr.HTML("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")
 
     with gr.Row(elem_classes="content-container"):
         with gr.Column():
-            video_input = gr.Video(label="Upload Video")
-            with gr.Row(elem_classes="control-panel"):
-                pass  # Add any controls if needed
+            video_input = gr.Video(label="Upload Video", type="filepath")
             examples_original = gr.Examples(
                 label="Original Videos",
                 inputs=video_input,
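The gr.Examples blocks bind the example videos to video_input; the rest of the layout is outside this diff. A typical way to wire predict() into such a Blocks app (names hypothetical, not from the commit):

# Hypothetical wiring sketch; the commit's actual button/output code is not shown.
output_box = gr.Textbox(label="Result")
detect_button = gr.Button("Detect Deepfake")
detect_button.click(fn=predict, inputs=video_input, outputs=output_box)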