Update app.py
app.py
CHANGED
@@ -4,7 +4,7 @@ import numpy as np
 import tensorflow as tf
 import os
 
-# Load the trained models
+# Load the trained models
 cnn_model = tf.keras.models.load_model('noq_c_model.h5')
 qcnn_model = tf.keras.models.load_model('q_model.h5')
 
@@ -22,16 +22,19 @@ def get_video_paths(directory):
         videos.append(os.path.join(directory, vid))
     return videos
 
-# Get
-original_videos = get_video_paths(original_dir)
-deepfake_roop_videos = get_video_paths(deepfake_roop_dir)
-deepfake_web_videos = get_video_paths(deepfake_web_dir)
-
-# Combine all examples
-examples = original_videos + deepfake_roop_videos + deepfake_web_videos
+# Get example videos for each category
+examples_original = get_video_paths(original_dir)
+examples_deepfake_roop = get_video_paths(deepfake_roop_dir)
+examples_deepfake_web = get_video_paths(deepfake_web_dir)
 
 # Map from example video path to label
-example_videos_dict = {
+example_videos_dict = {}
+for vid in examples_original:
+    example_videos_dict[vid] = 'Original'
+for vid in examples_deepfake_roop:
+    example_videos_dict[vid] = 'DeepfakeRoop'
+for vid in examples_deepfake_web:
+    example_videos_dict[vid] = 'DeepfakeWeb'
 
 def process_video(video_path, true_label=None):
     cap = cv2.VideoCapture(video_path)
@@ -41,7 +44,7 @@ def process_video(video_path, true_label=None):
     frame_interval = max(int(round(fps / 30)), 1)
     frame_count = 0
     sampled_frames = []
-
+
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
@@ -54,7 +57,7 @@ def process_video(video_path, true_label=None):
             sampled_frames.append(normalized_frame)
         frame_count += 1
     cap.release()
-
+
     cnn_correct = 0
     qcnn_correct = 0
     cnn_class0 = 0
@@ -71,7 +74,7 @@ def process_video(video_path, true_label=None):
             cnn_class1 += 1
         if true_label is not None and cnn_label == true_label:
            cnn_correct += 1
-
+
         qcnn_pred = qcnn_model.predict(frame)
         qcnn_label = np.argmax(qcnn_pred)
         if qcnn_label == 0:
@@ -80,7 +83,7 @@ def process_video(video_path, true_label=None):
             qcnn_class1 += 1
         if true_label is not None and qcnn_label == true_label:
             qcnn_correct += 1
-
+
     if total_frames > 0:
         cnn_class0_percent = (cnn_class0 / total_frames) * 100
         cnn_class1_percent = (cnn_class1 / total_frames) * 100
@@ -88,7 +91,7 @@ def process_video(video_path, true_label=None):
         qcnn_class1_percent = (qcnn_class1 / total_frames) * 100
     else:
         cnn_class0_percent = cnn_class1_percent = qcnn_class0_percent = qcnn_class1_percent = 0
-
+
     if true_label is not None:
         # Calculate accuracy if true_label is provided (example video)
         cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
@@ -110,7 +113,7 @@ def predict(video_input):
         video_path = video_input
     else:
         return "Invalid video input."
-
+
     # Check if video is an example
     if video_path in example_videos_dict:
         label = example_videos_dict[video_path]
@@ -125,30 +128,32 @@ def predict(video_input):
     return result
 
 with gr.Blocks() as demo:
-    gr.
-    gr.
-
-    with gr.Row():
+    gr.Markdown("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
+    gr.Markdown("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")
+
+    with gr.Row(elem_classes="content-container"):
         with gr.Column():
-            video_input = gr.Video(label="Upload Video",)
+            video_input = gr.Video(label="Upload Video", type="filepath")
+            with gr.Row(elem_classes="control-panel"):
+                pass  # Add any controls if needed
             examples_original = gr.Examples(
                 label="Original Videos",
                 inputs=video_input,
-                examples=
+                examples=examples_original,
             )
             examples_deepfake_roop = gr.Examples(
                 label="Deepfake Roop Videos",
                 inputs=video_input,
-                examples=
+                examples=examples_deepfake_roop,
             )
             examples_deepfake_web = gr.Examples(
                 label="Deepfake Web Videos",
                 inputs=video_input,
-                examples=
+                examples=examples_deepfake_web,
            )
         with gr.Column():
             output = gr.Textbox(label="Result")
-            predict_button = gr.Button("Predict")
-
+            predict_button = gr.Button("Predict", elem_classes="gr-button")
+
     predict_button.click(fn=predict, inputs=video_input, outputs=output)
 demo.launch()
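The three mapping loops added in this commit work, but the directories can be paired with their labels once and the dict built in a single comprehension. A minimal sketch of an equivalent construction; the directory values and the bare-listing get_video_paths stub are hypothetical stand-ins for what app.py defines before this hunk:

import os

# Hypothetical stand-ins for the variables defined earlier in app.py.
original_dir = 'examples/original'
deepfake_roop_dir = 'examples/deepfake_roop'
deepfake_web_dir = 'examples/deepfake_web'

def get_video_paths(directory):
    # Mirrors the helper shown in the diff: full path for each file.
    return [os.path.join(directory, vid) for vid in os.listdir(directory)]

# One comprehension replaces the three explicit loops from the diff.
label_by_dir = {
    original_dir: 'Original',
    deepfake_roop_dir: 'DeepfakeRoop',
    deepfake_web_dir: 'DeepfakeWeb',
}
example_videos_dict = {
    vid: label
    for directory, label in label_by_dir.items()
    for vid in get_video_paths(directory)
}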
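One hedged note on the prediction loop whose context lines appear above: cnn_model.predict(frame) and qcnn_model.predict(frame) are called on single frames, and Keras Model.predict consumes batches, so a frame of shape (H, W, C) generally needs a leading batch axis. Whether app.py adds that axis outside the lines shown is not visible in this diff; the sketch below only illustrates the usual pattern, assuming NHWC-input models:

import numpy as np

def predict_frame(model, frame):
    # frame: one normalized image, shape (H, W, C).
    batch = np.expand_dims(frame, axis=0)    # -> (1, H, W, C)
    probs = model.predict(batch, verbose=0)  # -> (1, num_classes)
    return int(np.argmax(probs[0]))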
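On the UI side, note that the diff reuses the names examples_original, examples_deepfake_roop, and examples_deepfake_web: each starts as a list of paths and is then rebound to the gr.Examples component built from it. The list is read before the rebinding, so this works, though distinct names would be clearer. A minimal runnable sketch of the same Blocks wiring, with a stub predict function and a hypothetical example path in place of app.py's real ones:

import gradio as gr

def predict(video_input):
    # Stub for app.py's real predict(); echoes the received path.
    return f"Received: {video_input}"

example_paths = ['examples/original/sample1.mp4']  # hypothetical

with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>Deepfake Detection Demo</h1>")
    with gr.Row():
        with gr.Column():
            video_input = gr.Video(label="Upload Video")
            gr.Examples(label="Original Videos", inputs=video_input,
                        examples=example_paths)
        with gr.Column():
            output = gr.Textbox(label="Result")
            predict_button = gr.Button("Predict")
    # One callback serves uploads and examples alike.
    predict_button.click(fn=predict, inputs=video_input, outputs=output)

demo.launch()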