Create app.py
app.py
ADDED
@@ -0,0 +1,164 @@
+import gradio as gr
+import cv2
+import numpy as np
+import pickle
+import os
+
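+# Load the pre-trained CNN and QCNN classifiers. These paths and the pickle
+# format are assumptions; adjust them to match how the models were saved.
+# Both objects are expected to expose a Keras-style .predict() method.
+with open('cnn_model.pkl', 'rb') as f:
+    cnn_model = pickle.load(f)
+with open('qcnn_model.pkl', 'rb') as f:
+    qcnn_model = pickle.load(f)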
+
+# Directories containing example videos
+examples_dir = 'examples'
+original_dir = os.path.join(examples_dir, 'Original')
+deepfake_roop_dir = os.path.join(examples_dir, 'DeepfakeRoop')
+deepfake_web_dir = os.path.join(examples_dir, 'DeepfakeWeb')
+
+# Function to get video paths from a directory
+def get_video_paths(directory):
+    return [os.path.join(directory, vid) for vid in os.listdir(directory) if vid.endswith('.mp4')]
+
+# Get video paths for each category
+original_videos = get_video_paths(original_dir)
+deepfake_roop_videos = get_video_paths(deepfake_roop_dir)
+deepfake_web_videos = get_video_paths(deepfake_web_dir)
+
+# Function to process video and calculate accuracies
+def process_video(video_path, true_label):
+    cap = cv2.VideoCapture(video_path)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    if fps == 0 or np.isnan(fps):
+        fps = 30
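+    # Sample at roughly 30 frames per second (every frame for lower frame rates)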
+    frame_interval = max(int(round(fps / 30)), 1)
+    frame_count = 0
+    sampled_frames = []
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        if frame_count % frame_interval == 0:
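+            # Preprocess each sampled frame to the models' 28x28 grayscale input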
+            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            resized_frame = cv2.resize(gray_frame, (28, 28))
+            normalized_frame = resized_frame / 255.0
+            normalized_frame = normalized_frame.reshape(1, 28, 28, 1)
+            sampled_frames.append(normalized_frame)
+        frame_count += 1
+    cap.release()
+
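+    # Classify every sampled frame with both models and count how often
+    # each prediction matches the ground-truth label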
+    cnn_correct = 0
+    qcnn_correct = 0
+    total_frames = len(sampled_frames)
+    for frame in sampled_frames:
+        cnn_pred = cnn_model.predict(frame)
+        cnn_label = np.argmax(cnn_pred)
+        if cnn_label == true_label:
+            cnn_correct += 1
+        qcnn_pred = qcnn_model.predict(frame)
+        qcnn_label = np.argmax(qcnn_pred)
+        if qcnn_label == true_label:
+            qcnn_correct += 1
+
+    cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
+    qcnn_accuracy = (qcnn_correct / total_frames) * 100 if total_frames > 0 else 0
+    result_label = "Original" if true_label == 0 else "Deepfake"
+
+    result = f"Result: {result_label}\n"
+    result += f"CNN Model Accuracy: {cnn_accuracy:.2f}%\n"
+    result += f"QCNN Model Accuracy: {qcnn_accuracy:.2f}%"
+    return result
+
+# Prediction functions for each step
+def predict_step1(video_file):
+    if video_file is None:
+        return "Please select a video."
+    return process_video(video_file, true_label=0)
+
+def predict_step2(video_file):
+    if video_file is None:
+        return "Please select a video."
+    return process_video(video_file, true_label=1)
+
+def predict_step3(video_file):
+    if video_file is None:
+        return "Please select a video."
+    return process_video(video_file, true_label=1)
+
+# Create Gradio interface
+with gr.Blocks() as demo:
+    # Centered title
+    gr.HTML("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
+    gr.HTML("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")
+
+    # Step 1: Original Videos
+    gr.Markdown("## Step 1: Select an Original Video")
+    with gr.Row():
+        with gr.Column():
+            original_video = gr.Dropdown(
+                choices=original_videos,
+                label="Select Original Video",
+                interactive=True
+            )
+            predict_button1 = gr.Button("Predict")
+        with gr.Column():
+            output1 = gr.Textbox(label="Result")
+    predict_button1.click(fn=predict_step1, inputs=original_video, outputs=output1)
+
+    # Step 2: Deepfake Videos from DeepfakeWeb
+    gr.Markdown("## Step 2: Select a Deepfake Video Generated Using DeepfakeWeb")
+    with gr.Row():
+        with gr.Column():
+            deepfake_web_video = gr.Dropdown(
+                choices=deepfake_web_videos,
+                label="Select DeepfakeWeb Video",
+                interactive=True
+            )
+            predict_button2 = gr.Button("Predict")
+        with gr.Column():
+            output2 = gr.Textbox(label="Result")
+    predict_button2.click(fn=predict_step2, inputs=deepfake_web_video, outputs=output2)
+
+    # Step 3: Deepfake Videos from the Roop Method
+    gr.Markdown("## Step 3: Select a Deepfake Video Generated Using the Roop Method")
+    with gr.Row():
+        with gr.Column():
+            deepfake_roop_video = gr.Dropdown(
+                choices=deepfake_roop_videos,
+                label="Select DeepfakeRoop Video",
+                interactive=True
+            )
+            predict_button3 = gr.Button("Predict")
+        with gr.Column():
+            output3 = gr.Textbox(label="Result")
+    predict_button3.click(fn=predict_step3, inputs=deepfake_roop_video, outputs=output3)
+
+    # Examples for each video category
+    gr.Markdown("## Examples")
+    gr.Markdown("### Original Videos")
+    gr.Examples(
+        examples=original_videos,
+        inputs=original_video,
+        label="Original Video Examples"
+    )
+    gr.Markdown("### DeepfakeWeb Videos")
+    gr.Examples(
+        examples=deepfake_web_videos,
+        inputs=deepfake_web_video,
+        label="DeepfakeWeb Video Examples"
+    )
+    gr.Markdown("### DeepfakeRoop Videos")
+    gr.Examples(
+        examples=deepfake_roop_videos,
+        inputs=deepfake_roop_video,
+        label="DeepfakeRoop Video Examples"
+    )
+
+demo.launch()