AI-RESEARCHER-2024 committed
Commit 002641d · verified · 1 Parent(s): 47e4eb2

Update app.py

Files changed (1)
  1. app.py +88 -97
app.py CHANGED
@@ -1,10 +1,12 @@
 import gradio as gr
 import cv2
 import numpy as np
-import pickle
+import tensorflow as tf
 import os
-from PIL import Image
 
+# Load the trained models using Keras
+cnn_model = tf.keras.models.load_model('cnn_model.h5')
+qcnn_model = tf.keras.models.load_model('qcnn_model.h5')
 
 # Directories containing example videos
 examples_dir = 'examples'
@@ -13,16 +15,25 @@ deepfake_roop_dir = os.path.join(examples_dir, 'DeepfakeRoop')
 deepfake_web_dir = os.path.join(examples_dir, 'DeepfakeWeb')
 
 # Function to get video paths from a directory
-def get_video_paths(directory):
-    return [os.path.join(directory, vid) for vid in os.listdir(directory) if vid.endswith('.mp4')]
+def get_video_paths(directory, label):
+    videos = []
+    for vid in os.listdir(directory):
+        if vid.endswith('.mp4'):
+            videos.append({'path': os.path.join(directory, vid), 'label': label})
+    return videos
 
 # Get video paths for each category
-original_videos = get_video_paths(original_dir)
-deepfake_roop_videos = get_video_paths(deepfake_roop_dir)
-deepfake_web_videos = get_video_paths(deepfake_web_dir)
+original_videos = get_video_paths(original_dir, 'Original')
+deepfake_roop_videos = get_video_paths(deepfake_roop_dir, 'DeepfakeRoop')
+deepfake_web_videos = get_video_paths(deepfake_web_dir, 'DeepfakeWeb')
 
-# Function to process video and calculate accuracies
-def process_video(video_path, true_label):
+# Combine all examples
+examples = original_videos + deepfake_roop_videos + deepfake_web_videos
+
+# Map from example video path to label
+example_videos_dict = {example['path']: example['label'] for example in examples}
+
+def process_video(video_path, true_label=None):
     cap = cv2.VideoCapture(video_path)
     fps = cap.get(cv2.CAP_PROP_FPS)
     if fps == 0 or np.isnan(fps):
@@ -46,109 +57,89 @@ def process_video(video_path, true_label):
 
     cnn_correct = 0
     qcnn_correct = 0
+    cnn_class0 = 0
+    cnn_class1 = 0
+    qcnn_class0 = 0
+    qcnn_class1 = 0
     total_frames = len(sampled_frames)
     for frame in sampled_frames:
         cnn_pred = cnn_model.predict(frame)
         cnn_label = np.argmax(cnn_pred)
-        if cnn_label == true_label:
+        if cnn_label == 0:
+            cnn_class0 += 1
+        else:
+            cnn_class1 += 1
+        if true_label is not None and cnn_label == true_label:
             cnn_correct += 1
+
         qcnn_pred = qcnn_model.predict(frame)
         qcnn_label = np.argmax(qcnn_pred)
-        if qcnn_label == true_label:
+        if qcnn_label == 0:
+            qcnn_class0 += 1
+        else:
+            qcnn_class1 += 1
+        if true_label is not None and qcnn_label == true_label:
            qcnn_correct += 1
 
-    cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
-    qcnn_accuracy = (qcnn_correct / total_frames) * 100 if total_frames > 0 else 0
-    result_label = "Original" if true_label == 0 else "Deepfake"
+    if total_frames > 0:
+        cnn_class0_percent = (cnn_class0 / total_frames) * 100
+        cnn_class1_percent = (cnn_class1 / total_frames) * 100
+        qcnn_class0_percent = (qcnn_class0 / total_frames) * 100
+        qcnn_class1_percent = (qcnn_class1 / total_frames) * 100
+    else:
+        cnn_class0_percent = cnn_class1_percent = qcnn_class0_percent = qcnn_class1_percent = 0
 
-    result = f"Result: {result_label}\n"
-    result += f"CNN Model Accuracy: {cnn_accuracy:.2f}%\n"
-    result += f"QCNN Model Accuracy: {qcnn_accuracy:.2f}%"
+    if true_label is not None:
+        # Calculate accuracy if true_label is provided (example video)
+        cnn_accuracy = (cnn_correct / total_frames) * 100 if total_frames > 0 else 0
+        qcnn_accuracy = (qcnn_correct / total_frames) * 100 if total_frames > 0 else 0
+        result = f"CNN Model Accuracy: {cnn_accuracy:.2f}%\n"
+        result += f"QCNN Model Accuracy: {qcnn_accuracy:.2f}%"
+    else:
+        # Display percent of frames classified from each class
+        result = f"CNN Model Predictions:\nClass 0: {cnn_class0_percent:.2f}%\nClass 1: {cnn_class1_percent:.2f}%\n"
+        result += f"QCNN Model Predictions:\nClass 0: {qcnn_class0_percent:.2f}%\nClass 1: {qcnn_class1_percent:.2f}%"
     return result
 
-# Prediction functions for each step
-def predict_step1(video_file):
-    if video_file is None:
-        return "Please select a video."
-    return process_video(video_file, true_label=0)
-
-def predict_step2(video_file):
-    if video_file is None:
-        return "Please select a video."
-    return process_video(video_file, true_label=1)
-
-def predict_step3(video_file):
-    if video_file is None:
-        return "Please select a video."
-    return process_video(video_file, true_label=1)
+def predict(video_input):
+    if video_input is None:
+        return "Please upload a video or select an example."
+    if isinstance(video_input, dict):
+        video_path = video_input['name']
+    elif isinstance(video_input, str):
+        video_path = video_input
+    else:
+        return "Invalid video input."
+
+    # Check if video is an example
+    if video_path in example_videos_dict:
+        label = example_videos_dict[video_path]
+        if label == 'Original':
+            true_label = 0
+        else:
+            true_label = 1
+        result = process_video(video_path, true_label=true_label)
+        result = f"Example Video Detected ({label})\n" + result
+    else:
+        result = process_video(video_path)
+    return result
 
-# Create Gradio interface
 with gr.Blocks() as demo:
-    # Centered title
-    gr.HTML("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
-    gr.HTML("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")
-
-    # Step 1: Original Videos
-    gr.Markdown("## Step 1: Select an Original Video")
-    with gr.Row():
-        with gr.Column():
-            original_video = gr.Dropdown(
-                choices=original_videos,
-                label="Select Original Video",
-                interactive=True
-            )
-            predict_button1 = gr.Button("Predict")
-        with gr.Column():
-            output1 = gr.Textbox(label="Result")
-        predict_button1.click(fn=predict_step1, inputs=original_video, outputs=output1)
-
-    # Step 2: Deepfake Videos from DeepfakeWeb
-    gr.Markdown("## Step 2: Select a Deepfake Video Generated Using DeepfakeWeb")
-    with gr.Row():
-        with gr.Column():
-            deepfake_web_video = gr.Dropdown(
-                choices=deepfake_web_videos,
-                label="Select DeepfakeWeb Video",
-                interactive=True
-            )
-            predict_button2 = gr.Button("Predict")
-        with gr.Column():
-            output2 = gr.Textbox(label="Result")
-        predict_button2.click(fn=predict_step2, inputs=deepfake_web_video, outputs=output2)
-
-    # Step 3: Deepfake Videos from Roop Method
-    gr.Markdown("## Step 3: Select a Deepfake Video Generated Using the Roop Method")
+    gr.Markdown("<h1 style='text-align: center;'>Quanvolutional Neural Networks for Deepfake Detection</h1>")
+    gr.Markdown("<h2 style='text-align: center;'>Steven Fernandes, Ph.D.</h2>")
+
     with gr.Row():
         with gr.Column():
-            deepfake_roop_video = gr.Dropdown(
-                choices=deepfake_roop_videos,
-                label="Select DeepfakeRoop Video",
-                interactive=True
+            video_input = gr.Video(label="Upload Video or Select an Example", type="filepath")
+            gr.Examples(
+                examples=[example['path'] for example in examples],
+                inputs=video_input,
+                label="Examples"
             )
-            predict_button3 = gr.Button("Predict")
+            predict_button = gr.Button("Predict")
        with gr.Column():
-            output3 = gr.Textbox(label="Result")
-        predict_button3.click(fn=predict_step3, inputs=deepfake_roop_video, outputs=output3)
-
-    # Examples section mimicking the design of the provided application
-    gr.Markdown("## Examples")
-    gr.Markdown("### Original Videos")
-    gr.Examples(
-        examples=original_videos,
-        inputs=original_video,
-        label="Original Video Examples"
-    )
-    gr.Markdown("### DeepfakeWeb Videos")
-    gr.Examples(
-        examples=deepfake_web_videos,
-        inputs=deepfake_web_video,
-        label="DeepfakeWeb Video Examples"
-    )
-    gr.Markdown("### DeepfakeRoop Videos")
-    gr.Examples(
-        examples=deepfake_roop_videos,
-        inputs=deepfake_roop_video,
-        label="DeepfakeRoop Video Examples"
-    )
-
-demo.launch()
+            output = gr.Textbox(label="Result")
+
+    predict_button.click(fn=predict, inputs=video_input, outputs=output)
+
+demo.launch()
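Note on the elided context: the third hunk skips the middle of process_video (old lines 29-45 / new lines 40-56), where sampled_frames is built from the opened capture; that code is unchanged by this commit and not shown above. For orientation only, here is a minimal sketch of what such a sampling step could look like. The one-frame-per-second rate, the 224x224 resize, and the [0, 1] normalization are illustrative assumptions, not taken from the repository:

# Illustrative sketch only -- the real sampling/preprocessing code is
# elided from this diff and is not modified by the commit.
import cv2
import numpy as np

def sample_frames(cap, fps, target_size=(224, 224)):
    sampled_frames = []
    frame_idx = 0
    step = max(int(fps), 1)  # keep roughly one frame per second of video
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if frame_idx % step == 0:
            frame = cv2.resize(frame, target_size)    # assumed model input size
            frame = frame.astype(np.float32) / 255.0  # assumed normalization
            frame = np.expand_dims(frame, axis=0)     # batch dim, so model.predict(frame) works
            sampled_frames.append(frame)
        frame_idx += 1
    cap.release()
    return sampled_frames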
 
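With the three per-step handlers collapsed into the single predict entry point, both branches can be smoke-tested without launching the interface. A quick hypothetical check (the upload filename below is made up; it assumes the example directories are populated and both .h5 models load):

# Hypothetical local check exercising both branches of predict()
if examples:
    print(predict(examples[0]['path']))   # known example -> accuracy report
print(predict('some_uploaded_clip.mp4'))  # unknown path -> per-class frame percentages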