Update app.py
app.py
CHANGED
@@ -1,10 +1,7 @@
-#
-from google.colab import drive
-drive.mount('/content/drive')
-!pip install deepface==0.0.79 tensorflow==2.10.0 opencv-python-headless==4.7.0.72
+# Installation commands (run these first in Colab)
+# !pip install deepface==0.0.79 tensorflow==2.10.0 opencv-python-headless==4.7.0.72 gradio==3.50.2
 
 import gradio as gr
-import json
 import cv2
 import numpy as np
 from deepface import DeepFace
@@ -12,105 +9,113 @@ import matplotlib.pyplot as plt
 from PIL import Image
 import tempfile
 import os
-import pandas as pd
 import shutil
+import pandas as pd
+
+# Google Drive integration (for Colab)
+try:
+    from google.colab import drive
+    drive.mount('/content/drive')
+except:
+    pass
 
-def verify_faces(img1, img2, threshold=0.
+def verify_faces(img1, img2, threshold=0.6, model="VGG-Face"):
     temp_dir = tempfile.mkdtemp()
-    img1_path = os.path.join(temp_dir, "image1.jpg")
-    img2_path = os.path.join(temp_dir, "image2.jpg")
-
     try:
         # Save images
+        img1_path = os.path.join(temp_dir, "img1.jpg")
+        img2_path = os.path.join(temp_dir, "img2.jpg")
         Image.fromarray(img1).save(img1_path) if isinstance(img1, np.ndarray) else img1.save(img1_path)
         Image.fromarray(img2).save(img2_path) if isinstance(img2, np.ndarray) else img2.save(img2_path)
 
-        # Verify faces
+        # Verify faces
         result = DeepFace.verify(
-            img1_path=img1_path,
+            img1_path=img1_path,
             img2_path=img2_path,
             model_name=model,
-            distance_metric="cosine"
-            threshold=threshold
+            distance_metric="cosine"
         )
-
+
         # Create visualization
         fig, ax = plt.subplots(1, 2, figsize=(10, 5))
-        for
+        for i, path in enumerate([img1_path, img2_path]):
             img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
-            ax[
-            ax[
-            ax[
-
-        confidence = round((1 - result["distance"]) * 100, 2)
-        plt.suptitle(f"{'✅ MATCH' if result['verified'] else '❌ NO MATCH'}\nConfidence: {confidence}%",
-                     fontsize=14, y=1.05)
+            ax[i].imshow(img)
+            ax[i].axis('off')
+            ax[i].set_title(f"Image {i+1}")
 
+        verified = result['distance'] <= threshold
+        plt.suptitle(f"{'✅ MATCH' if verified else '❌ NO MATCH'}\nDistance: {result['distance']:.4f}")
         return fig, result
 
     except Exception as e:
         return None, {"error": str(e)}
-
     finally:
         shutil.rmtree(temp_dir, ignore_errors=True)
 
-def find_faces(query_img,
+def find_faces(query_img, db_input, threshold=0.6, model="VGG-Face"):
     temp_dir = tempfile.mkdtemp()
-    query_path = os.path.join(temp_dir, "query.jpg")
-
     try:
         # Save query image
+        query_path = os.path.join(temp_dir, "query.jpg")
         Image.fromarray(query_img).save(query_path) if isinstance(query_img, np.ndarray) else query_img.save(query_path)
 
-        # Handle database
-        if isinstance(
-            db_path =
+        # Handle database input
+        if isinstance(db_input, str):
+            db_path = db_input
         else:
             db_path = os.path.join(temp_dir, "db")
            os.makedirs(db_path, exist_ok=True)
-            for i, file in enumerate(
+            for i, file in enumerate(db_input):
                 ext = os.path.splitext(file.name)[1]
                 shutil.copy(file.name, os.path.join(db_path, f"img_{i}{ext}"))
 
-        # Find faces
-
-
-
-
-
-
-
-
-
-
+        # Find faces
+        try:
+            dfs = DeepFace.find(
+                img_path=query_path,
+                db_path=db_path,
+                model_name=model,
+                distance_metric="cosine",
+                silent=True
+            )
+        except:
+            return None, {"error": "No faces found in database"}
+
         df = dfs[0] if isinstance(dfs, list) else dfs
         df = df[df['distance'] <= threshold].sort_values('distance')
-
+
         # Create visualization
-
-        axes
-        axes[0].set_title("Query Image")
-
-        for idx, (_, row) in enumerate(df.head(3).iterrows()):
-            if idx >= len(axes)-1: break
-            match_img = cv2.cvtColor(cv2.imread(row['identity']), cv2.COLOR_BGR2RGB)
-            axes[idx+1].imshow(match_img)
-            axes[idx+1].set_title(f"Match {idx+1}\n{row['distance']:.2f}")
+        num_matches = min(4, len(df))
+        fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
 
+        # Show query image
+        query_img = cv2.cvtColor(cv2.imread(query_path), cv2.COLOR_BGR2RGB)
+        axes[0].imshow(query_img)
+        axes[0].set_title("Query")
+        axes[0].axis('off')
+
+        # Show matches
+        for i in range(num_matches):
+            if i >= len(df): break
+            match_path = df.iloc[i]['identity']
+            match_img = cv2.cvtColor(cv2.imread(match_path), cv2.COLOR_BGR2RGB)
+            axes[i+1].imshow(match_img)
+            axes[i+1].set_title(f"Match {i+1}\n{df.iloc[i]['distance']:.4f}")
+            axes[i+1].axis('off')
+
         return fig, df[['identity', 'distance']].to_dict('records')
 
     except Exception as e:
         return None, {"error": str(e)}
-
     finally:
         shutil.rmtree(temp_dir, ignore_errors=True)
 
-def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
+def analyze_face(img, actions=['age', 'gender', 'emotion']):
     temp_dir = tempfile.mkdtemp()
-    img_path = os.path.join(temp_dir, "analyze.jpg")
-
     try:
         # Save image
+        img_path = os.path.join(temp_dir, "analyze.jpg")
         Image.fromarray(img).save(img_path) if isinstance(img, np.ndarray) else img.save(img_path)
 
         # Analyze face
@@ -120,80 +125,85 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
             enforce_detection=False,
             detector_backend='opencv'
         )
-
+
         # Process results
         results = results if isinstance(results, list) else [results]
         fig = plt.figure(figsize=(10, 5))
 
-        #
+        # Show image
         plt.subplot(121)
-
+        img_display = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
+        plt.imshow(img_display)
         plt.title("Input Image")
         plt.axis('off')
-
-        #
+
+        # Show attributes
         plt.subplot(122)
-
-        plt.barh(list(
+        attributes = {k: v for res in results for k, v in res.items() if k != 'region'}
+        plt.barh(list(attributes.keys()), list(attributes.values()))
         plt.title("Analysis Results")
-
+        plt.tight_layout()
+
         return fig, results
 
     except Exception as e:
         return None, {"error": str(e)}
-
     finally:
         shutil.rmtree(temp_dir, ignore_errors=True)
 
-# Gradio
-with gr.Blocks(title="Face
-    gr.Markdown("#
+# Gradio Interface
+with gr.Blocks(title="Face Recognition Toolkit", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# 🧑🏻 Face Recognition Toolkit")
 
     with gr.Tabs():
         with gr.Tab("Verify Faces"):
-            gr.Markdown("## Compare two faces")
             with gr.Row():
-                img1 = gr.Image(
-                img2 = gr.Image(
-
-
-            verify_btn = gr.Button("
-
-
+                img1 = gr.Image(label="First Image", type="pil")
+                img2 = gr.Image(label="Second Image", type="pil")
+            verify_threshold = gr.Slider(0.1, 1.0, 0.6, label="Match Threshold")
+            verify_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face")
+            verify_btn = gr.Button("Verify Faces")
+            verify_output = gr.Plot()
+            verify_json = gr.JSON()
 
             verify_btn.click(
-                verify_faces,
-                [img1, img2,
-                [
+                verify_faces,
+                [img1, img2, verify_threshold, verify_model],
+                [verify_output, verify_json]
             )
-
+
         with gr.Tab("Find Faces"):
-            gr.
-
-
-
-
-
-
+            query_img = gr.Image(label="Query Image", type="pil")
+            db_input = gr.Textbox("/content/drive/MyDrive/db", label="Database Path")
+            db_files = gr.File(file_count="multiple", label="Or Upload Images")
+            find_threshold = gr.Slider(0.1, 1.0, 0.6, label="Similarity Threshold")
+            find_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face")
+            find_btn = gr.Button("Find Matches")
+            find_output = gr.Plot()
+            find_json = gr.JSON()
 
             find_btn.click(
-                find_faces,
-                [
-                [
+                find_faces,
+                [query_img, db_input, find_threshold, find_model],
+                [find_output, find_json]
             )
-
-
+            db_files.change(lambda x: None, db_files, db_input)
+
         with gr.Tab("Analyze Face"):
-            gr.
-
+            analyze_img = gr.Image(label="Input Image", type="pil")
+            analyze_actions = gr.CheckboxGroup(
+                ["age", "gender", "emotion", "race"],
+                value=["age", "gender", "emotion"],
+                label="Analysis Features"
+            )
             analyze_btn = gr.Button("Analyze")
-
-
+            analyze_output = gr.Plot()
+            analyze_json = gr.JSON()
 
             analyze_btn.click(
-                analyze_face,
-                [
-                [
+                analyze_face,
+                [analyze_img, analyze_actions],
+                [analyze_output, analyze_json]
             )
 
 demo.launch()
79 |
+
distance_metric="cosine",
|
80 |
+
silent=True
|
81 |
+
)
|
82 |
+
except:
|
83 |
+
return None, {"error": "No faces found in database"}
|
84 |
+
|
85 |
df = dfs[0] if isinstance(dfs, list) else dfs
|
86 |
df = df[df['distance'] <= threshold].sort_values('distance')
|
87 |
+
|
88 |
# Create visualization
|
89 |
+
num_matches = min(4, len(df))
|
90 |
+
fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
|
92 |
+
# Show query image
|
93 |
+
query_img = cv2.cvtColor(cv2.imread(query_path), cv2.COLOR_BGR2RGB)
|
94 |
+
axes[0].imshow(query_img)
|
95 |
+
axes[0].set_title("Query")
|
96 |
+
axes[0].axis('off')
|
97 |
+
|
98 |
+
# Show matches
|
99 |
+
for i in range(num_matches):
|
100 |
+
if i >= len(df): break
|
101 |
+
match_path = df.iloc[i]['identity']
|
102 |
+
match_img = cv2.cvtColor(cv2.imread(match_path), cv2.COLOR_BGR2RGB)
|
103 |
+
axes[i+1].imshow(match_img)
|
104 |
+
axes[i+1].set_title(f"Match {i+1}\n{df.iloc[i]['distance']:.4f}")
|
105 |
+
axes[i+1].axis('off')
|
106 |
+
|
107 |
return fig, df[['identity', 'distance']].to_dict('records')
|
108 |
|
109 |
except Exception as e:
|
110 |
return None, {"error": str(e)}
|
|
|
111 |
finally:
|
112 |
shutil.rmtree(temp_dir, ignore_errors=True)
|
113 |
|
114 |
+
def analyze_face(img, actions=['age', 'gender', 'emotion']):
|
115 |
temp_dir = tempfile.mkdtemp()
|
|
|
|
|
116 |
try:
|
117 |
# Save image
|
118 |
+
img_path = os.path.join(temp_dir, "analyze.jpg")
|
119 |
Image.fromarray(img).save(img_path) if isinstance(img, np.ndarray) else img.save(img_path)
|
120 |
|
121 |
# Analyze face
|
|
|
125 |
enforce_detection=False,
|
126 |
detector_backend='opencv'
|
127 |
)
|
128 |
+
|
129 |
# Process results
|
130 |
results = results if isinstance(results, list) else [results]
|
131 |
fig = plt.figure(figsize=(10, 5))
|
132 |
|
133 |
+
# Show image
|
134 |
plt.subplot(121)
|
135 |
+
img_display = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
|
136 |
+
plt.imshow(img_display)
|
137 |
plt.title("Input Image")
|
138 |
plt.axis('off')
|
139 |
+
|
140 |
+
# Show attributes
|
141 |
plt.subplot(122)
|
142 |
+
attributes = {k: v for res in results for k, v in res.items() if k != 'region'}
|
143 |
+
plt.barh(list(attributes.keys()), list(attributes.values()))
|
144 |
plt.title("Analysis Results")
|
145 |
+
plt.tight_layout()
|
146 |
+
|
147 |
return fig, results
|
148 |
|
149 |
except Exception as e:
|
150 |
return None, {"error": str(e)}
|
|
|
151 |
finally:
|
152 |
shutil.rmtree(temp_dir, ignore_errors=True)
|
153 |
|
154 |
+
# Gradio Interface
|
155 |
+
with gr.Blocks(title="Face Recognition Toolkit", theme=gr.themes.Soft()) as demo:
|
156 |
+
gr.Markdown("# π§π» Face Recognition Toolkit")
|
157 |
|
158 |
with gr.Tabs():
|
159 |
with gr.Tab("Verify Faces"):
|
|
|
160 |
with gr.Row():
|
161 |
+
img1 = gr.Image(label="First Image", type="pil")
|
162 |
+
img2 = gr.Image(label="Second Image", type="pil")
|
163 |
+
verify_threshold = gr.Slider(0.1, 1.0, 0.6, label="Match Threshold")
|
164 |
+
verify_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face")
|
165 |
+
verify_btn = gr.Button("Verify Faces")
|
166 |
+
verify_output = gr.Plot()
|
167 |
+
verify_json = gr.JSON()
|
168 |
|
169 |
verify_btn.click(
|
170 |
+
verify_faces,
|
171 |
+
[img1, img2, verify_threshold, verify_model],
|
172 |
+
[verify_output, verify_json]
|
173 |
)
|
174 |
+
|
175 |
with gr.Tab("Find Faces"):
|
176 |
+
query_img = gr.Image(label="Query Image", type="pil")
|
177 |
+
db_input = gr.Textbox("/content/drive/MyDrive/db", label="Database Path")
|
178 |
+
db_files = gr.File(file_count="multiple", label="Or Upload Images")
|
179 |
+
find_threshold = gr.Slider(0.1, 1.0, 0.6, label="Similarity Threshold")
|
180 |
+
find_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face")
|
181 |
+
find_btn = gr.Button("Find Matches")
|
182 |
+
find_output = gr.Plot()
|
183 |
+
find_json = gr.JSON()
|
184 |
|
185 |
find_btn.click(
|
186 |
+
find_faces,
|
187 |
+
[query_img, db_input, find_threshold, find_model],
|
188 |
+
[find_output, find_json]
|
189 |
)
|
190 |
+
db_files.change(lambda x: None, db_files, db_input)
|
191 |
+
|
192 |
with gr.Tab("Analyze Face"):
|
193 |
+
analyze_img = gr.Image(label="Input Image", type="pil")
|
194 |
+
analyze_actions = gr.CheckboxGroup(
|
195 |
+
["age", "gender", "emotion", "race"],
|
196 |
+
value=["age", "gender", "emotion"],
|
197 |
+
label="Analysis Features"
|
198 |
+
)
|
199 |
analyze_btn = gr.Button("Analyze")
|
200 |
+
analyze_output = gr.Plot()
|
201 |
+
analyze_json = gr.JSON()
|
202 |
|
203 |
analyze_btn.click(
|
204 |
+
analyze_face,
|
205 |
+
[analyze_img, analyze_actions],
|
206 |
+
[analyze_output, analyze_json]
|
207 |
)
|
208 |
|
209 |
demo.launch()
|
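Similarly, find_faces now accepts either a directory path (a string) or an iterable of uploaded files, and returns matches as a list of {'identity', 'distance'} records. A sketch of the folder-path branch, with the drive path mirroring the textbox default and a placeholder query image:

    # Hypothetical check of the folder-path branch of find_faces
    from PIL import Image

    query = Image.open("query.jpg")      # placeholder path
    fig, matches = find_faces(query, "/content/drive/MyDrive/db", threshold=0.6)
    if fig is not None:
        for m in matches:
            print(m["identity"], f"{m['distance']:.4f}")
    else:
        print(matches)  # {'error': ...}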