Natwar commited on
Commit
ef07f3f
Β·
verified Β·
1 Parent(s): f333914

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +491 -0
app.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Install required packages when running outside common managed environments.
import os  # must be imported BEFORE the os.path.exists checks below
import subprocess
import sys

# Check if running in a standard environment (not Docker/Kaggle)
# and install packages if needed.
if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
    try:
        import gradio
        import deepface
        import cv2
        import numpy as np
        import matplotlib
        import PIL
    except ImportError:
        print("Installing required packages...")
        subprocess.check_call([sys.executable, "-m", "pip", "install",
                               "gradio", "deepface", "opencv-python-headless",
                               "numpy", "matplotlib", "pillow"])

import gradio as gr
import json
import cv2
import numpy as np
from deepface import DeepFace
import matplotlib.pyplot as plt
from PIL import Image
import tempfile
import os
import pandas as pd
import shutil
def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
    """Verify whether two images contain the same person.

    Args:
        img1: First image (PIL.Image or numpy array).
        img2: Second image (PIL.Image or numpy array).
        threshold: Cosine-distance threshold forwarded to DeepFace.verify.
        model: Name of the DeepFace recognition model to use.

    Returns:
        (matplotlib Figure, JSON string of the raw verification result)
        on success, or (None, error message string) on failure.
    """
    # DeepFace works on file paths, so persist both images to a temp dir.
    temp_dir = tempfile.mkdtemp()
    img1_path = os.path.join(temp_dir, "image1.jpg")
    img2_path = os.path.join(temp_dir, "image2.jpg")

    if isinstance(img1, np.ndarray):
        Image.fromarray(img1).save(img1_path)
    else:
        img1.save(img1_path)

    if isinstance(img2, np.ndarray):
        Image.fromarray(img2).save(img2_path)
    else:
        img2.save(img2_path)

    try:
        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )

        # Side-by-side visualization of the two input images.
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))

        img1_display = cv2.imread(img1_path)
        img1_display = cv2.cvtColor(img1_display, cv2.COLOR_BGR2RGB)
        img2_display = cv2.imread(img2_path)
        img2_display = cv2.cvtColor(img2_display, cv2.COLOR_BGR2RGB)

        ax[0].imshow(img1_display)
        ax[0].set_title("Image 1")
        ax[0].axis("off")

        ax[1].imshow(img2_display)
        ax[1].set_title("Image 2")
        ax[1].axis("off")

        verification_result = "βœ… FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
        # Cosine distance is in [0, 1]; report (1 - distance) as a confidence %.
        confidence = round((1 - result["distance"]) * 100, 2)

        plt.suptitle(f"{verification_result}\nConfidence: {confidence}%\nDistance: {result['distance']:.4f}",
                     fontsize=16, fontweight='bold',
                     color='green' if result["verified"] else 'red')

        plt.tight_layout()

        return fig, json.dumps(result, indent=2)

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in one or both images. Please try different images."

        return None, error_msg

    finally:
        # Clean up on ALL paths: the original os.remove/os.rmdir sequence
        # leaked the temp dir when an exception occurred after verification
        # (e.g. during plotting), and rmdir fails on non-empty directories.
        shutil.rmtree(temp_dir, ignore_errors=True)
def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
    """Search a database of images for faces matching the query image.

    Args:
        query_img: Query image (PIL.Image or numpy array).
        db_folder: Either a folder path (str) or an iterable of uploaded
            file objects (each exposing a .name path) used to build a
            temporary database directory.
        threshold: Cosine-distance threshold forwarded to DeepFace.find.
        model: Name of the DeepFace recognition model to use.

    Returns:
        (matplotlib Figure, list of match dicts) on success,
        or (None, error message string) on failure.
    """
    temp_dir = tempfile.mkdtemp()
    query_path = os.path.join(temp_dir, "query.jpg")

    if isinstance(query_img, np.ndarray):
        Image.fromarray(query_img).save(query_path)
    else:
        query_img.save(query_path)

    # A string is treated as an existing folder path the user entered;
    # anything else is assumed to be uploaded files copied into a temp DB.
    if isinstance(db_folder, str):
        db_path = db_folder
    else:
        db_path = os.path.join(temp_dir, "db")
        os.makedirs(db_path, exist_ok=True)

        for i, file in enumerate(db_folder):
            file_ext = os.path.splitext(file.name)[1]
            shutil.copy(file.name, os.path.join(db_path, f"image_{i}{file_ext}"))

    try:
        dfs = DeepFace.find(
            img_path=query_path,
            db_path=db_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )

        # Recent DeepFace versions return a list of DataFrames (one per face
        # detected in the query); older versions return a single DataFrame.
        if isinstance(dfs, list):
            if len(dfs) == 0:
                return None, "No matching faces found in the database."
            df = dfs[0]  # Use the first detected face's matches.
        else:
            df = dfs

        if df.empty:
            return None, "No matching faces found in the database."

        # Lowest cosine distance = best match first.
        df = df.sort_values(by=["distance"])

        # Visualize the query image plus up to 4 best matches.
        num_matches = min(4, len(df))
        fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))

        query_display = cv2.imread(query_path)
        query_display = cv2.cvtColor(query_display, cv2.COLOR_BGR2RGB)
        axes[0].imshow(query_display)
        axes[0].set_title("Query Image")
        axes[0].axis("off")

        for i in range(num_matches):
            match_path = df.iloc[i]["identity"]
            distance = df.iloc[i]["distance"]
            confidence = round((1 - distance) * 100, 2)

            match_img = cv2.imread(match_path)
            match_img = cv2.cvtColor(match_img, cv2.COLOR_BGR2RGB)

            axes[i+1].imshow(match_img)
            axes[i+1].set_title(f"Match #{i+1}\nConfidence: {confidence}%")
            axes[i+1].axis("off")

        plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
        plt.tight_layout()

        # Tabular results with a percentage confidence column.
        results = df[["identity", "distance"]].copy()
        results["confidence"] = (1 - results["distance"]) * 100
        results["confidence"] = results["confidence"].round(2)
        results = results.rename(columns={"identity": "Image Path"})

        return fig, results.to_dict('records')

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in the query image. Please try a different image."

        return None, error_msg

    finally:
        # temp_dir holds the query image and (when files were uploaded) the
        # temp DB copy; a user-supplied folder path lives outside temp_dir
        # and is never touched. The original code leaked the temp DB and
        # temp_dir itself on both success and error paths.
        shutil.rmtree(temp_dir, ignore_errors=True)
def analyze_face(img, actions=None):
    """Analyze facial attributes (age, gender, race, emotion) in an image.

    Args:
        img: Image to analyze (PIL.Image or numpy array).
        actions: Iterable of attribute names to analyze; None (the default)
            analyzes all four supported attributes.

    Returns:
        (matplotlib Figure, list of per-face result dicts) on success,
        or (None, error message string) on failure.
    """
    # Avoid the original mutable default argument; None means "analyze all".
    if actions is None:
        actions = ['age', 'gender', 'race', 'emotion']

    temp_dir = tempfile.mkdtemp()
    img_path = os.path.join(temp_dir, "analyze.jpg")

    if isinstance(img, np.ndarray):
        Image.fromarray(img).save(img_path)
    else:
        img.save(img_path)

    try:
        results = DeepFace.analyze(
            img_path=img_path,
            actions=actions,
            enforce_detection=True,
            detector_backend='opencv'
        )

        # DeepFace returns a list for multi-face images, a dict for one face;
        # normalise to a list so the rest of the code handles both.
        if isinstance(results, list):
            num_faces = len(results)
        else:
            num_faces = 1
            results = [results]

        fig = plt.figure(figsize=(14, 7))

        img_display = cv2.imread(img_path)
        img_display = cv2.cvtColor(img_display, cv2.COLOR_BGR2RGB)

        # Left half of a 2x4 grid: the analyzed image; right half: up to
        # four per-face summary panels.
        main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
        main_ax.imshow(img_display)
        main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
        main_ax.axis('off')

        for i, face_result in enumerate(results):
            if i >= 4:  # Only room for 4 face panels in the grid.
                break

            age = face_result.get('age', 'N/A')
            gender = face_result.get('dominant_gender', 'N/A')
            race = face_result.get('dominant_race', 'N/A')
            emotion = face_result.get('dominant_emotion', 'N/A')

            # Confidence score of each dominant attribute (or 'N/A').
            gender_conf = _dominant_confidence(face_result, 'gender', gender)
            race_conf = _dominant_confidence(face_result, 'race', race)
            emotion_conf = _dominant_confidence(face_result, 'emotion', emotion)

            ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))

            text = (
                f"Face #{i+1}\n\n"
                f"Age: {age}\n\n"
                f"Gender: {gender} ({gender_conf})\n\n"
                f"Race: {race} ({race_conf})\n\n"
                f"Emotion: {emotion} ({emotion_conf})"
            )

            ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
            ax.axis('off')

        plt.tight_layout()

        # Structured results for the JSON panel (limited to 8 faces).
        formatted_results = []
        for i, res in enumerate(results[:8]):
            face_data = {
                "face_number": i+1,
                "age": res.get("age", "N/A"),
                "gender": {
                    "dominant": res.get("dominant_gender", "N/A"),
                    "confidence": res.get("gender", {})
                },
                "race": {
                    "dominant": res.get("dominant_race", "N/A"),
                    "confidence": res.get("race", {})
                },
                "emotion": {
                    "dominant": res.get("dominant_emotion", "N/A"),
                    "confidence": res.get("emotion", {})
                }
            }
            formatted_results.append(face_data)

        return fig, formatted_results

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in the image. Please try a different image."

        return None, error_msg

    finally:
        # Robust cleanup on all paths (the original os.rmdir fails on
        # non-empty directories and was skipped when plotting raised).
        shutil.rmtree(temp_dir, ignore_errors=True)


def _dominant_confidence(face_result, key, dominant):
    """Return the confidence (as '12.3%') of *dominant* within
    face_result[key], or 'N/A' when the scores dict is missing."""
    scores = face_result.get(key)
    if isinstance(scores, dict):
        for label, conf in scores.items():
            if label.lower() == str(dominant).lower():
                return f"{conf:.1f}%"
    return 'N/A'
# Create Gradio interface.
with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # πŸ” Complete Face Recognition Tool

    This tool provides three face recognition features:
    - **Verify Faces**: Compare two specific images to check if they contain the same person
    - **Find Faces**: Search for matching faces in a database/folder
    - **Analyze Face**: Determine age, gender, race, and emotion from a facial image
    """)

    with gr.Tabs():
        with gr.TabItem("Verify Faces"):
            with gr.Row():
                with gr.Column():
                    img1_input = gr.Image(label="First Image", type="pil")
                with gr.Column():
                    img2_input = gr.Image(label="Second Image", type="pil")

            with gr.Row():
                with gr.Column():
                    verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
                                                 label="Similarity Threshold (lower = stricter matching)")
                with gr.Column():
                    verify_model = gr.Dropdown(
                        choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                        value="VGG-Face",
                        label="Face Recognition Model"
                    )

            verify_button = gr.Button("Verify Faces", variant="primary")

            with gr.Row():
                verify_result_plot = gr.Plot(label="Verification Result")

            with gr.Row():
                verify_json = gr.JSON(label="Technical Details")

            # Wire the verification handler.
            verify_button.click(
                verify_faces,
                inputs=[img1_input, img2_input, verify_threshold, verify_model],
                outputs=[verify_result_plot, verify_json]
            )

            gr.Markdown("""
            ### πŸ“‹ How to use Face Verification:
            1. Upload two facial images
            2. Adjust the similarity threshold if needed
            3. Select a face recognition model
            4. Click "Verify Faces" button
            5. View the results below
            """)

        with gr.TabItem("Find Faces"):
            with gr.Row():
                query_img = gr.Image(label="Query Image (Face to find)", type="pil")

            with gr.Row():
                with gr.Column():
                    db_path_input = gr.Textbox(label="Database Path (folder containing images to search in)")
                    db_files_input = gr.File(label="Or upload images for database", file_count="multiple")

            with gr.Row():
                with gr.Column():
                    find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
                                               label="Similarity Threshold (lower = stricter matching)")
                with gr.Column():
                    find_model = gr.Dropdown(
                        choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                        value="VGG-Face",
                        label="Face Recognition Model"
                    )

            find_button = gr.Button("Find Matching Faces", variant="primary")

            with gr.Row():
                find_result_plot = gr.Plot(label="Search Results")

            with gr.Row():
                find_results_table = gr.JSON(label="Detailed Results")

            # Prefer uploaded files over the typed path so the upload widget
            # actually feeds the search. (Previously db_files_input was never
            # passed to find_faces, making the upload feature dead code even
            # though find_faces explicitly supports a list of files.)
            find_button.click(
                lambda q, path, files, thr, mdl: find_faces(
                    q, files if files else path, thr, mdl),
                inputs=[query_img, db_path_input, db_files_input,
                        find_threshold, find_model],
                outputs=[find_result_plot, find_results_table]
            )

            # Clear the text path when files are uploaded, so only one
            # database source is active at a time.
            db_files_input.change(
                lambda x: "",
                inputs=db_files_input,
                outputs=db_path_input
            )

            gr.Markdown("""
            ### πŸ“‹ How to use Face Finding:
            1. Upload a query image containing the face you want to find
            2. Either:
               - Enter the path to a folder containing images to search through, or
               - Upload multiple images to create a temporary database
            3. Adjust the similarity threshold if needed
            4. Select a face recognition model
            5. Click "Find Matching Faces" button
            6. View the results showing the most similar faces
            """)

        with gr.TabItem("Analyze Face"):
            with gr.Row():
                analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")

            with gr.Row():
                actions_checkboxes = gr.CheckboxGroup(
                    choices=["age", "gender", "race", "emotion"],
                    value=["age", "gender", "race", "emotion"],
                    label="Select Attributes to Analyze"
                )

            analyze_button = gr.Button("Analyze Face", variant="primary")

            with gr.Row():
                analyze_result_plot = gr.Plot(label="Analysis Results")

            with gr.Row():
                analyze_json = gr.JSON(label="Detailed Analysis")

            # Wire the analysis handler.
            analyze_button.click(
                analyze_face,
                inputs=[analyze_img, actions_checkboxes],
                outputs=[analyze_result_plot, analyze_json]
            )

            gr.Markdown("""
            ### πŸ“‹ How to use Facial Analysis:
            1. Upload an image containing one or more faces
            2. Select which attributes you want to analyze
            3. Click "Analyze Face" button
            4. View the visual results and detailed JSON data

            ### πŸ“Š Understanding the results:
            - The tool can detect multiple faces in a single image
            - For each face, it provides:
              - Estimated age
              - Predicted gender with confidence
              - Predicted race/ethnicity with confidence
              - Detected emotional expression with confidence
            - The JSON output provides detailed confidence scores for all categories
            """)

# Launch the app.
demo.launch()