Natwar committed on
Commit
4c6ee84
Β·
verified Β·
1 Parent(s): 32c08bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -155
app.py CHANGED
@@ -8,8 +8,12 @@ import pkg_resources
8
  def install_package(package, version=None):
9
  package_spec = f"{package}=={version}" if version else package
10
  print(f"Installing {package_spec}...")
11
- subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec])
12
-
 
 
 
 
13
  def ensure_package(package, version=None):
14
  try:
15
  if version:
@@ -17,26 +21,20 @@ def ensure_package(package, version=None):
17
  else:
18
  importlib.import_module(package)
19
  print(f"{package} is already installed with the correct version.")
20
- except (ImportError, pkg_resources.VersionConflict):
 
21
  install_package(package, version)
22
 
23
  # Check if running in a standard environment (not Colab/Jupyter)
24
  if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
25
  print("Setting up environment...")
26
 
27
- # First, uninstall problematic packages to start clean
28
- for pkg in ["tensorflow", "numpy", "deepface", "ml-dtypes"]:
29
- try:
30
- subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "-y", pkg])
31
- except:
32
- pass
33
-
34
  # Install packages in the correct order with compatible versions
35
- ensure_package("numpy", "1.22.4") # Lower version than before to avoid ml_dtypes issues
36
- ensure_package("protobuf", "3.20.3") # Important for TensorFlow compatibility
37
- ensure_package("tensorflow", "2.10.1") # Earlier version with fewer dependencies
38
 
39
- # Install other dependencies
40
  for pkg in ["gradio", "opencv-python-headless", "matplotlib", "pillow", "pandas"]:
41
  ensure_package(pkg)
42
 
@@ -58,12 +56,10 @@ import matplotlib.pyplot as plt
58
  from deepface import DeepFace
59
 
60
  def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
61
- # Save uploaded images to temporary files
62
  temp_dir = tempfile.mkdtemp()
63
  img1_path = os.path.join(temp_dir, "image1.jpg")
64
  img2_path = os.path.join(temp_dir, "image2.jpg")
65
 
66
- # Convert to PIL Images and save
67
  if isinstance(img1, np.ndarray):
68
  Image.fromarray(img1).save(img1_path)
69
  else:
@@ -74,7 +70,6 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
74
  else:
75
  img2.save(img2_path)
76
 
77
- # Perform face verification
78
  try:
79
  result = DeepFace.verify(
80
  img1_path=img1_path,
@@ -84,10 +79,8 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
84
  threshold=threshold
85
  )
86
 
87
- # Create comparison visualization
88
  fig, ax = plt.subplots(1, 2, figsize=(10, 5))
89
 
90
- # Display images
91
  img1_display = cv2.imread(img1_path)
92
  img1_display = cv2.cvtColor(img1_display, cv2.COLOR_BGR2RGB)
93
  img2_display = cv2.imread(img2_path)
@@ -101,7 +94,6 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
101
  ax[1].set_title("Image 2")
102
  ax[1].axis("off")
103
 
104
- # Create result message
105
  verification_result = "βœ… FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
106
  confidence = round((1 - result["distance"]) * 100, 2)
107
 
@@ -111,7 +103,6 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
111
 
112
  plt.tight_layout()
113
 
114
- # Clean up temporary files
115
  os.remove(img1_path)
116
  os.remove(img2_path)
117
  os.rmdir(temp_dir)
@@ -119,7 +110,6 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
119
  return fig, json.dumps(result, indent=2)
120
 
121
  except Exception as e:
122
- # Clean up temporary files
123
  if os.path.exists(img1_path):
124
  os.remove(img1_path)
125
  if os.path.exists(img2_path):
@@ -134,21 +124,17 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
134
  return None, error_msg
135
 
136
  def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
137
- # Create temp directory
138
  temp_dir = tempfile.mkdtemp()
139
  query_path = os.path.join(temp_dir, "query.jpg")
140
 
141
- # Save query image
142
  if isinstance(query_img, np.ndarray):
143
  Image.fromarray(query_img).save(query_path)
144
  else:
145
  query_img.save(query_path)
146
 
147
- # If db_folder is just a string, assume it's a folder path the user entered
148
  if isinstance(db_folder, str):
149
  db_path = db_folder
150
  else:
151
- # Handling for folder upload (creates a temp directory with the images)
152
  db_path = os.path.join(temp_dir, "db")
153
  os.makedirs(db_path, exist_ok=True)
154
 
@@ -157,7 +143,6 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
157
  shutil.copy(file.name, os.path.join(db_path, f"image_{i}{file_ext}"))
158
 
159
  try:
160
- # Find matching faces
161
  dfs = DeepFace.find(
162
  img_path=query_path,
163
  db_path=db_path,
@@ -167,32 +152,26 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
167
  )
168
 
169
  if isinstance(dfs, list):
170
- # Handle case where multiple faces are found in query image
171
  if len(dfs) == 0:
172
  return None, "No matching faces found in the database."
173
- df = dfs[0] # Take first face results
174
  else:
175
  df = dfs
176
 
177
- # Check if any matches were found
178
  if df.empty:
179
  return None, "No matching faces found in the database."
180
 
181
- # Sort by similarity (lowest distance first)
182
  df = df.sort_values(by=["distance"])
183
 
184
- # Create visualization for top matches (up to 4)
185
  num_matches = min(4, len(df))
186
  fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
187
 
188
- # Display query image
189
  query_display = cv2.imread(query_path)
190
  query_display = cv2.cvtColor(query_display, cv2.COLOR_BGR2RGB)
191
  axes[0].imshow(query_display)
192
  axes[0].set_title("Query Image")
193
  axes[0].axis("off")
194
 
195
- # Display matches
196
  for i in range(num_matches):
197
  match_path = df.iloc[i]["identity"]
198
  distance = df.iloc[i]["distance"]
@@ -208,22 +187,18 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
208
  plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
209
  plt.tight_layout()
210
 
211
- # Format results for display
212
  results = df[["identity", "distance"]].copy()
213
  results["confidence"] = (1 - results["distance"]) * 100
214
  results["confidence"] = results["confidence"].round(2)
215
  results = results.rename(columns={"identity": "Image Path"})
216
 
217
- # Clean up temp files
218
  os.remove(query_path)
219
- # Don't remove temp DB folder if it came from user input
220
  if not isinstance(db_folder, str):
221
  shutil.rmtree(db_path)
222
 
223
  return fig, results.to_dict('records')
224
 
225
  except Exception as e:
226
- # Clean up temp files
227
  if os.path.exists(query_path):
228
  os.remove(query_path)
229
 
@@ -234,7 +209,6 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
234
  return None, error_msg
235
 
236
  def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
237
- # Create temp directory and save image
238
  temp_dir = tempfile.mkdtemp()
239
  img_path = os.path.join(temp_dir, "analyze.jpg")
240
 
@@ -244,7 +218,6 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
244
  img.save(img_path)
245
 
246
  try:
247
- # Analyze facial attributes
248
  results = DeepFace.analyze(
249
  img_path=img_path,
250
  actions=actions,
@@ -252,38 +225,31 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
252
  detector_backend='opencv'
253
  )
254
 
255
- # Handle both single face and multiple faces results
256
  if isinstance(results, list):
257
  num_faces = len(results)
258
  else:
259
  num_faces = 1
260
  results = [results]
261
 
262
- # Create visualization
263
  fig = plt.figure(figsize=(14, 7))
264
 
265
- # Load the image for display
266
  img_display = cv2.imread(img_path)
267
  img_display = cv2.cvtColor(img_display, cv2.COLOR_BGR2RGB)
268
 
269
- # Main image display
270
  main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
271
  main_ax.imshow(img_display)
272
  main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
273
  main_ax.axis('off')
274
 
275
- # Create a results summary for each face
276
  for i, face_result in enumerate(results):
277
- if i >= 4: # Limit to 4 faces for display
278
  break
279
 
280
- # Get main results
281
  age = face_result.get('age', 'N/A')
282
  gender = face_result.get('dominant_gender', 'N/A')
283
  race = face_result.get('dominant_race', 'N/A')
284
  emotion = face_result.get('dominant_emotion', 'N/A')
285
 
286
- # Gender confidence
287
  gender_conf = 'N/A'
288
  if 'gender' in face_result and isinstance(face_result['gender'], dict):
289
  for g, conf in face_result['gender'].items():
@@ -291,7 +257,6 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
291
  gender_conf = f"{conf:.1f}%"
292
  break
293
 
294
- # Race confidence
295
  race_conf = 'N/A'
296
  if 'race' in face_result and isinstance(face_result['race'], dict):
297
  for r, conf in face_result['race'].items():
@@ -299,7 +264,6 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
299
  race_conf = f"{conf:.1f}%"
300
  break
301
 
302
- # Emotion confidence
303
  emotion_conf = 'N/A'
304
  if 'emotion' in face_result and isinstance(face_result['emotion'], dict):
305
  for e, conf in face_result['emotion'].items():
@@ -307,10 +271,8 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
307
  emotion_conf = f"{conf:.1f}%"
308
  break
309
 
310
- # Create subplot for this face's results
311
  ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
312
 
313
- # Format text for subplot
314
  text = (
315
  f"Face #{i+1}\n\n"
316
  f"Age: {age}\n\n"
@@ -324,13 +286,11 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
324
 
325
  plt.tight_layout()
326
 
327
- # Clean up temp files
328
  os.remove(img_path)
329
  os.rmdir(temp_dir)
330
 
331
- # Format results for display in JSON
332
  formatted_results = []
333
- for i, res in enumerate(results[:8]): # Limit to 8 faces for JSON display
334
  face_data = {
335
  "face_number": i+1,
336
  "age": res.get("age", "N/A"),
@@ -352,7 +312,6 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
352
  return fig, formatted_results
353
 
354
  except Exception as e:
355
- # Clean up temp files
356
  if os.path.exists(img_path):
357
  os.remove(img_path)
358
  if os.path.exists(temp_dir):
@@ -364,11 +323,9 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
364
 
365
  return None, error_msg
366
 
367
- # Create Gradio interface
368
  with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
369
  gr.Markdown("""
370
  # πŸ” Complete Face Recognition Tool
371
-
372
  This tool provides three face recognition features:
373
  - **Verify Faces**: Compare two specific images to check if they contain the same person
374
  - **Find Faces**: Search for matching faces in a database/folder
@@ -378,142 +335,77 @@ with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) a
378
  with gr.Tabs():
379
  with gr.TabItem("Verify Faces"):
380
  with gr.Row():
381
- with gr.Column():
382
- img1_input = gr.Image(label="First Image", type="pil")
383
- with gr.Column():
384
- img2_input = gr.Image(label="Second Image", type="pil")
385
 
386
  with gr.Row():
387
- with gr.Column():
388
- verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
389
- label="Similarity Threshold (lower = stricter matching)")
390
- with gr.Column():
391
- verify_model = gr.Dropdown(
392
- choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
393
- value="VGG-Face",
394
- label="Face Recognition Model"
395
- )
396
 
397
  verify_button = gr.Button("Verify Faces", variant="primary")
398
 
399
- with gr.Row():
400
- verify_result_plot = gr.Plot(label="Verification Result")
401
 
402
- with gr.Row():
403
- verify_json = gr.JSON(label="Technical Details")
404
-
405
- # Set up the verification function
406
  verify_button.click(
407
  verify_faces,
408
  inputs=[img1_input, img2_input, verify_threshold, verify_model],
409
  outputs=[verify_result_plot, verify_json]
410
  )
411
 
412
- gr.Markdown("""
413
- ### πŸ“‹ How to use Face Verification:
414
- 1. Upload two facial images
415
- 2. Adjust the similarity threshold if needed
416
- 3. Select a face recognition model
417
- 4. Click "Verify Faces" button
418
- 5. View the results below
419
- """)
420
-
421
  with gr.TabItem("Find Faces"):
422
- with gr.Row():
423
- query_img = gr.Image(label="Query Image (Face to find)", type="pil")
424
-
425
- with gr.Row():
426
- with gr.Column():
427
- db_path_input = gr.Textbox(label="Database Path (folder containing images to search in)")
428
- db_files_input = gr.File(label="Or upload images for database", file_count="multiple")
429
 
430
  with gr.Row():
431
- with gr.Column():
432
- find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
433
- label="Similarity Threshold (lower = stricter matching)")
434
- with gr.Column():
435
- find_model = gr.Dropdown(
436
- choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
437
- value="VGG-Face",
438
- label="Face Recognition Model"
439
- )
440
 
441
  find_button = gr.Button("Find Matching Faces", variant="primary")
442
 
443
- with gr.Row():
444
- find_result_plot = gr.Plot(label="Search Results")
445
-
446
- with gr.Row():
447
- find_results_table = gr.JSON(label="Detailed Results")
448
 
449
- # Connect function to button
450
  find_button.click(
451
  find_faces,
452
  inputs=[query_img, db_path_input, find_threshold, find_model],
453
  outputs=[find_result_plot, find_results_table]
454
  )
455
 
456
- # Also connect with files input
457
  db_files_input.change(
458
- lambda x: "", # Clear the text input when files are uploaded
459
  inputs=db_files_input,
460
  outputs=db_path_input
461
  )
462
 
463
- gr.Markdown("""
464
- ### πŸ“‹ How to use Face Finding:
465
- 1. Upload a query image containing the face you want to find
466
- 2. Either:
467
- - Enter the path to a folder containing images to search through, or
468
- - Upload multiple images to create a temporary database
469
- 3. Adjust the similarity threshold if needed
470
- 4. Select a face recognition model
471
- 5. Click "Find Matching Faces" button
472
- 6. View the results showing the most similar faces
473
- """)
474
-
475
  with gr.TabItem("Analyze Face"):
476
- with gr.Row():
477
- analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
478
-
479
- with gr.Row():
480
- actions_checkboxes = gr.CheckboxGroup(
481
- choices=["age", "gender", "race", "emotion"],
482
- value=["age", "gender", "race", "emotion"],
483
- label="Select Attributes to Analyze"
484
- )
485
 
486
  analyze_button = gr.Button("Analyze Face", variant="primary")
487
 
488
- with gr.Row():
489
- analyze_result_plot = gr.Plot(label="Analysis Results")
490
-
491
- with gr.Row():
492
- analyze_json = gr.JSON(label="Detailed Analysis")
493
 
494
- # Connect function to button
495
  analyze_button.click(
496
  analyze_face,
497
  inputs=[analyze_img, actions_checkboxes],
498
  outputs=[analyze_result_plot, analyze_json]
499
  )
500
 
501
- gr.Markdown("""
502
- ### πŸ“‹ How to use Facial Analysis:
503
- 1. Upload an image containing one or more faces
504
- 2. Select which attributes you want to analyze
505
- 3. Click "Analyze Face" button
506
- 4. View the visual results and detailed JSON data
507
-
508
- ### πŸ“Š Understanding the results:
509
- - The tool can detect multiple faces in a single image
510
- - For each face, it provides:
511
- - Estimated age
512
- - Predicted gender with confidence
513
- - Predicted race/ethnicity with confidence
514
- - Detected emotional expression with confidence
515
- - The JSON output provides detailed confidence scores for all categories
516
- """)
517
-
518
- # Launch the app
519
  demo.launch()
 
8
def install_package(package, version=None):
    """Install *package* with pip, pinned to *version* when one is given.

    Prints progress, and re-raises subprocess.CalledProcessError if the
    pip invocation fails so callers can react to a failed install.
    """
    package_spec = f"{package}=={version}" if version else package
    print(f"Installing {package_spec}...")
    # Run pip through the current interpreter so the install targets this env.
    cmd = [sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        print(f"Failed to install {package_spec}: {e}")
        raise
16
+
17
  def ensure_package(package, version=None):
18
  try:
19
  if version:
 
21
  else:
22
  importlib.import_module(package)
23
  print(f"{package} is already installed with the correct version.")
24
+ except (ImportError, pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:
25
+ print(f"Package requirement failed: {e}")
26
  install_package(package, version)
27
 
28
  # Check if running in a standard environment (not Colab/Jupyter)
29
  if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
30
  print("Setting up environment...")
31
 
 
 
 
 
 
 
 
32
  # Install packages in the correct order with compatible versions
33
+ ensure_package("numpy", "1.23.5") # Compatible with TensorFlow 2.10
34
+ ensure_package("protobuf", "3.20.3") # Critical for TensorFlow compatibility
35
+ ensure_package("tensorflow", "2.10.0") # Stable version with good compatibility
36
 
37
+ # Install core dependencies
38
  for pkg in ["gradio", "opencv-python-headless", "matplotlib", "pillow", "pandas"]:
39
  ensure_package(pkg)
40
 
 
56
  from deepface import DeepFace
57
 
58
  def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
 
59
  temp_dir = tempfile.mkdtemp()
60
  img1_path = os.path.join(temp_dir, "image1.jpg")
61
  img2_path = os.path.join(temp_dir, "image2.jpg")
62
 
 
63
  if isinstance(img1, np.ndarray):
64
  Image.fromarray(img1).save(img1_path)
65
  else:
 
70
  else:
71
  img2.save(img2_path)
72
 
 
73
  try:
74
  result = DeepFace.verify(
75
  img1_path=img1_path,
 
79
  threshold=threshold
80
  )
81
 
 
82
  fig, ax = plt.subplots(1, 2, figsize=(10, 5))
83
 
 
84
  img1_display = cv2.imread(img1_path)
85
  img1_display = cv2.cvtColor(img1_display, cv2.COLOR_BGR2RGB)
86
  img2_display = cv2.imread(img2_path)
 
94
  ax[1].set_title("Image 2")
95
  ax[1].axis("off")
96
 
 
97
  verification_result = "βœ… FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
98
  confidence = round((1 - result["distance"]) * 100, 2)
99
 
 
103
 
104
  plt.tight_layout()
105
 
 
106
  os.remove(img1_path)
107
  os.remove(img2_path)
108
  os.rmdir(temp_dir)
 
110
  return fig, json.dumps(result, indent=2)
111
 
112
  except Exception as e:
 
113
  if os.path.exists(img1_path):
114
  os.remove(img1_path)
115
  if os.path.exists(img2_path):
 
124
  return None, error_msg
125
 
126
  def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
 
127
  temp_dir = tempfile.mkdtemp()
128
  query_path = os.path.join(temp_dir, "query.jpg")
129
 
 
130
  if isinstance(query_img, np.ndarray):
131
  Image.fromarray(query_img).save(query_path)
132
  else:
133
  query_img.save(query_path)
134
 
 
135
  if isinstance(db_folder, str):
136
  db_path = db_folder
137
  else:
 
138
  db_path = os.path.join(temp_dir, "db")
139
  os.makedirs(db_path, exist_ok=True)
140
 
 
143
  shutil.copy(file.name, os.path.join(db_path, f"image_{i}{file_ext}"))
144
 
145
  try:
 
146
  dfs = DeepFace.find(
147
  img_path=query_path,
148
  db_path=db_path,
 
152
  )
153
 
154
  if isinstance(dfs, list):
 
155
  if len(dfs) == 0:
156
  return None, "No matching faces found in the database."
157
+ df = dfs[0]
158
  else:
159
  df = dfs
160
 
 
161
  if df.empty:
162
  return None, "No matching faces found in the database."
163
 
 
164
  df = df.sort_values(by=["distance"])
165
 
 
166
  num_matches = min(4, len(df))
167
  fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
168
 
 
169
  query_display = cv2.imread(query_path)
170
  query_display = cv2.cvtColor(query_display, cv2.COLOR_BGR2RGB)
171
  axes[0].imshow(query_display)
172
  axes[0].set_title("Query Image")
173
  axes[0].axis("off")
174
 
 
175
  for i in range(num_matches):
176
  match_path = df.iloc[i]["identity"]
177
  distance = df.iloc[i]["distance"]
 
187
  plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
188
  plt.tight_layout()
189
 
 
190
  results = df[["identity", "distance"]].copy()
191
  results["confidence"] = (1 - results["distance"]) * 100
192
  results["confidence"] = results["confidence"].round(2)
193
  results = results.rename(columns={"identity": "Image Path"})
194
 
 
195
  os.remove(query_path)
 
196
  if not isinstance(db_folder, str):
197
  shutil.rmtree(db_path)
198
 
199
  return fig, results.to_dict('records')
200
 
201
  except Exception as e:
 
202
  if os.path.exists(query_path):
203
  os.remove(query_path)
204
 
 
209
  return None, error_msg
210
 
211
  def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
 
212
  temp_dir = tempfile.mkdtemp()
213
  img_path = os.path.join(temp_dir, "analyze.jpg")
214
 
 
218
  img.save(img_path)
219
 
220
  try:
 
221
  results = DeepFace.analyze(
222
  img_path=img_path,
223
  actions=actions,
 
225
  detector_backend='opencv'
226
  )
227
 
 
228
  if isinstance(results, list):
229
  num_faces = len(results)
230
  else:
231
  num_faces = 1
232
  results = [results]
233
 
 
234
  fig = plt.figure(figsize=(14, 7))
235
 
 
236
  img_display = cv2.imread(img_path)
237
  img_display = cv2.cvtColor(img_display, cv2.COLOR_BGR2RGB)
238
 
 
239
  main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
240
  main_ax.imshow(img_display)
241
  main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
242
  main_ax.axis('off')
243
 
 
244
  for i, face_result in enumerate(results):
245
+ if i >= 4:
246
  break
247
 
 
248
  age = face_result.get('age', 'N/A')
249
  gender = face_result.get('dominant_gender', 'N/A')
250
  race = face_result.get('dominant_race', 'N/A')
251
  emotion = face_result.get('dominant_emotion', 'N/A')
252
 
 
253
  gender_conf = 'N/A'
254
  if 'gender' in face_result and isinstance(face_result['gender'], dict):
255
  for g, conf in face_result['gender'].items():
 
257
  gender_conf = f"{conf:.1f}%"
258
  break
259
 
 
260
  race_conf = 'N/A'
261
  if 'race' in face_result and isinstance(face_result['race'], dict):
262
  for r, conf in face_result['race'].items():
 
264
  race_conf = f"{conf:.1f}%"
265
  break
266
 
 
267
  emotion_conf = 'N/A'
268
  if 'emotion' in face_result and isinstance(face_result['emotion'], dict):
269
  for e, conf in face_result['emotion'].items():
 
271
  emotion_conf = f"{conf:.1f}%"
272
  break
273
 
 
274
  ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
275
 
 
276
  text = (
277
  f"Face #{i+1}\n\n"
278
  f"Age: {age}\n\n"
 
286
 
287
  plt.tight_layout()
288
 
 
289
  os.remove(img_path)
290
  os.rmdir(temp_dir)
291
 
 
292
  formatted_results = []
293
+ for i, res in enumerate(results[:8]):
294
  face_data = {
295
  "face_number": i+1,
296
  "age": res.get("age", "N/A"),
 
312
  return fig, formatted_results
313
 
314
  except Exception as e:
 
315
  if os.path.exists(img_path):
316
  os.remove(img_path)
317
  if os.path.exists(temp_dir):
 
323
 
324
  return None, error_msg
325
 
 
326
  with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
327
  gr.Markdown("""
328
  # πŸ” Complete Face Recognition Tool
 
329
  This tool provides three face recognition features:
330
  - **Verify Faces**: Compare two specific images to check if they contain the same person
331
  - **Find Faces**: Search for matching faces in a database/folder
 
335
  with gr.Tabs():
336
  with gr.TabItem("Verify Faces"):
337
  with gr.Row():
338
+ img1_input = gr.Image(label="First Image", type="pil")
339
+ img2_input = gr.Image(label="Second Image", type="pil")
 
 
340
 
341
  with gr.Row():
342
+ verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
343
+ label="Similarity Threshold (lower = stricter matching)")
344
+ verify_model = gr.Dropdown(
345
+ choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
346
+ value="VGG-Face",
347
+ label="Face Recognition Model"
348
+ )
 
 
349
 
350
  verify_button = gr.Button("Verify Faces", variant="primary")
351
 
352
+ verify_result_plot = gr.Plot(label="Verification Result")
353
+ verify_json = gr.JSON(label="Technical Details")
354
 
 
 
 
 
355
  verify_button.click(
356
  verify_faces,
357
  inputs=[img1_input, img2_input, verify_threshold, verify_model],
358
  outputs=[verify_result_plot, verify_json]
359
  )
360
 
 
 
 
 
 
 
 
 
 
361
  with gr.TabItem("Find Faces"):
362
+ query_img = gr.Image(label="Query Image (Face to find)", type="pil")
363
+ db_path_input = gr.Textbox(label="Database Path (folder containing images to search in)")
364
+ db_files_input = gr.File(label="Or upload images for database", file_count="multiple")
 
 
 
 
365
 
366
  with gr.Row():
367
+ find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
368
+ label="Similarity Threshold (lower = stricter matching)")
369
+ find_model = gr.Dropdown(
370
+ choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
371
+ value="VGG-Face",
372
+ label="Face Recognition Model"
373
+ )
 
 
374
 
375
  find_button = gr.Button("Find Matching Faces", variant="primary")
376
 
377
+ find_result_plot = gr.Plot(label="Search Results")
378
+ find_results_table = gr.JSON(label="Detailed Results")
 
 
 
379
 
 
380
  find_button.click(
381
  find_faces,
382
  inputs=[query_img, db_path_input, find_threshold, find_model],
383
  outputs=[find_result_plot, find_results_table]
384
  )
385
 
 
386
  db_files_input.change(
387
+ lambda x: "",
388
  inputs=db_files_input,
389
  outputs=db_path_input
390
  )
391
 
 
 
 
 
 
 
 
 
 
 
 
 
392
  with gr.TabItem("Analyze Face"):
393
+ analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
394
+ actions_checkboxes = gr.CheckboxGroup(
395
+ choices=["age", "gender", "race", "emotion"],
396
+ value=["age", "gender", "race", "emotion"],
397
+ label="Select Attributes to Analyze"
398
+ )
 
 
 
399
 
400
  analyze_button = gr.Button("Analyze Face", variant="primary")
401
 
402
+ analyze_result_plot = gr.Plot(label="Analysis Results")
403
+ analyze_json = gr.JSON(label="Detailed Analysis")
 
 
 
404
 
 
405
  analyze_button.click(
406
  analyze_face,
407
  inputs=[analyze_img, actions_checkboxes],
408
  outputs=[analyze_result_plot, analyze_json]
409
  )
410
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
411
  demo.launch()