Natwar committed
Commit 15cef53 · verified · Parent: ba0a640

Update app.py

Files changed (1)
  1. app.py +149 -168
app.py CHANGED
@@ -63,17 +63,19 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
     img1_path = os.path.join(temp_dir, "image1.jpg")
     img2_path = os.path.join(temp_dir, "image2.jpg")
 
-    if isinstance(img1, np.ndarray):
-        Image.fromarray(img1).save(img1_path)
-    else:
-        img1.save(img1_path)
-
-    if isinstance(img2, np.ndarray):
-        Image.fromarray(img2).save(img2_path)
-    else:
-        img2.save(img2_path)
-
     try:
+        # Save images
+        if isinstance(img1, np.ndarray):
+            Image.fromarray(img1).save(img1_path)
+        else:
+            img1.save(img1_path)
+
+        if isinstance(img2, np.ndarray):
+            Image.fromarray(img2).save(img2_path)
+        else:
+            img2.save(img2_path)
+
+        # Perform verification
         result = DeepFace.verify(
             img1_path=img1_path,
             img2_path=img2_path,
@@ -82,6 +84,7 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
             threshold=threshold
         )
 
+        # Create visualization
         fig, ax = plt.subplots(1, 2, figsize=(10, 5))
 
         img1_display = cv2.imread(img1_path)
@@ -106,13 +109,15 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
 
         plt.tight_layout()
 
+        # Clean up
         os.remove(img1_path)
         os.remove(img2_path)
         os.rmdir(temp_dir)
 
-        return fig, json.dumps(result, indent=2)
-
+        return fig, result  # Return raw dict instead of JSON string
+
     except Exception as e:
+        # Clean up even if error occurs
        if os.path.exists(img1_path):
             os.remove(img1_path)
         if os.path.exists(img2_path):
@@ -120,40 +125,42 @@ def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
         if os.path.exists(temp_dir):
             os.rmdir(temp_dir)
 
-        error_msg = f"Error: {str(e)}"
-        if "No face detected" in str(e):
+        error_msg = str(e)
+        if "No face detected" in error_msg:
             error_msg = "No face detected in one or both images. Please try different images."
 
-        return None, error_msg
+        return None, {"error": error_msg}
 
 def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
     temp_dir = tempfile.mkdtemp()
     query_path = os.path.join(temp_dir, "query.jpg")
 
-    if isinstance(query_img, np.ndarray):
-        Image.fromarray(query_img).save(query_path)
-    else:
-        query_img.save(query_path)
-
-    # Handle cloud storage paths and uploaded files
-    if isinstance(db_folder, str):
-        if db_folder.startswith("/content/drive"):
-            db_path = db_folder
-        else:
-            db_path = os.path.abspath(db_folder)
-        if not os.path.exists(db_path):
-            return None, "Invalid database path - directory does not exist"
-    else:
-        db_path = os.path.join(temp_dir, "db")
-        os.makedirs(db_path, exist_ok=True)
-
-        for i, file in enumerate(db_folder):
-            orig_filename = file.orig_name
-            file_ext = os.path.splitext(orig_filename)[1]
-            new_filename = f"image_{i}{file_ext}"
-            shutil.copy(file.name, os.path.join(db_path, new_filename))
-
     try:
+        # Save query image
+        if isinstance(query_img, np.ndarray):
+            Image.fromarray(query_img).save(query_path)
+        else:
+            query_img.save(query_path)
+
+        # Handle database path
+        if isinstance(db_folder, str):
+            if db_folder.startswith("/content/drive"):
+                db_path = db_folder
+            else:
+                db_path = os.path.abspath(db_folder)
+            if not os.path.exists(db_path):
+                return None, {"error": "Invalid database path - directory does not exist"}
+        else:
+            db_path = os.path.join(temp_dir, "db")
+            os.makedirs(db_path, exist_ok=True)
+
+            for i, file in enumerate(db_folder):
+                orig_filename = file.orig_name
+                file_ext = os.path.splitext(orig_filename)[1]
+                new_filename = f"image_{i}{file_ext}"
+                shutil.copy(file.name, os.path.join(db_path, new_filename))
+
+        # Find matches
         dfs = DeepFace.find(
             img_path=query_path,
             db_path=db_path,
@@ -163,18 +170,20 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
             silent=True
         )
 
+        # Process results
         if isinstance(dfs, list):
             if len(dfs) == 0:
-                return None, "No matching faces found in the database."
+                return None, {"error": "No matching faces found in the database."}
             df = dfs[0]
         else:
             df = dfs
 
         if df.empty:
-            return None, "No matching faces found in the database."
+            return None, {"error": "No matching faces found in the database."}
 
         df = df.sort_values(by=["distance"])
 
+        # Create visualization
         num_matches = min(4, len(df))
         fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
 
@@ -193,9 +202,6 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
             if not os.path.exists(match_path):
                 continue
 
-            distance = df.iloc[i]["distance"]
-            confidence = round((1 - distance) * 100, 2)
-
             try:
                 match_img = cv2.imread(match_path)
                 if match_img is None:
@@ -203,51 +209,55 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
 
                 match_img = cv2.cvtColor(match_img, cv2.COLOR_BGR2RGB)
                 axes[valid_matches+1].imshow(match_img)
-                axes[valid_matches+1].set_title(f"Match #{valid_matches+1}\nConfidence: {confidence}%")
+                axes[valid_matches+1].set_title(f"Match #{valid_matches+1}")
                 axes[valid_matches+1].axis("off")
                 valid_matches += 1
-            except:
+            except Exception as e:
                 continue
 
+        # Hide empty axes
         for j in range(valid_matches+1, num_matches+1):
             axes[j].axis("off")
 
         plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
         plt.tight_layout()
 
+        # Prepare results
         results = df[["identity", "distance"]].copy()
         results["confidence"] = (1 - results["distance"]) * 100
         results["confidence"] = results["confidence"].round(2)
-        results = results.rename(columns={"identity": "Image Path"})
+        results = results.rename(columns={"identity": "Image Path"}).to_dict('records')
 
-        os.remove(query_path)
-        if not isinstance(db_folder, str):
-            shutil.rmtree(db_path)
-
-        return fig, results.to_dict('records')
-
+        return fig, results
+
     except Exception as e:
-        if os.path.exists(query_path):
-            os.remove(query_path)
-
-        error_msg = f"Error: {str(e)}"
-        if "No face detected" in str(e):
+        error_msg = str(e)
+        if "No face detected" in error_msg:
             error_msg = "No face detected in the query image. Please try a different image."
-        elif "No such file or directory" in str(e):
+        elif "No such file or directory" in error_msg:
             error_msg = "Invalid database path or corrupted image files"
 
-        return None, error_msg
+        return None, {"error": error_msg}
+
+    finally:
+        # Clean up
+        if os.path.exists(query_path):
+            os.remove(query_path)
+        if 'db_path' in locals() and not isinstance(db_folder, str):
+            shutil.rmtree(db_path, ignore_errors=True)
 
 def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
     temp_dir = tempfile.mkdtemp()
     img_path = os.path.join(temp_dir, "analyze.jpg")
 
-    if isinstance(img, np.ndarray):
-        Image.fromarray(img).save(img_path)
-    else:
-        img.save(img_path)
-
     try:
+        # Save image
+        if isinstance(img, np.ndarray):
+            Image.fromarray(img).save(img_path)
+        else:
+            img.save(img_path)
+
+        # Analyze image
         results = DeepFace.analyze(
             img_path=img_path,
             actions=actions,
@@ -255,12 +265,14 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
             detector_backend='opencv'
        )
 
+        # Process results
         if isinstance(results, list):
             num_faces = len(results)
         else:
             num_faces = 1
             results = [results]
 
+        # Create visualization
         fig = plt.figure(figsize=(14, 7))
 
         img_display = cv2.imread(img_path)
@@ -271,88 +283,48 @@ def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
         main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
         main_ax.axis('off')
 
-        for i, face_result in enumerate(results):
-            if i >= 4:
-                break
-
+        for i, face_result in enumerate(results[:4]):
+            # Get analysis data
             age = face_result.get('age', 'N/A')
             gender = face_result.get('dominant_gender', 'N/A')
             race = face_result.get('dominant_race', 'N/A')
             emotion = face_result.get('dominant_emotion', 'N/A')
 
-            gender_conf = 'N/A'
-            if 'gender' in face_result and isinstance(face_result['gender'], dict):
-                for g, conf in face_result['gender'].items():
-                    if g.lower() == gender.lower():
-                        gender_conf = f"{conf:.1f}%"
-                        break
-
-            race_conf = 'N/A'
-            if 'race' in face_result and isinstance(face_result['race'], dict):
-                for r, conf in face_result['race'].items():
-                    if r.lower() == race.lower():
-                        race_conf = f"{conf:.1f}%"
-                        break
-
-            emotion_conf = 'N/A'
-            if 'emotion' in face_result and isinstance(face_result['emotion'], dict):
-                for e, conf in face_result['emotion'].items():
-                    if e.lower() == emotion.lower():
-                        emotion_conf = f"{conf:.1f}%"
-                        break
-
-            # Fixed line with proper parentheses
+            # Create subplot
             ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
-
-            text = (
-                f"Face #{i+1}\n\n"
-                f"Age: {age}\n\n"
-                f"Gender: {gender} ({gender_conf})\n\n"
-                f"Race: {race} ({race_conf})\n\n"
-                f"Emotion: {emotion} ({emotion_conf})"
-            )
-
+            text = f"Face #{i+1}\n\nAge: {age}\nGender: {gender}\nRace: {race}\nEmotion: {emotion}"
             ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
             ax.axis('off')
 
         plt.tight_layout()
 
-        os.remove(img_path)
-        os.rmdir(temp_dir)
-
+        # Format results
         formatted_results = []
         for i, res in enumerate(results[:8]):
             face_data = {
                 "face_number": i+1,
                 "age": res.get("age", "N/A"),
-                "gender": {
-                    "dominant": res.get("dominant_gender", "N/A"),
-                    "confidence": res.get("gender", {})
-                },
-                "race": {
-                    "dominant": res.get("dominant_race", "N/A"),
-                    "confidence": res.get("race", {})
-                },
-                "emotion": {
-                    "dominant": res.get("dominant_emotion", "N/A"),
-                    "confidence": res.get("emotion", {})
-                }
+                "gender": res.get("dominant_gender", "N/A"),
+                "race": res.get("dominant_race", "N/A"),
+                "emotion": res.get("dominant_emotion", "N/A")
             }
             formatted_results.append(face_data)
 
         return fig, formatted_results
 
     except Exception as e:
+        error_msg = str(e)
+        if "No face detected" in error_msg:
+            error_msg = "No face detected in the image. Please try a different image."
+
+        return None, {"error": error_msg}
+
+    finally:
+        # Clean up
         if os.path.exists(img_path):
             os.remove(img_path)
         if os.path.exists(temp_dir):
             os.rmdir(temp_dir)
-
-        error_msg = f"Error: {str(e)}"
-        if "No face detected" in str(e):
-            error_msg = "No face detected in the image. Please try a different image."
-
-        return None, error_msg
 
 # Create Gradio interface
 with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
@@ -360,90 +332,99 @@ with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
     # 🔍 Complete Face Recognition Tool
 
     This tool provides three face recognition features:
-    - **Verify Faces**: Compare two specific images to check if they contain the same person
-    - **Find Faces**: Search for matching faces in a database/folder (supports Google Drive paths in Colab)
-    - **Analyze Face**: Determine age, gender, race, and emotion from a facial image
+    - **Verify Faces**: Compare two images to check if they contain the same person
+    - **Find Faces**: Search for matching faces in a database/folder
+    - **Analyze Face**: Determine age, gender, race, and emotion from facial images
     """)
 
     with gr.Tabs():
+        # Verify Faces Tab
         with gr.TabItem("Verify Faces"):
             with gr.Row():
-                img1_input = gr.Image(label="First Image", type="pil")
-                img2_input = gr.Image(label="Second Image", type="pil")
+                img1 = gr.Image(label="First Image", type="pil")
+                img2 = gr.Image(label="Second Image", type="pil")
 
             with gr.Row():
-                verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
-                                             label="Similarity Threshold (lower = stricter matching)")
+                verify_threshold = gr.Slider(0.1, 0.9, value=0.6, step=0.05,
+                                             label="Similarity Threshold (lower = stricter)")
                 verify_model = gr.Dropdown(
-                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
-                    value="VGG-Face",
-                    label="Face Recognition Model"
+                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
+                    value="VGG-Face",
+                    label="Recognition Model"
                 )
 
-            verify_button = gr.Button("Verify Faces", variant="primary")
+            verify_btn = gr.Button("Verify Faces", variant="primary")
 
-            verify_result_plot = gr.Plot(label="Verification Result")
-            verify_json = gr.JSON(label="Technical Details")
+            with gr.Row():
+                verify_plot = gr.Plot(label="Comparison Result")
+                verify_results = gr.JSON(label="Verification Details")
 
+        # Find Faces Tab
         with gr.TabItem("Find Faces"):
-            query_img = gr.Image(label="Query Image (Face to find)", type="pil")
-            db_path_input = gr.Textbox(
-                label="Database Path (folder path or Google Drive path in Colab)",
-                placeholder="/content/drive/MyDrive/your_folder"
-            )
-            db_files_input = gr.File(label="Or upload images for database", file_count="multiple")
+            query_img = gr.Image(label="Query Image", type="pil")
+
+            with gr.Row():
+                db_path = gr.Textbox(
+                    label="Database Path",
+                    placeholder="/content/drive/MyDrive/your_folder or local path"
+                )
+                db_files = gr.File(label="Or upload images", file_count="multiple")
 
             with gr.Row():
-                find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
-                                           label="Similarity Threshold (lower = stricter matching)")
+                find_threshold = gr.Slider(0.1, 0.9, value=0.6, step=0.05,
+                                           label="Similarity Threshold")
                 find_model = gr.Dropdown(
-                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
-                    value="VGG-Face",
-                    label="Face Recognition Model"
+                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
+                    value="VGG-Face",
+                    label="Recognition Model"
                 )
 
-            find_button = gr.Button("Find Matching Faces", variant="primary")
+            find_btn = gr.Button("Find Matches", variant="primary")
 
-            find_result_plot = gr.Plot(label="Search Results")
-            find_results_table = gr.JSON(label="Detailed Results")
+            with gr.Row():
+                find_plot = gr.Plot(label="Matching Results")
+                find_results = gr.JSON(label="Match Details")
 
+        # Analyze Face Tab
         with gr.TabItem("Analyze Face"):
-            analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
-            actions_checkboxes = gr.CheckboxGroup(
+            analyze_img = gr.Image(label="Input Image", type="pil")
+            analyze_actions = gr.CheckboxGroup(
                 choices=["age", "gender", "race", "emotion"],
                 value=["age", "gender", "race", "emotion"],
-                label="Select Attributes to Analyze"
+                label="Analysis Features"
            )
 
-            analyze_button = gr.Button("Analyze Face", variant="primary")
+            analyze_btn = gr.Button("Analyze Face", variant="primary")
 
-            analyze_result_plot = gr.Plot(label="Analysis Results")
-            analyze_json = gr.JSON(label="Detailed Analysis")
+            with gr.Row():
+                analyze_plot = gr.Plot(label="Analysis Visualization")
+                analyze_results = gr.JSON(label="Detailed Analysis")
 
-    # Setup all button clicks
-    verify_button.click(
-        verify_faces,
-        inputs=[img1_input, img2_input, verify_threshold, verify_model],
-        outputs=[verify_result_plot, verify_json]
+    # Event handlers
+    verify_btn.click(
+        verify_faces,
+        inputs=[img1, img2, verify_threshold, verify_model],
+        outputs=[verify_plot, verify_results]
     )
 
-    find_button.click(
+    find_btn.click(
         find_faces,
-        inputs=[query_img, db_path_input, find_threshold, find_model],
-        outputs=[find_result_plot, find_results_table]
+        inputs=[query_img, db_path, find_threshold, find_model],
+        outputs=[find_plot, find_results]
     )
 
-    db_files_input.change(
+    db_files.change(
         lambda x: "",
-        inputs=db_files_input,
-        outputs=db_path_input
+        inputs=db_files,
+        outputs=db_path
     )
 
-    analyze_button.click(
+    analyze_btn.click(
         analyze_face,
-        inputs=[analyze_img, actions_checkboxes],
-        outputs=[analyze_result_plot, analyze_json]
+        inputs=[analyze_img, analyze_actions],
+        outputs=[analyze_plot, analyze_results]
    )
 
 # Launch the app
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
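
For reference, a minimal sketch of how the updated return contract can be exercised outside the Gradio UI by calling verify_faces directly. The image paths are placeholders, and it assumes the dependencies used in app.py (deepface, gradio, opencv-python, matplotlib, Pillow, numpy) are installed; because this commit also puts demo.launch() behind an if __name__ == "__main__" guard, importing app no longer starts the server.

# sketch: check the new (fig, dict) return contract introduced by this commit
from PIL import Image
import app  # the updated app.py; importing it no longer calls demo.launch()

img_a = Image.open("person_a.jpg")  # placeholder path
img_b = Image.open("person_b.jpg")  # placeholder path

fig, details = app.verify_faces(img_a, img_b, threshold=0.6, model="VGG-Face")

if fig is None:
    # Failures now come back as a dict, e.g. {"error": "No face detected ..."}
    print("verification failed:", details["error"])
else:
    # Success returns the raw DeepFace.verify dict (no json.dumps round-trip),
    # so fields such as "verified" and "distance" can be read directly.
    print("verified:", details.get("verified"), "distance:", details.get("distance"))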