# --- Hugging Face file-page residue (not Python); kept as comments so the file parses ---
# Natwar's picture
# Update app.py
# 9a2407d verified
# raw / history blame / 7.06 kB
# Dependency / environment setup.
# The original used IPython shell magic ("!pip install ...") and an
# unconditional Google Drive mount — both crash when this file runs as a
# plain Python script (e.g. as a Hugging Face Space app.py). Pin versions in
# requirements.txt for production; the guarded install below is a
# best-effort fallback for notebook-style environments only.
import importlib.util
import subprocess
import sys

# Mount Google Drive only when actually running inside Colab.
try:
    from google.colab import drive  # noqa: F401 — only present on Colab
    drive.mount('/content/drive')
except ImportError:
    pass  # not running on Colab; nothing to mount

# Install pinned dependencies only if deepface is missing (no-op elsewhere).
if importlib.util.find_spec("deepface") is None:
    subprocess.run(
        [sys.executable, "-m", "pip", "install",
         "deepface==0.0.79", "tensorflow==2.10.0",
         "opencv-python-headless==4.7.0.72"],
        check=False,  # best effort — the import below gives the real error
    )

import gradio as gr
import json
import cv2
import numpy as np
from deepface import DeepFace
import matplotlib.pyplot as plt
from PIL import Image
import tempfile
import os
import pandas as pd
import shutil
def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
    """Compare two face images and report whether they show the same person.

    Args:
        img1, img2: Input images, each a numpy array or a PIL Image.
        threshold: Cosine-distance threshold forwarded to ``DeepFace.verify``.
        model: DeepFace model name (e.g. "VGG-Face", "Facenet").

    Returns:
        ``(figure, result_dict)`` on success, where the figure shows both
        images with a match/no-match banner; ``(None, {"error": msg})`` on
        any failure. Temporary files are always cleaned up.
    """
    temp_dir = tempfile.mkdtemp()
    img1_path = os.path.join(temp_dir, "image1.jpg")
    img2_path = os.path.join(temp_dir, "image2.jpg")
    try:
        # DeepFace works on file paths, so persist both inputs. Convert to
        # RGB first: saving an RGBA/palette PIL image as JPEG raises OSError.
        for img, path in ((img1, img1_path), (img2, img2_path)):
            pil = Image.fromarray(img) if isinstance(img, np.ndarray) else img
            pil.convert("RGB").save(path)

        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )

        # Side-by-side visualization (cv2 loads BGR; matplotlib wants RGB).
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        for idx, path in enumerate([img1_path, img2_path]):
            rgb = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
            ax[idx].imshow(rgb)
            ax[idx].set_title(f"Image {idx+1}")
            ax[idx].axis("off")

        # Cosine distance is in [0, 2]; 1 - distance is a rough confidence.
        confidence = round((1 - result["distance"]) * 100, 2)
        plt.suptitle(f"{'βœ… MATCH' if result['verified'] else '❌ NO MATCH'}\nConfidence: {confidence}%",
                     fontsize=14, y=1.05)
        return fig, result
    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
    """Search a face database for images matching the query face.

    Args:
        query_img: Query image as a numpy array or PIL Image.
        db_folder: Either a path string to a directory of face images, or a
            list of uploaded file objects (each with a ``.name`` path).
        threshold: Maximum cosine distance for a row to count as a match.
        model: DeepFace model name.

    Returns:
        ``(figure, records)`` where the figure shows the query plus up to
        three matches and ``records`` is ``[{"identity", "distance"}, ...]``;
        ``(None, {"error": msg})`` on failure.
    """
    temp_dir = tempfile.mkdtemp()
    query_path = os.path.join(temp_dir, "query.jpg")
    try:
        # Persist the query image; convert to RGB so JPEG save cannot fail.
        pil = Image.fromarray(query_img) if isinstance(query_img, np.ndarray) else query_img
        pil.convert("RGB").save(query_path)

        # A string is used as-is; uploaded files are copied into a temp db dir.
        if isinstance(db_folder, str):
            db_path = db_folder
        else:
            db_path = os.path.join(temp_dir, "db")
            os.makedirs(db_path, exist_ok=True)
            for i, file in enumerate(db_folder):
                ext = os.path.splitext(file.name)[1]
                shutil.copy(file.name, os.path.join(db_path, f"img_{i}{ext}"))

        dfs = DeepFace.find(
            img_path=query_path,
            db_path=db_path,
            model_name=model,
            distance_metric="cosine",
            enforce_detection=False,
            silent=True
        )

        # DeepFace.find may return a list of DataFrames (one per face found).
        df = dfs[0] if isinstance(dfs, list) else dfs
        df = df[df['distance'] <= threshold].sort_values('distance')

        # BUG FIX: the original nested plt.subplots(1, 1) inside the ncols
        # argument of the outer plt.subplots call, and indexed a bare Axes
        # when only one column existed. Allocate query + up to 3 match slots.
        n_matches = min(3, len(df))
        fig, axes = plt.subplots(1, n_matches + 1, figsize=(4 * (n_matches + 1), 4))
        if n_matches == 0:
            axes = [axes]  # plt.subplots returns a bare Axes when ncols == 1

        axes[0].imshow(cv2.cvtColor(cv2.imread(query_path), cv2.COLOR_BGR2RGB))
        axes[0].set_title("Query Image")
        axes[0].axis("off")

        for idx, (_, row) in enumerate(df.head(n_matches).iterrows()):
            match_img = cv2.cvtColor(cv2.imread(row['identity']), cv2.COLOR_BGR2RGB)
            axes[idx + 1].imshow(match_img)
            axes[idx + 1].set_title(f"Match {idx+1}\n{row['distance']:.2f}")
            axes[idx + 1].axis("off")

        return fig, df[['identity', 'distance']].to_dict('records')
    except Exception as e:
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
def analyze_face(img, actions=('age', 'gender', 'race', 'emotion')):
    """Run DeepFace attribute analysis on a single image.

    Args:
        img: Input image as a numpy array or PIL Image.
        actions: Attributes to analyze (tuple default avoids the shared
            mutable-default-argument pitfall; DeepFace accepts any sequence).

    Returns:
        ``(figure, results)`` where the figure shows the image next to a
        textual attribute summary and ``results`` is the raw DeepFace list;
        ``(None, {"error": msg})`` on failure.
    """
    temp_dir = tempfile.mkdtemp()
    img_path = os.path.join(temp_dir, "analyze.jpg")
    try:
        # Persist to disk for DeepFace; RGB conversion keeps JPEG save safe.
        pil = Image.fromarray(img) if isinstance(img, np.ndarray) else img
        pil.convert("RGB").save(img_path)

        results = DeepFace.analyze(
            img_path=img_path,
            actions=list(actions),
            enforce_detection=False,
            detector_backend='opencv'
        )
        # Older DeepFace versions return a dict for a single face.
        results = results if isinstance(results, list) else [results]

        fig = plt.figure(figsize=(10, 5))
        plt.subplot(121)
        plt.imshow(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))
        plt.title("Input Image")
        plt.axis('off')

        # BUG FIX: the original fed mixed values (nested score dicts, strings,
        # numbers) straight into plt.barh, which raises TypeError. Render a
        # text summary instead, collapsing score dicts to their top entry.
        plt.subplot(122)
        plt.axis('off')
        lines = []
        for key, val in results[0].items():
            if key == 'region':
                continue  # bounding-box dict, not an attribute
            if isinstance(val, dict):
                top = max(val, key=val.get)
                lines.append(f"{key}: {top} ({val[top]:.1f}%)")
            else:
                lines.append(f"{key}: {val}")
        plt.text(0.0, 0.5, "\n".join(lines), fontsize=12, va='center')
        plt.title("Analysis Results")

        return fig, results
    except Exception as e:
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
# Gradio interface: three tabs sharing the verify-tab threshold/model widgets.
with gr.Blocks(title="Face Analysis Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# πŸ” Face Analysis Toolkit")
    with gr.Tabs():
        with gr.Tab("Verify Faces"):
            gr.Markdown("## Compare two faces")
            with gr.Row():
                img1 = gr.Image(type="pil", label="First Face")
                img2 = gr.Image(type="pil", label="Second Face")
            thresh = gr.Slider(0.1, 1.0, 0.6, label="Matching Threshold")
            model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face")
            verify_btn = gr.Button("Compare Faces")
            result_plot = gr.Plot()
            result_json = gr.JSON()
            verify_btn.click(
                verify_faces,
                [img1, img2, thresh, model],
                [result_plot, result_json]
            )

        with gr.Tab("Find Faces"):
            gr.Markdown("## Find similar faces in database")
            query = gr.Image(type="pil", label="Query Image")
            db = gr.Textbox("/content/drive/MyDrive/db", label="Database Path")
            files = gr.File(file_count="multiple", label="Or upload files")
            find_btn = gr.Button("Search Faces")
            matches_plot = gr.Plot()
            matches_json = gr.JSON()

            # BUG FIX: the original never passed the uploaded files to
            # find_faces, and a files.change handler blanked the path textbox.
            # Dispatch here instead: uploads take precedence over the path.
            def _find_dispatch(query_img, db_path, uploaded, threshold, model_name):
                source = uploaded if uploaded else db_path
                return find_faces(query_img, source, threshold, model_name)

            find_btn.click(
                _find_dispatch,
                [query, db, files, thresh, model],
                [matches_plot, matches_json]
            )

        with gr.Tab("Analyze Face"):
            gr.Markdown("## Analyze facial attributes")
            inp_img = gr.Image(type="pil", label="Input Image")
            analyze_btn = gr.Button("Analyze")
            analysis_plot = gr.Plot()
            analysis_json = gr.JSON()
            analyze_btn.click(
                analyze_face,
                [inp_img],
                [analysis_plot, analysis_json]
            )

demo.launch()