# app.py — Complete Face Recognition Tool (Gradio UI on top of DeepFace):
# verify two faces, search a face database, and analyze facial attributes.
# Install required packages
import os
import subprocess
import sys
import importlib
import pkg_resources
def install_package(package, version=None):
    """Install one package via pip, optionally pinned to an exact version.

    Raises subprocess.CalledProcessError if the pip invocation fails.
    """
    spec = package if version is None else f"{package}=={version}"
    print(f"Installing {spec}...")
    # Run pip through the current interpreter so the right environment is used.
    cmd = [sys.executable, "-m", "pip", "install", "--no-cache-dir", spec]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as exc:
        print(f"Failed to install {spec}: {exc}")
        raise
def ensure_package(package, version=None):
    """Make sure `package` is available, installing it on demand.

    With a version, checks the exact pin via pkg_resources; without one,
    merely checks importability. Any failure triggers install_package().
    """
    try:
        if version:
            pkg_resources.require(f"{package}=={version}")
        else:
            importlib.import_module(package)
    except (ImportError, pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as exc:
        print(f"Package requirement failed: {exc}")
        install_package(package, version)
    else:
        print(f"{package} is already installed with the correct version.")
# Check environment and install dependencies. Skipped inside Docker or Kaggle,
# where the image is expected to ship with the stack preinstalled.
if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
    print("Setting up environment...")
    # Core stack, pinned to mutually compatible versions.
    _PINNED = (
        ("numpy", "1.23.5"),
        ("protobuf", "3.20.3"),
        ("tensorflow", "2.10.0"),
        ("opencv-python-headless", "4.7.0.72"),
        ("deepface", "0.0.79"),
        ("gradio", "3.50.2"),
    )
    for _name, _ver in _PINNED:
        ensure_package(_name, _ver)
    # Additional required packages, unpinned.
    for _name in ("matplotlib", "pillow", "pandas"):
        ensure_package(_name)
# Now import the required modules
import gradio as gr
import json
import cv2
import numpy as np
from deepface import DeepFace
import matplotlib.pyplot as plt
from PIL import Image
import tempfile
import pandas as pd
import shutil
# Google Drive integration (for Colab users): mount Drive so folders under
# /content/drive can be used as the face database in the "Find Faces" tab.
if 'google.colab' in sys.modules:
    from google.colab import drive
    drive.mount('/content/drive')
def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
    """Compare two face images and report whether they show the same person.

    Parameters
    ----------
    img1, img2 : numpy.ndarray or PIL.Image
        Input images as delivered by the Gradio Image components.
    threshold : float
        Cosine-distance threshold (lower = stricter).
    model : str
        DeepFace recognition model name.

    Returns
    -------
    (matplotlib.figure.Figure or None, dict)
        Side-by-side visualization and DeepFace's raw result dict, or
        (None, {"error": ...}) on failure.
    """
    temp_dir = tempfile.mkdtemp()
    img1_path = os.path.join(temp_dir, "image1.jpg")
    img2_path = os.path.join(temp_dir, "image2.jpg")
    try:
        # DeepFace operates on file paths, so persist both inputs first.
        for img, path in ((img1, img1_path), (img2, img2_path)):
            if isinstance(img, np.ndarray):
                Image.fromarray(img).save(path)
            else:
                img.save(path)
        # NOTE(review): the `threshold` kwarg requires a DeepFace release that
        # accepts it in verify() — confirm against the pinned 0.0.79.
        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )
        # Side-by-side visualization of the two inputs.
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        for axis, path, title in ((ax[0], img1_path, "Image 1"),
                                  (ax[1], img2_path, "Image 2")):
            # OpenCV loads BGR; convert for correct matplotlib colors.
            axis.imshow(cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB))
            axis.set_title(title)
            axis.axis("off")
        verification_result = "βœ… FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
        # Confidence is a friendly transform of cosine distance, not a probability.
        confidence = round((1 - result["distance"]) * 100, 2)
        plt.suptitle(f"{verification_result}\nConfidence: {confidence}%\nDistance: {result['distance']:.4f}",
                     fontsize=16, fontweight='bold',
                     color='green' if result["verified"] else 'red')
        plt.tight_layout()
        return fig, result  # Return raw dict instead of JSON string
    except Exception as e:
        error_msg = str(e)
        if "No face detected" in error_msg:
            error_msg = "No face detected in one or both images. Please try different images."
        return None, {"error": error_msg}
    finally:
        # Single cleanup path: the original duplicated os.remove/os.rmdir in the
        # success and error branches and leaked the directory if e.g. plotting
        # raised after verification succeeded.
        shutil.rmtree(temp_dir, ignore_errors=True)
def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
    """Search a face database for images matching the query face.

    Parameters
    ----------
    query_img : numpy.ndarray or PIL.Image
        Query face image from Gradio.
    db_folder : str or list
        Either a directory path on disk (e.g. a mounted Drive folder) or a
        list of Gradio-uploaded file objects to use as the database.
    threshold : float
        Cosine-distance threshold for DeepFace.find.
    model : str
        DeepFace recognition model name.

    Returns
    -------
    (matplotlib.figure.Figure or None, list[dict] or dict)
        Figure showing the query plus up to four matches and a list of match
        records, or (None, {"error": ...}) on failure.
    """
    temp_dir = tempfile.mkdtemp()
    query_path = os.path.join(temp_dir, "query.jpg")
    try:
        # DeepFace wants file paths, so persist the query image.
        if isinstance(query_img, np.ndarray):
            Image.fromarray(query_img).save(query_path)
        else:
            query_img.save(query_path)

        # Resolve the database location.
        if isinstance(db_folder, str):
            # Drive paths are taken verbatim; anything else is made absolute.
            if db_folder.startswith("/content/drive"):
                db_path = db_folder
            else:
                db_path = os.path.abspath(db_folder)
            if not os.path.exists(db_path):
                return None, {"error": "Invalid database path - directory does not exist"}
        else:
            # Uploaded files: copy into a scratch folder with stable names,
            # keeping each file's original extension.
            db_path = os.path.join(temp_dir, "db")
            os.makedirs(db_path, exist_ok=True)
            for i, file in enumerate(db_folder):
                file_ext = os.path.splitext(file.orig_name)[1]
                shutil.copy(file.name, os.path.join(db_path, f"image_{i}{file_ext}"))

        # NOTE(review): the `threshold` kwarg requires a DeepFace release that
        # accepts it in find() — confirm against the pinned 0.0.79.
        dfs = DeepFace.find(
            img_path=query_path,
            db_path=db_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold,
            silent=True
        )

        # DeepFace.find may return a list of DataFrames (one per detected face)
        # or a single DataFrame, depending on version.
        if isinstance(dfs, list):
            if len(dfs) == 0:
                return None, {"error": "No matching faces found in the database."}
            df = dfs[0]
        else:
            df = dfs
        if df.empty:
            return None, {"error": "No matching faces found in the database."}
        df = df.sort_values(by=["distance"])

        # Visualization: query image followed by up to four best matches.
        num_matches = min(4, len(df))
        fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
        axes[0].imshow(cv2.cvtColor(cv2.imread(query_path), cv2.COLOR_BGR2RGB))
        axes[0].set_title("Query Image")
        axes[0].axis("off")

        valid_matches = 0
        for i in range(num_matches):
            if i >= len(df):
                break
            match_path = df.iloc[i]["identity"]
            # Skip entries whose files have moved or are unreadable.
            if not os.path.exists(match_path):
                continue
            try:
                match_img = cv2.imread(match_path)
                if match_img is None:
                    continue
                axes[valid_matches + 1].imshow(cv2.cvtColor(match_img, cv2.COLOR_BGR2RGB))
                axes[valid_matches + 1].set_title(f"Match #{valid_matches + 1}")
                axes[valid_matches + 1].axis("off")
                valid_matches += 1
            except Exception:
                continue
        # Hide any axes we did not fill.
        for j in range(valid_matches + 1, num_matches + 1):
            axes[j].axis("off")

        plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
        plt.tight_layout()

        # Tabular results: path, raw distance, and a percentage confidence.
        results = df[["identity", "distance"]].copy()
        results["confidence"] = ((1 - results["distance"]) * 100).round(2)
        results = results.rename(columns={"identity": "Image Path"}).to_dict('records')
        return fig, results
    except Exception as e:
        error_msg = str(e)
        if "No face detected" in error_msg:
            error_msg = "No face detected in the query image. Please try a different image."
        elif "No such file or directory" in error_msg:
            error_msg = "Invalid database path or corrupted image files"
        return None, {"error": error_msg}
    finally:
        # Remove the entire scratch directory (query image plus any copied
        # uploads). The original cleanup deleted only query.jpg and the upload
        # copy, leaking temp_dir itself whenever db_folder was a string path.
        shutil.rmtree(temp_dir, ignore_errors=True)
def analyze_face(img, actions=('age', 'gender', 'race', 'emotion')):
    """Run DeepFace facial-attribute analysis on an image.

    Parameters
    ----------
    img : numpy.ndarray or PIL.Image
        Input image from Gradio.
    actions : sequence of str
        Attributes to analyze (any subset of age/gender/race/emotion). The
        default is a tuple rather than a list so no mutable default argument
        is shared between calls; any sequence of the same strings works.

    Returns
    -------
    (matplotlib.figure.Figure or None, list[dict] or dict)
        Visualization plus per-face attribute summaries, or
        (None, {"error": ...}) on failure.
    """
    temp_dir = tempfile.mkdtemp()
    img_path = os.path.join(temp_dir, "analyze.jpg")
    try:
        # Persist the input; DeepFace operates on file paths.
        if isinstance(img, np.ndarray):
            Image.fromarray(img).save(img_path)
        else:
            img.save(img_path)

        results = DeepFace.analyze(
            img_path=img_path,
            actions=list(actions),
            enforce_detection=True,
            detector_backend='opencv'
        )
        # Some DeepFace versions return a bare dict for a single face;
        # normalize to a list so the code below has one shape to handle.
        if isinstance(results, list):
            num_faces = len(results)
        else:
            num_faces = 1
            results = [results]

        # Left half of a 2x4 grid shows the image; right half holds up to
        # four per-face text panels.
        fig = plt.figure(figsize=(14, 7))
        img_display = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
        main_ax.imshow(img_display)
        main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
        main_ax.axis('off')
        for i, face_result in enumerate(results[:4]):
            age = face_result.get('age', 'N/A')
            gender = face_result.get('dominant_gender', 'N/A')
            race = face_result.get('dominant_race', 'N/A')
            emotion = face_result.get('dominant_emotion', 'N/A')
            # Panels fill row 0 (cols 2-3) first, then row 1.
            ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
            text = f"Face #{i+1}\n\nAge: {age}\nGender: {gender}\nRace: {race}\nEmotion: {emotion}"
            ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
            ax.axis('off')
        plt.tight_layout()

        # JSON-friendly summary for up to eight faces.
        formatted_results = [
            {
                "face_number": i + 1,
                "age": res.get("age", "N/A"),
                "gender": res.get("dominant_gender", "N/A"),
                "race": res.get("dominant_race", "N/A"),
                "emotion": res.get("dominant_emotion", "N/A"),
            }
            for i, res in enumerate(results[:8])
        ]
        return fig, formatted_results
    except Exception as e:
        error_msg = str(e)
        if "No face detected" in error_msg:
            error_msg = "No face detected in the image. Please try a different image."
        return None, {"error": error_msg}
    finally:
        # rmtree instead of remove+rmdir: robust even if extra files end up
        # in the scratch directory or the image was never written.
        shutil.rmtree(temp_dir, ignore_errors=True)
# Create Gradio interface: three tabs wired to verify_faces, find_faces,
# and analyze_face. Component creation order inside the `with` contexts
# determines the on-screen layout.
with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # πŸ” Complete Face Recognition Tool
    This tool provides three face recognition features:
    - **Verify Faces**: Compare two images to check if they contain the same person
    - **Find Faces**: Search for matching faces in a database/folder
    - **Analyze Face**: Determine age, gender, race, and emotion from facial images
    """)
    with gr.Tabs():
        # Verify Faces Tab: two images in, comparison plot + raw result out.
        with gr.TabItem("Verify Faces"):
            with gr.Row():
                img1 = gr.Image(label="First Image", type="pil")
                img2 = gr.Image(label="Second Image", type="pil")
            with gr.Row():
                verify_threshold = gr.Slider(0.1, 0.9, value=0.6, step=0.05,
                                             label="Similarity Threshold (lower = stricter)")
                verify_model = gr.Dropdown(
                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                    value="VGG-Face",
                    label="Recognition Model"
                )
            verify_btn = gr.Button("Verify Faces", variant="primary")
            with gr.Row():
                verify_plot = gr.Plot(label="Comparison Result")
                verify_results = gr.JSON(label="Verification Details")
        # Find Faces Tab: query image + database (path OR uploaded files).
        with gr.TabItem("Find Faces"):
            query_img = gr.Image(label="Query Image", type="pil")
            with gr.Row():
                db_path = gr.Textbox(
                    label="Database Path",
                    placeholder="/content/drive/MyDrive/your_folder or local path"
                )
                db_files = gr.File(label="Or upload images", file_count="multiple")
            with gr.Row():
                find_threshold = gr.Slider(0.1, 0.9, value=0.6, step=0.05,
                                           label="Similarity Threshold")
                find_model = gr.Dropdown(
                    choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                    value="VGG-Face",
                    label="Recognition Model"
                )
            find_btn = gr.Button("Find Matches", variant="primary")
            with gr.Row():
                find_plot = gr.Plot(label="Matching Results")
                find_results = gr.JSON(label="Match Details")
        # Analyze Face Tab: one image in, attribute visualization + JSON out.
        with gr.TabItem("Analyze Face"):
            analyze_img = gr.Image(label="Input Image", type="pil")
            analyze_actions = gr.CheckboxGroup(
                choices=["age", "gender", "race", "emotion"],
                value=["age", "gender", "race", "emotion"],
                label="Analysis Features"
            )
            analyze_btn = gr.Button("Analyze Face", variant="primary")
            with gr.Row():
                analyze_plot = gr.Plot(label="Analysis Visualization")
                analyze_results = gr.JSON(label="Detailed Analysis")
    # Event handlers: each button forwards its tab's inputs to the matching
    # backend function and routes the (figure, dict) pair to plot + JSON.
    verify_btn.click(
        verify_faces,
        inputs=[img1, img2, verify_threshold, verify_model],
        outputs=[verify_plot, verify_results]
    )
    find_btn.click(
        find_faces,
        inputs=[query_img, db_path, find_threshold, find_model],
        outputs=[find_plot, find_results]
    )
    # Clear the path textbox when files are uploaded, so find_faces receives
    # the uploaded file list instead of a stale directory path.
    db_files.change(
        lambda x: "",
        inputs=db_files,
        outputs=db_path
    )
    analyze_btn.click(
        analyze_face,
        inputs=[analyze_img, analyze_actions],
        outputs=[analyze_plot, analyze_results]
    )
# Launch the app only when executed directly (not when imported as a module).
if __name__ == "__main__":
    demo.launch()