# Install required packages
import os
import subprocess
import sys

# Only attempt self-installation when not inside a Docker container or on
# Kaggle, where dependencies are managed externally
if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
    try:
        # Try importing the required packages
        import gradio
        import cv2
        import numpy as np
        import matplotlib
        import PIL

        # Special handling for TensorFlow and DeepFace dependencies
        try:
            import tensorflow as tf
            tf_version = tf.__version__
            print(f"TensorFlow version: {tf_version}")

            # TensorFlow >= 2.16 ships Keras 3, so the tf-keras shim is needed
            # for compatibility. Compare numeric version parts rather than raw
            # strings (a string comparison would rank "2.9.0" above "2.16.0").
            if tuple(int(p) for p in tf_version.split(".")[:2]) >= (2, 16):
                print("Installing tf-keras for compatibility with newer TensorFlow...")
                subprocess.check_call([sys.executable, "-m", "pip", "install", "tf-keras"])

            # Now try to import deepface
            import deepface
        except ImportError as e:
            print(f"Error importing dependencies: {str(e)}")
            print("Installing deepface with specific dependencies...")
            # First downgrade TensorFlow to a compatible version if needed
            subprocess.check_call([sys.executable, "-m", "pip", "install", "tensorflow<2.16.0"])
            # Then install deepface
            subprocess.check_call([sys.executable, "-m", "pip", "install", "deepface"])
    except ImportError:
        print("Installing required packages...")
        subprocess.check_call([sys.executable, "-m", "pip", "install",
                               "gradio", "opencv-python-headless", "numpy", "matplotlib", "pillow"])
        subprocess.check_call([sys.executable, "-m", "pip", "install", "tensorflow<2.16.0"])  # Keras 2-era TensorFlow
        subprocess.check_call([sys.executable, "-m", "pip", "install", "deepface"])
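# Note: on hosted platforms such as Hugging Face Spaces, dependencies are
# normally declared in requirements.txt; the bootstrap above is a fallback
# for running this script directly.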
# Now import the required modules
import gradio as gr
import json
import cv2
import numpy as np
from PIL import Image
import tempfile
import pandas as pd
import shutil
import matplotlib.pyplot as plt
# Import DeepFace after ensuring dependencies are properly installed
from deepface import DeepFace
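# The three handlers below each repeat the same "persist a Gradio upload to
# disk" step. A helper like the following captures that pattern; it is a
# sketch for reference only (the name save_upload is not part of the original
# code, and the handlers below keep their inline versions):
def save_upload(img, path):
    """Save a Gradio upload (numpy array or PIL.Image) to the given path."""
    if isinstance(img, np.ndarray):
        Image.fromarray(img).save(path)
    else:
        img.save(path)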
def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
    # Save uploaded images to temporary files
    temp_dir = tempfile.mkdtemp()
    img1_path = os.path.join(temp_dir, "image1.jpg")
    img2_path = os.path.join(temp_dir, "image2.jpg")

    # Convert to PIL Images and save
    if isinstance(img1, np.ndarray):
        Image.fromarray(img1).save(img1_path)
    else:
        img1.save(img1_path)

    if isinstance(img2, np.ndarray):
        Image.fromarray(img2).save(img2_path)
    else:
        img2.save(img2_path)

    # Perform face verification
    try:
        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )

        # Create comparison visualization
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))

        # Display both images side by side (OpenCV loads BGR, so convert to RGB)
        img1_display = cv2.imread(img1_path)
        img1_display = cv2.cvtColor(img1_display, cv2.COLOR_BGR2RGB)
        img2_display = cv2.imread(img2_path)
        img2_display = cv2.cvtColor(img2_display, cv2.COLOR_BGR2RGB)

        ax[0].imshow(img1_display)
        ax[0].set_title("Image 1")
        ax[0].axis("off")
        ax[1].imshow(img2_display)
        ax[1].set_title("Image 2")
        ax[1].axis("off")

        # Create the result message; "confidence" is a simple heuristic
        # derived from the cosine distance
        verification_result = "✅ FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
        confidence = round((1 - result["distance"]) * 100, 2)

        plt.suptitle(f"{verification_result}\nConfidence: {confidence}%\nDistance: {result['distance']:.4f}",
                     fontsize=16, fontweight='bold',
                     color='green' if result["verified"] else 'red')
        plt.tight_layout()

        # Clean up temporary files
        os.remove(img1_path)
        os.remove(img2_path)
        os.rmdir(temp_dir)

        return fig, json.dumps(result, indent=2)

    except Exception as e:
        # Clean up temporary files
        if os.path.exists(img1_path):
            os.remove(img1_path)
        if os.path.exists(img2_path):
            os.remove(img2_path)
        if os.path.exists(temp_dir):
            os.rmdir(temp_dir)

        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in one or both images. Please try different images."
        return None, error_msg
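# For reference, DeepFace.verify returns a dict shaped roughly like the
# following (exact keys can vary across deepface versions; the values here
# are illustrative, not real output):
#   {"verified": True, "distance": 0.23, "threshold": 0.70,
#    "model": "VGG-Face", "similarity_metric": "cosine", "time": 1.2}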
def find_faces(query_img, db_folder, db_files=None, threshold=0.70, model="VGG-Face"):
    # Create temp directory
    temp_dir = tempfile.mkdtemp()
    query_path = os.path.join(temp_dir, "query.jpg")

    # Save query image
    if isinstance(query_img, np.ndarray):
        Image.fromarray(query_img).save(query_path)
    else:
        query_img.save(query_path)

    # Prefer uploaded files when present; otherwise treat the text input as a
    # folder path the user entered
    if db_files:
        # Handle folder upload (copy the images into a temp directory)
        db_path = os.path.join(temp_dir, "db")
        os.makedirs(db_path, exist_ok=True)
        for i, file in enumerate(db_files):
            # gr.File may yield filepath strings or tempfile wrappers with a
            # .name attribute, depending on the Gradio version
            src = file if isinstance(file, str) else file.name
            file_ext = os.path.splitext(src)[1]
            shutil.copy(src, os.path.join(db_path, f"image_{i}{file_ext}"))
    else:
        db_path = db_folder

    if not db_path:
        return None, "Please enter a database folder path or upload images."

    try:
        # Find matching faces
        dfs = DeepFace.find(
            img_path=query_path,
            db_path=db_path,
            model_name=model,
            distance_metric="cosine",
            threshold=threshold
        )

        if isinstance(dfs, list):
            # DeepFace.find returns one DataFrame per face in the query image
            if len(dfs) == 0:
                return None, "No matching faces found in the database."
            df = dfs[0]  # Take results for the first detected face
        else:
            df = dfs

        # Check if any matches were found
        if df.empty:
            return None, "No matching faces found in the database."

        # Sort by similarity (lowest distance first)
        df = df.sort_values(by=["distance"])

        # Create visualization for top matches (up to 4)
        num_matches = min(4, len(df))
        fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))

        # Display query image
        query_display = cv2.imread(query_path)
        query_display = cv2.cvtColor(query_display, cv2.COLOR_BGR2RGB)
        axes[0].imshow(query_display)
        axes[0].set_title("Query Image")
        axes[0].axis("off")

        # Display matches
        for i in range(num_matches):
            match_path = df.iloc[i]["identity"]
            distance = df.iloc[i]["distance"]
            confidence = round((1 - distance) * 100, 2)

            match_img = cv2.imread(match_path)
            match_img = cv2.cvtColor(match_img, cv2.COLOR_BGR2RGB)
            axes[i + 1].imshow(match_img)
            axes[i + 1].set_title(f"Match #{i + 1}\nConfidence: {confidence}%")
            axes[i + 1].axis("off")

        plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
        plt.tight_layout()

        # Format results for display
        results = df[["identity", "distance"]].copy()
        results["confidence"] = (1 - results["distance"]) * 100
        results["confidence"] = results["confidence"].round(2)
        results = results.rename(columns={"identity": "Image Path"})

        # Clean up the temp directory (query image plus any copied database);
        # a user-supplied folder path is left untouched
        shutil.rmtree(temp_dir, ignore_errors=True)

        return fig, results.to_dict('records')

    except Exception as e:
        # Clean up temp files
        shutil.rmtree(temp_dir, ignore_errors=True)
        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in the query image. Please try a different image."
        return None, error_msg
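# For reference, DeepFace.find returns a list of pandas DataFrames, one per
# face detected in the query image; each frame includes at least an
# "identity" column (matched file path) and a "distance" column. deepface
# also caches embeddings in a .pkl file inside db_path, so the first search
# over a folder is slower than subsequent ones.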
def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
    # Require at least one attribute to analyze
    if not actions:
        return None, "Please select at least one attribute to analyze."

    # Create temp directory and save image
    temp_dir = tempfile.mkdtemp()
    img_path = os.path.join(temp_dir, "analyze.jpg")

    if isinstance(img, np.ndarray):
        Image.fromarray(img).save(img_path)
    else:
        img.save(img_path)

    try:
        # Analyze facial attributes
        results = DeepFace.analyze(
            img_path=img_path,
            actions=actions,
            enforce_detection=True,
            detector_backend='opencv'
        )

        # Handle both single-face and multiple-face results
        if isinstance(results, list):
            num_faces = len(results)
        else:
            num_faces = 1
            results = [results]

        # Create visualization
        fig = plt.figure(figsize=(14, 7))

        # Load the image for display
        img_display = cv2.imread(img_path)
        img_display = cv2.cvtColor(img_display, cv2.COLOR_BGR2RGB)

        # Main image display
        main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
        main_ax.imshow(img_display)
        main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
        main_ax.axis('off')

        # Create a results summary for each face
        for i, face_result in enumerate(results):
            if i >= 4:  # Limit to 4 faces for display
                break

            # Get main results
            age = face_result.get('age', 'N/A')
            gender = face_result.get('dominant_gender', 'N/A')
            race = face_result.get('dominant_race', 'N/A')
            emotion = face_result.get('dominant_emotion', 'N/A')

            # Look up the confidence score behind each dominant attribute
            gender_conf = 'N/A'
            if 'gender' in face_result and isinstance(face_result['gender'], dict):
                for g, conf in face_result['gender'].items():
                    if g.lower() == gender.lower():
                        gender_conf = f"{conf:.1f}%"
                        break

            race_conf = 'N/A'
            if 'race' in face_result and isinstance(face_result['race'], dict):
                for r, conf in face_result['race'].items():
                    if r.lower() == race.lower():
                        race_conf = f"{conf:.1f}%"
                        break

            emotion_conf = 'N/A'
            if 'emotion' in face_result and isinstance(face_result['emotion'], dict):
                for e, conf in face_result['emotion'].items():
                    if e.lower() == emotion.lower():
                        emotion_conf = f"{conf:.1f}%"
                        break

            # Create a subplot for this face's results (two per grid row)
            ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))

            # Format text for the subplot
            text = (
                f"Face #{i + 1}\n\n"
                f"Age: {age}\n\n"
                f"Gender: {gender} ({gender_conf})\n\n"
                f"Race: {race} ({race_conf})\n\n"
                f"Emotion: {emotion} ({emotion_conf})"
            )
            ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
            ax.axis('off')

        plt.tight_layout()

        # Clean up temp files
        os.remove(img_path)
        os.rmdir(temp_dir)

        # Format results for JSON display
        formatted_results = []
        for i, res in enumerate(results[:8]):  # Limit to 8 faces for JSON display
            face_data = {
                "face_number": i + 1,
                "age": res.get("age", "N/A"),
                "gender": {
                    "dominant": res.get("dominant_gender", "N/A"),
                    "confidence": res.get("gender", {})
                },
                "race": {
                    "dominant": res.get("dominant_race", "N/A"),
                    "confidence": res.get("race", {})
                },
                "emotion": {
                    "dominant": res.get("dominant_emotion", "N/A"),
                    "confidence": res.get("emotion", {})
                }
            }
            formatted_results.append(face_data)

        return fig, formatted_results

    except Exception as e:
        # Clean up temp files
        if os.path.exists(img_path):
            os.remove(img_path)
        if os.path.exists(temp_dir):
            os.rmdir(temp_dir)

        error_msg = f"Error: {str(e)}"
        if "No face detected" in str(e):
            error_msg = "No face detected in the image. Please try a different image."
        return None, error_msg
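# For reference, DeepFace.analyze returns one dict per detected face, with
# keys such as "age", "dominant_gender"/"gender", "dominant_race"/"race",
# and "dominant_emotion"/"emotion"; the non-dominant keys hold per-class
# confidence scores (exact keys depend on the selected actions).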
# Create Gradio interface
with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🔍 Complete Face Recognition Tool
    This tool provides three face recognition features:
    - **Verify Faces**: Compare two specific images to check if they contain the same person
    - **Find Faces**: Search for matching faces in a database/folder
    - **Analyze Face**: Determine age, gender, race, and emotion from a facial image
    """)

    with gr.Tabs():
        with gr.TabItem("Verify Faces"):
            with gr.Row():
                with gr.Column():
                    img1_input = gr.Image(label="First Image", type="pil")
                with gr.Column():
                    img2_input = gr.Image(label="Second Image", type="pil")

            with gr.Row():
                with gr.Column():
                    verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
                                                 label="Similarity Threshold (lower = stricter matching)")
                with gr.Column():
                    verify_model = gr.Dropdown(
                        choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
                        value="VGG-Face",
                        label="Face Recognition Model"
                    )

            verify_button = gr.Button("Verify Faces", variant="primary")

            with gr.Row():
                verify_result_plot = gr.Plot(label="Verification Result")
            with gr.Row():
                verify_json = gr.JSON(label="Technical Details")

            # Set up the verification function
            verify_button.click(
                verify_faces,
                inputs=[img1_input, img2_input, verify_threshold, verify_model],
                outputs=[verify_result_plot, verify_json]
            )

            gr.Markdown("""
            ### 📋 How to use Face Verification:
            1. Upload two facial images
            2. Adjust the similarity threshold if needed
            3. Select a face recognition model
            4. Click the "Verify Faces" button
            5. View the results below
            """)
with gr.TabItem("Find Faces"):
with gr.Row():
query_img = gr.Image(label="Query Image (Face to find)", type="pil")
with gr.Row():
with gr.Column():
db_path_input = gr.Textbox(label="Database Path (folder containing images to search in)")
db_files_input = gr.File(label="Or upload images for database", file_count="multiple")
with gr.Row():
with gr.Column():
find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
label="Similarity Threshold (lower = stricter matching)")
with gr.Column():
find_model = gr.Dropdown(
choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
value="VGG-Face",
label="Face Recognition Model"
)
find_button = gr.Button("Find Matching Faces", variant="primary")
with gr.Row():
find_result_plot = gr.Plot(label="Search Results")
with gr.Row():
find_results_table = gr.JSON(label="Detailed Results")
# Connect function to button
find_button.click(
find_faces,
inputs=[query_img, db_path_input, find_threshold, find_model],
outputs=[find_result_plot, find_results_table]
)
# Also connect with files input
db_files_input.change(
lambda x: "", # Clear the text input when files are uploaded
inputs=db_files_input,
outputs=db_path_input
)
gr.Markdown("""
### πŸ“‹ How to use Face Finding:
1. Upload a query image containing the face you want to find
2. Either:
- Enter the path to a folder containing images to search through, or
- Upload multiple images to create a temporary database
3. Adjust the similarity threshold if needed
4. Select a face recognition model
5. Click "Find Matching Faces" button
6. View the results showing the most similar faces
""")
with gr.TabItem("Analyze Face"):
with gr.Row():
analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
with gr.Row():
actions_checkboxes = gr.CheckboxGroup(
choices=["age", "gender", "race", "emotion"],
value=["age", "gender", "race", "emotion"],
label="Select Attributes to Analyze"
)
analyze_button = gr.Button("Analyze Face", variant="primary")
with gr.Row():
analyze_result_plot = gr.Plot(label="Analysis Results")
with gr.Row():
analyze_json = gr.JSON(label="Detailed Analysis")
# Connect function to button
analyze_button.click(
analyze_face,
inputs=[analyze_img, actions_checkboxes],
outputs=[analyze_result_plot, analyze_json]
)
gr.Markdown("""
### πŸ“‹ How to use Facial Analysis:
1. Upload an image containing one or more faces
2. Select which attributes you want to analyze
3. Click "Analyze Face" button
4. View the visual results and detailed JSON data
### πŸ“Š Understanding the results:
- The tool can detect multiple faces in a single image
- For each face, it provides:
- Estimated age
- Predicted gender with confidence
- Predicted race/ethnicity with confidence
- Detected emotional expression with confidence
- The JSON output provides detailed confidence scores for all categories
""")
# Launch the app
demo.launch()
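# When running locally, demo.launch(share=True) can be used instead to
# expose a temporary public link (optional).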