# Standard library imports
import os

# Third-party imports
import cv2

# Local imports
from utils.image_utils import preprocess_image, get_image_from_input
from utils.face_detector import (
    load_face_detector,
)  # Assuming this is the dlib detector loader

# Define constants
HAAR_CASCADE_FILENAME = "haarcascade_frontalface_default.xml"
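
# ---------------------------------------------------------------------------
# Hedged sketch of the contracts assumed for the local helpers imported above;
# their real implementations live under utils/ and are not shown in this file:
#
#   - get_image_from_input(input_type, uploaded_image, image_url, base64_string)
#     is assumed to return a PIL.Image.Image for whichever input the user
#     selected, or None on failure.
#   - preprocess_image(image) is assumed to return an RGB numpy array, roughly
#     `np.array(image.convert("RGB"))`, or None on failure.
#   - load_face_detector() is assumed to return a dlib frontal face detector,
#     e.g. `dlib.get_frontal_face_detector()`.
# ---------------------------------------------------------------------------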


def face_detection(
    input_type, uploaded_image, image_url, base64_string, face_detection_method
):
    """
    Performs face detection on the image from various input types using the selected method.

    Args:
        input_type (str): The selected input method ("Upload File", "Enter URL", "Enter Base64").
        uploaded_image (PIL.Image.Image): The uploaded image (if input_type is "Upload File").
        image_url (str): The image URL (if input_type is "Enter URL").
        base64_string (str): The image base64 string (if input_type is "Enter Base64").
        face_detection_method (str): The selected face detection method ("OpenCV" or "dlib").

    Returns:
        tuple: A tuple containing:
            - numpy.ndarray: The image with detected faces drawn on it, or None if an error occurred.
            - list: A list of dictionaries, one per detected face, each with keys
              'x', 'y', 'w', and 'h'; empty if no faces were detected or an error occurred.
    """
    # Use the centralized function to get the image from whichever input was provided
    image = get_image_from_input(input_type, uploaded_image, image_url, base64_string)
    if image is None:
        print("Image is None after loading/selection.")
        return None, []  # Return None for image and empty list for bboxes

    processed_image = None
    bounding_boxes = []  # Each entry: {"x": ..., "y": ..., "w": ..., "h": ...} in pixel coordinates
    try:
        # Preprocess the image (convert PIL to numpy, ensure RGB).
        # preprocess_image expects a PIL Image or something convertible by Image.fromarray.
        processed_image = preprocess_image(image)
        if processed_image is not None:
            # preprocess_image yields RGB, so convert from RGB (not BGR) to grayscale
            gray = cv2.cvtColor(processed_image, cv2.COLOR_RGB2GRAY)
            if face_detection_method == "OpenCV":
                print("Using OpenCV for face detection.")
                # Construct the full path to the Haar cascade file bundled with OpenCV.
                # This path might need adjustment depending on the environment.
                cascade_path = os.path.join(
                    cv2.data.haarcascades, HAAR_CASCADE_FILENAME
                )

                # Check that the cascade file exists before loading it
                if not os.path.exists(cascade_path):
                    error_message = (
                        f"Error: Haar cascade file not found at {cascade_path}. "
                        "Please ensure OpenCV is installed correctly and the file exists."
                    )
                    print(error_message)
                    return None, []  # Return None for image and empty list for bboxes

                face_cascade = cv2.CascadeClassifier(cascade_path)
                # detectMultiScale returns (x, y, w, h) boxes in pixel coordinates
                faces = face_cascade.detectMultiScale(
                    gray, scaleFactor=1.1, minNeighbors=4
                )
                for x, y, w, h in faces:
                    cv2.rectangle(
                        processed_image, (x, y), (x + w, y + h), (255, 0, 0), 2
                    )
                    bounding_boxes.append(
                        {"x": int(x), "y": int(y), "w": int(w), "h": int(h)}
                    )
            elif face_detection_method == "dlib":
                print("Using dlib for face detection.")
                face_detector = load_face_detector()
                # Run the dlib detector directly on the RGB numpy array. The second
                # argument is how many times to upsample the image before detecting,
                # which helps find smaller faces at the cost of speed.
                faces = face_detector(processed_image, 1)
                for face in faces:
                    x, y, w, h = face.left(), face.top(), face.width(), face.height()
                    cv2.rectangle(
                        processed_image, (x, y), (x + w, y + h), (255, 0, 0), 2
                    )
                    bounding_boxes.append(
                        {"x": int(x), "y": int(y), "w": int(w), "h": int(h)}
                    )

            return processed_image, bounding_boxes
        else:
            # Preprocessing failed, so there is nothing to run detection on
            return None, []  # Return None for image and empty list for bboxes
    except Exception as e:
        print(f"Error in face detection processing: {e}")
        return None, []  # Return None for image and empty list for bboxes
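

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the Space's UI wiring): exercises the
# "Upload File" path with a local image. "example.jpg" is a placeholder name,
# not a file shipped with this repo; point it at any image on disk.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    sample_path = "example.jpg"  # placeholder; replace with a real image path
    if os.path.exists(sample_path):
        annotated, boxes = face_detection(
            input_type="Upload File",
            uploaded_image=Image.open(sample_path),
            image_url="",
            base64_string="",
            face_detection_method="OpenCV",
        )
        if annotated is not None:
            print(f"Detected {len(boxes)} face(s): {boxes}")
    else:
        print(f"Sample image not found at {sample_path}; skipping demo.")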