Created app.py
app.py ADDED
@@ -0,0 +1,257 @@
import torch
import torchvision.transforms as T
import numpy as np
import cv2
import streamlit as st
import mediapipe as mp
from PIL import Image
import os
torch.classes.__path__ = []
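# Presumably a workaround for Streamlit's module watcher tripping over torch.classes
# (see the "__path__._path" RuntimeError handled in load_model below); intent assumed.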

class FaceHairSegmenter:
    def __init__(self):
        # Use MediaPipe for face detection
        self.mp_face_detection = mp.solutions.face_detection
        self.face_detection = self.mp_face_detection.FaceDetection(
            model_selection=1,  # Use full range model
            min_detection_confidence=0.6
        )

        # Load BiSeNet model
        self.model = self.load_model()

        # Define transforms - adjust according to BiSeNet requirements
        self.transform = T.Compose([
            T.Resize((512, 512)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # CelebAMask-HQ classes - focus on the categories we want to keep
        self.keep_classes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 17, 18]  # All except 0, 14, 16
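        # Assumed label order (as in the widely used face-parsing BiSeNet release):
        # the excluded indices 0, 14, 16 correspond to background, neck, and cloth,
        # while the kept indices cover the facial parts and 17 (hair).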

    def load_model(self):
        try:
            # Import locally to avoid dependency issues if model isn't present
            from model import BiSeNet

            # Initialize BiSeNet with 19 classes (for CelebAMask-HQ)
            model = BiSeNet(n_classes=19)

            # Try to load the pretrained weights using a safer approach
            try:
                # First attempt: standard loading
                model.load_state_dict(torch.load('bisenet.pth', map_location=torch.device('cpu')))
            except RuntimeError as e:
                if "__path__._path" in str(e):
                    # Alternative loading approach if we encounter the class path error
                    print("Using alternative model loading approach...")
                    checkpoint = torch.load('bisenet.pth', map_location='cpu', weights_only=True)
                    model.load_state_dict(checkpoint)
                else:
                    # Other type of RuntimeError, re-raise
                    raise

            model.eval()

            if torch.cuda.is_available():
                model = model.cuda()

            print("BiSeNet model loaded successfully")
            return model
        except Exception as e:
            print(f"Error loading model: {e}")
            # Let's provide a more detailed error message to help with debugging
            import traceback
            traceback.print_exc()
            return None

    def detect_faces(self, image):
        """Detect faces using MediaPipe (expects image in RGB)."""
        # Since image from cv2 is BGR, convert to RGB for MediaPipe
        image_rgb = image if len(image.shape) == 3 and image.shape[2] == 3 else cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        h, w = image.shape[:2]

        # Process with MediaPipe
        results = self.face_detection.process(image_rgb)

        bboxes = []
        if results.detections:
            for detection in results.detections:
                bbox = detection.location_data.relative_bounding_box
                x_min = max(0, int(bbox.xmin * w))
                y_min = max(0, int(bbox.ymin * h))
                x_max = min(w, int((bbox.xmin + bbox.width) * w))
                y_max = min(h, int((bbox.ymin + bbox.height) * h))
                bboxes.append((x_min, y_min, x_max, y_max))

        if len(bboxes) > 1:
            bboxes = self.remove_overlapping_boxes(bboxes)

        return len(bboxes), bboxes

    def remove_overlapping_boxes(self, boxes, overlap_threshold=0.5):
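        # Greedy de-duplication: sort boxes by area (largest first) and drop any box
        # whose IoU with an already-kept box exceeds overlap_threshold.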
        if not boxes:
            return []
        def box_area(box):
            return (box[2] - box[0]) * (box[3] - box[1])
        boxes = sorted(boxes, key=box_area, reverse=True)
        keep = []
        for current in boxes:
            is_duplicate = False
            for kept_box in keep:
                x1 = max(current[0], kept_box[0])
                y1 = max(current[1], kept_box[1])
                x2 = min(current[2], kept_box[2])
                y2 = min(current[3], kept_box[3])
                if x1 < x2 and y1 < y2:
                    intersection = (x2 - x1) * (y2 - y1)
                    area1 = box_area(current)
                    area2 = box_area(kept_box)
                    union = area1 + area2 - intersection
                    iou = intersection / union
                    if iou > overlap_threshold:
                        is_duplicate = True
                        break
            if not is_duplicate:
                keep.append(current)
        return keep

    def segment_face_hair(self, image):
        """Segment face using BiSeNet trained on CelebAMask-HQ."""
        if self.model is None:
            return image, "Model not loaded correctly."
        if image is None or image.size == 0:
            return image, "Invalid image provided."

        # Detect faces
        num_faces, bboxes = self.detect_faces(image)
        if num_faces == 0:
            return image, "No face detected! Please upload an image with a clear face."
        elif num_faces > 1:
            debug_img = image.copy()
            for (x_min, y_min, x_max, y_max) in bboxes:
                cv2.rectangle(debug_img, (x_min, y_min), (x_max, y_max), (255, 0, 0), 2)
            return debug_img, f"{num_faces} faces detected! Please upload an image with exactly ONE face."

        # Get the face bounding box (we'll use this only for ROI, not for final segmentation)
        bbox = bboxes[0]
        x_min, y_min, x_max, y_max = bbox
        h, w = image.shape[:2]

        # Expand bounding box for better segmentation
        face_height = y_max - y_min + 550
        face_width = x_max - x_min + 550

        y_min_exp = max(0, y_min - int(face_height * 0.5))  # Expand more for hair
        x_min_exp = max(0, x_min - int(face_width * 0.3))
        x_max_exp = min(w, x_max + int(face_width * 0.3))
        y_max_exp = min(h, y_max + int(face_height * 0.2))

        # Crop and prepare image for BiSeNet
        face_region = image[y_min_exp:y_max_exp, x_min_exp:x_max_exp]
        original_face_size = face_region.shape[:2]

        # Ensure RGB format for PIL
        if face_region.shape[2] == 3:
            pil_face = Image.fromarray(face_region)
        else:
            pil_face = Image.fromarray(cv2.cvtColor(face_region, cv2.COLOR_BGR2RGB))

        # Apply transformations and run model
        input_tensor = self.transform(pil_face).unsqueeze(0)
        if torch.cuda.is_available():
            input_tensor = input_tensor.cuda()

        with torch.no_grad():
            out = self.model(input_tensor)[0]
            parsing = out.squeeze(0).argmax(0).byte().cpu().numpy()

        # Resize parsing map back to original size
        parsing = cv2.resize(parsing, (original_face_size[1], original_face_size[0]),
                             interpolation=cv2.INTER_NEAREST)

        # Create mask that keeps only the classes we want
        mask = np.zeros_like(parsing, dtype=np.uint8)
        for cls_id in self.keep_classes:
            mask[parsing == cls_id] = 255

        # Refine the mask
        kernel = np.ones((3, 3), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        # Create full image mask (initialize with zeros)
        full_mask = np.zeros((h, w), dtype=np.uint8)
        # Place the face mask in the right position
        full_mask[y_min_exp:y_max_exp, x_min_exp:x_max_exp] = mask

        # Create the RGBA output
        if image.shape[2] == 3:  # RGB
            rgba = np.dstack((image, np.zeros((h, w), dtype=np.uint8)))
            # Copy only the face region with its alpha
            rgba[y_min_exp:y_max_exp, x_min_exp:x_max_exp, 3] = mask
        else:  # Already RGBA or other format
            rgba = np.dstack((cv2.cvtColor(image, cv2.COLOR_BGR2RGB),
                              np.zeros((h, w), dtype=np.uint8)))
            rgba[y_min_exp:y_max_exp, x_min_exp:x_max_exp, 3] = mask

        return rgba, "Face segmented successfully!"

# Streamlit app
def main():
    st.set_page_config(page_title="Face Segmentation Tool", layout="wide")

    st.title("Face Segmentation Tool")
    st.markdown("""
    Upload an image to extract the face with a transparent background.

    ## Guidelines:
    - Upload an image with **exactly one face**
    - The face should be clearly visible
    - For best results, use images with good lighting
    """)

    col1, col2 = st.columns(2)

    with col1:
        st.header("Input Image")
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

        if uploaded_file is not None:
            # Convert to numpy array
            file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
            image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            st.image(image, caption="Uploaded Image", use_container_width=True)

            if st.button("Segment Face"):
                with st.spinner("Processing..."):
                    segmenter = FaceHairSegmenter()
                    result, message = segmenter.segment_face_hair(image)

                    with col2:
                        st.header("Segmented Result")
                        st.image(result, caption="Segmented Face", use_container_width=True)
                        st.text(message)

                        # Add download button for the result
                        if "No face detected" not in message and "faces detected" not in message:
                            # Convert numpy array to PIL Image
                            result_img = Image.fromarray(result)

                            # Create a BytesIO object
                            from io import BytesIO
                            buf = BytesIO()
                            result_img.save(buf, format="PNG")

                            # Add download button
                            st.download_button(
                                label="Download Segmented Face",
                                data=buf.getvalue(),
                                file_name="segmented_face.png",
                                mime="image/png"
                            )

if __name__ == "__main__":
    main()
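Note: this commit assumes two sibling files in the Space that are not part of the diff, a model.py defining BiSeNet and the pretrained bisenet.pth weights. As a rough illustration only, the segmenter could also be driven outside Streamlit roughly like this (the input file name and output path are hypothetical):

import cv2
from app import FaceHairSegmenter

segmenter = FaceHairSegmenter()                 # loads bisenet.pth via load_model()
bgr = cv2.imread("portrait.jpg")                # hypothetical input; OpenCV reads BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # app.py passes RGB into segment_face_hair
rgba, message = segmenter.segment_face_hair(rgb)
print(message)
if rgba.ndim == 3 and rgba.shape[2] == 4:
    # cv2.imwrite expects BGRA channel order for 4-channel PNGs
    cv2.imwrite("segmented_face_local.png", cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGRA))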