Siyun He committed · Commit 4aebdd5
Parent(s): acd6b6a
add more transform effects
app.py CHANGED
@@ -70,8 +70,8 @@ def process_frame(frame):
     # Determine face shape
     # face_shape = determine_face_shape(landmarks)

-    #
-    nose_x, nose_y = face_landmarks[2].astype(int)
+    # Adjust the overlay size and position
+    # nose_x, nose_y = face_landmarks[2].astype(int)
     left_eye_x, left_eye_y = face_landmarks[0].astype(int)
     right_eye_x, right_eye_y = face_landmarks[1].astype(int)

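The removed nose-landmark line and the new "Adjust the overlay size and position" comment suggest the overlay is now anchored on the eye landmarks alone. As a point of reference, here is a minimal sketch of eye-distance-based overlay scaling, assuming a BGRA overlay image and that face_landmarks[0]/[1] are the left/right eye centers as in the hunk above; scale_overlay and the 2.0 width factor are illustrative, not names or values from app.py:

import cv2
import numpy as np

def scale_overlay(overlay_bgra, left_eye, right_eye, width_factor=2.0):
    """Scale an overlay (e.g. glasses) relative to the distance between the eyes.

    left_eye / right_eye are (x, y) pixel coordinates; width_factor is an
    illustrative tuning constant, not a value taken from app.py.
    """
    eye_dist = np.hypot(right_eye[0] - left_eye[0], right_eye[1] - left_eye[1])
    new_w = max(1, int(eye_dist * width_factor))
    h, w = overlay_bgra.shape[:2]
    new_h = max(1, int(h * new_w / w))  # keep the overlay's aspect ratio
    resized = cv2.resize(overlay_bgra, (new_w, new_h), interpolation=cv2.INTER_AREA)
    # Center the overlay horizontally between the eyes.
    center_x = (left_eye[0] + right_eye[0]) // 2
    top_left = (center_x - new_w // 2, min(left_eye[1], right_eye[1]) - new_h // 2)
    return resized, top_left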
@@ -134,6 +134,38 @@ def transform_cv2(frame, transform):
         # perform edge detection
         img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
         return img
+
+    elif transform == "sepia":
+        # apply sepia effect
+        kernel = np.array([[0.272, 0.534, 0.131],
+                           [0.349, 0.686, 0.168],
+                           [0.393, 0.769, 0.189]])
+        img = cv2.transform(frame, kernel)
+        img = np.clip(img, 0, 255)  # ensure values are within byte range
+        # Convert BGR to RGB if necessary (for display purposes)
+        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        return img_rgb
+
+    elif transform == "negative":
+        # apply negative effect
+        img = cv2.bitwise_not(frame)
+        return img
+
+    elif transform == "sketch":
+        # apply sketch effect
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        inv_gray = cv2.bitwise_not(gray)
+        blur = cv2.GaussianBlur(inv_gray, (21, 21), 0)
+        inv_blur = cv2.bitwise_not(blur)
+        img = cv2.divide(gray, inv_blur, scale=256.0)
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+        return img
+
+    elif transform == "blur":
+        # apply blur effect
+        img = cv2.GaussianBlur(frame, (15, 15), 0)
+        return img
+
     else:
         return frame

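Of the four new branches, sepia and sketch are the least self-explanatory. The snippet below is a standalone sketch that replays the same OpenCV calls on a still image so the effects can be checked outside the webcam stream; sample.jpg and the *_preview.jpg names are placeholders, not files from this Space:

import cv2
import numpy as np

frame = cv2.imread("sample.jpg")  # placeholder test image (BGR), not part of the Space

# Sepia: each output channel is a weighted mix of the input channels;
# cv2.transform applies the 3x3 kernel to every pixel's channel vector.
sepia_kernel = np.array([[0.272, 0.534, 0.131],
                         [0.349, 0.686, 0.168],
                         [0.393, 0.769, 0.189]])
sepia = cv2.transform(frame, sepia_kernel)
sepia = np.clip(sepia, 0, 255)

# Sketch: divide the grayscale image by its inverted Gaussian blur
# (a "color dodge" blend), which brightens flat regions and keeps edges dark.
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
inv_blur = cv2.bitwise_not(cv2.GaussianBlur(cv2.bitwise_not(gray), (21, 21), 0))
sketch = cv2.divide(gray, inv_blur, scale=256.0)

cv2.imwrite("sepia_preview.jpg", sepia)
cv2.imwrite("sketch_preview.jpg", sketch)

The sketch branch is the classic pencil-sketch color-dodge trick: the larger the Gaussian kernel (21x21 in the committed code), the softer the resulting strokes look.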
@@ -171,7 +203,7 @@ def webcam_input(frame, transform):
 with gr.Blocks() as demo:
     with gr.Column(elem_classes=["my-column"]):
         with gr.Group(elem_classes=["my-group"]):
-            transform = gr.Dropdown(choices=["cartoon", "edges", "none"],
+            transform = gr.Dropdown(choices=["cartoon", "edges", "sepia", "negative", "sketch", "blur", "none"],
                                     value="none", label="Transformation")
             input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
             face_shape_output = gr.Textbox(label="Detected Face Shape")
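The hunk above only widens the choices list; the event wiring that feeds the dropdown value into the per-frame processing is outside this diff. Below is a minimal sketch of how the pieces are presumably connected, assuming webcam_input(frame, transform) returns the processed frame and a face-shape string; the output_img component and the stub webcam_input are illustrative stand-ins, not code from app.py:

import gradio as gr

def webcam_input(frame, transform):
    # Stand-in for the real function in app.py: it would call transform_cv2
    # and the face-shape logic, then return (processed_frame, face_shape_text).
    return frame, "unknown"

with gr.Blocks() as demo:
    with gr.Column(elem_classes=["my-column"]):
        with gr.Group(elem_classes=["my-group"]):
            transform = gr.Dropdown(
                choices=["cartoon", "edges", "sepia", "negative", "sketch", "blur", "none"],
                value="none", label="Transformation")
            input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
            face_shape_output = gr.Textbox(label="Detected Face Shape")
        output_img = gr.Image(label="Output")  # illustrative output component

    # Stream each webcam frame together with the current dropdown value.
    input_img.stream(webcam_input, [input_img, transform],
                     [output_img, face_shape_output])

demo.launch()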