Spaces: Running on Zero
Update app.py
app.py CHANGED
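This commit adds a draw_kps keypoint-visualization helper and disables the LCM fast-inference toggle by commenting out the enable_LCM checkbox and its event wiring.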
@@ -153,6 +153,34 @@ def convert_from_cv2_to_image(img: np.ndarray) -> Image:
 def convert_from_image_to_cv2(img: Image) -> np.ndarray:
     return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
 
+def draw_kps(w,h, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
+
+    stickwidth = 8
+    limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
+
+    out_img = np.zeros([h, w, 3])
+
+    for i in range(len(limbSeq)):
+        index = limbSeq[i]
+        color = color_list[index[0]]
+
+        x = kps[index][:, 0]
+        y = kps[index][:, 1]
+        length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
+        angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
+        polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+        out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
+    out_img = (out_img * 0.6).astype(np.uint8)
+
+    for idx_kp, kp in enumerate(kps):
+        color = color_list[idx_kp]
+        x, y = kp
+        out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
+
+    # out_img = out_img.astype(np.uint8)
+    out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
+    return out_img_pil
+
 def resize_img(
     input_image,
     max_side=1280,
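The new draw_kps helper rasterizes a set of facial keypoints: limbSeq connects keypoints 0, 1, 3, and 4 to keypoint 2 with filled ellipses, each keypoint is then drawn as a solid circle, and the result is returned as a PIL image. A minimal usage sketch follows; it assumes draw_kps is importable from the updated app.py (an assumption about the file layout), and the coordinates are made-up placeholders rather than real detector output:

import numpy as np
from app import draw_kps  # assumption: app.py is on the import path

# Five (x, y) keypoints; indices 0, 1, 3, 4 are each linked to index 2.
kps = np.array([
    [180.0, 200.0],
    [320.0, 200.0],
    [250.0, 260.0],  # hub keypoint shared by all four "limbs"
    [200.0, 330.0],
    [300.0, 330.0],
])

control_image = draw_kps(512, 512, kps)  # returns a 512x512 PIL.Image
control_image.save("kps_visualization.png")

Note that the limb layer is dimmed (out_img * 0.6) before the circles are drawn, so the keypoint dots stay fully saturated on top of the ellipses.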
@@ -301,10 +329,10 @@ with gr.Blocks(css=css) as demo:
             )
 
             submit = gr.Button("Submit", variant="primary")
-            enable_LCM = gr.Checkbox(
-                label="Enable Fast Inference with LCM", value=enable_lcm_arg,
-                info="LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces",
-            )
+            # enable_LCM = gr.Checkbox(
+            #     label="Enable Fast Inference with LCM", value=enable_lcm_arg,
+            #     info="LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces",
+            # )
 
             # strength
             controlnet_conditioning_scale = gr.Slider(
@@ -383,12 +411,12 @@ with gr.Blocks(css=css) as demo:
             outputs=[gallery, usage_tips],
         )
 
-        enable_LCM.input(
-            fn=toggle_lcm_ui,
-            inputs=[enable_LCM],
-            outputs=[num_steps, guidance_scale],
-            queue=False,
-        )
+        # enable_LCM.input(
+        #     fn=toggle_lcm_ui,
+        #     inputs=[enable_LCM],
+        #     outputs=[num_steps, guidance_scale],
+        #     queue=False,
+        # )
 
         gr.Examples(
             examples=get_example(),
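The wiring removed here called toggle_lcm_ui on checkbox input to retune the num_steps and guidance_scale sliders; with the enable_LCM checkbox commented out above, the handler would be dead code, so it is commented out as well. toggle_lcm_ui itself does not appear in this diff; below is a minimal sketch of what such a handler could look like, assuming LCM-style presets (the concrete values are assumptions, not taken from this Space):

import gradio as gr

def toggle_lcm_ui(value):
    # Assumed behavior: LCM needs far fewer steps and lower guidance,
    # so switch the two sliders between "LCM" and "standard" presets.
    if value:
        return (
            gr.update(value=5),    # num_steps preset for LCM (assumed)
            gr.update(value=1.5),  # guidance_scale preset for LCM (assumed)
        )
    return (
        gr.update(value=30),   # default num_steps (assumed)
        gr.update(value=5.0),  # default guidance_scale (assumed)
    )

With the toggle disabled, the two sliders simply keep whatever static defaults they are constructed with.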