Spaces: Running on Zero
added tab and func changes
app.py CHANGED
@@ -177,8 +177,17 @@ def extract_frames(video_path):
     cap.release()
     return frames
 
+#process_video_frames
 @spaces.GPU(duration=120)
-def process_video_frames(video, cloth_image, cloth_type, num_inference_steps, guidance_scale, seed, show_type):
+def process_video_frames(
+    video,
+    cloth_image,
+    cloth_type,
+    num_inference_steps,
+    guidance_scale,
+    seed,
+    show_type
+):
     """
     Process each frame of the video through the flux pipeline
 
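Note: @spaces.GPU is the ZeroGPU decorator from the Hugging Face spaces package; it allocates a GPU for each call to the decorated function, and duration caps the runtime in seconds. A minimal sketch of the pattern, with a placeholder body rather than the app's real code:

import spaces
import torch

@spaces.GPU(duration=120)  # hold a ZeroGPU slot for up to 120 s per call
def double_on_gpu(x: torch.Tensor) -> torch.Tensor:
    # CUDA is only guaranteed to be available inside the decorated call
    return (x.to("cuda") * 2).cpu()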
@@ -196,34 +205,16 @@ def process_video_frames(video, cloth_image, cloth_type, num_inference_steps, guidance_scale, seed, show_type):
     processed_frames = []
 
     for person_image in frames:
-
-
-
-
-
-
-
-
-
-
-        cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
-
-        # Generate mask (you might want to optimize this if mask is similar across frames)
-        mask = automasker(person_image, cloth_type)['mask']
-        mask = mask_processor.blur(mask, blur_factor=9)
-
-        # Run inference
-        result_image = pipeline_flux(
-            image=person_image,
-            condition_image=cloth_image,
-            mask_image=mask,
-            width=args.width,
-            height=args.height,
-            num_inference_steps=num_inference_steps,
-            guidance_scale=guidance_scale,
-            generator=generator
-        ).images[0]
-
+        new_result_image, result_image = submit_function_flux(
+            person_image,
+            cloth_image,
+            cloth_type,
+            num_inference_steps,
+            guidance_scale,
+            seed,
+            show_type
+        )
+        yield result_image
         processed_frames.append(result_image)
 
     yield processed_frames
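The rewritten loop hands each frame to the existing submit_function_flux helper and yields inside the loop, so Gradio can refresh the output while later frames are still rendering. A minimal sketch of that generator-streaming pattern (the frame source and per-frame function here are dummy stand-ins, not the app's code):

import gradio as gr

def fake_process(frame):
    # stand-in for the real per-frame try-on inference
    return frame.upper()

def stream_frames(text):
    processed = []
    for frame in text.split():  # pretend each word is a video frame
        processed.append(fake_process(frame))
        yield processed          # each yield pushes the partial list to the UI

with gr.Blocks() as demo:
    inp = gr.Textbox(value="a b c", label="Frames")
    out = gr.JSON(label="Processed Frames")
    gr.Button("Run").click(stream_frames, inp, out)

if __name__ == "__main__":
    demo.launch()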
@@ -399,65 +390,71 @@ def app_gradio():
         with gr.Row():
             with gr.Column(scale=1, min_width=350):
                 with gr.Row():
-
+                    image_path_vidflux = gr.Image(
                         type="filepath",
                         interactive=True,
                         visible=False,
                     )
-
-
+                    person_image_vidflux = gr.Video(
+
                     )
 
                 with gr.Row():
                     with gr.Column(scale=1, min_width=230):
-
+                        cloth_image_vidflux = gr.Image(
                             interactive=True, label="Condition Image", type="filepath"
                         )
+                    with gr.Column(scale=1, min_width=120):
+                        gr.Markdown(
+                            '<span style="color: #808080; font-size: small;">Two ways to provide Mask:<br>1. Upload the person image and use the `🖌️` above to draw the Mask (higher priority)<br>2. Select the `Try-On Cloth Type` to generate automatically </span>'
+                        )
+                        cloth_type = gr.Radio(
+                            label="Try-On Cloth Type",
+                            choices=["upper", "lower", "overall"],
+                            value="upper",
+                        )
 
-
+                submit_flux = gr.Button("Submit")
                 gr.Markdown(
                     '<center><span style="color: #FF0000">!!! Click only Once, Wait for Delay !!!</span></center>'
                 )
 
-                gr.Markdown(
-                    '<span style="color: #808080; font-size: small;">Advanced options can adjust details:<br>1. `Inference Step` may enhance details;<br>2. `CFG` is highly correlated with saturation;<br>3. `Random seed` may improve pseudo-shadow.</span>'
-                )
                 with gr.Accordion("Advanced Options", open=False):
-
+                    num_inference_steps_vidflux = gr.Slider(
                         label="Inference Step", minimum=10, maximum=100, step=5, value=50
                     )
                     # Guidence Scale
-
-                        label="CFG Strenth", minimum=0.0, maximum=
+                    guidance_scale_vidflux = gr.Slider(
+                        label="CFG Strenth", minimum=0.0, maximum=50, step=0.5, value=30
                     )
                     # Random Seed
-
+                    seed_vidflux = gr.Slider(
                         label="Seed", minimum=-1, maximum=10000, step=1, value=42
                     )
-
-
-
-
-
+                    show_type = gr.Radio(
+                        label="Show Type",
+                        choices=["result only", "input & result", "input & mask & result"],
+                        value="input & mask & result",
+                    )
 
             with gr.Column(scale=2, min_width=500):
-
-
-
+                result_image_vidflux = gr.Image(interactive=False, label="Result")
+                with gr.Row():
+                    # Photo Examples
+                    root_path = "resource/demo/example"
+                    with gr.Column():
+                        gal_output = gr.Gallery(label="Processed Frames")
+
 
-
-            person_example_fn, inputs=
+        image_path_vidflux.change(
+            person_example_fn, inputs=image_path_vidflux, outputs=person_image_vidflux
        )
 
-
+        submit_flux.click(
             process_video_frames,
-            [
-
-
-                num_inference_steps_p2p,
-                guidance_scale_p2p,
-                seed_p2p],
-            gall_output,
+            [person_image_vidflux, cloth_image_vidflux, cloth_type, num_inference_steps_vidflux, guidance_scale_vidflux,
+             seed_vidflux, show_type],
+            result_image_vidflux,gal_output
         )
 
 
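One thing to watch in this wiring: result_image_vidflux and gal_output are passed to submit_flux.click as two separate positional arguments, but Gradio's click(fn, inputs, outputs) expects all output components in a single outputs list, and a generator handler must then yield one value per output component on every yield. A sketch of the expected shape, with a dummy handler standing in for process_video_frames:

import gradio as gr

def process(n):
    gallery = []
    for i in range(int(n)):
        latest = f"frame {i}"
        gallery.append(latest)
        yield latest, gallery  # one value for each of the two outputs

with gr.Blocks() as demo:
    n = gr.Number(value=3, label="Frames")
    result_image_vidflux = gr.Textbox(label="Result")  # stand-in component
    gal_output = gr.JSON(label="Processed Frames")     # stand-in component
    gr.Button("Submit").click(process, n, [result_image_vidflux, gal_output])

if __name__ == "__main__":
    demo.launch()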