Spaces: Running on Zero
added gl ref
app.py CHANGED
@@ -1,6 +1,13 @@
 import argparse
 import os
-
+import gc
+import psutil
+import threading
+from pathlib import Path
+import shutil
+import time
+import glob
+from datetime import datetime
 os.environ['CUDA_HOME'] = '/usr/local/cuda'
 os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'
 from datetime import datetime
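The environment assignments stay ahead of the heavy imports because variables like CUDA_HOME are only honored by tooling that reads them at import or first-use time (for example, torch.utils.cpp_extension when JIT-compiling CUDA kernels). A minimal sketch of the ordering; the torch import is an illustrative assumption, since this hunk does not show which libraries app.py loads next:

import os

# Must be set before importing anything that locates or compiles CUDA code.
os.environ['CUDA_HOME'] = '/usr/local/cuda'
os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'

import torch  # illustrative; picks up the CUDA paths configured above
print(torch.cuda.is_available())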
@@ -117,7 +124,8 @@ def image_grid(imgs, rows, cols):
 
 
 args = parse_args()
-
+OUTPUT_DIR = "generated_images"
+os.makedirs(OUTPUT_DIR, exist_ok=True)
 # Mask-based CatVTON
 catvton_repo = "zhengchong/CatVTON"
 repo_path = snapshot_download(repo_id=catvton_repo)
@@ -148,6 +156,13 @@ pipeline_flux.load_lora_weights(
 )
 pipeline_flux.to("cuda", init_weight_dtype(args.mixed_precision))
 
+def save_generated_image(image, frame_no):
+    """Save a generated frame to OUTPUT_DIR, named by frame number"""
+    filename = f"{frame_no}_frame.png"
+    filepath = os.path.join(OUTPUT_DIR, filename)
+    image.save(filepath)
+    return filepath
+
 def print_image_info(img):
     # Basic attributes
     info = {
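A quick usage sketch for the new helper (values hypothetical): each frame is written into generated_images/ and the path is returned.

from PIL import Image

frame = Image.new("RGB", (512, 512))   # stand-in for a generated frame
path = save_generated_image(frame, 0)  # -> generated_images/0_frame.png
print(path)

Since the filename is derived only from the frame number, a later run overwrites the frames of the previous one; a timestamped name would keep both.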
@@ -216,7 +231,7 @@ def process_video_frames(
 
     processed_frames = []
     print(f"processed_frames {len(frames)}")
-    for person_image in frames:
+    for index, person_image in enumerate(frames):
         result_image = proc_function_vidfl(
             person_image,
             cloth_image,
@@ -227,6 +242,7 @@ def process_video_frames(
             show_type
         )
         print_image_info(result_image)
+        save_generated_image(result_image, index)
         yield result_image
         processed_frames.append(result_image)
 
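Because process_video_frames yields after each frame, Gradio treats it as a streaming event handler and refreshes the bound output on every yield; the new save call persists the frame just before it is streamed. A standalone sketch of the same generator pattern (all names hypothetical):

import gradio as gr

def stream_frames(n):
    # Gradio re-renders the output each time a generator handler yields.
    for i in range(int(n)):
        yield f"frame {i} done"

with gr.Blocks() as demo:
    count = gr.Number(value=5, label="Frames")
    status = gr.Textbox(label="Progress")
    gr.Button("Run").click(stream_frames, inputs=count, outputs=status)

demo.launch()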
@@ -360,7 +376,28 @@ def submit_function_flux(
 def person_example_fn(image_path):
     return image_path
 
-
+def get_generated_images():
+    """Get list of generated images with their details"""
+    files = glob.glob(os.path.join(OUTPUT_DIR, "*.png"))
+    files.sort(key=os.path.getctime, reverse=True)  # Sort by creation time
+    return [
+        {
+            "path": f,
+            "name": os.path.basename(f),
+            "date": datetime.fromtimestamp(os.path.getctime(f)).strftime("%Y-%m-%d %H:%M:%S"),
+            "size": f"{os.path.getsize(f) / 1024:.1f} KB"
+        }
+        for f in files
+    ]
+
+def update_gallery():
+    """Update the file gallery"""
+    files = get_generated_images()
+    return [
+        (f["path"], f"{f['name']}\n{f['date']}")
+        for f in files
+    ]
+
 HEADER = """
 <h1 style="text-align: center;"> 🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models </h1>
 
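update_gallery returns (path, caption) pairs, one of the value formats gr.Gallery accepts, with the caption combining file name and timestamp. One caveat: os.path.getctime is creation time on Windows but metadata-change time on most Unix systems, so the "creation time" ordering is approximate there. A small standalone sketch of the caption format (the temp file stands in for a saved frame):

import os
import tempfile
from datetime import datetime

with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
    path = tmp.name  # stand-in for generated_images/0_frame.png

caption = "{}\n{}".format(
    os.path.basename(path),
    datetime.fromtimestamp(os.path.getctime(path)).strftime("%Y-%m-%d %H:%M:%S"),
)
print((path, caption))  # the tuple shape consumed by gr.Gallery
os.unlink(path)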
@@ -504,12 +541,19 @@ def app_gradio():
             root_path = "resource/demo/example"
             with gr.Column():
                 gal_output = gr.Gallery(label="Processed Frames")
+                refresh_button = gr.Button("Refresh Gallery")
 
 
             image_path_vidflux.change(
                 person_example_fn, inputs=image_path_vidflux, outputs=person_image_vidflux
             )
 
+            refresh_button.click(
+                fn=update_gallery,
+                inputs=[],
+                outputs=[file_gallery],
+            )
+
             submit_flux.click(
                 process_video_frames,
                 [person_image_vidflux, cloth_image_vidflux, cloth_type, num_inference_steps_vidflux, guidance_scale_vidflux,
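For reference, the refresh wiring in isolation. Note that outputs=[file_gallery] points at a component that does not appear in these hunks (the gallery created above is gal_output), so this sketch assumes file_gallery is a gr.Gallery defined elsewhere in app.py's Blocks layout:

import gradio as gr

def update_gallery():
    return []  # stub; the real function lists generated_images/

with gr.Blocks() as demo:
    file_gallery = gr.Gallery(label="Generated Images")  # assumed component
    refresh_button = gr.Button("Refresh Gallery")
    refresh_button.click(fn=update_gallery, inputs=[], outputs=[file_gallery])

demo.launch()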