Spaces:
Running
on
Zero
Running
on
Zero
img pron
Browse files
app.py
CHANGED
@@ -148,6 +148,23 @@ pipeline_flux.load_lora_weights(
|
|
148 |
)
|
149 |
pipeline_flux.to("cuda", init_weight_dtype(args.mixed_precision))
|
150 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
151 |
|
152 |
def extract_frames(video_path):
|
153 |
"""
|
@@ -203,9 +220,9 @@ def process_video_frames(
|
|
203 |
frames = extract_frames(video)
|
204 |
|
205 |
processed_frames = []
|
206 |
-
|
207 |
for person_image in frames:
|
208 |
-
|
209 |
person_image,
|
210 |
cloth_image,
|
211 |
cloth_type,
|
@@ -214,12 +231,60 @@ def process_video_frames(
|
|
214 |
seed,
|
215 |
show_type
|
216 |
)
|
|
|
217 |
yield result_image
|
218 |
processed_frames.append(result_image)
|
219 |
|
220 |
yield processed_frames
|
221 |
|
222 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
223 |
@spaces.GPU(duration=120)
|
224 |
def submit_function_flux(
|
225 |
person_image,
|
|
|
148 |
)
|
149 |
pipeline_flux.to("cuda", init_weight_dtype(args.mixed_precision))
|
150 |
|
151 |
+
def print_image_info(img):
    """Print (and return) basic metadata about a PIL image.

    Parameters
    ----------
    img : PIL.Image.Image
        Image to inspect. May be file-backed (``Image.open``) or created
        in memory (e.g. a diffusion-pipeline output).

    Returns
    -------
    dict
        The collected metadata, keyed by human-readable labels.
    """
    info = {
        # In-memory images (pipeline outputs, Image.new) have no
        # .filename attribute; a plain attribute access raised
        # AttributeError at the visible call site (result_image).
        "Filename": getattr(img, "filename", "N/A"),
        "Format": img.format,
        "Mode": img.mode,
        "Size": img.size,
        "Width": img.width,
        "Height": img.height,
        "DPI": img.info.get('dpi', "N/A"),
        # Only multi-frame formats (GIF/TIFF/...) expose these two;
        # fall back to sensible single-frame defaults.
        "Is Animated": getattr(img, "is_animated", False),
        "Frames": getattr(img, "n_frames", 1)
    }

    print("----- Image Information -----")
    for key, value in info.items():
        print(f"{key}: {value}")
    # Returning the dict is backward-compatible (original returned None)
    # and lets callers/tests inspect the metadata programmatically.
    return info
|
168 |
|
169 |
def extract_frames(video_path):
|
170 |
"""
|
|
|
220 |
frames = extract_frames(video)
|
221 |
|
222 |
processed_frames = []
|
223 |
+
print(f"processed_frames {len(processed_frames)}")
|
224 |
for person_image in frames:
|
225 |
+
result_image = proc_function_vidfl(
|
226 |
person_image,
|
227 |
cloth_image,
|
228 |
cloth_type,
|
|
|
231 |
seed,
|
232 |
show_type
|
233 |
)
|
234 |
+
print_image_info(result_image)
|
235 |
yield result_image
|
236 |
processed_frames.append(result_image)
|
237 |
|
238 |
yield processed_frames
|
239 |
|
240 |
|
241 |
+
@spaces.GPU(duration=120)
def proc_function_vidfl(
    person_image,
    cloth_image,
    cloth_type,
    num_inference_steps,
    guidance_scale,
    seed,
    show_type
):
    """Run the FLUX virtual try-on pipeline on one person/garment pair.

    Opens and normalises both input images, builds an automatic mask for
    the garment region, and performs a single inpainting pass.

    Parameters
    ----------
    person_image, cloth_image : path-like or file object
        Inputs accepted by ``PIL.Image.open``.
    cloth_type : str
        Garment category forwarded to the automasker.
    num_inference_steps : int
        Diffusion step count.
    guidance_scale : float
        Classifier-free guidance strength.
    seed : int
        RNG seed; ``-1`` means non-deterministic sampling.
    show_type : str
        Accepted for signature compatibility; not used here.

    Returns
    -------
    PIL.Image.Image
        The generated try-on frame.
    """
    # Seed the sampler only when the caller asked for determinism.
    generator = (
        torch.Generator(device='cuda').manual_seed(seed)
        if seed != -1
        else None
    )

    # Decode both inputs and force a consistent RGB colour space.
    person_image = Image.open(person_image).convert("RGB")
    cloth_image = Image.open(cloth_image).convert("RGB")

    # Bring both images to the pipeline's working resolution.
    person_image = resize_and_crop(person_image, (args.width, args.height))
    cloth_image = resize_and_padding(cloth_image, (args.width, args.height))

    # Auto-generate the inpainting mask, then soften its edges so the
    # composite blends smoothly.
    mask = automasker(person_image, cloth_type)['mask']
    mask = mask_processor.blur(mask, blur_factor=9)

    # Single-image inference; take the first (only) output image.
    result_image = pipeline_flux(
        image=person_image,
        condition_image=cloth_image,
        mask_image=mask,
        width=args.width,
        height=args.height,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator
    ).images[0]

    return result_image
|
287 |
+
|
288 |
@spaces.GPU(duration=120)
|
289 |
def submit_function_flux(
|
290 |
person_image,
|