yusuf committed
Commit 296208e · 1 Parent(s): d08d609

Interface edit 3

Files changed (1):
  app.py +184 -235
app.py CHANGED
@@ -9,11 +9,13 @@ from leffa_utils.densepose_predictor import DensePosePredictor
 from leffa_utils.utils import resize_and_center, list_dir, get_agnostic_mask_hd, get_agnostic_mask_dc
 from preprocess.humanparsing.run_parsing import Parsing
 from preprocess.openpose.run_openpose import OpenPose
+
 import gradio as gr
 
 # Download checkpoints
 snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
 
+
 class LeffaPredictor(object):
     def __init__(self):
         self.mask_predictor = AutoMasker(
@@ -90,6 +92,9 @@ class LeffaPredictor(object):
             mask = get_agnostic_mask_dc(
                 model_parse, keypoints, vt_garment_type)
             mask = mask.resize((768, 1024))
+            # garment_type_hd = "upper" if vt_garment_type in [
+            #     "upper_body", "dresses"] else "lower"
+            # mask = self.mask_predictor(src_image, garment_type_hd)["mask"]
         elif control_type == "pose_transfer":
             mask = Image.fromarray(np.ones_like(src_image_array) * 255)
 
@@ -139,6 +144,7 @@ class LeffaPredictor(object):
             seed=seed,
             repaint=vt_repaint,)
         gen_image = output["generated_image"][0]
+        # gen_image.save("gen_image.png")
         return np.array(gen_image), np.array(mask), np.array(densepose)
 
     def dehasoft(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint):
@@ -147,260 +153,203 @@ class LeffaPredictor(object):
     def leffa_predict_pt(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed):
         return self.leffa_predict(src_image_path, ref_image_path, "pose_transfer", ref_acceleration, step, scale, seed)
 
+
 if __name__ == "__main__":
+
     leffa_predictor = LeffaPredictor()
     example_dir = "./ckpts/examples"
     person1_images = list_dir(f"{example_dir}/person1")
     person2_images = list_dir(f"{example_dir}/person2")
     garment_images = list_dir(f"{example_dir}/garment")
 
-    # Customized theme
-    theme = gr.themes.Soft(
-        primary_hue="indigo",
-        secondary_hue="purple",
-        neutral_hue="gray",
-        radius_size="lg",
-        text_size="lg",
-        spacing_size="md",
-    ).set(
-        body_background_fill="#f5f5f5",
-        background_fill_primary="#ffffff",
-        button_primary_background_fill="#4f46e5",
-        button_primary_background_fill_hover="#6b7280",
-        button_primary_text_color="#ffffff",
-        shadow="0 4px 6px rgba(0, 0, 0, 0.1)",
-    )
-
-    # Title and description
-    title = "# Dehasoft AI Studio"
-    description = """
-    Welcome to **Dehasoft AI Studio**! Transform appearances with virtual try-on or adjust poses with pose transfer using cutting-edge AI models.
-    Powered by VITON-HD, DressCode, and DeepFashion datasets.
-    """
-    footer_note = """
-    **Note:** Models are trained on academic datasets only. Virtual try-on leverages VITON-HD/DressCode, while pose transfer uses DeepFashion.
-    """
-
-    with gr.Blocks(theme=theme, title="Dehasoft AI Studio") as demo:
-        gr.Markdown(title, elem_classes=["title"])
-        gr.Markdown(description, elem_classes=["description"])
-
-        with gr.Tabs(elem_classes=["tabs"]):
-            with gr.TabItem("Virtual Try-On", elem_id="vt_tab"):
-                with gr.Row(equal_height=True):
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Upload Person Image", elem_classes=["section-title"])
-                        vt_src_image = gr.Image(
-                            sources=["upload"],
-                            type="filepath",
-                            label="Person Image",
-                            interactive=True,
-                            height=400,
-                            elem_classes=["image-upload"],
-                        )
-                        gr.Examples(
-                            examples=person1_images,
-                            inputs=vt_src_image,
-                            examples_per_page=5,
-                            elem_classes=["examples"],
-                        )
-
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Upload Garment Image", elem_classes=["section-title"])
-                        vt_ref_image = gr.Image(
-                            sources=["upload"],
-                            type="filepath",
-                            label="Garment Image",
-                            interactive=True,
-                            height=400,
-                            elem_classes=["image-upload"],
-                        )
-                        gr.Examples(
-                            examples=garment_images,
-                            inputs=vt_ref_image,
-                            examples_per_page=5,
-                            elem_classes=["examples"],
-                        )
-
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Result", elem_classes=["section-title"])
-                        vt_gen_image = gr.Image(
-                            label="Generated Image",
-                            height=400,
-                            elem_classes=["image-output"],
-                        )
-                        vt_gen_button = gr.Button(
-                            "Generate Image",
-                            variant="primary",
-                            size="lg",
-                            elem_classes=["generate-btn"],
-                        )
-
-                with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]):
-                    vt_model_type = gr.Radio(
-                        label="Model Type",
-                        choices=[("VITON-HD (Recommended)", "viton_hd"), ("DressCode (Experimental)", "dress_code")],
-                        value="viton_hd",
-                        elem_classes=["radio"],
-                    )
-                    vt_garment_type = gr.Radio(
-                        label="Garment Type",
-                        choices=[("Upper", "upper_body"), ("Lower", "lower_body"), ("Dress", "dresses")],
-                        value="upper_body",
-                        elem_classes=["radio"],
-                    )
-                    vt_ref_acceleration = gr.Checkbox(
-                        label="Accelerate Reference UNet",
-                        value=False,
-                        elem_classes=["checkbox"],
-                    )
-                    vt_repaint = gr.Checkbox(
-                        label="Repaint Mode",
-                        value=False,
-                        elem_classes=["checkbox"],
-                    )
-                    vt_step = gr.Slider(
-                        label="Inference Steps",
-                        minimum=30,
-                        maximum=100,
-                        step=1,
-                        value=30,
-                        elem_classes=["slider"],
-                    )
-                    vt_scale = gr.Slider(
-                        label="Guidance Scale",
-                        minimum=0.1,
-                        maximum=5.0,
-                        step=0.1,
-                        value=2.5,
-                        elem_classes=["slider"],
-                    )
-                    vt_seed = gr.Number(
-                        label="Random Seed",
-                        minimum=-1,
-                        maximum=2147483647,
-                        step=1,
-                        value=42,
-                        elem_classes=["number"],
-                    )
-
-                with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]):
-                    vt_mask = gr.Image(label="Generated Mask", height=200)
-                    vt_densepose = gr.Image(label="Generated DensePose", height=200)
-
-                vt_gen_button.click(
-                    fn=leffa_predictor.dehasoft,
-                    inputs=[vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step, vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint],
-                    outputs=[vt_gen_image, vt_mask, vt_densepose],
-                    _js="() => { document.querySelector('.generate-btn').classList.add('loading'); setTimeout(() => document.querySelector('.generate-btn').classList.remove('loading'), 5000); }"
-                )
-
-            with gr.TabItem("Pose Transfer", elem_id="pt_tab"):
-                with gr.Row(equal_height=True):
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Source Person Image", elem_classes=["section-title"])
-                        pt_ref_image = gr.Image(
-                            sources=["upload"],
-                            type="filepath",
-                            label="Person Image",
-                            interactive=True,
-                            height=400,
-                            elem_classes=["image-upload"],
-                        )
-                        gr.Examples(
-                            examples=person1_images,
-                            inputs=pt_ref_image,
-                            examples_per_page=5,
-                            elem_classes=["examples"],
-                        )
-
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Target Pose Image", elem_classes=["section-title"])
-                        pt_src_image = gr.Image(
-                            sources=["upload"],
-                            type="filepath",
-                            label="Target Pose Person Image",
-                            interactive=True,
-                            height=400,
-                            elem_classes=["image-upload"],
-                        )
-                        gr.Examples(
-                            examples=person2_images,
-                            inputs=pt_src_image,
-                            examples_per_page=5,
-                            elem_classes=["examples"],
-                        )
-
-                    with gr.Column(scale=1):
-                        gr.Markdown("### Result", elem_classes=["section-title"])
-                        pt_gen_image = gr.Image(
-                            label="Generated Image",
-                            height=400,
-                            elem_classes=["image-output"],
-                        )
-                        pt_gen_button = gr.Button(
-                            "Generate Image",
-                            variant="primary",
-                            size="lg",
-                            elem_classes=["generate-btn"],
-                        )
-
-                with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]):
-                    pt_ref_acceleration = gr.Checkbox(
-                        label="Accelerate Reference UNet",
-                        value=False,
-                        elem_classes=["checkbox"],
-                    )
-                    pt_step = gr.Slider(
-                        label="Inference Steps",
-                        minimum=30,
-                        maximum=100,
-                        step=1,
-                        value=30,
-                        elem_classes=["slider"],
-                    )
-                    pt_scale = gr.Slider(
-                        label="Guidance Scale",
-                        minimum=0.1,
-                        maximum=5.0,
-                        step=0.1,
-                        value=2.5,
-                        elem_classes=["slider"],
-                    )
-                    pt_seed = gr.Number(
-                        label="Random Seed",
-                        minimum=-1,
-                        maximum=2147483647,
-                        step=1,
-                        value=42,
-                        elem_classes=["number"],
-                    )
-
-                with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]):
-                    pt_mask = gr.Image(label="Generated Mask", height=200)
-                    pt_densepose = gr.Image(label="Generated DensePose", height=200)
-
-                pt_gen_button.click(
-                    fn=leffa_predictor.leffa_predict_pt,
-                    inputs=[pt_src_image, pt_ref_image, pt_ref_acceleration, pt_step, pt_scale, pt_seed],
-                    outputs=[pt_gen_image, pt_mask, pt_densepose],
-                    _js="() => { document.querySelector('.generate-btn').classList.add('loading'); setTimeout(() => document.querySelector('.generate-btn').classList.remove('loading'), 5000); }"
-                )
-
-        gr.Markdown(footer_note, elem_classes=["footer"])
-
-    demo.css = """
-    .title { text-align: center; font-size: 2.5em; margin-bottom: 10px; color: #4f46e5; }
-    .description { text-align: center; font-size: 1.2em; margin-bottom: 20px; color: #374151; }
-    .section-title { font-size: 1.5em; color: #6b7280; margin-bottom: 10px; }
-    .image-upload, .image-output { border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); }
-    .generate-btn { transition: all 0.3s ease; }
-    .generate-btn:hover { transform: scale(1.05); }
-    .generate-btn.loading { opacity: 0.7; cursor: not-allowed; }
-    .accordion { background-color: #f9fafb; border-radius: 8px; }
-    .radio, .checkbox, .slider, .number { margin: 5px 0; }
-    .examples { margin-top: 10px; }
-    .footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: #6b7280; }
-    """
-
-    demo.launch(share=True, server_port=7860, allowed_paths=["./ckpts/examples"])
+ title = "## Dehasoft"
166
+ link = """Dehasoft"""
167
+ news = """Dehasoft"""
168
+ description = "Dehasoft"
169
+ note = "Note: The models used in the demo are trained solely on academic datasets. Virtual try-on uses VITON-HD/DressCode, and pose transfer uses DeepFashion."
170
+
171
+ with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.red)).queue() as demo:
172
+ gr.Markdown(title)
173
+ gr.Markdown(link)
174
+ gr.Markdown(news)
175
+ gr.Markdown(description)
176
+
177
+ with gr.Tab("Control Appearance (Virtual Try-on)"):
178
+ with gr.Row():
179
+ with gr.Column():
180
+ gr.Markdown("#### Person Image")
181
+ vt_src_image = gr.Image(
182
+ sources=["upload"],
183
+ type="filepath",
184
+ label="Person Image",
185
+ width=512,
186
+ height=512,
187
+ )
188
+
189
+ gr.Examples(
190
+ inputs=vt_src_image,
191
+ examples_per_page=10,
192
+ examples=person1_images,
193
+ )
194
+
195
+ with gr.Column():
196
+ gr.Markdown("#### Garment Image")
197
+ vt_ref_image = gr.Image(
198
+ sources=["upload"],
199
+ type="filepath",
200
+ label="Garment Image",
201
+ width=512,
202
+ height=512,
203
+ )
204
+
205
+ gr.Examples(
206
+ inputs=vt_ref_image,
207
+ examples_per_page=10,
208
+ examples=garment_images,
209
+ )
210
+
211
+ with gr.Column():
212
+ gr.Markdown("#### Generated Image")
213
+ vt_gen_image = gr.Image(
214
+ label="Generated Image",
215
+ width=512,
216
+ height=512,
217
+ )
218
+
219
+ with gr.Row():
220
+ vt_gen_button = gr.Button("Generate")
221
+
222
+ with gr.Accordion("Advanced Options", open=False):
223
+ vt_model_type = gr.Radio(
224
+ label="Model Type",
225
+ choices=[("VITON-HD (Recommended)", "viton_hd"),
226
+ ("DressCode (Experimental)", "dress_code")],
227
+ value="viton_hd",
228
  )
229
 
230
+ vt_garment_type = gr.Radio(
231
+ label="Garment Type",
232
+ choices=[("Upper", "upper_body"),
233
+ ("Lower", "lower_body"),
234
+ ("Dress", "dresses")],
235
+ value="upper_body",
 
 
 
 
 
 
 
 
 
236
  )
237
 
238
+ vt_ref_acceleration = gr.Radio(
239
+ label="Accelerate Reference UNet (may slightly reduce performance)",
240
+ choices=[("True", True), ("False", False)],
241
+ value=False,
 
 
 
 
 
 
 
 
242
  )
243
 
244
+ vt_repaint = gr.Radio(
245
+ label="Repaint Mode",
246
+ choices=[("True", True), ("False", False)],
247
+ value=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  )
249
+
250
+ vt_step = gr.Number(
251
+ label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
252
+
253
+ vt_scale = gr.Number(
254
+ label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
255
+
256
+ vt_seed = gr.Number(
257
+ label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
258
+
259
+ with gr.Accordion("Debug", open=False):
260
+ vt_mask = gr.Image(
261
+ label="Generated Mask",
262
+ width=256,
263
+ height=256,
264
  )
265
 
266
+ vt_densepose = gr.Image(
267
+ label="Generated DensePose",
268
+ width=256,
269
+ height=256,
 
 
 
 
 
270
  )
271
+
272
+ vt_gen_button.click(fn=leffa_predictor.dehasoft, inputs=[
273
+ vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step, vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint], outputs=[vt_gen_image, vt_mask, vt_densepose])
274
+
275
+ with gr.Tab("Control Pose (Pose Transfer)"):
276
+ with gr.Row():
277
+ with gr.Column():
278
+ gr.Markdown("#### Person Image")
279
+ pt_ref_image = gr.Image(
280
+ sources=["upload"],
281
+ type="filepath",
282
+ label="Person Image",
283
+ width=512,
284
+ height=512,
285
+ )
286
+
287
+ gr.Examples(
288
+ inputs=pt_ref_image,
289
+ examples_per_page=10,
290
+ examples=person1_images,
291
+ )
292
+
293
+ with gr.Column():
294
+ gr.Markdown("#### Target Pose Person Image")
295
+ pt_src_image = gr.Image(
296
+ sources=["upload"],
297
+ type="filepath",
298
+ label="Target Pose Person Image",
299
+ width=512,
300
+ height=512,
301
+ )
302
+
303
+ gr.Examples(
304
+ inputs=pt_src_image,
305
+ examples_per_page=10,
306
+ examples=person2_images,
307
+ )
308
+
309
+ with gr.Column():
310
+ gr.Markdown("#### Generated Image")
311
+ pt_gen_image = gr.Image(
312
+ label="Generated Image",
313
+ width=512,
314
+ height=512,
315
+ )
316
+
317
+ with gr.Row():
318
+ pose_transfer_gen_button = gr.Button("Generate")
319
+
320
+ with gr.Accordion("Advanced Options", open=False):
321
+ pt_ref_acceleration = gr.Radio(
322
+ label="Accelerate Reference UNet",
323
+ choices=[("True", True), ("False", False)],
324
+ value=False,
325
  )
326
 
327
+ pt_step = gr.Number(
328
+ label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
329
+
330
+ pt_scale = gr.Number(
331
+ label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
332
+
333
+ pt_seed = gr.Number(
334
+ label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
335
+
336
+ with gr.Accordion("Debug", open=False):
337
+ pt_mask = gr.Image(
338
+ label="Generated Mask",
339
+ width=256,
340
+ height=256,
341
  )
342
+
343
+ pt_densepose = gr.Image(
344
+ label="Generated DensePose",
345
+ width=256,
346
+ height=256,
347
  )
348
 
349
+ pose_transfer_gen_button.click(fn=leffa_predictor.leffa_predict_pt, inputs=[
350
+ pt_src_image, pt_ref_image, pt_ref_acceleration, pt_step, pt_scale, pt_seed], outputs=[pt_gen_image, pt_mask, pt_densepose])
351
+
352
+ gr.Markdown(note)
353
+
354
+ demo.launch(share=True, server_port=7860,
355
+ allowed_paths=["./ckpts/examples"])
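
A minimal sketch for exercising the new wiring outside the Gradio UI (not part of this commit; the image paths are hypothetical placeholders, and importing app.py also runs the module-level snapshot_download):

    # Sketch: call the try-on entry point bound to the "Generate" button,
    # passing arguments in the same order as the inputs=[...] list above.
    from app import LeffaPredictor  # module import also downloads ./ckpts

    predictor = LeffaPredictor()
    gen_image, mask, densepose = predictor.dehasoft(
        "person.jpg",   # src_image_path (hypothetical path)
        "garment.jpg",  # ref_image_path (hypothetical path)
        False,          # ref_acceleration
        30,             # step: inference steps
        2.5,            # scale: guidance scale
        42,             # seed
        "viton_hd",     # vt_model_type
        "upper_body",   # vt_garment_type
        False,          # vt_repaint
    )
    # Returns numpy arrays: generated image, agnostic mask, and DensePose map.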