yusuf committed on
Commit 5e53a08 · Parent(s): a79d684

UI layout revision

Files changed (1)
  1. app.py +241 -319

app.py CHANGED
@@ -12,344 +12,266 @@ from preprocess.openpose.run_openpose import OpenPose
 
 import gradio as gr
 
-# Download checkpoints
-snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
-
-
-class LeffaPredictor(object):
-    def __init__(self):
-        self.mask_predictor = AutoMasker(
-            densepose_path="./ckpts/densepose",
-            schp_path="./ckpts/schp",
-        )
-
-        self.densepose_predictor = DensePosePredictor(
-            config_path="./ckpts/densepose/densepose_rcnn_R_50_FPN_s1x.yaml",
-            weights_path="./ckpts/densepose/model_final_162be9.pkl",
-        )
-
-        self.parsing = Parsing(
-            atr_path="./ckpts/humanparsing/parsing_atr.onnx",
-            lip_path="./ckpts/humanparsing/parsing_lip.onnx",
-        )
-
-        self.openpose = OpenPose(
-            body_model_path="./ckpts/openpose/body_pose_model.pth",
-        )
-
-        vt_model_hd = LeffaModel(
-            pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
-            pretrained_model="./ckpts/virtual_tryon.pth",
-            dtype="float16",
-        )
-        self.vt_inference_hd = LeffaInference(model=vt_model_hd)
-
-        vt_model_dc = LeffaModel(
-            pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
-            pretrained_model="./ckpts/virtual_tryon_dc.pth",
-            dtype="float16",
-        )
-        self.vt_inference_dc = LeffaInference(model=vt_model_dc)
-
-        pt_model = LeffaModel(
-            pretrained_model_name_or_path="./ckpts/stable-diffusion-xl-1.0-inpainting-0.1",
-            pretrained_model="./ckpts/pose_transfer.pth",
-            dtype="float16",
-        )
-        self.pt_inference = LeffaInference(model=pt_model)
-
-    def leffa_predict(
-        self,
-        src_image_path,
-        ref_image_path,
-        control_type,
-        ref_acceleration=False,
-        step=50,
-        scale=2.5,
-        seed=42,
-        vt_model_type="viton_hd",
-        vt_garment_type="upper_body",
-        vt_repaint=False
-    ):
-        assert control_type in [
-            "virtual_tryon", "pose_transfer"], "Invalid control type: {}".format(control_type)
-        src_image = Image.open(src_image_path)
-        ref_image = Image.open(ref_image_path)
-        src_image = resize_and_center(src_image, 768, 1024)
-        ref_image = resize_and_center(ref_image, 768, 1024)
-
-        src_image_array = np.array(src_image)
-
-        # Mask
-        if control_type == "virtual_tryon":
-            src_image = src_image.convert("RGB")
-            model_parse, _ = self.parsing(src_image.resize((384, 512)))
-            keypoints = self.openpose(src_image.resize((384, 512)))
-            if vt_model_type == "viton_hd":
-                mask = get_agnostic_mask_hd(
-                    model_parse, keypoints, vt_garment_type)
-            elif vt_model_type == "dress_code":
-                mask = get_agnostic_mask_dc(
-                    model_parse, keypoints, vt_garment_type)
-            mask = mask.resize((768, 1024))
-            # garment_type_hd = "upper" if vt_garment_type in [
-            #     "upper_body", "dresses"] else "lower"
-            # mask = self.mask_predictor(src_image, garment_type_hd)["mask"]
-        elif control_type == "pose_transfer":
-            mask = Image.fromarray(np.ones_like(src_image_array) * 255)
-
-        # DensePose
-        if control_type == "virtual_tryon":
-            if vt_model_type == "viton_hd":
-                src_image_seg_array = self.densepose_predictor.predict_seg(
-                    src_image_array)[:, :, ::-1]
-                src_image_seg = Image.fromarray(src_image_seg_array)
-                densepose = src_image_seg
-            elif vt_model_type == "dress_code":
-                src_image_iuv_array = self.densepose_predictor.predict_iuv(
-                    src_image_array)
-                src_image_seg_array = src_image_iuv_array[:, :, 0:1]
-                src_image_seg_array = np.concatenate(
-                    [src_image_seg_array] * 3, axis=-1)
-                src_image_seg = Image.fromarray(src_image_seg_array)
-                densepose = src_image_seg
-        elif control_type == "pose_transfer":
-            src_image_iuv_array = self.densepose_predictor.predict_iuv(
-                src_image_array)[:, :, ::-1]
-            src_image_iuv = Image.fromarray(src_image_iuv_array)
-            densepose = src_image_iuv
-
-        # Leffa
-        transform = LeffaTransform()
-
-        data = {
-            "src_image": [src_image],
-            "ref_image": [ref_image],
-            "mask": [mask],
-            "densepose": [densepose],
-        }
-        data = transform(data)
-        if control_type == "virtual_tryon":
-            if vt_model_type == "viton_hd":
-                inference = self.vt_inference_hd
-            elif vt_model_type == "dress_code":
-                inference = self.vt_inference_dc
-        elif control_type == "pose_transfer":
-            inference = self.pt_inference
-        output = inference(
-            data,
-            ref_acceleration=ref_acceleration,
-            num_inference_steps=step,
-            guidance_scale=scale,
-            seed=seed,
-            repaint=vt_repaint,)
-        gen_image = output["generated_image"][0]
-        # gen_image.save("gen_image.png")
-        return np.array(gen_image), np.array(mask), np.array(densepose)
-
-    def dehasoft(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint):
-        return self.leffa_predict(src_image_path, ref_image_path, "virtual_tryon", ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint)
-
-    def leffa_predict_pt(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed):
-        return self.leffa_predict(src_image_path, ref_image_path, "pose_transfer", ref_acceleration, step, scale, seed)
-
-
 if __name__ == "__main__":
-
     leffa_predictor = LeffaPredictor()
     example_dir = "./ckpts/examples"
     person1_images = list_dir(f"{example_dir}/person1")
     person2_images = list_dir(f"{example_dir}/person2")
     garment_images = list_dir(f"{example_dir}/garment")
 
-    title = "## Dehasoft"
-    link = """Dehasoft"""
-    news = """Dehasoft"""
-    description = "Dehasoft"
-    note = "Note: The models used in the demo are trained solely on academic datasets. Virtual try-on uses VITON-HD/DressCode, and pose transfer uses DeepFashion."
-
-    with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.pink, secondary_hue=gr.themes.colors.red)).queue() as demo:
-        gr.Markdown(title)
-        gr.Markdown(link)
-        gr.Markdown(news)
-        gr.Markdown(description)
-
-        with gr.Tab("Control Appearance (Virtual Try-on)"):
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("#### Person Image")
-                    vt_src_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=vt_src_image,
-                        examples_per_page=10,
-                        examples=person1_images,
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Garment Image")
-                    vt_ref_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Garment Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=vt_ref_image,
-                        examples_per_page=10,
-                        examples=garment_images,
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Generated Image")
-                    vt_gen_image = gr.Image(
-                        label="Generated Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    with gr.Row():
-                        vt_gen_button = gr.Button("Generate")
-
-                    with gr.Accordion("Advanced Options", open=False):
-                        vt_model_type = gr.Radio(
-                            label="Model Type",
-                            choices=[("VITON-HD (Recommended)", "viton_hd"),
-                                     ("DressCode (Experimental)", "dress_code")],
-                            value="viton_hd",
                         )
-
-                        vt_garment_type = gr.Radio(
-                            label="Garment Type",
-                            choices=[("Upper", "upper_body"),
-                                     ("Lower", "lower_body"),
-                                     ("Dress", "dresses")],
-                            value="upper_body",
                         )
 
-                        vt_ref_acceleration = gr.Radio(
-                            label="Accelerate Reference UNet (may slightly reduce performance)",
-                            choices=[("True", True), ("False", False)],
-                            value=False,
                         )
-
-                        vt_repaint = gr.Radio(
-                            label="Repaint Mode",
-                            choices=[("True", True), ("False", False)],
-                            value=False,
                         )
 
-                        vt_step = gr.Number(
-                            label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
-
-                        vt_scale = gr.Number(
-                            label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
-
-                        vt_seed = gr.Number(
-                            label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
-
-                    with gr.Accordion("Debug", open=False):
-                        vt_mask = gr.Image(
-                            label="Generated Mask",
-                            width=256,
-                            height=256,
                         )
-
-                        vt_densepose = gr.Image(
-                            label="Generated DensePose",
-                            width=256,
-                            height=256,
                         )
 
-            vt_gen_button.click(fn=leffa_predictor.dehasoft, inputs=[
-                vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step, vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint], outputs=[vt_gen_image, vt_mask, vt_densepose])
-
-        with gr.Tab("Control Pose (Pose Transfer)"):
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("#### Person Image")
-                    pt_ref_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=pt_ref_image,
-                        examples_per_page=10,
-                        examples=person1_images,
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Target Pose Person Image")
-                    pt_src_image = gr.Image(
-                        sources=["upload"],
-                        type="filepath",
-                        label="Target Pose Person Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    gr.Examples(
-                        inputs=pt_src_image,
-                        examples_per_page=10,
-                        examples=person2_images,
-                    )
-
-                with gr.Column():
-                    gr.Markdown("#### Generated Image")
-                    pt_gen_image = gr.Image(
-                        label="Generated Image",
-                        width=512,
-                        height=512,
-                    )
-
-                    with gr.Row():
-                        pose_transfer_gen_button = gr.Button("Generate")
-
-                    with gr.Accordion("Advanced Options", open=False):
-                        pt_ref_acceleration = gr.Radio(
-                            label="Accelerate Reference UNet",
-                            choices=[("True", True), ("False", False)],
-                            value=False,
                         )
-
-                        pt_step = gr.Number(
-                            label="Inference Steps", minimum=30, maximum=100, step=1, value=30)
-
-                        pt_scale = gr.Number(
-                            label="Guidance Scale", minimum=0.1, maximum=5.0, step=0.1, value=2.5)
-
-                        pt_seed = gr.Number(
-                            label="Random Seed", minimum=-1, maximum=2147483647, step=1, value=42)
-
-                    with gr.Accordion("Debug", open=False):
-                        pt_mask = gr.Image(
-                            label="Generated Mask",
-                            width=256,
-                            height=256,
                         )
 
-                        pt_densepose = gr.Image(
-                            label="Generated DensePose",
-                            width=256,
-                            height=256,
                         )
 
-            pose_transfer_gen_button.click(fn=leffa_predictor.leffa_predict_pt, inputs=[
-                pt_src_image, pt_ref_image, pt_ref_acceleration, pt_step, pt_scale, pt_seed], outputs=[pt_gen_image, pt_mask, pt_densepose])
-
-        gr.Markdown(note)
 
-    demo.launch(share=True, server_port=7860,
-                allowed_paths=["./ckpts/examples"])
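
The block deleted above is the entire inference pipeline, so it can be exercised without any UI. A minimal sketch of driving the removed LeffaPredictor directly, assuming the checkpoints have already been downloaded to ./ckpts; the two image paths are hypothetical placeholders, not files shipped with the commit. The rewritten file, shown next, replaces this block with a themed Tabs layout.

    predictor = LeffaPredictor()  # loads masking, DensePose, parsing, OpenPose and Leffa weights

    # Hypothetical paths; any person/garment images under ./ckpts/examples would do.
    gen_image, mask, densepose = predictor.leffa_predict(
        src_image_path="./ckpts/examples/person1/person.jpg",
        ref_image_path="./ckpts/examples/garment/garment.jpg",
        control_type="virtual_tryon",
        step=30,
        scale=2.5,
        seed=42,
    )
    Image.fromarray(gen_image).save("gen_image.png")  # all three return values are NumPy arrays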
 
 import gradio as gr
 
 if __name__ == "__main__":
     leffa_predictor = LeffaPredictor()
     example_dir = "./ckpts/examples"
     person1_images = list_dir(f"{example_dir}/person1")
     person2_images = list_dir(f"{example_dir}/person2")
     garment_images = list_dir(f"{example_dir}/garment")
 
+    # Custom theme
+    theme = gr.themes.Soft(
+        primary_hue="indigo",
+        secondary_hue="purple",
+        neutral_hue="gray",
+        radius_size="lg",
+        text_size="lg",
+        spacing_size="md",
+    ).set(
+        body_background_fill="#f5f5f5",
+        background_fill_primary="#ffffff",
+        button_primary_background_fill="#4f46e5",
+        button_primary_background_fill_hover="#6b7280",
+        button_primary_text_color="#ffffff",
+        shadow_drop="0 4px 6px rgba(0, 0, 0, 0.1)",
+    )
+
+    # Title and description
+    title = "# Dehasoft AI Studio"
+    description = """
+    Welcome to **Dehasoft AI Studio**! Transform appearances with virtual try-on or adjust poses with pose transfer using cutting-edge AI models.
+    Powered by VITON-HD, DressCode, and DeepFashion datasets.
+    """
+    footer_note = """
+    **Note:** Models are trained on academic datasets only. Virtual try-on leverages VITON-HD/DressCode, while pose transfer uses DeepFashion.
+    """
+
+    with gr.Blocks(theme=theme, title="Dehasoft AI Studio") as demo:
+        # Title and description
+        gr.Markdown(title, elem_classes=["title"])
+        gr.Markdown(description, elem_classes=["description"])
+
+        # Tabs
+        with gr.Tabs(elem_classes=["tabs"]):
+            # Virtual Try-On tab
+            with gr.TabItem("Virtual Try-On", elem_id="vt_tab"):
+                with gr.Row(equal_height=True):
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Upload Person Image", elem_classes=["section-title"])
+                        vt_src_image = gr.Image(
+                            sources=["upload"],
+                            type="filepath",
+                            label="Person Image",
+                            interactive=True,
+                            height=400,
+                            elem_classes=["image-upload"],
                         )
+                        gr.Examples(
+                            examples=person1_images,
+                            inputs=vt_src_image,
+                            examples_per_page=5,
+                            elem_classes=["examples"],
                         )
 
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Upload Garment Image", elem_classes=["section-title"])
+                        vt_ref_image = gr.Image(
+                            sources=["upload"],
+                            type="filepath",
+                            label="Garment Image",
+                            interactive=True,
+                            height=400,
+                            elem_classes=["image-upload"],
                         )
+                        gr.Examples(
+                            examples=garment_images,
+                            inputs=vt_ref_image,
+                            examples_per_page=5,
+                            elem_classes=["examples"],
                         )
 
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Result", elem_classes=["section-title"])
+                        vt_gen_image = gr.Image(
+                            label="Generated Image",
+                            height=400,
+                            elem_classes=["image-output"],
                         )
+                        vt_gen_button = gr.Button(
+                            "Generate Image",
+                            variant="primary",
+                            size="lg",
+                            elem_classes=["generate-btn"],
                         )
 
+                with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]):
+                    vt_model_type = gr.Radio(
+                        label="Model Type",
+                        choices=[("VITON-HD (Recommended)", "viton_hd"), ("DressCode (Experimental)", "dress_code")],
+                        value="viton_hd",
+                        elem_classes=["radio"],
+                    )
+                    vt_garment_type = gr.Radio(
+                        label="Garment Type",
+                        choices=[("Upper", "upper_body"), ("Lower", "lower_body"), ("Dress", "dresses")],
+                        value="upper_body",
+                        elem_classes=["radio"],
+                    )
+                    vt_ref_acceleration = gr.Checkbox(
+                        label="Accelerate Reference UNet",
+                        value=False,
+                        elem_classes=["checkbox"],
+                    )
+                    vt_repaint = gr.Checkbox(
+                        label="Repaint Mode",
+                        value=False,
+                        elem_classes=["checkbox"],
+                    )
+                    vt_step = gr.Slider(
+                        label="Inference Steps",
+                        minimum=30,
+                        maximum=100,
+                        step=1,
+                        value=30,
+                        elem_classes=["slider"],
+                    )
+                    vt_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=0.1,
+                        maximum=5.0,
+                        step=0.1,
+                        value=2.5,
+                        elem_classes=["slider"],
+                    )
+                    vt_seed = gr.Number(
+                        label="Random Seed",
+                        minimum=-1,
+                        maximum=2147483647,
+                        step=1,
+                        value=42,
+                        elem_classes=["number"],
+                    )
+
+                with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]):
+                    vt_mask = gr.Image(label="Generated Mask", height=200)
+                    vt_densepose = gr.Image(label="Generated DensePose", height=200)
+
+                vt_gen_button.click(
+                    fn=leffa_predictor.dehasoft,
+                    inputs=[vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step, vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint],
+                    outputs=[vt_gen_image, vt_mask, vt_densepose],
+                    js="() => { document.querySelector('.generate-btn').classList.add('loading'); setTimeout(() => document.querySelector('.generate-btn').classList.remove('loading'), 5000); }",
+                )
+
+            # Pose Transfer tab
+            with gr.TabItem("Pose Transfer", elem_id="pt_tab"):
+                with gr.Row(equal_height=True):
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Source Person Image", elem_classes=["section-title"])
+                        pt_ref_image = gr.Image(
+                            sources=["upload"],
+                            type="filepath",
+                            label="Person Image",
+                            interactive=True,
+                            height=400,
+                            elem_classes=["image-upload"],
                         )
+                        gr.Examples(
+                            examples=person1_images,
+                            inputs=pt_ref_image,
+                            examples_per_page=5,
+                            elem_classes=["examples"],
                         )
 
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Target Pose Image", elem_classes=["section-title"])
+                        pt_src_image = gr.Image(
+                            sources=["upload"],
+                            type="filepath",
+                            label="Target Pose Person Image",
+                            interactive=True,
+                            height=400,
+                            elem_classes=["image-upload"],
+                        )
+                        gr.Examples(
+                            examples=person2_images,
+                            inputs=pt_src_image,
+                            examples_per_page=5,
+                            elem_classes=["examples"],
                         )
 
+                    with gr.Column(scale=1):
+                        gr.Markdown("### Result", elem_classes=["section-title"])
+                        pt_gen_image = gr.Image(
+                            label="Generated Image",
+                            height=400,
+                            elem_classes=["image-output"],
+                        )
+                        pt_gen_button = gr.Button(
+                            "Generate Image",
+                            variant="primary",
+                            size="lg",
+                            elem_classes=["generate-btn"],
+                        )
 
+                with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]):
+                    pt_ref_acceleration = gr.Checkbox(
+                        label="Accelerate Reference UNet",
+                        value=False,
+                        elem_classes=["checkbox"],
+                    )
+                    pt_step = gr.Slider(
+                        label="Inference Steps",
+                        minimum=30,
+                        maximum=100,
+                        step=1,
+                        value=30,
+                        elem_classes=["slider"],
+                    )
+                    pt_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=0.1,
+                        maximum=5.0,
+                        step=0.1,
+                        value=2.5,
+                        elem_classes=["slider"],
+                    )
+                    pt_seed = gr.Number(
+                        label="Random Seed",
+                        minimum=-1,
+                        maximum=2147483647,
+                        step=1,
+                        value=42,
+                        elem_classes=["number"],
+                    )
+
+                with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]):
+                    pt_mask = gr.Image(label="Generated Mask", height=200)
+                    pt_densepose = gr.Image(label="Generated DensePose", height=200)
+
+                pt_gen_button.click(
+                    fn=leffa_predictor.leffa_predict_pt,
+                    inputs=[pt_src_image, pt_ref_image, pt_ref_acceleration, pt_step, pt_scale, pt_seed],
+                    outputs=[pt_gen_image, pt_mask, pt_densepose],
+                    js="() => { document.querySelector('.generate-btn').classList.add('loading'); setTimeout(() => document.querySelector('.generate-btn').classList.remove('loading'), 5000); }",
+                )
+
+        # Footer
+        gr.Markdown(footer_note, elem_classes=["footer"])
+
+    # Custom CSS
+    demo.css = """
+    .title { text-align: center; font-size: 2.5em; margin-bottom: 10px; color: #4f46e5; }
+    .description { text-align: center; font-size: 1.2em; margin-bottom: 20px; color: #374151; }
+    .section-title { font-size: 1.5em; color: #6b7280; margin-bottom: 10px; }
+    .image-upload, .image-output { border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); }
+    .generate-btn { transition: all 0.3s ease; }
+    .generate-btn:hover { transform: scale(1.05); }
+    .generate-btn.loading { opacity: 0.7; cursor: not-allowed; }
+    .accordion { background-color: #f9fafb; border-radius: 8px; }
+    .radio, .checkbox, .slider, .number { margin: 5px 0; }
+    .examples { margin-top: 10px; }
+    .footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: #6b7280; }
+    """
+
+    demo.launch(share=True, server_port=7860, allowed_paths=["./ckpts/examples"])
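
The new theming pattern can be smoke-tested in isolation before wiring the full app. A minimal sketch, assuming a Gradio 4.x install; the markdown text and the CSS rule here are made-up placeholders, not part of this commit:

    import gradio as gr

    # Same override style as the commit: a Soft preset plus per-variable overrides.
    theme = gr.themes.Soft(primary_hue="indigo", radius_size="lg").set(
        button_primary_background_fill="#4f46e5",
    )

    # css can also be passed to the constructor, which keeps the stylesheet
    # next to the layout it targets instead of assigning demo.css afterwards.
    with gr.Blocks(theme=theme, css=".title { text-align: center; }") as demo:
        gr.Markdown("# Theme smoke test", elem_classes=["title"])  # placeholder content

    if __name__ == "__main__":
        demo.launch()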