XuDongZhou committed on
Commit
40535be
·
verified ·
1 Parent(s): b9227fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -248,7 +248,8 @@ def generate_image(
248
  # prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
249
 
250
  face_image = load_image(face_image_path)
251
- face_image = resize_img(face_image, max_side=1024)
 
252
  face_image_cv2 = convert_from_image_to_cv2(face_image)
253
  height, width, _ = face_image_cv2.shape
254
 
@@ -270,7 +271,8 @@ def generate_image(
270
  img_controlnet = face_image
271
  if pose_image_path is not None:
272
  pose_image = load_image(pose_image_path)
273
- pose_image = resize_img(pose_image, max_side=1024)
 
274
  img_controlnet = pose_image
275
  pose_image_cv2 = convert_from_image_to_cv2(pose_image)
276
 
@@ -285,19 +287,20 @@ def generate_image(
285
  face_kps = draw_kps(pose_image, face_info["kps"])
286
 
287
  width, height = face_kps.size
288
-
289
  print("Start inference...")
290
  print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
291
 
292
- pipe.set_ip_adapter_scale(adapter_strength_ratio)
293
  images = pipe(
294
  prompt=prompt,
295
  negative_prompt=negative_prompt,
296
  image=face_kps,
297
  face_emb=face_emb,
298
- controlnet_conditioning_scale=controlnet_conditioning_scale,
 
299
  num_inference_steps=num_steps,
300
- guidance_scale=guidance_scale,
301
  height=height,
302
  width=width,
303
  generator=torch.Generator(device=device).manual_seed(seed),
@@ -319,6 +322,7 @@ tips = r"""
319
  css = """
320
  .gradio-container {width: 85% !important}
321
  """
 
322
  with gr.Blocks(css=css) as demo:
323
  # description
324
  gr.Markdown(title)
 
248
  # prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
249
 
250
  face_image = load_image(face_image_path)
251
+ # face_image = resize_img(face_image, max_side=1024)
252
+ face_image = resize_img(face_image)
253
  face_image_cv2 = convert_from_image_to_cv2(face_image)
254
  height, width, _ = face_image_cv2.shape
255
 
 
271
  img_controlnet = face_image
272
  if pose_image_path is not None:
273
  pose_image = load_image(pose_image_path)
274
+ # pose_image = resize_img(pose_image, max_side=1024)
275
+ pose_image = resize_img(pose_image)
276
  img_controlnet = pose_image
277
  pose_image_cv2 = convert_from_image_to_cv2(pose_image)
278
 
 
287
  face_kps = draw_kps(pose_image, face_info["kps"])
288
 
289
  width, height = face_kps.size
290
+ print(width, height)
291
  print("Start inference...")
292
  print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
293
 
294
+ # pipe.set_ip_adapter_scale(adapter_strength_ratio)
295
  images = pipe(
296
  prompt=prompt,
297
  negative_prompt=negative_prompt,
298
  image=face_kps,
299
  face_emb=face_emb,
300
+ controlnet_conditioning_scale=float(controlnet_conditioning_scale),
301
+ ip_adapter_scale=float(adapter_strength_ratio),
302
  num_inference_steps=num_steps,
303
+ guidance_scale=float(guidance_scale),
304
  height=height,
305
  width=width,
306
  generator=torch.Generator(device=device).manual_seed(seed),
 
322
  css = """
323
  .gradio-container {width: 85% !important}
324
  """
325
+
326
  with gr.Blocks(css=css) as demo:
327
  # description
328
  gr.Markdown(title)