Commit 0c52494 by lzyhha · 1 Parent(s): b924914

Files changed (2):
  1. app.py  +8 -1
  2. visualcloze.py  +0 -2
app.py CHANGED
@@ -1,4 +1,5 @@
 import argparse
+import spaces
 from visualcloze import VisualClozeModel
 import gradio as gr
 import demo_tasks
@@ -313,7 +314,8 @@ def create_demo(model):
                 images[i].append(inputs[i * max_grid_w + j])
             seed, cfg, steps, upsampling_steps, upsampling_noise, layout_text, task_text, content_text = inputs[-8:]
 
-            results = model.process_images(
+            results = generate(
+                model,
                 images,
                 [layout_text, task_text, content_text],
                 seed=seed, cfg=cfg, steps=steps,
@@ -458,6 +460,11 @@ def create_demo(model):
 
     return demo
 
+
+@spaces.GPU
+def generate(model, **args):
+    return model.process_images(args)
+
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--model_path", type=str, default="models/visualcloze-384-lora.pth")
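
This change routes inference through a module-level generate wrapper so that Hugging Face ZeroGPU (@spaces.GPU) attaches a GPU per call instead of decorating the model class (see visualcloze.py below). One caveat: the call site passes images and the prompt list positionally, which the committed def generate(model, **args) signature cannot bind, and model.process_images(args) would forward them as a single dict. The sketch below shows a forwarding signature that matches the call site; it is an assumption about the intended behavior, not part of the commit.

# Sketch only: forwards positional and keyword arguments as the call site expects;
# the committed version accepts **args only.
import spaces

@spaces.GPU
def generate(model, *args, **kwargs):
    # A ZeroGPU device is attached only for the duration of this call;
    # all arguments are passed through to the model unchanged.
    return model.process_images(*args, **kwargs)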
visualcloze.py CHANGED
@@ -1,6 +1,5 @@
 
 import random
-import spaces
 from einops import rearrange
 from diffusers.models import AutoencoderKL
 from PIL import Image
@@ -75,7 +74,6 @@ def resize_with_aspect_ratio(img, resolution, divisible=16, aspect_ratio=None):
     return img.resize((new_w, new_h), Image.LANCZOS)
 
 
-@spaces.GPU
 class VisualClozeModel:
     def __init__(
         self, model_path, model_name="flux-dev-fill-lora", max_length=512, lora_rank=256,
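
With the spaces import and the class-level @spaces.GPU decorator removed, visualcloze.py no longer depends on the ZeroGPU runtime; only app.py does. A minimal usage sketch under that assumption, reusing the default checkpoint path from parse_args() above:

# Sketch only: assumes nothing else in visualcloze.py touches the spaces API.
from visualcloze import VisualClozeModel

# Default checkpoint path taken from parse_args() in app.py.
model = VisualClozeModel(model_path="models/visualcloze-384-lora.pth")
# model.process_images(...) can now be called directly on a machine with its own GPU;
# inside the Space, app.py wraps this call in the @spaces.GPU-decorated generate().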