MohamedRashad committed
Commit cce8702 · 1 Parent(s): 5b9fc85

Update generate_item_image function to reduce inference steps and image dimensions

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -82,15 +82,15 @@ def preprocess_pil_image(image: Image.Image) -> Tuple[str, Image.Image]:
     processed_image.save(f"{TMP_DIR}/{trial_id}.png")
     return trial_id, processed_image
 
-@spaces.GPU(duration=120)
+@spaces.GPU()
 def generate_item_image(object_t2i_prompt):
     trial_id = ""
     for image in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
         prompt=object_t2i_prompt,
         guidance_scale=3.5,
-        num_inference_steps=4,
-        width=1024,
-        height=1024,
+        num_inference_steps=1,
+        width=512,
+        height=512,
         generator=torch.Generator("cpu").manual_seed(0),
         output_type="pil",
     ):
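For context, a minimal sketch of how generate_item_image reads after this commit. Everything outside the loop body comes from the diff above; `pipe` is the FLUX pipeline created elsewhere in app.py (not shown here), and the yield inside the loop is an assumed placeholder for the streaming behavior, not code taken from the repository.

import spaces
import torch

@spaces.GPU()  # default ZeroGPU duration instead of the previous duration=120
def generate_item_image(object_t2i_prompt):
    trial_id = ""  # assigned inside the (elided) loop body in the original app.py
    # One denoising step at 512x512 instead of four steps at 1024x1024:
    # roughly 4x fewer steps and 4x fewer pixels, so on the order of 16x less GPU work per call.
    for image in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=object_t2i_prompt,
        guidance_scale=3.5,
        num_inference_steps=1,
        width=512,
        height=512,
        generator=torch.Generator("cpu").manual_seed(0),
        output_type="pil",
    ):
        # Assumed placeholder: stream each intermediate PIL image back to the caller/UI.
        yield image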