NikhilJoson committed
Commit 5f66807 · verified · 1 Parent(s): 8f0d478

Update app.py

Files changed (1)
  1. app.py +9 -11
app.py CHANGED
@@ -35,17 +35,17 @@ def predict_clothing(images):
     with torch.no_grad():
         output = model.generate(**inputs, max_new_tokens=32)

-    # .<|eot_id|>
-    output_reponse = str(processor.decode(output[0])).split('\n')[-1]
-    output_texts.append(output_reponse[:-11])
+
+    output_reponse = str(processor.decode(output[0])).split('\n')[-1]
+    output_texts.append(output_reponse[:-11]) # without .<|eot_id|>

     print(output_texts)
     return output_texts


-@spaces.GPU(duration=180)
+@spaces.GPU()
 def generate_image(category, img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model,
-                   use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=3):
+                   use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=2.6):

     print()
     input_images = [img1, img2, img3]
@@ -95,8 +95,7 @@ def get_example():
     ]
     return case

-def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed, max_input_image_size, randomize_seed,
-                     use_input_image_size_as_output, guidance_scale=3):
+def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed, max_input_image_size, randomize_seed, use_input_image_size_as_output,):
     # Check the internal configuration of the function
     inference_steps = 50
     separate_cfg_infer = True
@@ -110,17 +109,16 @@ def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed,
 description = """
 This is a Virtual Try-On Platform.

-Usage:
+### Usage:
 - First upload your own image as the first image, also tagged 'Person'
 - Then upload you 'Top-wear' and 'Bottom-wear' images
 - If its a single dress, and/or you don't have a Topwear and Bottomwear as separate images upload that single image under 'Topwear'

-Tips:
-- For image editing task and controlnet task, we recommend setting the height and width of output image as the same as input image. For example, if you want to edit a 512x512 image, you should set the height and width of output image as 512x512. You also can set the `use_input_image_size_as_output` to automatically set the height and width of output image as the same as input image.
+### Tips:
 - For out-of-memory or time cost, you can set `offload_model=True` or refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources) to select a appropriate setting.
 - If inference time is too long when inputting multiple images, please try to reduce the `max_input_image_size`. For more details please refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources).

-**HF Spaces often encounter errors due to quota limitations, so recommend to run it locally.**
+**Please note that HF Spaces often encounter errors due to GPU quota or other limitations, so please try lowering the image sizes and inference steps to manage the generation.**
 """

 Credits = """**Credits**
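
Note on the decoding change in the first hunk: the commit drops the trailing `.<|eot_id|>` marker by slicing a fixed 11 characters off the decoded string. Below is a minimal sketch (not the commit's code) of an alternative, assuming a standard `transformers` processor whose `decode` forwards `skip_special_tokens` to the tokenizer; the helper name is hypothetical.

```python
import torch

def decode_answer(model, processor, inputs, max_new_tokens=32):
    """Sketch only: decode one generation and drop special tokens such as
    <|eot_id|> instead of slicing a fixed number of characters."""
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # skip_special_tokens=True removes markers like <|eot_id|> during decoding;
    # keep only the last line, mirroring the split('\n')[-1] used in app.py.
    decoded = processor.decode(output[0], skip_special_tokens=True)
    return decoded.split("\n")[-1].strip()
```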
 
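The Tips in the description reference `offload_model`, `max_input_image_size`, and `use_input_image_size_as_output`, which this app forwards to the OmniGen pipeline. The sketch below shows how such a direct call might look, assuming the `OmniGenPipeline` interface described in the OmniGen repository's README and docs/inference.md; the prompt text, image paths, and values are placeholders, not part of this commit.

```python
from OmniGen import OmniGenPipeline

# Assumption: OmniGenPipeline exposes these arguments; see docs/inference.md
# in the OmniGen repo for the authoritative list.
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")

images = pipe(
    prompt="The person in <img><|image_1|></img> wearing the top in <img><|image_2|></img>",
    input_images=["person.jpg", "topwear.jpg"],  # placeholder paths
    height=1024,
    width=1024,
    guidance_scale=2.6,                 # matches the new default in this commit
    img_guidance_scale=1.6,
    max_input_image_size=1024,          # smaller values reduce multi-image latency
    use_input_image_size_as_output=False,
    offload_model=True,                 # trades speed for lower GPU memory
    seed=0,
)
images[0].save("tryon_result.png")
```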