RishabA committed on
Commit
b0aca51
·
verified ·
1 Parent(s): 9835792

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -14,7 +14,7 @@ from model import (
14
  autoencoder_params,
15
  train_params,
16
  )
17
- from huggingface_hub import hf_hub_download
18
  import json
19
 
20
 
@@ -55,11 +55,14 @@ vae.eval()
55
  print("Model and checkpoints loaded successfully!")
56
 
57
 
58
- def sample_ddpm_inference(text_prompt, mask_image_pil):
 
59
  """
60
  Given a text prompt and (optionally) an image condition (as a PIL image),
61
  sample from the diffusion model and return a generated image (PIL image).
62
  """
 
 
63
  guidance_scale = 1.0
64
 
65
  # Create noise scheduler
@@ -199,14 +202,13 @@ with gr.Blocks(css=css_str) as demo:
199
  lines=2,
200
  placeholder="E.g., 'He is a man with brown hair.'",
201
  )
202
- mask_input = gr.Image(type="pil", label="Optional Mask for Conditioning")
203
 
204
  generate_button = gr.Button("Generate Image")
205
  output_image = gr.Image(label="Generated Image", type="pil")
206
 
207
  generate_button.click(
208
  fn=sample_ddpm_inference,
209
- inputs=[text_input, mask_input],
210
  outputs=[output_image],
211
  )
212
 
 
14
  autoencoder_params,
15
  train_params,
16
  )
17
+ from huggingface_hub import hf_hub_download, spaces
18
  import json
19
 
20
 
 
55
  print("Model and checkpoints loaded successfully!")
56
 
57
 
58
+ @spaces.GPU
59
+ def sample_ddpm_inference(text_prompt):
60
  """
61
  Given a text prompt and (optionally) an image condition (as a PIL image),
62
  sample from the diffusion model and return a generated image (PIL image).
63
  """
64
+
65
+ mask_image_pil = None
66
  guidance_scale = 1.0
67
 
68
  # Create noise scheduler
 
202
  lines=2,
203
  placeholder="E.g., 'He is a man with brown hair.'",
204
  )
 
205
 
206
  generate_button = gr.Button("Generate Image")
207
  output_image = gr.Image(label="Generated Image", type="pil")
208
 
209
  generate_button.click(
210
  fn=sample_ddpm_inference,
211
+ inputs=[text_input],
212
  outputs=[output_image],
213
  )
214