lzyhha committed
Commit ac469eb · 1 Parent(s): 5263fbf
app.py CHANGED
@@ -312,7 +312,7 @@ def create_demo(model):
 
     def generate_image(*inputs):
         images = []
-        if grid_h != model.grid_h or grid_w != model.grid_w:
+        if grid_h + 1 != model.grid_h or grid_w != model.grid_w:
             raise gr.Error('Please wait for the loading to complete.')
         for i in range(model.grid_h):
             images.append([])
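
A minimal sketch (not part of the commit) of how this guard behaves, assuming `grid_h`/`grid_w` are the values the currently rendered UI was built for and `model.grid_h`/`model.grid_w` are updated asynchronously by a grid-resize handler; the dummy model and the off-by-one reading are assumptions inferred from this hunk alone:

import gradio as gr

class _Model:
    # Stand-in for the real model; in the app these attributes are
    # updated by a separate grid-resize handler while the UI reloads.
    grid_h, grid_w = 3, 4

model = _Model()
grid_h, grid_w = 2, 4  # grid size the currently rendered UI was built for

def generate_image(*inputs):
    images = []
    # After this commit, the UI grid is expected to be one row shorter
    # than the model grid, so the consistency check uses grid_h + 1.
    if grid_h + 1 != model.grid_h or grid_w != model.grid_w:
        raise gr.Error('Please wait for the loading to complete.')
    for i in range(model.grid_h):
        images.append([])
    return images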
demo_tasks/gradio_tasks.py CHANGED
@@ -131,7 +131,7 @@ def process_dense_prediction_tasks(x):
             mask = task.get('mask', [0 for _ in range(grid_w - 1)] + [1])
             layout_prompt = get_layout_instruction(grid_w, grid_h)
 
-            upsampling_noise = 0.7
+            upsampling_noise = 1.0
             steps = None
             outputs = [mask, grid_h, grid_w, layout_prompt, task_prompt, content_prompt, upsampling_noise, steps] + rets
             break
demo_tasks/gradio_tasks_unseen.py CHANGED
@@ -253,7 +253,7 @@ def process_unseen_tasks(x):
             mask = task.get('mask', [0 for _ in range(grid_w - 1)] + [1])
             layout_prompt = get_layout_instruction(grid_w, grid_h)
 
-            upsampling_noise = 0.7
+            upsampling_noise = 1.0
             steps = None
             outputs = [mask, grid_h, grid_w, layout_prompt, task_prompt, content_prompt, upsampling_noise, steps] + rets
             break
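
Both task files make the same edit, raising the preset default from 0.7 to 1.0. Read together with the visualcloze.py hunk below, a value of 1.0 appears to disable the upsampling pass for these presets entirely rather than merely increase its noise strength; the threshold comes from the diff, the interpretation is an assumption:

upsampling_noise = 1.0                       # new preset default
skips_refinement = upsampling_noise >= 1.0   # guard added in visualcloze.py
assert skips_refinement                      # resized image returned as-is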
visualcloze.py CHANGED
@@ -190,6 +190,8 @@ class VisualClozeModel:
         )
 
         image = image.resize(((target_size[0] // 16) * 16, (target_size[1] // 16) * 16))
+        if upsampling_noise >= 1.0:
+            return image
         processed_image = self.image_transform(image)
         processed_image = processed_image.to(self.device, non_blocking=True)
         blank = torch.zeros_like(processed_image, device=self.device, dtype=self.dtype)
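
A self-contained sketch (not part of the commit) of the early return added here, assuming the enclosing method resizes the input and then runs a diffusion-based refinement pass; the function name `upsample` and the elided tail are assumptions, only the resize and the guard are taken from the diff:

from PIL import Image

def upsample(image: Image.Image, target_size: tuple, upsampling_noise: float):
    # Snap the target resolution to a multiple of 16, as in the diff.
    image = image.resize(((target_size[0] // 16) * 16,
                          (target_size[1] // 16) * 16))
    # New guard: a noise level of 1.0 or more now means "no refinement",
    # so the plainly resized image is returned without running the model.
    if upsampling_noise >= 1.0:
        return image
    # ...otherwise the original path continues: transform to a tensor,
    # move it to the model device, and run the denoising pass.
    raise NotImplementedError('diffusion refinement elided in this sketch')

With upsampling_noise=1.0, the new demo default set above, calls return immediately after the resize.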