lzyhha committed · Commit 8e96e96 · 1 Parent(s): 32b1467
demo_tasks/gradio_tasks_relighting.py CHANGED
@@ -5,7 +5,7 @@ from PIL import Image
 
 
 task_instruction = "Each row shows a process to manipulate the illumination of images and changes the background following the instruction."
-content_instruction = "Beautiful woman, the illumination comes from left side of the image, "
+content_instruction = "In the last row, the illumination comes from left side of the image, with changed background and style as "
 relighting = [
     dict(
         name='sunset over sea',
@@ -232,7 +232,7 @@ def process_relighting_tasks(x):
         layout_prompt = get_layout_instruction(grid_w, grid_h)
 
         upsampling_noise = 0.6
-        steps = 50
+        steps = 30
         outputs = [mask, grid_h, grid_w, layout_prompt, task_prompt, content_prompt, upsampling_noise, steps] + rets
         break
 
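Review note: the new content_instruction ends with "as ", which suggests the demo appends each relighting entry's name (e.g. 'sunset over sea') to form the final content prompt; steps also drops from 50 to 30, presumably to make the hosted demo faster. A minimal sketch of that assumed composition (build_content_prompt is a hypothetical helper, not part of this diff):

# Hypothetical sketch of how the new prefix could compose with an entry's
# name; this helper is illustrative and not confirmed by the diff itself.
content_instruction = (
    "In the last row, the illumination comes from left side of the image, "
    "with changed background and style as "
)

def build_content_prompt(entry: dict) -> str:
    # e.g. "... with changed background and style as sunset over sea."
    return content_instruction + entry['name'] + "."

print(build_content_prompt(dict(name='sunset over sea')))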
 
demo_tasks/gradio_tasks_unseen.py CHANGED
@@ -99,21 +99,25 @@ dense_prediction_data = [
 unseen_tasks = [
     dict(
         name='Frontal Face Reconstruction',
-        images=[
-            'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_0.jpg',
-            'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_2.jpg',
-            'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_1.jpg',
-            'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_0.jpg',
-            'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_2.jpg',
-            'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_1.jpg',
-            'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_8.jpg',
-            'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_6.jpg',
-            'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_1.jpg',
-        ],
-        grid_h=3,
-        grid_w=3,
-        task_prompt="Each row presents multi-view of a face, given a frontal face reconstruction task that leverages [IMAGE1] a left side of the face and [IMAGE2] a right side of the face, to generate [IMAGE3] a frontal face that faces the center of the lens.",
-        content_prompt="The content of the last image in the final row is: the woman's frontal face that faces the center of the lens.",
+        examples=[
+            dict(
+                images=[
+                    'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_0.jpg',
+                    'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_2.jpg',
+                    'demo_tasks/examples/face/34e1633a-369f-4324-86c3-3e6418ec00be/face_1.jpg',
+                    'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_0.jpg',
+                    'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_2.jpg',
+                    'demo_tasks/examples/face/cb5d403a-f1bb-4392-8302-24846893a797/face_1.jpg',
+                    'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_8.jpg',
+                    'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_6.jpg',
+                    'demo_tasks/examples/face/2ef6aa5a-e751-4bf2-a302-0237ab460627/face_1.jpg',
+                ],
+                grid_h=3,
+                grid_w=3,
+                task_prompt="Each row presents multi-view of a face, given a frontal face reconstruction task that leverages [IMAGE1] a left side of the face and [IMAGE2] a right side of the face, to generate [IMAGE3] a frontal face that faces the center of the lens.",
+                content_prompt="The content of the last image in the final row is: the woman's frontal face that faces the center of the lens.",
+            )
+        ],
     ),
     dict(
         name='Image to Depth + Normal + Hed',
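Review note: the task's flat fields (images, grid_h, grid_w, prompts) move under a new examples list, so one unseen task can now carry several example grids. Any consumer that read the flat fields needs one extra level of iteration; a minimal sketch under that assumption (the stub data below is abbreviated, not the real paths):

# Hypothetical consumer of the restructured unseen_tasks: each task now
# nests example dicts under an 'examples' key instead of flat fields.
unseen_tasks = [
    dict(
        name='Frontal Face Reconstruction',
        examples=[
            dict(images=['face_0.jpg'] * 9, grid_h=3, grid_w=3,
                 task_prompt='...', content_prompt='...'),
        ],
    ),
]

for task in unseen_tasks:
    for example in task['examples']:
        # Every example must supply exactly grid_h * grid_w images.
        assert len(example['images']) == example['grid_h'] * example['grid_w']
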
visualcloze.py CHANGED
@@ -171,9 +171,9 @@ class VisualClozeModel:
         new_w = int(new_h * aspect_ratio)
         target_size = (new_w, new_h)
 
-        if target_size[0] * target_size[1] > 1600 * 1600:
+        if target_size[0] * target_size[1] > 1024 * 1024:
             aspect_ratio = target_size[0] / target_size[1]
-            target_area = 1600 * 1600
+            target_area = 1024 * 1024
             new_h = int((target_area / aspect_ratio) ** 0.5)
             new_w = int(new_h * aspect_ratio)
             target_size = (new_w, new_h)
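Review note: the cap on the generated grid drops from 1600x1600 to 1024x1024 pixels, cutting the worst-case pixel count by roughly 2.4x while preserving the aspect ratio. Isolated as a standalone helper (cap_area is my name for it, for illustration), the capped resize behaves like this:

# Standalone sketch of the area cap in this hunk: if width * height exceeds
# max_area, shrink both sides so the area lands just under max_area while
# keeping the aspect ratio (int() truncation drops a few pixels).
def cap_area(w: int, h: int, max_area: int = 1024 * 1024) -> tuple:
    if w * h > max_area:
        aspect_ratio = w / h
        h = int((max_area / aspect_ratio) ** 0.5)
        w = int(h * aspect_ratio)
    return w, h

print(cap_area(2048, 1536))  # -> (1181, 886); 1181 * 886 = 1046366 < 1024 * 1024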