Space: xinjie.wang · Running on Zero

Commit 5e0a3ac · 1 parent: 69f4bd5 · message: "update"

Files changed:
- app.py (+18 −27)
- common.py (+13 −21)
- requirements.txt (+0 −1)
app.py CHANGED

@@ -35,19 +35,13 @@ with gr.Blocks(
         with gr.Tab(
             label="Image(auto seg)", id=0
         ) as single_image_input_tab:
-            …
-                label="Input Image(raw)",
-                format="png",
-                image_mode="RGBA",
-                type="pil",
-                height=300,
-            )
+            raw_image_cache = gr.State()
             image_prompt = gr.Image(
                 label="Input Image",
                 format="png",
                 image_mode="RGBA",
                 type="pil",
-                …
+                height=300,
             )
             gr.Markdown(
                 """
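The hunk above swaps the hidden raw-image gr.Image buffer for a gr.State cache that later handlers read from. A minimal sketch of the pattern, assuming a two-output preprocess function like the app's preprocess_image_fn (the component names here are illustrative, not from the commit):

import gradio as gr

def preprocess(img):
    # stand-in for preprocess_image_fn: return the processed image for
    # display plus a copy to keep in the per-session cache
    return img, img

with gr.Blocks() as demo:
    cache = gr.State()  # session-scoped value, never rendered
    image = gr.Image(type="pil", image_mode="RGBA", height=300)
    # the second return value of preprocess lands in the State cache
    image.upload(preprocess, inputs=[image], outputs=[image, cache])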
@@ -59,9 +53,6 @@ with gr.Blocks(
         ) as samimage_input_tab:
             with gr.Row():
                 with gr.Column(scale=1):
-                    image_prompt_sam_raw = gr.Image(
-                        label="Input Image(raw)", type="numpy", visible=False,
-                    )
                     image_prompt_sam = gr.Image(
                         label="Input Image", type="numpy", height=400
                     )
@@ -193,14 +184,14 @@ with gr.Blocks(
             examples = gr.Examples(
                 label="Image Gallery",
                 examples=[
-                    [f"…
+                    [f"assets/example_image/{image}"]
                     for image in os.listdir(
-                        "…
+                        "assets/example_image"
                     )
                 ],
-                inputs=[…
+                inputs=[image_prompt],
                 fn=preprocess_image_fn,
-                outputs=[image_prompt],
+                outputs=[image_prompt, raw_image_cache],
                 run_on_click=True,
                 examples_per_page=10,
             )
@@ -209,14 +200,14 @@ with gr.Blocks(
             examples = gr.Examples(
                 label="Image Gallery",
                 examples=[
-                    f"…
+                    f"assets/example_image/{image}"
                     for image in os.listdir(
-                        "…
+                        "assets/example_image"
                     )
                 ],
-                inputs=[…
+                inputs=[image_prompt_sam],
                 fn=preprocess_sam_image_fn,
-                outputs=[image_prompt_sam],
+                outputs=[image_prompt_sam, raw_image_cache],
                 run_on_click=True,
                 examples_per_page=10,
             )
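Both example galleries get the same rewiring: with run_on_click=True, picking a gallery item runs fn on inputs and writes the results to outputs, so gallery selections fill raw_image_cache exactly as uploads do. A sketch of the wiring, with a placeholder example path (only inputs/fn/outputs/run_on_click mirror the commit):

examples = gr.Examples(
    examples=[["assets/example_image/sample.png"]],  # placeholder entry
    inputs=[image],          # component(s) passed to fn on click
    fn=preprocess,           # same two-output preprocess as sketched above
    outputs=[image, cache],  # second return value fills the State cache
    run_on_click=True,
    examples_per_page=10,
)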
@@ -272,10 +263,10 @@ with gr.Blocks(
         outputs=[is_samimage, single_sam_image_example, single_image_example],
     )
 
-    …
+    image_prompt.upload(
         preprocess_image_fn,
-        inputs=[…
-        outputs=[image_prompt],
+        inputs=[image_prompt],
+        outputs=[image_prompt, raw_image_cache],
     )
     image_prompt.change(
         lambda: tuple(
@@ -321,12 +312,11 @@ with gr.Blocks(
         outputs=generate_btn,
     )
 
-    …
+    image_prompt_sam.upload(
         preprocess_sam_image_fn,
-        inputs=[…
-        outputs=[image_prompt_sam],
+        inputs=[image_prompt_sam],
+        outputs=[image_prompt_sam, raw_image_cache],
     )
-
     image_prompt_sam.change(
         lambda: tuple(
             [
@@ -396,6 +386,7 @@ with gr.Blocks(
             ss_sampling_steps,
             slat_guidance_strength,
             slat_sampling_steps,
+            raw_image_cache,
             image_seg_sam,
             is_samimage,
         ],
@@ -448,4 +439,4 @@ with gr.Blocks(
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(server_name="10.34.8.82", server_port=8084)
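The new launch line binds the server to the literal LAN address 10.34.8.82:8084, which ties the app to one host and would likely not work on the hosted Space (Spaces expects the default launch or a 0.0.0.0 bind). A more portable variant, offered as an assumption rather than what the commit ships, reads the bind address from Gradio's standard environment variables:

import os

if __name__ == "__main__":
    demo.launch(
        server_name=os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
        server_port=int(os.getenv("GRADIO_SERVER_PORT", "8084")),
    )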
common.py CHANGED

@@ -127,20 +127,6 @@ elif os.getenv("GRADIO_APP") == "texture_edit":
 os.makedirs(TMP_DIR, exist_ok=True)
 
 
-def inject_image_css():
-    return gr.HTML(
-        """
-        <style>
-        .image-container img {
-            object-fit: contain !important;
-            max-width: 100% !important;
-            max-height: 100% !important;
-        }
-        </style>
-        """
-    )
-
-
 def start_session(req: gr.Request) -> None:
     user_dir = os.path.join(TMP_DIR, str(req.session_hash))
     os.makedirs(user_dir, exist_ok=True)
@@ -213,26 +199,27 @@ def preprocess_image_fn(
         image = Image.open(image)
     elif isinstance(image, np.ndarray):
         image = Image.fromarray(image)
+
+    image_cache = image.copy().resize((512, 512))
 
-    # image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
     image = RBG_REMOVER(image)
     image = trellis_preprocess(image)
 
-    return image
+    return image, image_cache
 
 
 @spaces.GPU
 def preprocess_sam_image_fn(
-    image: Image.Image,
+    image: Image.Image, req: gr.Request
 ) -> Image.Image:
     if isinstance(image, np.ndarray):
         image = Image.fromarray(image)
 
-    # image.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
     sam_image = SAM_PREDICTOR.preprocess_image(image)
+    image_cache = Image.fromarray(sam_image).resize((512, 512))
     SAM_PREDICTOR.predictor.set_image(sam_image)
 
-    return sam_image
+    return sam_image, image_cache
 
 
 def active_btn_by_content(content: gr.Image) -> gr.Button:
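preprocess_sam_image_fn now declares req: gr.Request, which Gradio injects automatically whenever a handler annotates a parameter with gr.Request; the app uses it to build per-session paths. A minimal sketch of that mechanism (the handler body is illustrative):

import gradio as gr

def handler(image, req: gr.Request):
    # Gradio fills `req` itself; session_hash identifies the browser
    # session, e.g. for per-session temp paths such as
    # f"{TMP_DIR}/{req.session_hash}/raw_image.png"
    print(req.session_hash)
    return image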
@@ -263,6 +250,10 @@ def get_selected_image(
         raise ValueError(f"Invalid choice: {choice}")
 
 
+def get_cached_image(image_path: str) -> Image.Image:
+    return Image.open(image_path).resize((512, 512))
+
+
 @spaces.GPU
 def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
     return {
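The new get_cached_image helper mirrors the (512, 512) resize used when the caches are built, presumably to reload a cached image from disk elsewhere in the app; a hypothetical call site (not shown in this diff) would look like:

raw = get_cached_image(f"{TMP_DIR}/{req.session_hash}/raw_image.png")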
@@ -353,6 +344,7 @@ def image_to_3d(
     ss_sampling_steps: int,
     slat_guidance_strength: float,
     slat_sampling_steps: int,
+    raw_image_cache: Image.Image,
     sam_image: Image.Image = None,
     is_sam_image: bool = False,
     req: gr.Request = None,
@@ -361,7 +353,6 @@
         seg_image = filter_image_small_connected_components(sam_image)
         seg_image = Image.fromarray(seg_image, mode="RGBA")
         seg_image = trellis_preprocess(seg_image)
-        # seg_image.save(f"{TMP_DIR}/seg_image_sam.png")
     else:
         seg_image = image
 
@@ -369,6 +360,7 @@
         seg_image = Image.fromarray(seg_image)
 
     seg_image.save(f"{TMP_DIR}/{req.session_hash}/seg_image.png")
+    raw_image_cache.save(f"{TMP_DIR}/{req.session_hash}/raw_image.png")
     PIPELINE.cuda()
     outputs = PIPELINE.run(
         seg_image,
@@ -650,7 +642,7 @@ def text2image_fn(
     if postprocess:
         for idx in range(len(images)):
             image = images[idx]
-            images[idx] = preprocess_image_fn(image)
+            images[idx] = preprocess_image_fn(image, req)
 
     save_paths = []
     for idx, image in enumerate(images):
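One caveat in the last hunk: preprocess_image_fn now returns an (image, image_cache) pair, so images[idx] = preprocess_image_fn(image, req) stores a tuple in the list. If text2image_fn only needs the processed image, the call would presumably unpack, e.g.:

images[idx], _ = preprocess_image_fn(image, req)  # discard the cache copy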
requirements.txt CHANGED

@@ -19,7 +19,6 @@ igraph==0.11.8
 pyvista==0.36.1
 openai==1.58.1
 transformers==4.42.4
-# gradio_litmodel3d==0.0.1
 gradio==5.12.0
 sentencepiece==0.2.0
 diffusers==0.31.0