Upload 186 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +64 -0
- TRELLIS/.gitignore +398 -0
- TRELLIS/.gitmodules +3 -0
- TRELLIS/CODE_OF_CONDUCT.md +9 -0
- TRELLIS/DATASET.md +231 -0
- TRELLIS/DORA.png +0 -0
- TRELLIS/LICENSE +21 -0
- TRELLIS/README.md +230 -0
- TRELLIS/SECURITY.md +41 -0
- TRELLIS/SUPPORT.md +25 -0
- TRELLIS/app.py +403 -0
- TRELLIS/assets/example_image/T.png +3 -0
- TRELLIS/assets/example_image/typical_building_building.png +3 -0
- TRELLIS/assets/example_image/typical_building_castle.png +3 -0
- TRELLIS/assets/example_image/typical_building_colorful_cottage.png +3 -0
- TRELLIS/assets/example_image/typical_building_maya_pyramid.png +3 -0
- TRELLIS/assets/example_image/typical_building_mushroom.png +3 -0
- TRELLIS/assets/example_image/typical_building_space_station.png +3 -0
- TRELLIS/assets/example_image/typical_creature_dragon.png +3 -0
- TRELLIS/assets/example_image/typical_creature_elephant.png +3 -0
- TRELLIS/assets/example_image/typical_creature_furry.png +3 -0
- TRELLIS/assets/example_image/typical_creature_quadruped.png +3 -0
- TRELLIS/assets/example_image/typical_creature_robot_crab.png +3 -0
- TRELLIS/assets/example_image/typical_creature_robot_dinosour.png +3 -0
- TRELLIS/assets/example_image/typical_creature_rock_monster.png +3 -0
- TRELLIS/assets/example_image/typical_humanoid_block_robot.png +3 -0
- TRELLIS/assets/example_image/typical_humanoid_dragonborn.png +3 -0
- TRELLIS/assets/example_image/typical_humanoid_dwarf.png +3 -0
- TRELLIS/assets/example_image/typical_humanoid_goblin.png +3 -0
- TRELLIS/assets/example_image/typical_humanoid_mech.png +3 -0
- TRELLIS/assets/example_image/typical_misc_crate.png +3 -0
- TRELLIS/assets/example_image/typical_misc_fireplace.png +3 -0
- TRELLIS/assets/example_image/typical_misc_gate.png +3 -0
- TRELLIS/assets/example_image/typical_misc_lantern.png +3 -0
- TRELLIS/assets/example_image/typical_misc_magicbook.png +3 -0
- TRELLIS/assets/example_image/typical_misc_mailbox.png +3 -0
- TRELLIS/assets/example_image/typical_misc_monster_chest.png +3 -0
- TRELLIS/assets/example_image/typical_misc_paper_machine.png +3 -0
- TRELLIS/assets/example_image/typical_misc_phonograph.png +3 -0
- TRELLIS/assets/example_image/typical_misc_portal2.png +3 -0
- TRELLIS/assets/example_image/typical_misc_storage_chest.png +3 -0
- TRELLIS/assets/example_image/typical_misc_telephone.png +3 -0
- TRELLIS/assets/example_image/typical_misc_television.png +3 -0
- TRELLIS/assets/example_image/typical_misc_workbench.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_biplane.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_bulldozer.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_cart.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_excavator.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_helicopter.png +3 -0
- TRELLIS/assets/example_image/typical_vehicle_locomotive.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
TRELLIS/assets/example_image/T.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
TRELLIS/assets/example_image/typical_building_building.png filter=lfs diff=lfs merge=lfs -text
|
38 |
+
TRELLIS/assets/example_image/typical_building_castle.png filter=lfs diff=lfs merge=lfs -text
|
39 |
+
TRELLIS/assets/example_image/typical_building_colorful_cottage.png filter=lfs diff=lfs merge=lfs -text
|
40 |
+
TRELLIS/assets/example_image/typical_building_maya_pyramid.png filter=lfs diff=lfs merge=lfs -text
|
41 |
+
TRELLIS/assets/example_image/typical_building_mushroom.png filter=lfs diff=lfs merge=lfs -text
|
42 |
+
TRELLIS/assets/example_image/typical_building_space_station.png filter=lfs diff=lfs merge=lfs -text
|
43 |
+
TRELLIS/assets/example_image/typical_creature_dragon.png filter=lfs diff=lfs merge=lfs -text
|
44 |
+
TRELLIS/assets/example_image/typical_creature_elephant.png filter=lfs diff=lfs merge=lfs -text
|
45 |
+
TRELLIS/assets/example_image/typical_creature_furry.png filter=lfs diff=lfs merge=lfs -text
|
46 |
+
TRELLIS/assets/example_image/typical_creature_quadruped.png filter=lfs diff=lfs merge=lfs -text
|
47 |
+
TRELLIS/assets/example_image/typical_creature_robot_crab.png filter=lfs diff=lfs merge=lfs -text
|
48 |
+
TRELLIS/assets/example_image/typical_creature_robot_dinosour.png filter=lfs diff=lfs merge=lfs -text
|
49 |
+
TRELLIS/assets/example_image/typical_creature_rock_monster.png filter=lfs diff=lfs merge=lfs -text
|
50 |
+
TRELLIS/assets/example_image/typical_humanoid_block_robot.png filter=lfs diff=lfs merge=lfs -text
|
51 |
+
TRELLIS/assets/example_image/typical_humanoid_dragonborn.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
TRELLIS/assets/example_image/typical_humanoid_dwarf.png filter=lfs diff=lfs merge=lfs -text
|
53 |
+
TRELLIS/assets/example_image/typical_humanoid_goblin.png filter=lfs diff=lfs merge=lfs -text
|
54 |
+
TRELLIS/assets/example_image/typical_humanoid_mech.png filter=lfs diff=lfs merge=lfs -text
|
55 |
+
TRELLIS/assets/example_image/typical_misc_crate.png filter=lfs diff=lfs merge=lfs -text
|
56 |
+
TRELLIS/assets/example_image/typical_misc_fireplace.png filter=lfs diff=lfs merge=lfs -text
|
57 |
+
TRELLIS/assets/example_image/typical_misc_gate.png filter=lfs diff=lfs merge=lfs -text
|
58 |
+
TRELLIS/assets/example_image/typical_misc_lantern.png filter=lfs diff=lfs merge=lfs -text
|
59 |
+
TRELLIS/assets/example_image/typical_misc_magicbook.png filter=lfs diff=lfs merge=lfs -text
|
60 |
+
TRELLIS/assets/example_image/typical_misc_mailbox.png filter=lfs diff=lfs merge=lfs -text
|
61 |
+
TRELLIS/assets/example_image/typical_misc_monster_chest.png filter=lfs diff=lfs merge=lfs -text
|
62 |
+
TRELLIS/assets/example_image/typical_misc_paper_machine.png filter=lfs diff=lfs merge=lfs -text
|
63 |
+
TRELLIS/assets/example_image/typical_misc_phonograph.png filter=lfs diff=lfs merge=lfs -text
|
64 |
+
TRELLIS/assets/example_image/typical_misc_portal2.png filter=lfs diff=lfs merge=lfs -text
|
65 |
+
TRELLIS/assets/example_image/typical_misc_storage_chest.png filter=lfs diff=lfs merge=lfs -text
|
66 |
+
TRELLIS/assets/example_image/typical_misc_telephone.png filter=lfs diff=lfs merge=lfs -text
|
67 |
+
TRELLIS/assets/example_image/typical_misc_television.png filter=lfs diff=lfs merge=lfs -text
|
68 |
+
TRELLIS/assets/example_image/typical_misc_workbench.png filter=lfs diff=lfs merge=lfs -text
|
69 |
+
TRELLIS/assets/example_image/typical_vehicle_biplane.png filter=lfs diff=lfs merge=lfs -text
|
70 |
+
TRELLIS/assets/example_image/typical_vehicle_bulldozer.png filter=lfs diff=lfs merge=lfs -text
|
71 |
+
TRELLIS/assets/example_image/typical_vehicle_cart.png filter=lfs diff=lfs merge=lfs -text
|
72 |
+
TRELLIS/assets/example_image/typical_vehicle_excavator.png filter=lfs diff=lfs merge=lfs -text
|
73 |
+
TRELLIS/assets/example_image/typical_vehicle_helicopter.png filter=lfs diff=lfs merge=lfs -text
|
74 |
+
TRELLIS/assets/example_image/typical_vehicle_locomotive.png filter=lfs diff=lfs merge=lfs -text
|
75 |
+
TRELLIS/assets/example_image/typical_vehicle_pirate_ship.png filter=lfs diff=lfs merge=lfs -text
|
76 |
+
TRELLIS/assets/example_image/weatherworn_misc_paper_machine3.png filter=lfs diff=lfs merge=lfs -text
|
77 |
+
TRELLIS/assets/example_multi_image/character_1.png filter=lfs diff=lfs merge=lfs -text
|
78 |
+
TRELLIS/assets/example_multi_image/character_2.png filter=lfs diff=lfs merge=lfs -text
|
79 |
+
TRELLIS/assets/example_multi_image/character_3.png filter=lfs diff=lfs merge=lfs -text
|
80 |
+
TRELLIS/assets/example_multi_image/mushroom_1.png filter=lfs diff=lfs merge=lfs -text
|
81 |
+
TRELLIS/assets/example_multi_image/mushroom_2.png filter=lfs diff=lfs merge=lfs -text
|
82 |
+
TRELLIS/assets/example_multi_image/mushroom_3.png filter=lfs diff=lfs merge=lfs -text
|
83 |
+
TRELLIS/assets/example_multi_image/orangeguy_1.png filter=lfs diff=lfs merge=lfs -text
|
84 |
+
TRELLIS/assets/example_multi_image/orangeguy_2.png filter=lfs diff=lfs merge=lfs -text
|
85 |
+
TRELLIS/assets/example_multi_image/orangeguy_3.png filter=lfs diff=lfs merge=lfs -text
|
86 |
+
TRELLIS/assets/example_multi_image/popmart_1.png filter=lfs diff=lfs merge=lfs -text
|
87 |
+
TRELLIS/assets/example_multi_image/popmart_2.png filter=lfs diff=lfs merge=lfs -text
|
88 |
+
TRELLIS/assets/example_multi_image/popmart_3.png filter=lfs diff=lfs merge=lfs -text
|
89 |
+
TRELLIS/assets/example_multi_image/rabbit_1.png filter=lfs diff=lfs merge=lfs -text
|
90 |
+
TRELLIS/assets/example_multi_image/rabbit_2.png filter=lfs diff=lfs merge=lfs -text
|
91 |
+
TRELLIS/assets/example_multi_image/rabbit_3.png filter=lfs diff=lfs merge=lfs -text
|
92 |
+
TRELLIS/assets/example_multi_image/tiger_1.png filter=lfs diff=lfs merge=lfs -text
|
93 |
+
TRELLIS/assets/example_multi_image/tiger_2.png filter=lfs diff=lfs merge=lfs -text
|
94 |
+
TRELLIS/assets/example_multi_image/tiger_3.png filter=lfs diff=lfs merge=lfs -text
|
95 |
+
TRELLIS/assets/example_multi_image/yoimiya_1.png filter=lfs diff=lfs merge=lfs -text
|
96 |
+
TRELLIS/assets/example_multi_image/yoimiya_2.png filter=lfs diff=lfs merge=lfs -text
|
97 |
+
TRELLIS/assets/example_multi_image/yoimiya_3.png filter=lfs diff=lfs merge=lfs -text
|
98 |
+
TRELLIS/assets/logo.webp filter=lfs diff=lfs merge=lfs -text
|
99 |
+
TRELLIS/assets/teaser.png filter=lfs diff=lfs merge=lfs -text
|
TRELLIS/.gitignore
ADDED
@@ -0,0 +1,398 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
## Ignore Visual Studio temporary files, build results, and
|
2 |
+
## files generated by popular Visual Studio add-ons.
|
3 |
+
##
|
4 |
+
## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore
|
5 |
+
|
6 |
+
# User-specific files
|
7 |
+
*.rsuser
|
8 |
+
*.suo
|
9 |
+
*.user
|
10 |
+
*.userosscache
|
11 |
+
*.sln.docstates
|
12 |
+
|
13 |
+
# User-specific files (MonoDevelop/Xamarin Studio)
|
14 |
+
*.userprefs
|
15 |
+
|
16 |
+
# Mono auto generated files
|
17 |
+
mono_crash.*
|
18 |
+
|
19 |
+
# Build results
|
20 |
+
[Dd]ebug/
|
21 |
+
[Dd]ebugPublic/
|
22 |
+
[Rr]elease/
|
23 |
+
[Rr]eleases/
|
24 |
+
x64/
|
25 |
+
x86/
|
26 |
+
[Ww][Ii][Nn]32/
|
27 |
+
[Aa][Rr][Mm]/
|
28 |
+
[Aa][Rr][Mm]64/
|
29 |
+
bld/
|
30 |
+
[Bb]in/
|
31 |
+
[Oo]bj/
|
32 |
+
[Ll]og/
|
33 |
+
[Ll]ogs/
|
34 |
+
|
35 |
+
# Visual Studio 2015/2017 cache/options directory
|
36 |
+
.vs/
|
37 |
+
# Uncomment if you have tasks that create the project's static files in wwwroot
|
38 |
+
#wwwroot/
|
39 |
+
|
40 |
+
# Visual Studio 2017 auto generated files
|
41 |
+
Generated\ Files/
|
42 |
+
|
43 |
+
# MSTest test Results
|
44 |
+
[Tt]est[Rr]esult*/
|
45 |
+
[Bb]uild[Ll]og.*
|
46 |
+
|
47 |
+
# NUnit
|
48 |
+
*.VisualState.xml
|
49 |
+
TestResult.xml
|
50 |
+
nunit-*.xml
|
51 |
+
|
52 |
+
# Build Results of an ATL Project
|
53 |
+
[Dd]ebugPS/
|
54 |
+
[Rr]eleasePS/
|
55 |
+
dlldata.c
|
56 |
+
|
57 |
+
# Benchmark Results
|
58 |
+
BenchmarkDotNet.Artifacts/
|
59 |
+
|
60 |
+
# .NET Core
|
61 |
+
project.lock.json
|
62 |
+
project.fragment.lock.json
|
63 |
+
artifacts/
|
64 |
+
|
65 |
+
# ASP.NET Scaffolding
|
66 |
+
ScaffoldingReadMe.txt
|
67 |
+
|
68 |
+
# StyleCop
|
69 |
+
StyleCopReport.xml
|
70 |
+
|
71 |
+
# Files built by Visual Studio
|
72 |
+
*_i.c
|
73 |
+
*_p.c
|
74 |
+
*_h.h
|
75 |
+
*.ilk
|
76 |
+
*.meta
|
77 |
+
*.obj
|
78 |
+
*.iobj
|
79 |
+
*.pch
|
80 |
+
*.pdb
|
81 |
+
*.ipdb
|
82 |
+
*.pgc
|
83 |
+
*.pgd
|
84 |
+
*.rsp
|
85 |
+
*.sbr
|
86 |
+
*.tlb
|
87 |
+
*.tli
|
88 |
+
*.tlh
|
89 |
+
*.tmp
|
90 |
+
*.tmp_proj
|
91 |
+
*_wpftmp.csproj
|
92 |
+
*.log
|
93 |
+
*.tlog
|
94 |
+
*.vspscc
|
95 |
+
*.vssscc
|
96 |
+
.builds
|
97 |
+
*.pidb
|
98 |
+
*.svclog
|
99 |
+
*.scc
|
100 |
+
|
101 |
+
# Chutzpah Test files
|
102 |
+
_Chutzpah*
|
103 |
+
|
104 |
+
# Visual C++ cache files
|
105 |
+
ipch/
|
106 |
+
*.aps
|
107 |
+
*.ncb
|
108 |
+
*.opendb
|
109 |
+
*.opensdf
|
110 |
+
*.sdf
|
111 |
+
*.cachefile
|
112 |
+
*.VC.db
|
113 |
+
*.VC.VC.opendb
|
114 |
+
|
115 |
+
# Visual Studio profiler
|
116 |
+
*.psess
|
117 |
+
*.vsp
|
118 |
+
*.vspx
|
119 |
+
*.sap
|
120 |
+
|
121 |
+
# Visual Studio Trace Files
|
122 |
+
*.e2e
|
123 |
+
|
124 |
+
# TFS 2012 Local Workspace
|
125 |
+
$tf/
|
126 |
+
|
127 |
+
# Guidance Automation Toolkit
|
128 |
+
*.gpState
|
129 |
+
|
130 |
+
# ReSharper is a .NET coding add-in
|
131 |
+
_ReSharper*/
|
132 |
+
*.[Rr]e[Ss]harper
|
133 |
+
*.DotSettings.user
|
134 |
+
|
135 |
+
# TeamCity is a build add-in
|
136 |
+
_TeamCity*
|
137 |
+
|
138 |
+
# DotCover is a Code Coverage Tool
|
139 |
+
*.dotCover
|
140 |
+
|
141 |
+
# AxoCover is a Code Coverage Tool
|
142 |
+
.axoCover/*
|
143 |
+
!.axoCover/settings.json
|
144 |
+
|
145 |
+
# Coverlet is a free, cross platform Code Coverage Tool
|
146 |
+
coverage*.json
|
147 |
+
coverage*.xml
|
148 |
+
coverage*.info
|
149 |
+
|
150 |
+
# Visual Studio code coverage results
|
151 |
+
*.coverage
|
152 |
+
*.coveragexml
|
153 |
+
|
154 |
+
# NCrunch
|
155 |
+
_NCrunch_*
|
156 |
+
.*crunch*.local.xml
|
157 |
+
nCrunchTemp_*
|
158 |
+
|
159 |
+
# MightyMoose
|
160 |
+
*.mm.*
|
161 |
+
AutoTest.Net/
|
162 |
+
|
163 |
+
# Web workbench (sass)
|
164 |
+
.sass-cache/
|
165 |
+
|
166 |
+
# Installshield output folder
|
167 |
+
[Ee]xpress/
|
168 |
+
|
169 |
+
# DocProject is a documentation generator add-in
|
170 |
+
DocProject/buildhelp/
|
171 |
+
DocProject/Help/*.HxT
|
172 |
+
DocProject/Help/*.HxC
|
173 |
+
DocProject/Help/*.hhc
|
174 |
+
DocProject/Help/*.hhk
|
175 |
+
DocProject/Help/*.hhp
|
176 |
+
DocProject/Help/Html2
|
177 |
+
DocProject/Help/html
|
178 |
+
|
179 |
+
# Click-Once directory
|
180 |
+
publish/
|
181 |
+
|
182 |
+
# Publish Web Output
|
183 |
+
*.[Pp]ublish.xml
|
184 |
+
*.azurePubxml
|
185 |
+
# Note: Comment the next line if you want to checkin your web deploy settings,
|
186 |
+
# but database connection strings (with potential passwords) will be unencrypted
|
187 |
+
*.pubxml
|
188 |
+
*.publishproj
|
189 |
+
|
190 |
+
# Microsoft Azure Web App publish settings. Comment the next line if you want to
|
191 |
+
# checkin your Azure Web App publish settings, but sensitive information contained
|
192 |
+
# in these scripts will be unencrypted
|
193 |
+
PublishScripts/
|
194 |
+
|
195 |
+
# NuGet Packages
|
196 |
+
*.nupkg
|
197 |
+
# NuGet Symbol Packages
|
198 |
+
*.snupkg
|
199 |
+
# The packages folder can be ignored because of Package Restore
|
200 |
+
**/[Pp]ackages/*
|
201 |
+
# except build/, which is used as an MSBuild target.
|
202 |
+
!**/[Pp]ackages/build/
|
203 |
+
# Uncomment if necessary however generally it will be regenerated when needed
|
204 |
+
#!**/[Pp]ackages/repositories.config
|
205 |
+
# NuGet v3's project.json files produces more ignorable files
|
206 |
+
*.nuget.props
|
207 |
+
*.nuget.targets
|
208 |
+
|
209 |
+
# Microsoft Azure Build Output
|
210 |
+
csx/
|
211 |
+
*.build.csdef
|
212 |
+
|
213 |
+
# Microsoft Azure Emulator
|
214 |
+
ecf/
|
215 |
+
rcf/
|
216 |
+
|
217 |
+
# Windows Store app package directories and files
|
218 |
+
AppPackages/
|
219 |
+
BundleArtifacts/
|
220 |
+
Package.StoreAssociation.xml
|
221 |
+
_pkginfo.txt
|
222 |
+
*.appx
|
223 |
+
*.appxbundle
|
224 |
+
*.appxupload
|
225 |
+
|
226 |
+
# Visual Studio cache files
|
227 |
+
# files ending in .cache can be ignored
|
228 |
+
*.[Cc]ache
|
229 |
+
# but keep track of directories ending in .cache
|
230 |
+
!?*.[Cc]ache/
|
231 |
+
|
232 |
+
# Others
|
233 |
+
ClientBin/
|
234 |
+
~$*
|
235 |
+
*~
|
236 |
+
*.dbmdl
|
237 |
+
*.dbproj.schemaview
|
238 |
+
*.jfm
|
239 |
+
*.pfx
|
240 |
+
*.publishsettings
|
241 |
+
orleans.codegen.cs
|
242 |
+
|
243 |
+
# Including strong name files can present a security risk
|
244 |
+
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
|
245 |
+
#*.snk
|
246 |
+
|
247 |
+
# Since there are multiple workflows, uncomment next line to ignore bower_components
|
248 |
+
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
|
249 |
+
#bower_components/
|
250 |
+
|
251 |
+
# RIA/Silverlight projects
|
252 |
+
Generated_Code/
|
253 |
+
|
254 |
+
# Backup & report files from converting an old project file
|
255 |
+
# to a newer Visual Studio version. Backup files are not needed,
|
256 |
+
# because we have git ;-)
|
257 |
+
_UpgradeReport_Files/
|
258 |
+
Backup*/
|
259 |
+
UpgradeLog*.XML
|
260 |
+
UpgradeLog*.htm
|
261 |
+
ServiceFabricBackup/
|
262 |
+
*.rptproj.bak
|
263 |
+
|
264 |
+
# SQL Server files
|
265 |
+
*.mdf
|
266 |
+
*.ldf
|
267 |
+
*.ndf
|
268 |
+
|
269 |
+
# Business Intelligence projects
|
270 |
+
*.rdl.data
|
271 |
+
*.bim.layout
|
272 |
+
*.bim_*.settings
|
273 |
+
*.rptproj.rsuser
|
274 |
+
*- [Bb]ackup.rdl
|
275 |
+
*- [Bb]ackup ([0-9]).rdl
|
276 |
+
*- [Bb]ackup ([0-9][0-9]).rdl
|
277 |
+
|
278 |
+
# Microsoft Fakes
|
279 |
+
FakesAssemblies/
|
280 |
+
|
281 |
+
# GhostDoc plugin setting file
|
282 |
+
*.GhostDoc.xml
|
283 |
+
|
284 |
+
# Node.js Tools for Visual Studio
|
285 |
+
.ntvs_analysis.dat
|
286 |
+
node_modules/
|
287 |
+
|
288 |
+
# Visual Studio 6 build log
|
289 |
+
*.plg
|
290 |
+
|
291 |
+
# Visual Studio 6 workspace options file
|
292 |
+
*.opt
|
293 |
+
|
294 |
+
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
|
295 |
+
*.vbw
|
296 |
+
|
297 |
+
# Visual Studio 6 auto-generated project file (contains which files were open etc.)
|
298 |
+
*.vbp
|
299 |
+
|
300 |
+
# Visual Studio 6 workspace and project file (working project files containing files to include in project)
|
301 |
+
*.dsw
|
302 |
+
*.dsp
|
303 |
+
|
304 |
+
# Visual Studio 6 technical files
|
305 |
+
*.ncb
|
306 |
+
*.aps
|
307 |
+
|
308 |
+
# Visual Studio LightSwitch build output
|
309 |
+
**/*.HTMLClient/GeneratedArtifacts
|
310 |
+
**/*.DesktopClient/GeneratedArtifacts
|
311 |
+
**/*.DesktopClient/ModelManifest.xml
|
312 |
+
**/*.Server/GeneratedArtifacts
|
313 |
+
**/*.Server/ModelManifest.xml
|
314 |
+
_Pvt_Extensions
|
315 |
+
|
316 |
+
# Paket dependency manager
|
317 |
+
.paket/paket.exe
|
318 |
+
paket-files/
|
319 |
+
|
320 |
+
# FAKE - F# Make
|
321 |
+
.fake/
|
322 |
+
|
323 |
+
# CodeRush personal settings
|
324 |
+
.cr/personal
|
325 |
+
|
326 |
+
# Python Tools for Visual Studio (PTVS)
|
327 |
+
__pycache__/
|
328 |
+
*.pyc
|
329 |
+
|
330 |
+
# Cake - Uncomment if you are using it
|
331 |
+
# tools/**
|
332 |
+
# !tools/packages.config
|
333 |
+
|
334 |
+
# Tabs Studio
|
335 |
+
*.tss
|
336 |
+
|
337 |
+
# Telerik's JustMock configuration file
|
338 |
+
*.jmconfig
|
339 |
+
|
340 |
+
# BizTalk build output
|
341 |
+
*.btp.cs
|
342 |
+
*.btm.cs
|
343 |
+
*.odx.cs
|
344 |
+
*.xsd.cs
|
345 |
+
|
346 |
+
# OpenCover UI analysis results
|
347 |
+
OpenCover/
|
348 |
+
|
349 |
+
# Azure Stream Analytics local run output
|
350 |
+
ASALocalRun/
|
351 |
+
|
352 |
+
# MSBuild Binary and Structured Log
|
353 |
+
*.binlog
|
354 |
+
|
355 |
+
# NVidia Nsight GPU debugger configuration file
|
356 |
+
*.nvuser
|
357 |
+
|
358 |
+
# MFractors (Xamarin productivity tool) working folder
|
359 |
+
.mfractor/
|
360 |
+
|
361 |
+
# Local History for Visual Studio
|
362 |
+
.localhistory/
|
363 |
+
|
364 |
+
# Visual Studio History (VSHistory) files
|
365 |
+
.vshistory/
|
366 |
+
|
367 |
+
# BeatPulse healthcheck temp database
|
368 |
+
healthchecksdb
|
369 |
+
|
370 |
+
# Backup folder for Package Reference Convert tool in Visual Studio 2017
|
371 |
+
MigrationBackup/
|
372 |
+
|
373 |
+
# Ionide (cross platform F# VS Code tools) working folder
|
374 |
+
.ionide/
|
375 |
+
|
376 |
+
# Fody - auto-generated XML schema
|
377 |
+
FodyWeavers.xsd
|
378 |
+
|
379 |
+
# VS Code files for those working on multiple tools
|
380 |
+
.vscode/*
|
381 |
+
!.vscode/settings.json
|
382 |
+
!.vscode/tasks.json
|
383 |
+
!.vscode/launch.json
|
384 |
+
!.vscode/extensions.json
|
385 |
+
*.code-workspace
|
386 |
+
|
387 |
+
# Local History for Visual Studio Code
|
388 |
+
.history/
|
389 |
+
|
390 |
+
# Windows Installer files from build outputs
|
391 |
+
*.cab
|
392 |
+
*.msi
|
393 |
+
*.msix
|
394 |
+
*.msm
|
395 |
+
*.msp
|
396 |
+
|
397 |
+
# JetBrains Rider
|
398 |
+
*.sln.iml
|
TRELLIS/.gitmodules
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
[submodule "trellis/representations/mesh/flexicubes"]
|
2 |
+
path = trellis/representations/mesh/flexicubes
|
3 |
+
url = https://github.com/MaxtirError/FlexiCubes.git
|
TRELLIS/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Microsoft Open Source Code of Conduct
|
2 |
+
|
3 |
+
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
4 |
+
|
5 |
+
Resources:
|
6 |
+
|
7 |
+
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
|
8 |
+
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
9 |
+
- Contact [[email protected]](mailto:[email protected]) with questions or concerns
|
TRELLIS/DATASET.md
ADDED
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# TRELLIS-500K
|
2 |
+
|
3 |
+
TRELLIS-500K is a dataset of 500K 3D assets curated from [Objaverse(XL)](https://objaverse.allenai.org/), [ABO](https://amazon-berkeley-objects.s3.amazonaws.com/index.html), [3D-FUTURE](https://tianchi.aliyun.com/specials/promotion/alibaba-3d-future), [HSSD](https://huggingface.co/datasets/hssd/hssd-models), and [Toys4k](https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k), filtered based on aesthetic scores.
|
4 |
+
This dataset serves for 3D generation tasks.
|
5 |
+
|
6 |
+
The dataset is provided as csv files containing the 3D assets' metadata.
|
7 |
+
|
8 |
+
## Dataset Statistics
|
9 |
+
|
10 |
+
The following table summarizes the dataset's filtering and composition:
|
11 |
+
|
12 |
+
***NOTE: Some of the 3D assets lack text captions. Please filter out such assets if captions are required.***
|
13 |
+
| Source | Aesthetic Score Threshold | Filtered Size | With Captions |
|
14 |
+
|:-:|:-:|:-:|:-:|
|
15 |
+
| ObjaverseXL (sketchfab) | 5.5 | 168307 | 167638 |
|
16 |
+
| ObjaverseXL (github) | 5.5 | 311843 | 306790 |
|
17 |
+
| ABO | 4.5 | 4485 | 4390 |
|
18 |
+
| 3D-FUTURE | 4.5 | 9472 | 9291 |
|
19 |
+
| HSSD | 4.5 | 6670 | 6661 |
|
20 |
+
| All (training set) | - | 500777 | 494770 |
|
21 |
+
| Toys4k (evaluation set) | 4.5 | 3229 | 3180 |
|
22 |
+
|
23 |
+
## Dataset Location
|
24 |
+
|
25 |
+
The dataset is hosted on Hugging Face Datasets. You can preview the dataset at
|
26 |
+
|
27 |
+
[https://huggingface.co/datasets/JeffreyXiang/TRELLIS-500K](https://huggingface.co/datasets/JeffreyXiang/TRELLIS-500K)
|
28 |
+
|
29 |
+
There is no need to download the csv files manually. We provide toolkits to load and prepare the dataset.
|
30 |
+
|
31 |
+
## Dataset Toolkits
|
32 |
+
|
33 |
+
We provide [toolkits](dataset_toolkits) for data preparation.
|
34 |
+
|
35 |
+
### Step 1: Install Dependencies
|
36 |
+
|
37 |
+
```
|
38 |
+
. ./dataset_toolkits/setup.sh
|
39 |
+
```
|
40 |
+
|
41 |
+
### Step 2: Load Metadata
|
42 |
+
|
43 |
+
First, we need to load the metadata of the dataset.
|
44 |
+
|
45 |
+
```
|
46 |
+
python dataset_toolkits/build_metadata.py <SUBSET> --output_dir <OUTPUT_DIR> [--source <SOURCE>]
|
47 |
+
```
|
48 |
+
|
49 |
+
- `SUBSET`: The subset of the dataset to load. Options are `ObjaverseXL`, `ABO`, `3D-FUTURE`, `HSSD`, and `Toys4k`.
|
50 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
51 |
+
- `SOURCE`: Required if `SUBSET` is `ObjaverseXL`. Options are `sketchfab` and `github`.
|
52 |
+
|
53 |
+
For example, to load the metadata of the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
54 |
+
|
55 |
+
```
|
56 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --source sketchfab --output_dir datasets/ObjaverseXL_sketchfab
|
57 |
+
```
|
58 |
+
|
59 |
+
### Step 3: Download Data
|
60 |
+
|
61 |
+
Next, we need to download the 3D assets.
|
62 |
+
|
63 |
+
```
|
64 |
+
python dataset_toolkits/download.py <SUBSET> --output_dir <OUTPUT_DIR> [--rank <RANK> --world_size <WORLD_SIZE>]
|
65 |
+
```
|
66 |
+
|
67 |
+
- `SUBSET`: The subset of the dataset to download. Options are `ObjaverseXL`, `ABO`, `3D-FUTURE`, `HSSD`, and `Toys4k`.
|
68 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
69 |
+
|
70 |
+
You can also specify the `RANK` and `WORLD_SIZE` of the current process if you are using multiple nodes for data preparation.
|
71 |
+
|
72 |
+
For example, to download the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
73 |
+
|
74 |
+
***NOTE: The example command below sets a large `WORLD_SIZE` for demonstration purposes. Only a small portion of the dataset will be downloaded.***
|
75 |
+
|
76 |
+
```
|
77 |
+
python dataset_toolkits/download.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab --world_size 160000
|
78 |
+
```
|
79 |
+
|
80 |
+
Some datasets may require interactive login to Hugging Face or manual downloading. Please follow the instructions given by the toolkits.
|
81 |
+
|
82 |
+
After downloading, update the metadata file with:
|
83 |
+
|
84 |
+
```
|
85 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
86 |
+
```
|
87 |
+
|
88 |
+
### Step 4: Render Multiview Images
|
89 |
+
|
90 |
+
Multiview images can be rendered with:
|
91 |
+
|
92 |
+
```
|
93 |
+
python dataset_toolkits/render.py <SUBSET> --output_dir <OUTPUT_DIR> [--num_views <NUM_VIEWS>] [--rank <RANK> --world_size <WORLD_SIZE>]
|
94 |
+
```
|
95 |
+
|
96 |
+
- `SUBSET`: The subset of the dataset to render. Options are `ObjaverseXL`, `ABO`, `3D-FUTURE`, `HSSD`, and `Toys4k`.
|
97 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
98 |
+
- `NUM_VIEWS`: The number of views to render. Default is 150.
|
99 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
100 |
+
|
101 |
+
For example, to render the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
102 |
+
|
103 |
+
```
|
104 |
+
python dataset_toolkits/render.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
105 |
+
```
|
106 |
+
|
107 |
+
Don't forget to update the metadata file with:
|
108 |
+
|
109 |
+
```
|
110 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
111 |
+
```
|
112 |
+
|
113 |
+
### Step 5: Voxelize 3D Models
|
114 |
+
|
115 |
+
We can voxelize the 3D models with:
|
116 |
+
|
117 |
+
```
|
118 |
+
python dataset_toolkits/voxelize.py <SUBSET> --output_dir <OUTPUT_DIR> [--rank <RANK> --world_size <WORLD_SIZE>]
|
119 |
+
```
|
120 |
+
|
121 |
+
- `SUBSET`: The subset of the dataset to voxelize. Options are `ObjaverseXL`, `ABO`, `3D-FUTURE`, `HSSD`, and `Toys4k`.
|
122 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
123 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
124 |
+
|
125 |
+
For example, to voxelize the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
126 |
+
```
|
127 |
+
python dataset_toolkits/voxelize.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
128 |
+
```
|
129 |
+
|
130 |
+
Then update the metadata file with:
|
131 |
+
|
132 |
+
```
|
133 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
134 |
+
```
|
135 |
+
|
136 |
+
### Step 6: Extract DINO Features
|
137 |
+
|
138 |
+
To prepare the training data for SLat VAE, we need to extract DINO features from multiview images and aggregate them into sparse voxel grids.
|
139 |
+
|
140 |
+
```
|
141 |
+
python dataset_toolkits/extract_features.py --output_dir <OUTPUT_DIR> [--rank <RANK> --world_size <WORLD_SIZE>]
|
142 |
+
```
|
143 |
+
|
144 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
145 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
146 |
+
|
147 |
+
|
148 |
+
For example, to extract DINO features from the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
149 |
+
|
150 |
+
```
|
151 |
+
python dataset_toolkits/extract_feature.py --output_dir datasets/ObjaverseXL_sketchfab
|
152 |
+
```
|
153 |
+
|
154 |
+
Then update the metadata file with:
|
155 |
+
|
156 |
+
```
|
157 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
158 |
+
```
|
159 |
+
|
160 |
+
### Step 7: Encode Sparse Structures
|
161 |
+
|
162 |
+
Encoding the sparse structures into latents to train the first stage generator:
|
163 |
+
|
164 |
+
```
|
165 |
+
python dataset_toolkits/encode_ss_latent.py --output_dir <OUTPUT_DIR> [--rank <RANK> --world_size <WORLD_SIZE>]
|
166 |
+
```
|
167 |
+
|
168 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
169 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
170 |
+
|
171 |
+
For example, to encode the sparse structures into latents for the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
172 |
+
|
173 |
+
```
|
174 |
+
python dataset_toolkits/encode_ss_latent.py --output_dir datasets/ObjaverseXL_sketchfab
|
175 |
+
```
|
176 |
+
|
177 |
+
Then update the metadata file with:
|
178 |
+
|
179 |
+
```
|
180 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
181 |
+
```
|
182 |
+
|
183 |
+
### Step 8: Encode SLat
|
184 |
+
|
185 |
+
Encoding SLat for second stage generator training:
|
186 |
+
|
187 |
+
```
|
188 |
+
python dataset_toolkits/encode_latent.py --output_dir <OUTPUT_DIR> [--rank <RANK> --world_size <WORLD_SIZE>]
|
189 |
+
```
|
190 |
+
|
191 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
192 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
193 |
+
|
194 |
+
For example, to encode SLat for the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
195 |
+
|
196 |
+
```
|
197 |
+
python dataset_toolkits/encode_latent.py --output_dir datasets/ObjaverseXL_sketchfab
|
198 |
+
```
|
199 |
+
|
200 |
+
Then update the metadata file with:
|
201 |
+
|
202 |
+
```
|
203 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
204 |
+
```
|
205 |
+
|
206 |
+
### Step 9: Render Image Conditions
|
207 |
+
|
208 |
+
To train the image conditioned generator, we need to render image conditions with augmented views.
|
209 |
+
|
210 |
+
```
|
211 |
+
python dataset_toolkits/render_cond.py <SUBSET> --output_dir <OUTPUT_DIR> [--num_views <NUM_VIEWS>] [--rank <RANK> --world_size <WORLD_SIZE>]
|
212 |
+
```
|
213 |
+
|
214 |
+
- `SUBSET`: The subset of the dataset to render. Options are `ObjaverseXL`, `ABO`, `3D-FUTURE`, `HSSD`, and `Toys4k`.
|
215 |
+
- `OUTPUT_DIR`: The directory to save the data.
|
216 |
+
- `NUM_VIEWS`: The number of views to render. Default is 24.
|
217 |
+
- `RANK` and `WORLD_SIZE`: Multi-node configuration.
|
218 |
+
|
219 |
+
For example, to render image conditions for the ObjaverseXL (sketchfab) subset and save it to `datasets/ObjaverseXL_sketchfab`, we can run:
|
220 |
+
|
221 |
+
```
|
222 |
+
python dataset_toolkits/render_cond.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
223 |
+
```
|
224 |
+
|
225 |
+
Then update the metadata file with:
|
226 |
+
|
227 |
+
```
|
228 |
+
python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL_sketchfab
|
229 |
+
```
|
230 |
+
|
231 |
+
|
TRELLIS/DORA.png
ADDED
![]() |
TRELLIS/LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) Microsoft Corporation.
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE
|
TRELLIS/README.md
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<img src="assets/logo.webp" width="100%" align="center">
|
2 |
+
<h1 align="center">Structured 3D Latents<br>for Scalable and Versatile 3D Generation</h1>
|
3 |
+
<p align="center"><a href="https://arxiv.org/abs/2412.01506"><img src='https://img.shields.io/badge/arXiv-Paper-red?logo=arxiv&logoColor=white' alt='arXiv'></a>
|
4 |
+
<a href='https://trellis3d.github.io'><img src='https://img.shields.io/badge/Project_Page-Website-green?logo=googlechrome&logoColor=white' alt='Project Page'></a>
|
5 |
+
<a href='https://huggingface.co/spaces/JeffreyXiang/TRELLIS'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Live_Demo-blue'></a>
|
6 |
+
</p>
|
7 |
+
<p align="center"><img src="assets/teaser.png" width="100%"></p>
|
8 |
+
|
9 |
+
<span style="font-size: 16px; font-weight: 600;">T</span><span style="font-size: 12px; font-weight: 700;">RELLIS</span> is a large 3D asset generation model. It takes in text or image prompts and generates high-quality 3D assets in various formats, such as Radiance Fields, 3D Gaussians, and meshes. The cornerstone of <span style="font-size: 16px; font-weight: 600;">T</span><span style="font-size: 12px; font-weight: 700;">RELLIS</span> is a unified Structured LATent (<span style="font-size: 16px; font-weight: 600;">SL</span><span style="font-size: 12px; font-weight: 700;">AT</span>) representation that allows decoding to different output formats and Rectified Flow Transformers tailored for <span style="font-size: 16px; font-weight: 600;">SL</span><span style="font-size: 12px; font-weight: 700;">AT</span> as the powerful backbones. We provide large-scale pre-trained models with up to 2 billion parameters on a large 3D asset dataset of 500K diverse objects. <span style="font-size: 16px; font-weight: 600;">T</span><span style="font-size: 12px; font-weight: 700;">RELLIS</span> significantly surpasses existing methods, including recent ones at similar scales, and showcases flexible output format selection and local 3D editing capabilities which were not offered by previous models.
|
10 |
+
|
11 |
+
***Check out our [Project Page](https://trellis3d.github.io) for more videos and interactive demos!***
|
12 |
+
|
13 |
+
<!-- Features -->
|
14 |
+
## 🌟 Features
|
15 |
+
- **High Quality**: It produces diverse 3D assets at high quality with intricate shape and texture details.
|
16 |
+
- **Versatility**: It takes text or image prompts and can generate various final 3D representations including but not limited to *Radiance Fields*, *3D Gaussians*, and *meshes*, accommodating diverse downstream requirements.
|
17 |
+
- **Flexible Editing**: It allows for easy editings of generated 3D assets, such as generating variants of the same object or local editing of the 3D asset.
|
18 |
+
|
19 |
+
<!-- Updates -->
|
20 |
+
## ⏩ Updates
|
21 |
+
|
22 |
+
**12/26/2024**
|
23 |
+
- Release [**TRELLIS-500K**](https://github.com/microsoft/TRELLIS#-dataset) dataset and toolkits for data preparation.
|
24 |
+
|
25 |
+
**12/18/2024**
|
26 |
+
- Implementation of multi-image conditioning for TRELLIS-image model. ([#7](https://github.com/microsoft/TRELLIS/issues/7)). This is based on tuning-free algorithm without training a specialized model, so it may not give the best results for all input images.
|
27 |
+
- Add Gaussian export in `app.py` and `example.py`. ([#40](https://github.com/microsoft/TRELLIS/issues/40))
|
28 |
+
|
29 |
+
<!-- TODO List -->
|
30 |
+
## 🚧 TODO List
|
31 |
+
- [x] Release inference code and TRELLIS-image-large model
|
32 |
+
- [x] Release dataset and dataset toolkits
|
33 |
+
- [ ] Release TRELLIS-text model series
|
34 |
+
- [ ] Release training code
|
35 |
+
|
36 |
+
<!-- Installation -->
|
37 |
+
## 📦 Installation
|
38 |
+
|
39 |
+
### Prerequisites
|
40 |
+
- **System**: The code is currently tested only on **Linux**. For windows setup, you may refer to [#3](https://github.com/microsoft/TRELLIS/issues/3) (not fully tested).
|
41 |
+
- **Hardware**: An NVIDIA GPU with at least 16GB of memory is necessary. The code has been verified on NVIDIA A100 and A6000 GPUs.
|
42 |
+
- **Software**:
|
43 |
+
- The [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive) is needed to compile certain submodules. The code has been tested with CUDA versions 11.8 and 12.2.
|
44 |
+
- [Conda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install) is recommended for managing dependencies.
|
45 |
+
- Python version 3.8 or higher is required.
|
46 |
+
|
47 |
+
### Installation Steps
|
48 |
+
1. Clone the repo:
|
49 |
+
```sh
|
50 |
+
git clone --recurse-submodules https://github.com/microsoft/TRELLIS.git
|
51 |
+
cd TRELLIS
|
52 |
+
```
|
53 |
+
|
54 |
+
2. Install the dependencies:
|
55 |
+
|
56 |
+
**Before running the following command there are somethings to note:**
|
57 |
+
- By adding `--new-env`, a new conda environment named `trellis` will be created. If you want to use an existing conda environment, please remove this flag.
|
58 |
+
- By default the `trellis` environment will use pytorch 2.4.0 with CUDA 11.8. If you want to use a different version of CUDA (e.g., if you have CUDA Toolkit 12.2 installed and do not want to install another 11.8 version for submodule compilation), you can remove the `--new-env` flag and manually install the required dependencies. Refer to [PyTorch](https://pytorch.org/get-started/previous-versions/) for the installation command.
|
59 |
+
- If you have multiple CUDA Toolkit versions installed, `PATH` should be set to the correct version before running the command. For example, if you have CUDA Toolkit 11.8 and 12.2 installed, you should run `export PATH=/usr/local/cuda-11.8/bin:$PATH` before running the command.
|
60 |
+
- By default, the code uses the `flash-attn` backend for attention. For GPUs do not support `flash-attn` (e.g., NVIDIA V100), you can remove the `--flash-attn` flag to install `xformers` only and set the `ATTN_BACKEND` environment variable to `xformers` before running the code. See the [Minimal Example](#minimal-example) for more details.
|
61 |
+
- The installation may take a while due to the large number of dependencies. Please be patient. If you encounter any issues, you can try to install the dependencies one by one, specifying one flag at a time.
|
62 |
+
- If you encounter any issues during the installation, feel free to open an issue or contact us.
|
63 |
+
|
64 |
+
Create a new conda environment named `trellis` and install the dependencies:
|
65 |
+
```sh
|
66 |
+
. ./setup.sh --new-env --basic --xformers --flash-attn --diffoctreerast --spconv --mipgaussian --kaolin --nvdiffrast
|
67 |
+
```
|
68 |
+
The detailed usage of `setup.sh` can be found by running `. ./setup.sh --help`.
|
69 |
+
```sh
|
70 |
+
Usage: setup.sh [OPTIONS]
|
71 |
+
Options:
|
72 |
+
-h, --help Display this help message
|
73 |
+
--new-env Create a new conda environment
|
74 |
+
--basic Install basic dependencies
|
75 |
+
--xformers Install xformers
|
76 |
+
--flash-attn Install flash-attn
|
77 |
+
--diffoctreerast Install diffoctreerast
|
78 |
+
--vox2seq Install vox2seq
|
79 |
+
--spconv Install spconv
|
80 |
+
--mipgaussian Install mip-splatting
|
81 |
+
--kaolin Install kaolin
|
82 |
+
--nvdiffrast Install nvdiffrast
|
83 |
+
--demo Install all dependencies for demo
|
84 |
+
```
|
85 |
+
|
86 |
+
<!-- Pretrained Models -->
|
87 |
+
## 🤖 Pretrained Models
|
88 |
+
|
89 |
+
We provide the following pretrained models:
|
90 |
+
|
91 |
+
| Model | Description | #Params | Download |
|
92 |
+
| --- | --- | --- | --- |
|
93 |
+
| TRELLIS-image-large | Large image-to-3D model | 1.2B | [Download](https://huggingface.co/JeffreyXiang/TRELLIS-image-large) |
|
94 |
+
| TRELLIS-text-base | Base text-to-3D model | 342M | Coming Soon |
|
95 |
+
| TRELLIS-text-large | Large text-to-3D model | 1.1B | Coming Soon |
|
96 |
+
| TRELLIS-text-xlarge | Extra-large text-to-3D model | 2.0B | Coming Soon |
|
97 |
+
|
98 |
+
The models are hosted on Hugging Face. You can directly load the models with their repository names in the code:
|
99 |
+
```python
|
100 |
+
TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
|
101 |
+
```
|
102 |
+
|
103 |
+
If you prefer loading the model from local, you can download the model files from the links above and load the model with the folder path (folder structure should be maintained):
|
104 |
+
```python
|
105 |
+
TrellisImageTo3DPipeline.from_pretrained("/path/to/TRELLIS-image-large")
|
106 |
+
```
|
107 |
+
|
108 |
+
<!-- Usage -->
|
109 |
+
## 💡 Usage
|
110 |
+
|
111 |
+
### Minimal Example
|
112 |
+
|
113 |
+
Here is an [example](example.py) of how to use the pretrained models for 3D asset generation.
|
114 |
+
|
115 |
+
```python
|
116 |
+
import os
|
117 |
+
# os.environ['ATTN_BACKEND'] = 'xformers' # Can be 'flash-attn' or 'xformers', default is 'flash-attn'
|
118 |
+
os.environ['SPCONV_ALGO'] = 'native' # Can be 'native' or 'auto', default is 'auto'.
|
119 |
+
# 'auto' is faster but will do benchmarking at the beginning.
|
120 |
+
# Recommended to set to 'native' if run only once.
|
121 |
+
|
122 |
+
import imageio
|
123 |
+
from PIL import Image
|
124 |
+
from trellis.pipelines import TrellisImageTo3DPipeline
|
125 |
+
from trellis.utils import render_utils, postprocessing_utils
|
126 |
+
|
127 |
+
# Load a pipeline from a model folder or a Hugging Face model hub.
|
128 |
+
pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
|
129 |
+
pipeline.cuda()
|
130 |
+
|
131 |
+
# Load an image
|
132 |
+
image = Image.open("assets/example_image/T.png")
|
133 |
+
|
134 |
+
# Run the pipeline
|
135 |
+
outputs = pipeline.run(
|
136 |
+
image,
|
137 |
+
seed=1,
|
138 |
+
# Optional parameters
|
139 |
+
# sparse_structure_sampler_params={
|
140 |
+
# "steps": 12,
|
141 |
+
# "cfg_strength": 7.5,
|
142 |
+
# },
|
143 |
+
# slat_sampler_params={
|
144 |
+
# "steps": 12,
|
145 |
+
# "cfg_strength": 3,
|
146 |
+
# },
|
147 |
+
)
|
148 |
+
# outputs is a dictionary containing generated 3D assets in different formats:
|
149 |
+
# - outputs['gaussian']: a list of 3D Gaussians
|
150 |
+
# - outputs['radiance_field']: a list of radiance fields
|
151 |
+
# - outputs['mesh']: a list of meshes
|
152 |
+
|
153 |
+
# Render the outputs
|
154 |
+
video = render_utils.render_video(outputs['gaussian'][0])['color']
|
155 |
+
imageio.mimsave("sample_gs.mp4", video, fps=30)
|
156 |
+
video = render_utils.render_video(outputs['radiance_field'][0])['color']
|
157 |
+
imageio.mimsave("sample_rf.mp4", video, fps=30)
|
158 |
+
video = render_utils.render_video(outputs['mesh'][0])['normal']
|
159 |
+
imageio.mimsave("sample_mesh.mp4", video, fps=30)
|
160 |
+
|
161 |
+
# GLB files can be extracted from the outputs
|
162 |
+
glb = postprocessing_utils.to_glb(
|
163 |
+
outputs['gaussian'][0],
|
164 |
+
outputs['mesh'][0],
|
165 |
+
# Optional parameters
|
166 |
+
simplify=0.95, # Ratio of triangles to remove in the simplification process
|
167 |
+
texture_size=1024, # Size of the texture used for the GLB
|
168 |
+
)
|
169 |
+
glb.export("sample.glb")
|
170 |
+
|
171 |
+
# Save Gaussians as PLY files
|
172 |
+
outputs['gaussian'][0].save_ply("sample.ply")
|
173 |
+
```
|
174 |
+
|
175 |
+
After running the code, you will get the following files:
|
176 |
+
- `sample_gs.mp4`: a video showing the 3D Gaussian representation
|
177 |
+
- `sample_rf.mp4`: a video showing the Radiance Field representation
|
178 |
+
- `sample_mesh.mp4`: a video showing the mesh representation
|
179 |
+
- `sample.glb`: a GLB file containing the extracted textured mesh
|
180 |
+
- `sample.ply`: a PLY file containing the 3D Gaussian representation
|
181 |
+
|
182 |
+
|
183 |
+
### Web Demo
|
184 |
+
|
185 |
+
[app.py](app.py) provides a simple web demo for 3D asset generation. Since this demo is based on [Gradio](https://gradio.app/), additional dependencies are required:
|
186 |
+
```sh
|
187 |
+
. ./setup.sh --demo
|
188 |
+
```
|
189 |
+
|
190 |
+
After installing the dependencies, you can run the demo with the following command:
|
191 |
+
```sh
|
192 |
+
python app.py
|
193 |
+
```
|
194 |
+
|
195 |
+
Then, you can access the demo at the address shown in the terminal.
|
196 |
+
|
197 |
+
***The web demo is also available on [Hugging Face Spaces](https://huggingface.co/spaces/JeffreyXiang/TRELLIS)!***
|
198 |
+
|
199 |
+
|
200 |
+
<!-- Dataset -->
|
201 |
+
## 📚 Dataset
|
202 |
+
|
203 |
+
We provide **TRELLIS-500K**, a large-scale dataset containing 500K 3D assets curated from [Objaverse(XL)](https://objaverse.allenai.org/), [ABO](https://amazon-berkeley-objects.s3.amazonaws.com/index.html), [3D-FUTURE](https://tianchi.aliyun.com/specials/promotion/alibaba-3d-future), [HSSD](https://huggingface.co/datasets/hssd/hssd-models), and [Toys4k](https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k), filtered based on aesthetic scores. Please refer to the [dataset README](DATASET.md) for more details.
|
204 |
+
|
205 |
+
<!-- License -->
|
206 |
+
## ⚖️ License
|
207 |
+
|
208 |
+
TRELLIS models and the majority of the code are licensed under the [MIT License](LICENSE). The following submodules may have different licenses:
|
209 |
+
- [**diffoctreerast**](https://github.com/JeffreyXiang/diffoctreerast): We developed a CUDA-based real-time differentiable octree renderer for rendering radiance fields as part of this project. This renderer is derived from the [diff-gaussian-rasterization](https://github.com/graphdeco-inria/diff-gaussian-rasterization) project and is available under the [LICENSE](https://github.com/JeffreyXiang/diffoctreerast/blob/master/LICENSE).
|
210 |
+
|
211 |
+
|
212 |
+
- [**Modified Flexicubes**](https://github.com/MaxtirError/FlexiCubes): In this project, we used a modified version of [Flexicubes](https://github.com/nv-tlabs/FlexiCubes) to support vertex attributes. This modified version is licensed under the [LICENSE](https://github.com/nv-tlabs/FlexiCubes/blob/main/LICENSE.txt).
|
213 |
+
|
214 |
+
|
215 |
+
|
216 |
+
|
217 |
+
<!-- Citation -->
|
218 |
+
## 📜 Citation
|
219 |
+
|
220 |
+
If you find this work helpful, please consider citing our paper:
|
221 |
+
|
222 |
+
```bibtex
|
223 |
+
@article{xiang2024structured,
|
224 |
+
title = {Structured 3D Latents for Scalable and Versatile 3D Generation},
|
225 |
+
author = {Xiang, Jianfeng and Lv, Zelong and Xu, Sicheng and Deng, Yu and Wang, Ruicheng and Zhang, Bowen and Chen, Dong and Tong, Xin and Yang, Jiaolong},
|
226 |
+
journal = {arXiv preprint arXiv:2412.01506},
|
227 |
+
year = {2024}
|
228 |
+
}
|
229 |
+
```
|
230 |
+
|
TRELLIS/SECURITY.md
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.9 BLOCK -->
|
2 |
+
|
3 |
+
## Security
|
4 |
+
|
5 |
+
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).
|
6 |
+
|
7 |
+
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.
|
8 |
+
|
9 |
+
## Reporting Security Issues
|
10 |
+
|
11 |
+
**Please do not report security vulnerabilities through public GitHub issues.**
|
12 |
+
|
13 |
+
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).
|
14 |
+
|
15 |
+
If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).
|
16 |
+
|
17 |
+
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
|
18 |
+
|
19 |
+
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
20 |
+
|
21 |
+
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
22 |
+
* Full paths of source file(s) related to the manifestation of the issue
|
23 |
+
* The location of the affected source code (tag/branch/commit or direct URL)
|
24 |
+
* Any special configuration required to reproduce the issue
|
25 |
+
* Step-by-step instructions to reproduce the issue
|
26 |
+
* Proof-of-concept or exploit code (if possible)
|
27 |
+
* Impact of the issue, including how an attacker might exploit the issue
|
28 |
+
|
29 |
+
This information will help us triage your report more quickly.
|
30 |
+
|
31 |
+
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.
|
32 |
+
|
33 |
+
## Preferred Languages
|
34 |
+
|
35 |
+
We prefer all communications to be in English.
|
36 |
+
|
37 |
+
## Policy
|
38 |
+
|
39 |
+
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).
|
40 |
+
|
41 |
+
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
TRELLIS/SUPPORT.md
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# TODO: The maintainer of this repo has not yet edited this file
|
2 |
+
|
3 |
+
**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
|
4 |
+
|
5 |
+
- **No CSS support:** Fill out this template with information about how to file issues and get help.
|
6 |
+
- **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps.
|
7 |
+
- **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide.
|
8 |
+
|
9 |
+
*Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
|
10 |
+
|
11 |
+
# Support
|
12 |
+
|
13 |
+
## How to file issues and get help
|
14 |
+
|
15 |
+
This project uses GitHub Issues to track bugs and feature requests. Please search the existing
|
16 |
+
issues before filing new issues to avoid duplicates. For new issues, file your bug or
|
17 |
+
feature request as a new Issue.
|
18 |
+
|
19 |
+
For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
|
20 |
+
FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
|
21 |
+
CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
|
22 |
+
|
23 |
+
## Microsoft Support Policy
|
24 |
+
|
25 |
+
Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
|
TRELLIS/app.py
ADDED
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio_litmodel3d import LitModel3D
|
3 |
+
|
4 |
+
import os
|
5 |
+
import shutil
|
6 |
+
from typing import *
|
7 |
+
import torch
|
8 |
+
import numpy as np
|
9 |
+
import imageio
|
10 |
+
from easydict import EasyDict as edict
|
11 |
+
from PIL import Image
|
12 |
+
from trellis.pipelines import TrellisImageTo3DPipeline
|
13 |
+
from trellis.representations import Gaussian, MeshExtractResult
|
14 |
+
from trellis.utils import render_utils, postprocessing_utils
|
15 |
+
|
16 |
+
|
17 |
+
MAX_SEED = np.iinfo(np.int32).max
|
18 |
+
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
|
19 |
+
os.makedirs(TMP_DIR, exist_ok=True)
|
20 |
+
|
21 |
+
|
22 |
+
def start_session(req: gr.Request):
|
23 |
+
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
|
24 |
+
os.makedirs(user_dir, exist_ok=True)
|
25 |
+
|
26 |
+
|
27 |
+
def end_session(req: gr.Request):
|
28 |
+
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
|
29 |
+
shutil.rmtree(user_dir)
|
30 |
+
|
31 |
+
|
32 |
+
def preprocess_image(image: Image.Image) -> Image.Image:
|
33 |
+
"""
|
34 |
+
Preprocess the input image.
|
35 |
+
|
36 |
+
Args:
|
37 |
+
image (Image.Image): The input image.
|
38 |
+
|
39 |
+
Returns:
|
40 |
+
Image.Image: The preprocessed image.
|
41 |
+
"""
|
42 |
+
processed_image = pipeline.preprocess_image(image)
|
43 |
+
return processed_image
|
44 |
+
|
45 |
+
|
46 |
+
def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
|
47 |
+
"""
|
48 |
+
Preprocess a list of input images.
|
49 |
+
|
50 |
+
Args:
|
51 |
+
images (List[Tuple[Image.Image, str]]): The input images.
|
52 |
+
|
53 |
+
Returns:
|
54 |
+
List[Image.Image]: The preprocessed images.
|
55 |
+
"""
|
56 |
+
images = [image[0] for image in images]
|
57 |
+
processed_images = [pipeline.preprocess_image(image) for image in images]
|
58 |
+
return processed_images
|
59 |
+
|
60 |
+
|
61 |
+
def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
|
62 |
+
return {
|
63 |
+
'gaussian': {
|
64 |
+
**gs.init_params,
|
65 |
+
'_xyz': gs._xyz.cpu().numpy(),
|
66 |
+
'_features_dc': gs._features_dc.cpu().numpy(),
|
67 |
+
'_scaling': gs._scaling.cpu().numpy(),
|
68 |
+
'_rotation': gs._rotation.cpu().numpy(),
|
69 |
+
'_opacity': gs._opacity.cpu().numpy(),
|
70 |
+
},
|
71 |
+
'mesh': {
|
72 |
+
'vertices': mesh.vertices.cpu().numpy(),
|
73 |
+
'faces': mesh.faces.cpu().numpy(),
|
74 |
+
},
|
75 |
+
}
|
76 |
+
|
77 |
+
|
78 |
+
def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
|
79 |
+
gs = Gaussian(
|
80 |
+
aabb=state['gaussian']['aabb'],
|
81 |
+
sh_degree=state['gaussian']['sh_degree'],
|
82 |
+
mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
|
83 |
+
scaling_bias=state['gaussian']['scaling_bias'],
|
84 |
+
opacity_bias=state['gaussian']['opacity_bias'],
|
85 |
+
scaling_activation=state['gaussian']['scaling_activation'],
|
86 |
+
)
|
87 |
+
gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
|
88 |
+
gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
|
89 |
+
gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
|
90 |
+
gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
|
91 |
+
gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')
|
92 |
+
|
93 |
+
mesh = edict(
|
94 |
+
vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
|
95 |
+
faces=torch.tensor(state['mesh']['faces'], device='cuda'),
|
96 |
+
)
|
97 |
+
|
98 |
+
return gs, mesh
|
99 |
+
|
100 |
+
|
101 |
+
def get_seed(randomize_seed: bool, seed: int) -> int:
|
102 |
+
"""
|
103 |
+
Get the random seed.
|
104 |
+
"""
|
105 |
+
return np.random.randint(0, MAX_SEED) if randomize_seed else seed
|
106 |
+
|
107 |
+
|
108 |
+
def image_to_3d(
|
109 |
+
image: Image.Image,
|
110 |
+
multiimages: List[Tuple[Image.Image, str]],
|
111 |
+
is_multiimage: bool,
|
112 |
+
seed: int,
|
113 |
+
ss_guidance_strength: float,
|
114 |
+
ss_sampling_steps: int,
|
115 |
+
slat_guidance_strength: float,
|
116 |
+
slat_sampling_steps: int,
|
117 |
+
multiimage_algo: Literal["multidiffusion", "stochastic"],
|
118 |
+
req: gr.Request,
|
119 |
+
) -> Tuple[dict, str]:
|
120 |
+
"""
|
121 |
+
Convert an image to a 3D model.
|
122 |
+
|
123 |
+
Args:
|
124 |
+
image (Image.Image): The input image.
|
125 |
+
multiimages (List[Tuple[Image.Image, str]]): The input images in multi-image mode.
|
126 |
+
is_multiimage (bool): Whether is in multi-image mode.
|
127 |
+
seed (int): The random seed.
|
128 |
+
ss_guidance_strength (float): The guidance strength for sparse structure generation.
|
129 |
+
ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
|
130 |
+
slat_guidance_strength (float): The guidance strength for structured latent generation.
|
131 |
+
slat_sampling_steps (int): The number of sampling steps for structured latent generation.
|
132 |
+
multiimage_algo (Literal["multidiffusion", "stochastic"]): The algorithm for multi-image generation.
|
133 |
+
|
134 |
+
Returns:
|
135 |
+
dict: The information of the generated 3D model.
|
136 |
+
str: The path to the video of the 3D model.
|
137 |
+
"""
|
138 |
+
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
|
139 |
+
if not is_multiimage:
|
140 |
+
outputs = pipeline.run(
|
141 |
+
image,
|
142 |
+
seed=seed,
|
143 |
+
formats=["gaussian", "mesh"],
|
144 |
+
preprocess_image=False,
|
145 |
+
sparse_structure_sampler_params={
|
146 |
+
"steps": ss_sampling_steps,
|
147 |
+
"cfg_strength": ss_guidance_strength,
|
148 |
+
},
|
149 |
+
slat_sampler_params={
|
150 |
+
"steps": slat_sampling_steps,
|
151 |
+
"cfg_strength": slat_guidance_strength,
|
152 |
+
},
|
153 |
+
)
|
154 |
+
else:
|
155 |
+
outputs = pipeline.run_multi_image(
|
156 |
+
[image[0] for image in multiimages],
|
157 |
+
seed=seed,
|
158 |
+
formats=["gaussian", "mesh"],
|
159 |
+
preprocess_image=False,
|
160 |
+
sparse_structure_sampler_params={
|
161 |
+
"steps": ss_sampling_steps,
|
162 |
+
"cfg_strength": ss_guidance_strength,
|
163 |
+
},
|
164 |
+
slat_sampler_params={
|
165 |
+
"steps": slat_sampling_steps,
|
166 |
+
"cfg_strength": slat_guidance_strength,
|
167 |
+
},
|
168 |
+
mode=multiimage_algo,
|
169 |
+
)
|
170 |
+
video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
|
171 |
+
video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
|
172 |
+
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
|
173 |
+
video_path = os.path.join(user_dir, 'sample.mp4')
|
174 |
+
imageio.mimsave(video_path, video, fps=15)
|
175 |
+
state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
|
176 |
+
torch.cuda.empty_cache()
|
177 |
+
return state, video_path
|
178 |
+
|
179 |
+
|
180 |
+
def extract_glb(
|
181 |
+
state: dict,
|
182 |
+
mesh_simplify: float,
|
183 |
+
texture_size: int,
|
184 |
+
req: gr.Request,
|
185 |
+
) -> Tuple[str, str]:
|
186 |
+
"""
|
187 |
+
Extract a GLB file from the 3D model.
|
188 |
+
|
189 |
+
Args:
|
190 |
+
state (dict): The state of the generated 3D model.
|
191 |
+
mesh_simplify (float): The mesh simplification factor.
|
192 |
+
texture_size (int): The texture resolution.
|
193 |
+
|
194 |
+
Returns:
|
195 |
+
str: The path to the extracted GLB file.
|
196 |
+
"""
|
197 |
+
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
|
198 |
+
gs, mesh = unpack_state(state)
|
199 |
+
glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
|
200 |
+
glb_path = os.path.join(user_dir, 'sample.glb')
|
201 |
+
glb.export(glb_path)
|
202 |
+
torch.cuda.empty_cache()
|
203 |
+
return glb_path, glb_path
|
204 |
+
|
205 |
+
|
206 |
+
def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
|
207 |
+
"""
|
208 |
+
Extract a Gaussian file from the 3D model.
|
209 |
+
|
210 |
+
Args:
|
211 |
+
state (dict): The state of the generated 3D model.
|
212 |
+
|
213 |
+
Returns:
|
214 |
+
str: The path to the extracted Gaussian file.
|
215 |
+
"""
|
216 |
+
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
|
217 |
+
gs, _ = unpack_state(state)
|
218 |
+
gaussian_path = os.path.join(user_dir, 'sample.ply')
|
219 |
+
gs.save_ply(gaussian_path)
|
220 |
+
torch.cuda.empty_cache()
|
221 |
+
return gaussian_path, gaussian_path
|
222 |
+
|
223 |
+
|
224 |
+
def prepare_multi_example() -> List[Image.Image]:
    """
    Build composite preview images for the multi-image examples gallery.

    Scans assets/example_multi_image for files named '<case>_<i>.png' (i in 1..3),
    resizes each view to height 512 preserving aspect ratio, and concatenates the
    three views horizontally into one preview image per case.

    Returns:
        List[Image.Image]: One composite preview image per example case.

    NOTE(review): the case name is taken as the text before the first '_', so case
    names themselves must not contain underscores — matches the current assets.
    """
    # sorted() makes the example ordering deterministic across runs;
    # list(set(...)) alone yields arbitrary order.
    multi_case = sorted(set(i.split('_')[0] for i in os.listdir("assets/example_multi_image")))
    images = []
    for case in multi_case:
        _images = []
        for i in range(1, 4):
            img = Image.open(f'assets/example_multi_image/{case}_{i}.png')
            W, H = img.size
            # Scale to a fixed height of 512, keeping aspect ratio.
            img = img.resize((int(W / H * 512), 512))
            _images.append(np.array(img))
        images.append(Image.fromarray(np.concatenate(_images, axis=1)))
    return images
|
236 |
+
|
237 |
+
|
238 |
+
def split_image(image: Image.Image) -> List[Image.Image]:
    """
    Split a horizontally-concatenated multi-view image into individual views.

    Views are detected as maximal runs of columns containing any non-zero alpha;
    each run is cropped out and passed through preprocess_image.
    """
    image = np.array(image)
    alpha = image[..., 3]
    # Per-column occupancy: True where any row in the column is non-transparent.
    alpha = np.any(alpha > 0, axis=0)
    # Pad with False on both sides so segments touching the image edges are still
    # detected: the unpadded transition scan missed a view starting at column 0
    # or ending at the last column.
    padded = np.concatenate([[False], alpha, [False]])
    start_pos = np.where(~padded[:-1] & padded[1:])[0].tolist()
    end_pos = (np.where(padded[:-1] & ~padded[1:])[0] - 1).tolist()
    images = []
    for s, e in zip(start_pos, end_pos):
        images.append(Image.fromarray(image[:, s:e+1]))
    return [preprocess_image(image) for image in images]
|
251 |
+
|
252 |
+
|
253 |
+
with gr.Blocks(delete_cache=(600, 600)) as demo:
    gr.Markdown("""
    ## Image to 3D Asset with [TRELLIS](https://trellis3d.github.io/)
    * Upload an image and click "Generate" to create a 3D asset. If the image has alpha channel, it will be used as the mask. Otherwise, we use `rembg` to remove the background.
    * If you find the generated 3D asset satisfactory, click "Extract GLB" to extract the GLB file and download it.
    """)

    with gr.Row():
        with gr.Column():
            # --- Input: single-image vs. multi-image tabs ---
            with gr.Tabs() as input_tabs:
                with gr.Tab(label="Single Image", id=0) as single_image_input_tab:
                    image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300)
                with gr.Tab(label="Multiple Images", id=1) as multiimage_input_tab:
                    multiimage_prompt = gr.Gallery(label="Image Prompt", format="png", type="pil", height=300, columns=3)
                    gr.Markdown("""
                    Input different views of the object in separate images.

                    *NOTE: this is an experimental algorithm without training a specialized model. It may not produce the best results for all images, especially those having different poses or inconsistent details.*
                    """)

            # --- Sampler hyperparameters for the two generation stages ---
            with gr.Accordion(label="Generation Settings", open=False):
                seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                gr.Markdown("Stage 1: Sparse Structure Generation")
                with gr.Row():
                    ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
                    ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
                gr.Markdown("Stage 2: Structured Latent Generation")
                with gr.Row():
                    slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
                    slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
                multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")

            generate_btn = gr.Button("Generate")

            with gr.Accordion(label="GLB Extraction Settings", open=False):
                mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
                texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)

            with gr.Row():
                extract_glb_btn = gr.Button("Extract GLB", interactive=False)
                extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
            gr.Markdown("""
            *NOTE: Gaussian file can be very large (~50MB), it will take a while to display and download.*
            """)

        with gr.Column():
            # --- Outputs: preview video and extracted 3D model viewer ---
            video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
            model_output = LitModel3D(label="Extracted GLB/Gaussian", exposure=10.0, height=300)

            with gr.Row():
                download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
                download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)

    # Session-scoped state: which input mode is active, and the packed model state.
    is_multiimage = gr.State(False)
    output_buf = gr.State()

    # Example images at the bottom of the page
    with gr.Row() as single_image_example:
        examples = gr.Examples(
            examples=[
                f'assets/example_image/{image}'
                for image in os.listdir("assets/example_image")
            ],
            inputs=[image_prompt],
            fn=preprocess_image,
            outputs=[image_prompt],
            run_on_click=True,
            examples_per_page=64,
        )
    with gr.Row(visible=False) as multiimage_example:
        examples_multi = gr.Examples(
            examples=prepare_multi_example(),
            inputs=[image_prompt],
            fn=split_image,
            outputs=[multiimage_prompt],
            run_on_click=True,
            examples_per_page=8,
        )

    # Handlers
    demo.load(start_session)
    demo.unload(end_session)

    # Switching tabs toggles the input mode flag and shows the matching example row.
    single_image_input_tab.select(
        lambda: tuple([False, gr.Row.update(visible=True), gr.Row.update(visible=False)]),
        outputs=[is_multiimage, single_image_example, multiimage_example]
    )
    multiimage_input_tab.select(
        lambda: tuple([True, gr.Row.update(visible=False), gr.Row.update(visible=True)]),
        outputs=[is_multiimage, single_image_example, multiimage_example]
    )

    # Preprocess uploads in place (background removal / cropping).
    image_prompt.upload(
        preprocess_image,
        inputs=[image_prompt],
        outputs=[image_prompt],
    )
    multiimage_prompt.upload(
        preprocess_images,
        inputs=[multiimage_prompt],
        outputs=[multiimage_prompt],
    )

    # Generate: resolve the seed, run the 3D pipeline, then enable extraction.
    generate_btn.click(
        get_seed,
        inputs=[randomize_seed, seed],
        outputs=[seed],
    ).then(
        image_to_3d,
        inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo],
        outputs=[output_buf, video_output],
    ).then(
        lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
        outputs=[extract_glb_btn, extract_gs_btn],
    )

    # Clearing the preview disables extraction until the next generation.
    video_output.clear(
        lambda: tuple([gr.Button(interactive=False), gr.Button(interactive=False)]),
        outputs=[extract_glb_btn, extract_gs_btn],
    )

    # Extract GLB, then enable its download button.
    extract_glb_btn.click(
        extract_glb,
        inputs=[output_buf, mesh_simplify, texture_size],
        outputs=[model_output, download_glb],
    ).then(
        lambda: gr.Button(interactive=True),
        outputs=[download_glb],
    )

    # Extract Gaussian PLY, then enable its download button.
    extract_gs_btn.click(
        extract_gaussian,
        inputs=[output_buf],
        outputs=[model_output, download_gs],
    ).then(
        lambda: gr.Button(interactive=True),
        outputs=[download_gs],
    )

    model_output.clear(
        lambda: gr.Button(interactive=False),
        outputs=[download_glb],
    )
|
397 |
+
|
398 |
+
|
399 |
+
# Launch the Gradio app
if __name__ == "__main__":
    # Load the pretrained TRELLIS image-to-3D pipeline from the Hugging Face Hub.
    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
    # Move the pipeline to GPU; `.cuda()` implies a CUDA device is required to serve the app.
    pipeline.cuda()
    demo.launch()
|
TRELLIS/assets/example_image/T.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_building.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_castle.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_colorful_cottage.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_maya_pyramid.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_mushroom.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_building_space_station.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_dragon.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_elephant.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_furry.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_quadruped.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_robot_crab.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_robot_dinosour.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_creature_rock_monster.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_humanoid_block_robot.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_humanoid_dragonborn.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_humanoid_dwarf.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_humanoid_goblin.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_humanoid_mech.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_crate.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_fireplace.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_gate.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_lantern.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_magicbook.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_mailbox.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_monster_chest.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_paper_machine.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_phonograph.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_portal2.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_storage_chest.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_telephone.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_television.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_misc_workbench.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_biplane.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_bulldozer.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_cart.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_excavator.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_helicopter.png
ADDED
![]() |
Git LFS Details
|
TRELLIS/assets/example_image/typical_vehicle_locomotive.png
ADDED
![]() |
Git LFS Details
|