geyik2 committed
Commit 21126d3 · verified · 1 Parent(s): 52658e3

Delete trellis

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of deletions.
Files changed (50)
  1. trellis/__init__.py +0 -6
  2. trellis/models/__init__.py +0 -70
  3. trellis/models/sparse_structure_flow.py +0 -200
  4. trellis/models/sparse_structure_vae.py +0 -306
  5. trellis/models/structured_latent_flow.py +0 -262
  6. trellis/models/structured_latent_vae/__init__.py +0 -4
  7. trellis/models/structured_latent_vae/base.py +0 -117
  8. trellis/models/structured_latent_vae/decoder_gs.py +0 -122
  9. trellis/models/structured_latent_vae/decoder_mesh.py +0 -167
  10. trellis/models/structured_latent_vae/decoder_rf.py +0 -104
  11. trellis/models/structured_latent_vae/encoder.py +0 -72
  12. trellis/modules/attention/__init__.py +0 -36
  13. trellis/modules/attention/full_attn.py +0 -140
  14. trellis/modules/attention/modules.py +0 -146
  15. trellis/modules/norm.py +0 -25
  16. trellis/modules/sparse/__init__.py +0 -102
  17. trellis/modules/sparse/attention/__init__.py +0 -4
  18. trellis/modules/sparse/attention/full_attn.py +0 -215
  19. trellis/modules/sparse/attention/modules.py +0 -139
  20. trellis/modules/sparse/attention/serialized_attn.py +0 -193
  21. trellis/modules/sparse/attention/windowed_attn.py +0 -135
  22. trellis/modules/sparse/basic.py +0 -459
  23. trellis/modules/sparse/conv/__init__.py +0 -21
  24. trellis/modules/sparse/conv/conv_spconv.py +0 -80
  25. trellis/modules/sparse/conv/conv_torchsparse.py +0 -38
  26. trellis/modules/sparse/linear.py +0 -15
  27. trellis/modules/sparse/nonlinearity.py +0 -35
  28. trellis/modules/sparse/norm.py +0 -58
  29. trellis/modules/sparse/spatial.py +0 -110
  30. trellis/modules/sparse/transformer/__init__.py +0 -2
  31. trellis/modules/sparse/transformer/blocks.py +0 -151
  32. trellis/modules/sparse/transformer/modulated.py +0 -166
  33. trellis/modules/spatial.py +0 -48
  34. trellis/modules/transformer/__init__.py +0 -2
  35. trellis/modules/transformer/blocks.py +0 -182
  36. trellis/modules/transformer/modulated.py +0 -157
  37. trellis/modules/utils.py +0 -54
  38. trellis/pipelines/__init__.py +0 -24
  39. trellis/pipelines/base.py +0 -66
  40. trellis/pipelines/samplers/__init__.py +0 -2
  41. trellis/pipelines/samplers/base.py +0 -20
  42. trellis/pipelines/samplers/classifier_free_guidance_mixin.py +0 -12
  43. trellis/pipelines/samplers/flow_euler.py +0 -199
  44. trellis/pipelines/samplers/guidance_interval_mixin.py +0 -15
  45. trellis/pipelines/trellis_image_to_3d.py +0 -376
  46. trellis/renderers/__init__.py +0 -31
  47. trellis/renderers/gaussian_render.py +0 -231
  48. trellis/renderers/mesh_renderer.py +0 -140
  49. trellis/renderers/octree_renderer.py +0 -300
  50. trellis/renderers/sh_utils.py +0 -118
trellis/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- from . import models
2
- from . import modules
3
- from . import pipelines
4
- from . import renderers
5
- from . import representations
6
- from . import utils
 
trellis/models/__init__.py DELETED
@@ -1,70 +0,0 @@
1
- import importlib
2
-
3
- __attributes = {
4
- 'SparseStructureEncoder': 'sparse_structure_vae',
5
- 'SparseStructureDecoder': 'sparse_structure_vae',
6
- 'SparseStructureFlowModel': 'sparse_structure_flow',
7
- 'SLatEncoder': 'structured_latent_vae',
8
- 'SLatGaussianDecoder': 'structured_latent_vae',
9
- 'SLatRadianceFieldDecoder': 'structured_latent_vae',
10
- 'SLatMeshDecoder': 'structured_latent_vae',
11
- 'SLatFlowModel': 'structured_latent_flow',
12
- }
13
-
14
- __submodules = []
15
-
16
- __all__ = list(__attributes.keys()) + __submodules
17
-
18
- def __getattr__(name):
19
- if name not in globals():
20
- if name in __attributes:
21
- module_name = __attributes[name]
22
- module = importlib.import_module(f".{module_name}", __name__)
23
- globals()[name] = getattr(module, name)
24
- elif name in __submodules:
25
- module = importlib.import_module(f".{name}", __name__)
26
- globals()[name] = module
27
- else:
28
- raise AttributeError(f"module {__name__} has no attribute {name}")
29
- return globals()[name]
30
-
31
-
32
- def from_pretrained(path: str, **kwargs):
33
- """
34
- Load a model from a pretrained checkpoint.
35
-
36
- Args:
37
- path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
38
- NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
39
- **kwargs: Additional arguments for the model constructor.
40
- """
41
- import os
42
- import json
43
- from safetensors.torch import load_file
44
- is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
45
-
46
- if is_local:
47
- config_file = f"{path}.json"
48
- model_file = f"{path}.safetensors"
49
- else:
50
- from huggingface_hub import hf_hub_download
51
- path_parts = path.split('/')
52
- repo_id = f'{path_parts[0]}/{path_parts[1]}'
53
- model_name = '/'.join(path_parts[2:])
54
- config_file = hf_hub_download(repo_id, f"{model_name}.json")
55
- model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
56
-
57
- with open(config_file, 'r') as f:
58
- config = json.load(f)
59
- model = __getattr__(config['name'])(**config['args'], **kwargs)
60
- model.load_state_dict(load_file(model_file))
61
-
62
- return model
63
-
64
-
65
- # For Pylance
66
- if __name__ == '__main__':
67
- from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
68
- from .sparse_structure_flow import SparseStructureFlowModel
69
- from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
70
- from .structured_latent_flow import SLatFlowModel
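For reference, the loader deleted above resolves either a local f'{path}.json' / f'{path}.safetensors' pair or a Hugging Face Hub path of the form 'org/repo/subpath', then instantiates the class named in the config through the lazy __getattr__ defined in the same file. A minimal usage sketch, with placeholder checkpoint paths that are not part of this repo:

# Hypothetical checkpoints; extra kwargs are forwarded to the model constructor.
from trellis import models

local_model = models.from_pretrained("ckpts/slat_flow")                 # reads ckpts/slat_flow.json + ckpts/slat_flow.safetensors
hub_model = models.from_pretrained("some-org/some-repo/ckpts/ss_flow")  # repo_id="some-org/some-repo", model_name="ckpts/ss_flow"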
 
trellis/models/sparse_structure_flow.py DELETED
@@ -1,200 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- import numpy as np
6
- from ..modules.utils import convert_module_to_f16, convert_module_to_f32
7
- from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
8
- from ..modules.spatial import patchify, unpatchify
9
-
10
-
11
- class TimestepEmbedder(nn.Module):
12
- """
13
- Embeds scalar timesteps into vector representations.
14
- """
15
- def __init__(self, hidden_size, frequency_embedding_size=256):
16
- super().__init__()
17
- self.mlp = nn.Sequential(
18
- nn.Linear(frequency_embedding_size, hidden_size, bias=True),
19
- nn.SiLU(),
20
- nn.Linear(hidden_size, hidden_size, bias=True),
21
- )
22
- self.frequency_embedding_size = frequency_embedding_size
23
-
24
- @staticmethod
25
- def timestep_embedding(t, dim, max_period=10000):
26
- """
27
- Create sinusoidal timestep embeddings.
28
-
29
- Args:
30
- t: a 1-D Tensor of N indices, one per batch element.
31
- These may be fractional.
32
- dim: the dimension of the output.
33
- max_period: controls the minimum frequency of the embeddings.
34
-
35
- Returns:
36
- an (N, D) Tensor of positional embeddings.
37
- """
38
- # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
39
- half = dim // 2
40
- freqs = torch.exp(
41
- -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
42
- ).to(device=t.device)
43
- args = t[:, None].float() * freqs[None]
44
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
45
- if dim % 2:
46
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
47
- return embedding
48
-
49
- def forward(self, t):
50
- t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
51
- t_emb = self.mlp(t_freq)
52
- return t_emb
53
-
54
-
55
- class SparseStructureFlowModel(nn.Module):
56
- def __init__(
57
- self,
58
- resolution: int,
59
- in_channels: int,
60
- model_channels: int,
61
- cond_channels: int,
62
- out_channels: int,
63
- num_blocks: int,
64
- num_heads: Optional[int] = None,
65
- num_head_channels: Optional[int] = 64,
66
- mlp_ratio: float = 4,
67
- patch_size: int = 2,
68
- pe_mode: Literal["ape", "rope"] = "ape",
69
- use_fp16: bool = False,
70
- use_checkpoint: bool = False,
71
- share_mod: bool = False,
72
- qk_rms_norm: bool = False,
73
- qk_rms_norm_cross: bool = False,
74
- ):
75
- super().__init__()
76
- self.resolution = resolution
77
- self.in_channels = in_channels
78
- self.model_channels = model_channels
79
- self.cond_channels = cond_channels
80
- self.out_channels = out_channels
81
- self.num_blocks = num_blocks
82
- self.num_heads = num_heads or model_channels // num_head_channels
83
- self.mlp_ratio = mlp_ratio
84
- self.patch_size = patch_size
85
- self.pe_mode = pe_mode
86
- self.use_fp16 = use_fp16
87
- self.use_checkpoint = use_checkpoint
88
- self.share_mod = share_mod
89
- self.qk_rms_norm = qk_rms_norm
90
- self.qk_rms_norm_cross = qk_rms_norm_cross
91
- self.dtype = torch.float16 if use_fp16 else torch.float32
92
-
93
- self.t_embedder = TimestepEmbedder(model_channels)
94
- if share_mod:
95
- self.adaLN_modulation = nn.Sequential(
96
- nn.SiLU(),
97
- nn.Linear(model_channels, 6 * model_channels, bias=True)
98
- )
99
-
100
- if pe_mode == "ape":
101
- pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
102
- coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
103
- coords = torch.stack(coords, dim=-1).reshape(-1, 3)
104
- pos_emb = pos_embedder(coords)
105
- self.register_buffer("pos_emb", pos_emb)
106
-
107
- self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
108
-
109
- self.blocks = nn.ModuleList([
110
- ModulatedTransformerCrossBlock(
111
- model_channels,
112
- cond_channels,
113
- num_heads=self.num_heads,
114
- mlp_ratio=self.mlp_ratio,
115
- attn_mode='full',
116
- use_checkpoint=self.use_checkpoint,
117
- use_rope=(pe_mode == "rope"),
118
- share_mod=share_mod,
119
- qk_rms_norm=self.qk_rms_norm,
120
- qk_rms_norm_cross=self.qk_rms_norm_cross,
121
- )
122
- for _ in range(num_blocks)
123
- ])
124
-
125
- self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
126
-
127
- self.initialize_weights()
128
- if use_fp16:
129
- self.convert_to_fp16()
130
-
131
- @property
132
- def device(self) -> torch.device:
133
- """
134
- Return the device of the model.
135
- """
136
- return next(self.parameters()).device
137
-
138
- def convert_to_fp16(self) -> None:
139
- """
140
- Convert the torso of the model to float16.
141
- """
142
- self.blocks.apply(convert_module_to_f16)
143
-
144
- def convert_to_fp32(self) -> None:
145
- """
146
- Convert the torso of the model to float32.
147
- """
148
- self.blocks.apply(convert_module_to_f32)
149
-
150
- def initialize_weights(self) -> None:
151
- # Initialize transformer layers:
152
- def _basic_init(module):
153
- if isinstance(module, nn.Linear):
154
- torch.nn.init.xavier_uniform_(module.weight)
155
- if module.bias is not None:
156
- nn.init.constant_(module.bias, 0)
157
- self.apply(_basic_init)
158
-
159
- # Initialize timestep embedding MLP:
160
- nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
161
- nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
162
-
163
- # Zero-out adaLN modulation layers in DiT blocks:
164
- if self.share_mod:
165
- nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
166
- nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
167
- else:
168
- for block in self.blocks:
169
- nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
170
- nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
171
-
172
- # Zero-out output layers:
173
- nn.init.constant_(self.out_layer.weight, 0)
174
- nn.init.constant_(self.out_layer.bias, 0)
175
-
176
- def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
177
- assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
178
- f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
179
-
180
- h = patchify(x, self.patch_size)
181
- h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
182
-
183
- h = self.input_layer(h)
184
- h = h + self.pos_emb[None]
185
- t_emb = self.t_embedder(t)
186
- if self.share_mod:
187
- t_emb = self.adaLN_modulation(t_emb)
188
- t_emb = t_emb.type(self.dtype)
189
- h = h.type(self.dtype)
190
- cond = cond.type(self.dtype)
191
- for block in self.blocks:
192
- h = block(h, t_emb, cond)
193
- h = h.type(x.dtype)
194
- h = F.layer_norm(h, h.shape[-1:])
195
- h = self.out_layer(h)
196
-
197
- h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
198
- h = unpatchify(h, self.patch_size).contiguous()
199
-
200
- return h
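The TimestepEmbedder deleted above follows the standard GLIDE/DiT sinusoidal scheme: half of the channels carry cosines and half carry sines, with frequencies spaced geometrically down to 1/max_period, and an optional zero column when the dimension is odd. A self-contained sketch of the same computation, included here only for reference:

import math
import torch

def sinusoidal_timestep_embedding(t: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # t: (N,) timesteps (may be fractional); returns an (N, dim) embedding.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half).to(t.device)
    args = t[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # pad odd dims with a zero column
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

emb = sinusoidal_timestep_embedding(torch.tensor([0.0, 10.0, 999.0]), 256)
print(emb.shape)  # torch.Size([3, 256])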
 
trellis/models/sparse_structure_vae.py DELETED
@@ -1,306 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from ..modules.norm import GroupNorm32, ChannelLayerNorm32
6
- from ..modules.spatial import pixel_shuffle_3d
7
- from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
8
-
9
-
10
- def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
11
- """
12
- Return a normalization layer.
13
- """
14
- if norm_type == "group":
15
- return GroupNorm32(32, *args, **kwargs)
16
- elif norm_type == "layer":
17
- return ChannelLayerNorm32(*args, **kwargs)
18
- else:
19
- raise ValueError(f"Invalid norm type {norm_type}")
20
-
21
-
22
- class ResBlock3d(nn.Module):
23
- def __init__(
24
- self,
25
- channels: int,
26
- out_channels: Optional[int] = None,
27
- norm_type: Literal["group", "layer"] = "layer",
28
- ):
29
- super().__init__()
30
- self.channels = channels
31
- self.out_channels = out_channels or channels
32
-
33
- self.norm1 = norm_layer(norm_type, channels)
34
- self.norm2 = norm_layer(norm_type, self.out_channels)
35
- self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
36
- self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
37
- self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
38
-
39
- def forward(self, x: torch.Tensor) -> torch.Tensor:
40
- h = self.norm1(x)
41
- h = F.silu(h)
42
- h = self.conv1(h)
43
- h = self.norm2(h)
44
- h = F.silu(h)
45
- h = self.conv2(h)
46
- h = h + self.skip_connection(x)
47
- return h
48
-
49
-
50
- class DownsampleBlock3d(nn.Module):
51
- def __init__(
52
- self,
53
- in_channels: int,
54
- out_channels: int,
55
- mode: Literal["conv", "avgpool"] = "conv",
56
- ):
57
- assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
58
-
59
- super().__init__()
60
- self.in_channels = in_channels
61
- self.out_channels = out_channels
62
-
63
- if mode == "conv":
64
- self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
65
- elif mode == "avgpool":
66
- assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
67
-
68
- def forward(self, x: torch.Tensor) -> torch.Tensor:
69
- if hasattr(self, "conv"):
70
- return self.conv(x)
71
- else:
72
- return F.avg_pool3d(x, 2)
73
-
74
-
75
- class UpsampleBlock3d(nn.Module):
76
- def __init__(
77
- self,
78
- in_channels: int,
79
- out_channels: int,
80
- mode: Literal["conv", "nearest"] = "conv",
81
- ):
82
- assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
83
-
84
- super().__init__()
85
- self.in_channels = in_channels
86
- self.out_channels = out_channels
87
-
88
- if mode == "conv":
89
- self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
90
- elif mode == "nearest":
91
- assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
92
-
93
- def forward(self, x: torch.Tensor) -> torch.Tensor:
94
- if hasattr(self, "conv"):
95
- x = self.conv(x)
96
- return pixel_shuffle_3d(x, 2)
97
- else:
98
- return F.interpolate(x, scale_factor=2, mode="nearest")
99
-
100
-
101
- class SparseStructureEncoder(nn.Module):
102
- """
103
- Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
104
-
105
- Args:
106
- in_channels (int): Channels of the input.
107
- latent_channels (int): Channels of the latent representation.
108
- num_res_blocks (int): Number of residual blocks at each resolution.
109
- channels (List[int]): Channels of the encoder blocks.
110
- num_res_blocks_middle (int): Number of residual blocks in the middle.
111
- norm_type (Literal["group", "layer"]): Type of normalization layer.
112
- use_fp16 (bool): Whether to use FP16.
113
- """
114
- def __init__(
115
- self,
116
- in_channels: int,
117
- latent_channels: int,
118
- num_res_blocks: int,
119
- channels: List[int],
120
- num_res_blocks_middle: int = 2,
121
- norm_type: Literal["group", "layer"] = "layer",
122
- use_fp16: bool = False,
123
- ):
124
- super().__init__()
125
- self.in_channels = in_channels
126
- self.latent_channels = latent_channels
127
- self.num_res_blocks = num_res_blocks
128
- self.channels = channels
129
- self.num_res_blocks_middle = num_res_blocks_middle
130
- self.norm_type = norm_type
131
- self.use_fp16 = use_fp16
132
- self.dtype = torch.float16 if use_fp16 else torch.float32
133
-
134
- self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
135
-
136
- self.blocks = nn.ModuleList([])
137
- for i, ch in enumerate(channels):
138
- self.blocks.extend([
139
- ResBlock3d(ch, ch)
140
- for _ in range(num_res_blocks)
141
- ])
142
- if i < len(channels) - 1:
143
- self.blocks.append(
144
- DownsampleBlock3d(ch, channels[i+1])
145
- )
146
-
147
- self.middle_block = nn.Sequential(*[
148
- ResBlock3d(channels[-1], channels[-1])
149
- for _ in range(num_res_blocks_middle)
150
- ])
151
-
152
- self.out_layer = nn.Sequential(
153
- norm_layer(norm_type, channels[-1]),
154
- nn.SiLU(),
155
- nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
156
- )
157
-
158
- if use_fp16:
159
- self.convert_to_fp16()
160
-
161
- @property
162
- def device(self) -> torch.device:
163
- """
164
- Return the device of the model.
165
- """
166
- return next(self.parameters()).device
167
-
168
- def convert_to_fp16(self) -> None:
169
- """
170
- Convert the torso of the model to float16.
171
- """
172
- self.use_fp16 = True
173
- self.dtype = torch.float16
174
- self.blocks.apply(convert_module_to_f16)
175
- self.middle_block.apply(convert_module_to_f16)
176
-
177
- def convert_to_fp32(self) -> None:
178
- """
179
- Convert the torso of the model to float32.
180
- """
181
- self.use_fp16 = False
182
- self.dtype = torch.float32
183
- self.blocks.apply(convert_module_to_f32)
184
- self.middle_block.apply(convert_module_to_f32)
185
-
186
- def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
187
- h = self.input_layer(x)
188
- h = h.type(self.dtype)
189
-
190
- for block in self.blocks:
191
- h = block(h)
192
- h = self.middle_block(h)
193
-
194
- h = h.type(x.dtype)
195
- h = self.out_layer(h)
196
-
197
- mean, logvar = h.chunk(2, dim=1)
198
-
199
- if sample_posterior:
200
- std = torch.exp(0.5 * logvar)
201
- z = mean + std * torch.randn_like(std)
202
- else:
203
- z = mean
204
-
205
- if return_raw:
206
- return z, mean, logvar
207
- return z
208
-
209
-
210
- class SparseStructureDecoder(nn.Module):
211
- """
212
- Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).
213
-
214
- Args:
215
- out_channels (int): Channels of the output.
216
- latent_channels (int): Channels of the latent representation.
217
- num_res_blocks (int): Number of residual blocks at each resolution.
218
- channels (List[int]): Channels of the decoder blocks.
219
- num_res_blocks_middle (int): Number of residual blocks in the middle.
220
- norm_type (Literal["group", "layer"]): Type of normalization layer.
221
- use_fp16 (bool): Whether to use FP16.
222
- """
223
- def __init__(
224
- self,
225
- out_channels: int,
226
- latent_channels: int,
227
- num_res_blocks: int,
228
- channels: List[int],
229
- num_res_blocks_middle: int = 2,
230
- norm_type: Literal["group", "layer"] = "layer",
231
- use_fp16: bool = False,
232
- ):
233
- super().__init__()
234
- self.out_channels = out_channels
235
- self.latent_channels = latent_channels
236
- self.num_res_blocks = num_res_blocks
237
- self.channels = channels
238
- self.num_res_blocks_middle = num_res_blocks_middle
239
- self.norm_type = norm_type
240
- self.use_fp16 = use_fp16
241
- self.dtype = torch.float16 if use_fp16 else torch.float32
242
-
243
- self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
244
-
245
- self.middle_block = nn.Sequential(*[
246
- ResBlock3d(channels[0], channels[0])
247
- for _ in range(num_res_blocks_middle)
248
- ])
249
-
250
- self.blocks = nn.ModuleList([])
251
- for i, ch in enumerate(channels):
252
- self.blocks.extend([
253
- ResBlock3d(ch, ch)
254
- for _ in range(num_res_blocks)
255
- ])
256
- if i < len(channels) - 1:
257
- self.blocks.append(
258
- UpsampleBlock3d(ch, channels[i+1])
259
- )
260
-
261
- self.out_layer = nn.Sequential(
262
- norm_layer(norm_type, channels[-1]),
263
- nn.SiLU(),
264
- nn.Conv3d(channels[-1], out_channels, 3, padding=1)
265
- )
266
-
267
- if use_fp16:
268
- self.convert_to_fp16()
269
-
270
- @property
271
- def device(self) -> torch.device:
272
- """
273
- Return the device of the model.
274
- """
275
- return next(self.parameters()).device
276
-
277
- def convert_to_fp16(self) -> None:
278
- """
279
- Convert the torso of the model to float16.
280
- """
281
- self.use_fp16 = True
282
- self.dtype = torch.float16
283
- self.blocks.apply(convert_module_to_f16)
284
- self.middle_block.apply(convert_module_to_f16)
285
-
286
- def convert_to_fp32(self) -> None:
287
- """
288
- Convert the torso of the model to float32.
289
- """
290
- self.use_fp16 = False
291
- self.dtype = torch.float32
292
- self.blocks.apply(convert_module_to_f32)
293
- self.middle_block.apply(convert_module_to_f32)
294
-
295
- def forward(self, x: torch.Tensor) -> torch.Tensor:
296
- h = self.input_layer(x)
297
-
298
- h = h.type(self.dtype)
299
-
300
- h = self.middle_block(h)
301
- for block in self.blocks:
302
- h = block(h)
303
-
304
- h = h.type(x.dtype)
305
- h = self.out_layer(h)
306
- return h
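Both this encoder and the SLat encoder later in this diff sample the latent with the usual VAE reparameterization: the raw output channels are split into a mean and a log-variance, and z = mean + exp(0.5 * logvar) * eps. A minimal dense-tensor sketch of that step in isolation, with arbitrary example shapes:

import torch

def sample_posterior(h: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    # h: (N, 2*C, D, H, W) raw encoder output; channels hold [mean | logvar].
    mean, logvar = h.chunk(2, dim=1)
    std = torch.exp(0.5 * logvar)
    z = mean + std * torch.randn_like(std)  # reparameterization trick
    return z, mean, logvar

z, mean, logvar = sample_posterior(torch.randn(2, 16, 8, 8, 8))
print(z.shape, mean.shape)  # torch.Size([2, 8, 8, 8, 8]) each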
 
trellis/models/structured_latent_flow.py DELETED
@@ -1,262 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- import numpy as np
6
- from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
- from ..modules.transformer import AbsolutePositionEmbedder
8
- from ..modules.norm import LayerNorm32
9
- from ..modules import sparse as sp
10
- from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
11
- from .sparse_structure_flow import TimestepEmbedder
12
-
13
-
14
- class SparseResBlock3d(nn.Module):
15
- def __init__(
16
- self,
17
- channels: int,
18
- emb_channels: int,
19
- out_channels: Optional[int] = None,
20
- downsample: bool = False,
21
- upsample: bool = False,
22
- ):
23
- super().__init__()
24
- self.channels = channels
25
- self.emb_channels = emb_channels
26
- self.out_channels = out_channels or channels
27
- self.downsample = downsample
28
- self.upsample = upsample
29
-
30
- assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
31
-
32
- self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
33
- self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
34
- self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
35
- self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
36
- self.emb_layers = nn.Sequential(
37
- nn.SiLU(),
38
- nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
39
- )
40
- self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
41
- self.updown = None
42
- if self.downsample:
43
- self.updown = sp.SparseDownsample(2)
44
- elif self.upsample:
45
- self.updown = sp.SparseUpsample(2)
46
-
47
- def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
48
- if self.updown is not None:
49
- x = self.updown(x)
50
- return x
51
-
52
- def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
53
- emb_out = self.emb_layers(emb).type(x.dtype)
54
- scale, shift = torch.chunk(emb_out, 2, dim=1)
55
-
56
- x = self._updown(x)
57
- h = x.replace(self.norm1(x.feats))
58
- h = h.replace(F.silu(h.feats))
59
- h = self.conv1(h)
60
- h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
61
- h = h.replace(F.silu(h.feats))
62
- h = self.conv2(h)
63
- h = h + self.skip_connection(x)
64
-
65
- return h
66
-
67
-
68
- class SLatFlowModel(nn.Module):
69
- def __init__(
70
- self,
71
- resolution: int,
72
- in_channels: int,
73
- model_channels: int,
74
- cond_channels: int,
75
- out_channels: int,
76
- num_blocks: int,
77
- num_heads: Optional[int] = None,
78
- num_head_channels: Optional[int] = 64,
79
- mlp_ratio: float = 4,
80
- patch_size: int = 2,
81
- num_io_res_blocks: int = 2,
82
- io_block_channels: List[int] = None,
83
- pe_mode: Literal["ape", "rope"] = "ape",
84
- use_fp16: bool = False,
85
- use_checkpoint: bool = False,
86
- use_skip_connection: bool = True,
87
- share_mod: bool = False,
88
- qk_rms_norm: bool = False,
89
- qk_rms_norm_cross: bool = False,
90
- ):
91
- super().__init__()
92
- self.resolution = resolution
93
- self.in_channels = in_channels
94
- self.model_channels = model_channels
95
- self.cond_channels = cond_channels
96
- self.out_channels = out_channels
97
- self.num_blocks = num_blocks
98
- self.num_heads = num_heads or model_channels // num_head_channels
99
- self.mlp_ratio = mlp_ratio
100
- self.patch_size = patch_size
101
- self.num_io_res_blocks = num_io_res_blocks
102
- self.io_block_channels = io_block_channels
103
- self.pe_mode = pe_mode
104
- self.use_fp16 = use_fp16
105
- self.use_checkpoint = use_checkpoint
106
- self.use_skip_connection = use_skip_connection
107
- self.share_mod = share_mod
108
- self.qk_rms_norm = qk_rms_norm
109
- self.qk_rms_norm_cross = qk_rms_norm_cross
110
- self.dtype = torch.float16 if use_fp16 else torch.float32
111
-
112
- assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
113
- assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"
114
-
115
- self.t_embedder = TimestepEmbedder(model_channels)
116
- if share_mod:
117
- self.adaLN_modulation = nn.Sequential(
118
- nn.SiLU(),
119
- nn.Linear(model_channels, 6 * model_channels, bias=True)
120
- )
121
-
122
- if pe_mode == "ape":
123
- self.pos_embedder = AbsolutePositionEmbedder(model_channels)
124
-
125
- self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
126
- self.input_blocks = nn.ModuleList([])
127
- for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
128
- self.input_blocks.extend([
129
- SparseResBlock3d(
130
- chs,
131
- model_channels,
132
- out_channels=chs,
133
- )
134
- for _ in range(num_io_res_blocks-1)
135
- ])
136
- self.input_blocks.append(
137
- SparseResBlock3d(
138
- chs,
139
- model_channels,
140
- out_channels=next_chs,
141
- downsample=True,
142
- )
143
- )
144
-
145
- self.blocks = nn.ModuleList([
146
- ModulatedSparseTransformerCrossBlock(
147
- model_channels,
148
- cond_channels,
149
- num_heads=self.num_heads,
150
- mlp_ratio=self.mlp_ratio,
151
- attn_mode='full',
152
- use_checkpoint=self.use_checkpoint,
153
- use_rope=(pe_mode == "rope"),
154
- share_mod=self.share_mod,
155
- qk_rms_norm=self.qk_rms_norm,
156
- qk_rms_norm_cross=self.qk_rms_norm_cross,
157
- )
158
- for _ in range(num_blocks)
159
- ])
160
-
161
- self.out_blocks = nn.ModuleList([])
162
- for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
163
- self.out_blocks.append(
164
- SparseResBlock3d(
165
- prev_chs * 2 if self.use_skip_connection else prev_chs,
166
- model_channels,
167
- out_channels=chs,
168
- upsample=True,
169
- )
170
- )
171
- self.out_blocks.extend([
172
- SparseResBlock3d(
173
- chs * 2 if self.use_skip_connection else chs,
174
- model_channels,
175
- out_channels=chs,
176
- )
177
- for _ in range(num_io_res_blocks-1)
178
- ])
179
- self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
180
-
181
- self.initialize_weights()
182
- if use_fp16:
183
- self.convert_to_fp16()
184
-
185
- @property
186
- def device(self) -> torch.device:
187
- """
188
- Return the device of the model.
189
- """
190
- return next(self.parameters()).device
191
-
192
- def convert_to_fp16(self) -> None:
193
- """
194
- Convert the torso of the model to float16.
195
- """
196
- self.input_blocks.apply(convert_module_to_f16)
197
- self.blocks.apply(convert_module_to_f16)
198
- self.out_blocks.apply(convert_module_to_f16)
199
-
200
- def convert_to_fp32(self) -> None:
201
- """
202
- Convert the torso of the model to float32.
203
- """
204
- self.input_blocks.apply(convert_module_to_f32)
205
- self.blocks.apply(convert_module_to_f32)
206
- self.out_blocks.apply(convert_module_to_f32)
207
-
208
- def initialize_weights(self) -> None:
209
- # Initialize transformer layers:
210
- def _basic_init(module):
211
- if isinstance(module, nn.Linear):
212
- torch.nn.init.xavier_uniform_(module.weight)
213
- if module.bias is not None:
214
- nn.init.constant_(module.bias, 0)
215
- self.apply(_basic_init)
216
-
217
- # Initialize timestep embedding MLP:
218
- nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
219
- nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
220
-
221
- # Zero-out adaLN modulation layers in DiT blocks:
222
- if self.share_mod:
223
- nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
224
- nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
225
- else:
226
- for block in self.blocks:
227
- nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
228
- nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
229
-
230
- # Zero-out output layers:
231
- nn.init.constant_(self.out_layer.weight, 0)
232
- nn.init.constant_(self.out_layer.bias, 0)
233
-
234
- def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
235
- h = self.input_layer(x).type(self.dtype)
236
- t_emb = self.t_embedder(t)
237
- if self.share_mod:
238
- t_emb = self.adaLN_modulation(t_emb)
239
- t_emb = t_emb.type(self.dtype)
240
- cond = cond.type(self.dtype)
241
-
242
- skips = []
243
- # pack with input blocks
244
- for block in self.input_blocks:
245
- h = block(h, t_emb)
246
- skips.append(h.feats)
247
-
248
- if self.pe_mode == "ape":
249
- h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
250
- for block in self.blocks:
251
- h = block(h, t_emb, cond)
252
-
253
- # unpack with output blocks
254
- for block, skip in zip(self.out_blocks, reversed(skips)):
255
- if self.use_skip_connection:
256
- h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
257
- else:
258
- h = block(h, t_emb)
259
-
260
- h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
261
- h = self.out_layer(h.type(x.dtype))
262
- return h
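The forward pass deleted above is a U-Net-style arrangement around the transformer trunk: each input block's features are pushed onto a stack during the "pack" phase, and each output block pops one and concatenates it along the feature channels before processing. The real model does this on sparse tensors with conditioning; the following is only a toy dense-tensor sketch of the skip pattern:

import torch
import torch.nn as nn

class TinyUNet(nn.Module):
    def __init__(self, ch: int = 8):
        super().__init__()
        self.inputs = nn.ModuleList([nn.Linear(ch, ch) for _ in range(3)])
        # Output blocks see 2*ch features because the popped skip is concatenated.
        self.outputs = nn.ModuleList([nn.Linear(2 * ch, ch) for _ in range(3)])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        skips = []
        for blk in self.inputs:                               # pack phase
            x = blk(x)
            skips.append(x)
        for blk, skip in zip(self.outputs, reversed(skips)):  # unpack phase
            x = blk(torch.cat([x, skip], dim=-1))
        return x

print(TinyUNet()(torch.randn(4, 8)).shape)  # torch.Size([4, 8])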
 
trellis/models/structured_latent_vae/__init__.py DELETED
@@ -1,4 +0,0 @@
1
- from .encoder import SLatEncoder
2
- from .decoder_gs import SLatGaussianDecoder
3
- from .decoder_rf import SLatRadianceFieldDecoder
4
- from .decoder_mesh import SLatMeshDecoder
 
trellis/models/structured_latent_vae/base.py DELETED
@@ -1,117 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from ...modules.utils import convert_module_to_f16, convert_module_to_f32
5
- from ...modules import sparse as sp
6
- from ...modules.transformer import AbsolutePositionEmbedder
7
- from ...modules.sparse.transformer import SparseTransformerBlock
8
-
9
-
10
- def block_attn_config(self):
11
- """
12
- Return the attention configuration of the model.
13
- """
14
- for i in range(self.num_blocks):
15
- if self.attn_mode == "shift_window":
16
- yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
17
- elif self.attn_mode == "shift_sequence":
18
- yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
19
- elif self.attn_mode == "shift_order":
20
- yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
21
- elif self.attn_mode == "full":
22
- yield "full", None, None, None, None
23
- elif self.attn_mode == "swin":
24
- yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None
25
-
26
-
27
- class SparseTransformerBase(nn.Module):
28
- """
29
- Sparse Transformer without output layers.
30
- Serve as the base class for encoder and decoder.
31
- """
32
- def __init__(
33
- self,
34
- in_channels: int,
35
- model_channels: int,
36
- num_blocks: int,
37
- num_heads: Optional[int] = None,
38
- num_head_channels: Optional[int] = 64,
39
- mlp_ratio: float = 4.0,
40
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
41
- window_size: Optional[int] = None,
42
- pe_mode: Literal["ape", "rope"] = "ape",
43
- use_fp16: bool = False,
44
- use_checkpoint: bool = False,
45
- qk_rms_norm: bool = False,
46
- ):
47
- super().__init__()
48
- self.in_channels = in_channels
49
- self.model_channels = model_channels
50
- self.num_blocks = num_blocks
51
- self.window_size = window_size
52
- self.num_heads = num_heads or model_channels // num_head_channels
53
- self.mlp_ratio = mlp_ratio
54
- self.attn_mode = attn_mode
55
- self.pe_mode = pe_mode
56
- self.use_fp16 = use_fp16
57
- self.use_checkpoint = use_checkpoint
58
- self.qk_rms_norm = qk_rms_norm
59
- self.dtype = torch.float16 if use_fp16 else torch.float32
60
-
61
- if pe_mode == "ape":
62
- self.pos_embedder = AbsolutePositionEmbedder(model_channels)
63
-
64
- self.input_layer = sp.SparseLinear(in_channels, model_channels)
65
- self.blocks = nn.ModuleList([
66
- SparseTransformerBlock(
67
- model_channels,
68
- num_heads=self.num_heads,
69
- mlp_ratio=self.mlp_ratio,
70
- attn_mode=attn_mode,
71
- window_size=window_size,
72
- shift_sequence=shift_sequence,
73
- shift_window=shift_window,
74
- serialize_mode=serialize_mode,
75
- use_checkpoint=self.use_checkpoint,
76
- use_rope=(pe_mode == "rope"),
77
- qk_rms_norm=self.qk_rms_norm,
78
- )
79
- for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
80
- ])
81
-
82
- @property
83
- def device(self) -> torch.device:
84
- """
85
- Return the device of the model.
86
- """
87
- return next(self.parameters()).device
88
-
89
- def convert_to_fp16(self) -> None:
90
- """
91
- Convert the torso of the model to float16.
92
- """
93
- self.blocks.apply(convert_module_to_f16)
94
-
95
- def convert_to_fp32(self) -> None:
96
- """
97
- Convert the torso of the model to float32.
98
- """
99
- self.blocks.apply(convert_module_to_f32)
100
-
101
- def initialize_weights(self) -> None:
102
- # Initialize transformer layers:
103
- def _basic_init(module):
104
- if isinstance(module, nn.Linear):
105
- torch.nn.init.xavier_uniform_(module.weight)
106
- if module.bias is not None:
107
- nn.init.constant_(module.bias, 0)
108
- self.apply(_basic_init)
109
-
110
- def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
111
- h = self.input_layer(x)
112
- if self.pe_mode == "ape":
113
- h = h + self.pos_embedder(x.coords[:, 1:])
114
- h = h.type(self.dtype)
115
- for block in self.blocks:
116
- h = block(h)
117
- return h
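block_attn_config above is a small generator that hands each transformer block its attention settings; for the "swin" mode, for instance, the window shift alternates every other block so neighbouring windows overlap across layers. A toy sketch of that pattern, simplified to three fields and hypothetical values:

def swin_block_config(num_blocks: int, window_size: int):
    # Yields (attn_mode, window_size, shift_window) per block, alternating the shift.
    for i in range(num_blocks):
        yield "windowed", window_size, window_size // 2 * (i % 2)

for cfg in swin_block_config(4, 8):
    print(cfg)
# ('windowed', 8, 0)
# ('windowed', 8, 4)
# ('windowed', 8, 0)
# ('windowed', 8, 4)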
 
trellis/models/structured_latent_vae/decoder_gs.py DELETED
@@ -1,122 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from ...modules import sparse as sp
6
- from ...utils.random_utils import hammersley_sequence
7
- from .base import SparseTransformerBase
8
- from ...representations import Gaussian
9
-
10
-
11
- class SLatGaussianDecoder(SparseTransformerBase):
12
- def __init__(
13
- self,
14
- resolution: int,
15
- model_channels: int,
16
- latent_channels: int,
17
- num_blocks: int,
18
- num_heads: Optional[int] = None,
19
- num_head_channels: Optional[int] = 64,
20
- mlp_ratio: float = 4,
21
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
- window_size: int = 8,
23
- pe_mode: Literal["ape", "rope"] = "ape",
24
- use_fp16: bool = False,
25
- use_checkpoint: bool = False,
26
- qk_rms_norm: bool = False,
27
- representation_config: dict = None,
28
- ):
29
- super().__init__(
30
- in_channels=latent_channels,
31
- model_channels=model_channels,
32
- num_blocks=num_blocks,
33
- num_heads=num_heads,
34
- num_head_channels=num_head_channels,
35
- mlp_ratio=mlp_ratio,
36
- attn_mode=attn_mode,
37
- window_size=window_size,
38
- pe_mode=pe_mode,
39
- use_fp16=use_fp16,
40
- use_checkpoint=use_checkpoint,
41
- qk_rms_norm=qk_rms_norm,
42
- )
43
- self.resolution = resolution
44
- self.rep_config = representation_config
45
- self._calc_layout()
46
- self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
- self._build_perturbation()
48
-
49
- self.initialize_weights()
50
- if use_fp16:
51
- self.convert_to_fp16()
52
-
53
- def initialize_weights(self) -> None:
54
- super().initialize_weights()
55
- # Zero-out output layers:
56
- nn.init.constant_(self.out_layer.weight, 0)
57
- nn.init.constant_(self.out_layer.bias, 0)
58
-
59
- def _build_perturbation(self) -> None:
60
- perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
61
- perturbation = torch.tensor(perturbation).float() * 2 - 1
62
- perturbation = perturbation / self.rep_config['voxel_size']
63
- perturbation = torch.atanh(perturbation).to(self.device)
64
- self.register_buffer('offset_perturbation', perturbation)
65
-
66
- def _calc_layout(self) -> None:
67
- self.layout = {
68
- '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
69
- '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
70
- '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
71
- '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
72
- '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
73
- }
74
- start = 0
75
- for k, v in self.layout.items():
76
- v['range'] = (start, start + v['size'])
77
- start += v['size']
78
- self.out_channels = start
79
-
80
- def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
81
- """
82
- Convert a batch of network outputs to 3D representations.
83
-
84
- Args:
85
- x: The [N x * x C] sparse tensor output by the network.
86
-
87
- Returns:
88
- list of representations
89
- """
90
- ret = []
91
- for i in range(x.shape[0]):
92
- representation = Gaussian(
93
- sh_degree=0,
94
- aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
95
- mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
96
- scaling_bias = self.rep_config['scaling_bias'],
97
- opacity_bias = self.rep_config['opacity_bias'],
98
- scaling_activation = self.rep_config['scaling_activation']
99
- )
100
- xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
101
- for k, v in self.layout.items():
102
- if k == '_xyz':
103
- offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
104
- offset = offset * self.rep_config['lr'][k]
105
- if self.rep_config['perturb_offset']:
106
- offset = offset + self.offset_perturbation
107
- offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
108
- _xyz = xyz.unsqueeze(1) + offset
109
- setattr(representation, k, _xyz.flatten(0, 1))
110
- else:
111
- feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
112
- feats = feats * self.rep_config['lr'][k]
113
- setattr(representation, k, feats)
114
- ret.append(representation)
115
- return ret
116
-
117
- def forward(self, x: sp.SparseTensor) -> List[Gaussian]:
118
- h = super().forward(x)
119
- h = h.type(x.dtype)
120
- h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
121
- h = self.out_layer(h)
122
- return self.to_representation(h)
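_calc_layout above turns a dict of per-Gaussian attribute shapes into contiguous channel ranges, so the single flat feature vector predicted per voxel can be sliced back into xyz offsets, colors, scales, rotations, and opacities. A small sketch of that bookkeeping with hypothetical sizes (4 Gaussians per voxel):

import math
import torch

layout = {
    '_xyz':      {'shape': (4, 3)},
    '_scaling':  {'shape': (4, 3)},
    '_rotation': {'shape': (4, 4)},
    '_opacity':  {'shape': (4, 1)},
}
start = 0
for v in layout.values():
    size = math.prod(v['shape'])
    v['range'] = (start, start + size)
    start += size
out_channels = start  # 12 + 12 + 16 + 4 = 44

feats = torch.randn(100, out_channels)   # one flat row per active voxel
lo, hi = layout['_rotation']['range']
rotation = feats[:, lo:hi].reshape(-1, *layout['_rotation']['shape'])
print(out_channels, rotation.shape)      # 44 torch.Size([100, 4, 4])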
 
trellis/models/structured_latent_vae/decoder_mesh.py DELETED
@@ -1,167 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- import numpy as np
6
- from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
- from ...modules import sparse as sp
8
- from .base import SparseTransformerBase
9
- from ...representations import MeshExtractResult
10
- from ...representations.mesh import SparseFeatures2Mesh
11
-
12
-
13
- class SparseSubdivideBlock3d(nn.Module):
14
- """
15
- A 3D subdivide block that can subdivide the sparse tensor.
16
-
17
- Args:
18
- channels: channels in the inputs and outputs.
19
- out_channels: if specified, the number of output channels.
20
- num_groups: the number of groups for the group norm.
21
- """
22
- def __init__(
23
- self,
24
- channels: int,
25
- resolution: int,
26
- out_channels: Optional[int] = None,
27
- num_groups: int = 32
28
- ):
29
- super().__init__()
30
- self.channels = channels
31
- self.resolution = resolution
32
- self.out_resolution = resolution * 2
33
- self.out_channels = out_channels or channels
34
-
35
- self.act_layers = nn.Sequential(
36
- sp.SparseGroupNorm32(num_groups, channels),
37
- sp.SparseSiLU()
38
- )
39
-
40
- self.sub = sp.SparseSubdivide()
41
-
42
- self.out_layers = nn.Sequential(
43
- sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
44
- sp.SparseGroupNorm32(num_groups, self.out_channels),
45
- sp.SparseSiLU(),
46
- zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
47
- )
48
-
49
- if self.out_channels == channels:
50
- self.skip_connection = nn.Identity()
51
- else:
52
- self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
53
-
54
- def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
55
- """
56
- Apply the block to a Tensor, conditioned on a timestep embedding.
57
-
58
- Args:
59
- x: an [N x C x ...] Tensor of features.
60
- Returns:
61
- an [N x C x ...] Tensor of outputs.
62
- """
63
- h = self.act_layers(x)
64
- h = self.sub(h)
65
- x = self.sub(x)
66
- h = self.out_layers(h)
67
- h = h + self.skip_connection(x)
68
- return h
69
-
70
-
71
- class SLatMeshDecoder(SparseTransformerBase):
72
- def __init__(
73
- self,
74
- resolution: int,
75
- model_channels: int,
76
- latent_channels: int,
77
- num_blocks: int,
78
- num_heads: Optional[int] = None,
79
- num_head_channels: Optional[int] = 64,
80
- mlp_ratio: float = 4,
81
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
82
- window_size: int = 8,
83
- pe_mode: Literal["ape", "rope"] = "ape",
84
- use_fp16: bool = False,
85
- use_checkpoint: bool = False,
86
- qk_rms_norm: bool = False,
87
- representation_config: dict = None,
88
- ):
89
- super().__init__(
90
- in_channels=latent_channels,
91
- model_channels=model_channels,
92
- num_blocks=num_blocks,
93
- num_heads=num_heads,
94
- num_head_channels=num_head_channels,
95
- mlp_ratio=mlp_ratio,
96
- attn_mode=attn_mode,
97
- window_size=window_size,
98
- pe_mode=pe_mode,
99
- use_fp16=use_fp16,
100
- use_checkpoint=use_checkpoint,
101
- qk_rms_norm=qk_rms_norm,
102
- )
103
- self.resolution = resolution
104
- self.rep_config = representation_config
105
- self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
106
- self.out_channels = self.mesh_extractor.feats_channels
107
- self.upsample = nn.ModuleList([
108
- SparseSubdivideBlock3d(
109
- channels=model_channels,
110
- resolution=resolution,
111
- out_channels=model_channels // 4
112
- ),
113
- SparseSubdivideBlock3d(
114
- channels=model_channels // 4,
115
- resolution=resolution * 2,
116
- out_channels=model_channels // 8
117
- )
118
- ])
119
- self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
120
-
121
- self.initialize_weights()
122
- if use_fp16:
123
- self.convert_to_fp16()
124
-
125
- def initialize_weights(self) -> None:
126
- super().initialize_weights()
127
- # Zero-out output layers:
128
- nn.init.constant_(self.out_layer.weight, 0)
129
- nn.init.constant_(self.out_layer.bias, 0)
130
-
131
- def convert_to_fp16(self) -> None:
132
- """
133
- Convert the torso of the model to float16.
134
- """
135
- super().convert_to_fp16()
136
- self.upsample.apply(convert_module_to_f16)
137
-
138
- def convert_to_fp32(self) -> None:
139
- """
140
- Convert the torso of the model to float32.
141
- """
142
- super().convert_to_fp32()
143
- self.upsample.apply(convert_module_to_f32)
144
-
145
- def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
146
- """
147
- Convert a batch of network outputs to 3D representations.
148
-
149
- Args:
150
- x: The [N x * x C] sparse tensor output by the network.
151
-
152
- Returns:
153
- list of representations
154
- """
155
- ret = []
156
- for i in range(x.shape[0]):
157
- mesh = self.mesh_extractor(x[i], training=self.training)
158
- ret.append(mesh)
159
- return ret
160
-
161
- def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
162
- h = super().forward(x)
163
- for block in self.upsample:
164
- h = block(h)
165
- h = h.type(x.dtype)
166
- h = self.out_layer(h)
167
- return self.to_representation(h)
 
trellis/models/structured_latent_vae/decoder_rf.py DELETED
@@ -1,104 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- import numpy as np
6
- from ...modules import sparse as sp
7
- from .base import SparseTransformerBase
8
- from ...representations import Strivec
9
-
10
-
11
- class SLatRadianceFieldDecoder(SparseTransformerBase):
12
- def __init__(
13
- self,
14
- resolution: int,
15
- model_channels: int,
16
- latent_channels: int,
17
- num_blocks: int,
18
- num_heads: Optional[int] = None,
19
- num_head_channels: Optional[int] = 64,
20
- mlp_ratio: float = 4,
21
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
- window_size: int = 8,
23
- pe_mode: Literal["ape", "rope"] = "ape",
24
- use_fp16: bool = False,
25
- use_checkpoint: bool = False,
26
- qk_rms_norm: bool = False,
27
- representation_config: dict = None,
28
- ):
29
- super().__init__(
30
- in_channels=latent_channels,
31
- model_channels=model_channels,
32
- num_blocks=num_blocks,
33
- num_heads=num_heads,
34
- num_head_channels=num_head_channels,
35
- mlp_ratio=mlp_ratio,
36
- attn_mode=attn_mode,
37
- window_size=window_size,
38
- pe_mode=pe_mode,
39
- use_fp16=use_fp16,
40
- use_checkpoint=use_checkpoint,
41
- qk_rms_norm=qk_rms_norm,
42
- )
43
- self.resolution = resolution
44
- self.rep_config = representation_config
45
- self._calc_layout()
46
- self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
-
48
- self.initialize_weights()
49
- if use_fp16:
50
- self.convert_to_fp16()
51
-
52
- def initialize_weights(self) -> None:
53
- super().initialize_weights()
54
- # Zero-out output layers:
55
- nn.init.constant_(self.out_layer.weight, 0)
56
- nn.init.constant_(self.out_layer.bias, 0)
57
-
58
- def _calc_layout(self) -> None:
59
- self.layout = {
60
- 'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']},
61
- 'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']},
62
- 'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3},
63
- }
64
- start = 0
65
- for k, v in self.layout.items():
66
- v['range'] = (start, start + v['size'])
67
- start += v['size']
68
- self.out_channels = start
69
-
70
- def to_representation(self, x: sp.SparseTensor) -> List[Strivec]:
71
- """
72
- Convert a batch of network outputs to 3D representations.
73
-
74
- Args:
75
- x: The [N x * x C] sparse tensor output by the network.
76
-
77
- Returns:
78
- list of representations
79
- """
80
- ret = []
81
- for i in range(x.shape[0]):
82
- representation = Strivec(
83
- sh_degree=0,
84
- resolution=self.resolution,
85
- aabb=[-0.5, -0.5, -0.5, 1, 1, 1],
86
- rank=self.rep_config['rank'],
87
- dim=self.rep_config['dim'],
88
- device='cuda',
89
- )
90
- representation.density_shift = 0.0
91
- representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
92
- representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda')
93
- for k, v in self.layout.items():
94
- setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']))
95
- representation.trivec = representation.trivec + 1
96
- ret.append(representation)
97
- return ret
98
-
99
- def forward(self, x: sp.SparseTensor) -> List[Strivec]:
100
- h = super().forward(x)
101
- h = h.type(x.dtype)
102
- h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
103
- h = self.out_layer(h)
104
- return self.to_representation(h)
 
trellis/models/structured_latent_vae/encoder.py DELETED
@@ -1,72 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from ...modules import sparse as sp
6
- from .base import SparseTransformerBase
7
-
8
-
9
- class SLatEncoder(SparseTransformerBase):
10
- def __init__(
11
- self,
12
- resolution: int,
13
- in_channels: int,
14
- model_channels: int,
15
- latent_channels: int,
16
- num_blocks: int,
17
- num_heads: Optional[int] = None,
18
- num_head_channels: Optional[int] = 64,
19
- mlp_ratio: float = 4,
20
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
21
- window_size: int = 8,
22
- pe_mode: Literal["ape", "rope"] = "ape",
23
- use_fp16: bool = False,
24
- use_checkpoint: bool = False,
25
- qk_rms_norm: bool = False,
26
- ):
27
- super().__init__(
28
- in_channels=in_channels,
29
- model_channels=model_channels,
30
- num_blocks=num_blocks,
31
- num_heads=num_heads,
32
- num_head_channels=num_head_channels,
33
- mlp_ratio=mlp_ratio,
34
- attn_mode=attn_mode,
35
- window_size=window_size,
36
- pe_mode=pe_mode,
37
- use_fp16=use_fp16,
38
- use_checkpoint=use_checkpoint,
39
- qk_rms_norm=qk_rms_norm,
40
- )
41
- self.resolution = resolution
42
- self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
43
-
44
- self.initialize_weights()
45
- if use_fp16:
46
- self.convert_to_fp16()
47
-
48
- def initialize_weights(self) -> None:
49
- super().initialize_weights()
50
- # Zero-out output layers:
51
- nn.init.constant_(self.out_layer.weight, 0)
52
- nn.init.constant_(self.out_layer.bias, 0)
53
-
54
- def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
55
- h = super().forward(x)
56
- h = h.type(x.dtype)
57
- h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
58
- h = self.out_layer(h)
59
-
60
- # Sample from the posterior distribution
61
- mean, logvar = h.feats.chunk(2, dim=-1)
62
- if sample_posterior:
63
- std = torch.exp(0.5 * logvar)
64
- z = mean + std * torch.randn_like(std)
65
- else:
66
- z = mean
67
- z = h.replace(z)
68
-
69
- if return_raw:
70
- return z, mean, logvar
71
- else:
72
- return z
 
trellis/modules/attention/__init__.py DELETED
@@ -1,36 +0,0 @@
1
- from typing import *
2
-
3
- BACKEND = 'flash_attn'
4
- DEBUG = False
5
-
6
- def __from_env():
7
- import os
8
-
9
- global BACKEND
10
- global DEBUG
11
-
12
- env_attn_backend = os.environ.get('ATTN_BACKEND')
13
- env_sttn_debug = os.environ.get('ATTN_DEBUG')
14
-
15
- if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
16
- BACKEND = env_attn_backend
17
- if env_sttn_debug is not None:
18
- DEBUG = env_sttn_debug == '1'
19
-
20
- print(f"[ATTENTION] Using backend: {BACKEND}")
21
-
22
-
23
- __from_env()
24
-
25
-
26
- def set_backend(backend: Literal['xformers', 'flash_attn']):
27
- global BACKEND
28
- BACKEND = backend
29
-
30
- def set_debug(debug: bool):
31
- global DEBUG
32
- DEBUG = debug
33
-
34
-
35
- from .full_attn import *
36
- from .modules import *
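The deleted attention package picks its backend once, at import time, from the ATTN_BACKEND environment variable. A hedged usage sketch of how that selection would be driven (environment values shown are just the options listed in the file above):

import os

# The backend is read at import time, so set the variable before importing trellis.
os.environ['ATTN_BACKEND'] = 'sdpa'   # one of: 'xformers', 'flash_attn', 'sdpa', 'naive'
os.environ['ATTN_DEBUG'] = '0'

from trellis.modules import attention
print(attention.BACKEND)              # 'sdpa'

# set_backend() only rebinds the package-level flag; submodules such as full_attn
# bound BACKEND at their own import and keep whatever value was active then.
attention.set_backend('naive')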
 
trellis/modules/attention/full_attn.py DELETED
@@ -1,140 +0,0 @@
1
- from typing import *
2
- import torch
3
- import math
4
- from . import DEBUG, BACKEND
5
-
6
- if BACKEND == 'xformers':
7
- import xformers.ops as xops
8
- elif BACKEND == 'flash_attn':
9
- import flash_attn
10
- elif BACKEND == 'sdpa':
11
- from torch.nn.functional import scaled_dot_product_attention as sdpa
12
- elif BACKEND == 'naive':
13
- pass
14
- else:
15
- raise ValueError(f"Unknown attention backend: {BACKEND}")
16
-
17
-
18
- __all__ = [
19
- 'scaled_dot_product_attention',
20
- ]
21
-
22
-
23
- def _naive_sdpa(q, k, v):
24
- """
25
- Naive implementation of scaled dot product attention.
26
- """
27
- q = q.permute(0, 2, 1, 3) # [N, H, L, C]
28
- k = k.permute(0, 2, 1, 3) # [N, H, L, C]
29
- v = v.permute(0, 2, 1, 3) # [N, H, L, C]
30
- scale_factor = 1 / math.sqrt(q.size(-1))
31
- attn_weight = q @ k.transpose(-2, -1) * scale_factor
32
- attn_weight = torch.softmax(attn_weight, dim=-1)
33
- out = attn_weight @ v
34
- out = out.permute(0, 2, 1, 3) # [N, L, H, C]
35
- return out
36
-
37
-
38
- @overload
39
- def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
40
- """
41
- Apply scaled dot product attention.
42
-
43
- Args:
44
- qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
45
- """
46
- ...
47
-
48
- @overload
49
- def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
50
- """
51
- Apply scaled dot product attention.
52
-
53
- Args:
54
- q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
55
- kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
56
- """
57
- ...
58
-
59
- @overload
60
- def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
61
- """
62
- Apply scaled dot product attention.
63
-
64
- Args:
65
- q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
66
- k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
67
- v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
68
-
69
- Note:
70
- k and v are assumed to have the same coordinate map.
71
- """
72
- ...
73
-
74
- def scaled_dot_product_attention(*args, **kwargs):
75
- arg_names_dict = {
76
- 1: ['qkv'],
77
- 2: ['q', 'kv'],
78
- 3: ['q', 'k', 'v']
79
- }
80
- num_all_args = len(args) + len(kwargs)
81
- assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
82
- for key in arg_names_dict[num_all_args][len(args):]:
83
- assert key in kwargs, f"Missing argument {key}"
84
-
85
- if num_all_args == 1:
86
- qkv = args[0] if len(args) > 0 else kwargs['qkv']
87
- assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
88
- device = qkv.device
89
-
90
- elif num_all_args == 2:
91
- q = args[0] if len(args) > 0 else kwargs['q']
92
- kv = args[1] if len(args) > 1 else kwargs['kv']
93
- assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
94
- assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
95
- assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
96
- device = q.device
97
-
98
- elif num_all_args == 3:
99
- q = args[0] if len(args) > 0 else kwargs['q']
100
- k = args[1] if len(args) > 1 else kwargs['k']
101
- v = args[2] if len(args) > 2 else kwargs['v']
102
- assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
103
- assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
104
- assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
105
- assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
106
- device = q.device
107
-
108
- if BACKEND == 'xformers':
109
- if num_all_args == 1:
110
- q, k, v = qkv.unbind(dim=2)
111
- elif num_all_args == 2:
112
- k, v = kv.unbind(dim=2)
113
- out = xops.memory_efficient_attention(q, k, v)
114
- elif BACKEND == 'flash_attn':
115
- if num_all_args == 1:
116
- out = flash_attn.flash_attn_qkvpacked_func(qkv)
117
- elif num_all_args == 2:
118
- out = flash_attn.flash_attn_kvpacked_func(q, kv)
119
- elif num_all_args == 3:
120
- out = flash_attn.flash_attn_func(q, k, v)
121
- elif BACKEND == 'sdpa':
122
- if num_all_args == 1:
123
- q, k, v = qkv.unbind(dim=2)
124
- elif num_all_args == 2:
125
- k, v = kv.unbind(dim=2)
126
- q = q.permute(0, 2, 1, 3) # [N, H, L, C]
127
- k = k.permute(0, 2, 1, 3) # [N, H, L, C]
128
- v = v.permute(0, 2, 1, 3) # [N, H, L, C]
129
- out = sdpa(q, k, v) # [N, H, L, C]
130
- out = out.permute(0, 2, 1, 3) # [N, L, H, C]
131
- elif BACKEND == 'naive':
132
- if num_all_args == 1:
133
- q, k, v = qkv.unbind(dim=2)
134
- elif num_all_args == 2:
135
- k, v = kv.unbind(dim=2)
136
- out = _naive_sdpa(q, k, v)
137
- else:
138
- raise ValueError(f"Unknown attention module: {BACKEND}")
139
-
140
- return out
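
The dispatcher deleted above accepts three calling conventions, documented in its overloads: a packed [N, L, 3, H, C] qkv tensor, a query plus a packed [N, L, 2, H, C] kv tensor, or separate q/k/v tensors. A sketch of the three call shapes with random values, assuming an older checkout and a backend such as sdpa selected as in the sketch after __init__.py above:

import torch
from trellis.modules.attention import scaled_dot_product_attention

N, L, H, C = 2, 16, 4, 32
qkv = torch.randn(N, L, 3, H, C)
out = scaled_dot_product_attention(qkv)        # packed self-attention

q  = torch.randn(N, L, H, C)
kv = torch.randn(N, L, 2, H, C)
out = scaled_dot_product_attention(q, kv)      # query + packed key/value

k = torch.randn(N, L, H, C)
v = torch.randn(N, L, H, C)
out = scaled_dot_product_attention(q, k, v)    # fully unpacked; out is [N, L, H, C]
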
trellis/modules/attention/modules.py DELETED
@@ -1,146 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from .full_attn import scaled_dot_product_attention
6
-
7
-
8
- class MultiHeadRMSNorm(nn.Module):
9
- def __init__(self, dim: int, heads: int):
10
- super().__init__()
11
- self.scale = dim ** 0.5
12
- self.gamma = nn.Parameter(torch.ones(heads, dim))
13
-
14
- def forward(self, x: torch.Tensor) -> torch.Tensor:
15
- return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
16
-
17
-
18
- class RotaryPositionEmbedder(nn.Module):
19
- def __init__(self, hidden_size: int, in_channels: int = 3):
20
- super().__init__()
21
- assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
22
- self.hidden_size = hidden_size
23
- self.in_channels = in_channels
24
- self.freq_dim = hidden_size // in_channels // 2
25
- self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
26
- self.freqs = 1.0 / (10000 ** self.freqs)
27
-
28
- def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
29
- self.freqs = self.freqs.to(indices.device)
30
- phases = torch.outer(indices, self.freqs)
31
- phases = torch.polar(torch.ones_like(phases), phases)
32
- return phases
33
-
34
- def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
35
- x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
36
- x_rotated = x_complex * phases
37
- x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
38
- return x_embed
39
-
40
- def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
41
- """
42
- Args:
43
- q (sp.SparseTensor): [..., N, D] tensor of queries
44
- k (sp.SparseTensor): [..., N, D] tensor of keys
45
- indices (torch.Tensor): [..., N, C] tensor of spatial positions
46
- """
47
- if indices is None:
48
- indices = torch.arange(q.shape[-2], device=q.device)
49
- if len(q.shape) > 2:
50
- indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
51
-
52
- phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
53
- if phases.shape[1] < self.hidden_size // 2:
54
- phases = torch.cat([phases, torch.polar(
55
- torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
56
- torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
57
- )], dim=-1)
58
- q_embed = self._rotary_embedding(q, phases)
59
- k_embed = self._rotary_embedding(k, phases)
60
- return q_embed, k_embed
61
-
62
-
63
- class MultiHeadAttention(nn.Module):
64
- def __init__(
65
- self,
66
- channels: int,
67
- num_heads: int,
68
- ctx_channels: Optional[int]=None,
69
- type: Literal["self", "cross"] = "self",
70
- attn_mode: Literal["full", "windowed"] = "full",
71
- window_size: Optional[int] = None,
72
- shift_window: Optional[Tuple[int, int, int]] = None,
73
- qkv_bias: bool = True,
74
- use_rope: bool = False,
75
- qk_rms_norm: bool = False,
76
- ):
77
- super().__init__()
78
- assert channels % num_heads == 0
79
- assert type in ["self", "cross"], f"Invalid attention type: {type}"
80
- assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
81
- assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
82
-
83
- if attn_mode == "windowed":
84
- raise NotImplementedError("Windowed attention is not yet implemented")
85
-
86
- self.channels = channels
87
- self.head_dim = channels // num_heads
88
- self.ctx_channels = ctx_channels if ctx_channels is not None else channels
89
- self.num_heads = num_heads
90
- self._type = type
91
- self.attn_mode = attn_mode
92
- self.window_size = window_size
93
- self.shift_window = shift_window
94
- self.use_rope = use_rope
95
- self.qk_rms_norm = qk_rms_norm
96
-
97
- if self._type == "self":
98
- self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
99
- else:
100
- self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
101
- self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
102
-
103
- if self.qk_rms_norm:
104
- self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
105
- self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
106
-
107
- self.to_out = nn.Linear(channels, channels)
108
-
109
- if use_rope:
110
- self.rope = RotaryPositionEmbedder(channels)
111
-
112
- def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
113
- B, L, C = x.shape
114
- if self._type == "self":
115
- qkv = self.to_qkv(x)
116
- qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
117
- if self.use_rope:
118
- q, k, v = qkv.unbind(dim=2)
119
- q, k = self.rope(q, k, indices)
120
- qkv = torch.stack([q, k, v], dim=2)
121
- if self.attn_mode == "full":
122
- if self.qk_rms_norm:
123
- q, k, v = qkv.unbind(dim=2)
124
- q = self.q_rms_norm(q)
125
- k = self.k_rms_norm(k)
126
- h = scaled_dot_product_attention(q, k, v)
127
- else:
128
- h = scaled_dot_product_attention(qkv)
129
- elif self.attn_mode == "windowed":
130
- raise NotImplementedError("Windowed attention is not yet implemented")
131
- else:
132
- Lkv = context.shape[1]
133
- q = self.to_q(x)
134
- kv = self.to_kv(context)
135
- q = q.reshape(B, L, self.num_heads, -1)
136
- kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
137
- if self.qk_rms_norm:
138
- q = self.q_rms_norm(q)
139
- k, v = kv.unbind(dim=2)
140
- k = self.k_rms_norm(k)
141
- h = scaled_dot_product_attention(q, k, v)
142
- else:
143
- h = scaled_dot_product_attention(q, kv)
144
- h = h.reshape(B, L, -1)
145
- h = self.to_out(h)
146
- return h
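
MultiHeadAttention, deleted above, wraps that dispatcher with fused qkv/kv projections, optional rotary embeddings, and optional QK RMS normalization; cross-attention is restricted to full attention. A usage sketch, again assuming an older checkout and a backend that accepts these dtypes:

import torch
from trellis.modules.attention import MultiHeadAttention

self_attn = MultiHeadAttention(channels=256, num_heads=8, qk_rms_norm=True)
x = torch.randn(2, 64, 256)                    # [B, L, C]
y = self_attn(x)                               # [B, L, C]

cross_attn = MultiHeadAttention(channels=256, num_heads=8,
                                ctx_channels=512, type="cross")
ctx = torch.randn(2, 77, 512)                  # [B, L_ctx, C_ctx]
y = cross_attn(x, context=ctx)                 # [B, L, C]
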
trellis/modules/norm.py DELETED
@@ -1,25 +0,0 @@
- import torch
- import torch.nn as nn
-
-
- class LayerNorm32(nn.LayerNorm):
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         return super().forward(x.float()).type(x.dtype)
-
-
- class GroupNorm32(nn.GroupNorm):
-     """
-     A GroupNorm layer that converts to float32 before the forward pass.
-     """
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         return super().forward(x.float()).type(x.dtype)
-
-
- class ChannelLayerNorm32(LayerNorm32):
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         DIM = x.dim()
-         x = x.permute(0, *range(2, DIM), 1).contiguous()
-         x = super().forward(x)
-         x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
-         return x
-
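
ChannelLayerNorm32, deleted above, normalizes a channels-first tensor over its channel dimension by permuting channels last, running LayerNorm in float32, and permuting back. The same permutation pattern spelled out on a dense feature volume (functional layer_norm stands in for the learnable module):

import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 8, 8, 8, dtype=torch.float16)  # [N, C, D, H, W]
y = x.permute(0, 2, 3, 4, 1).float()                   # channels last, float32
y = F.layer_norm(y, (16,))                             # normalize over C
y = y.permute(0, 4, 1, 2, 3).to(x.dtype)               # back to [N, C, D, H, W]
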
trellis/modules/sparse/__init__.py DELETED
@@ -1,102 +0,0 @@
- from typing import *
-
- BACKEND = 'spconv'
- DEBUG = False
- ATTN = 'flash_attn'
-
- def __from_env():
-     import os
-
-     global BACKEND
-     global DEBUG
-     global ATTN
-
-     env_sparse_backend = os.environ.get('SPARSE_BACKEND')
-     env_sparse_debug = os.environ.get('SPARSE_DEBUG')
-     env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
-     if env_sparse_attn is None:
-         env_sparse_attn = os.environ.get('ATTN_BACKEND')
-
-     if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
-         BACKEND = env_sparse_backend
-     if env_sparse_debug is not None:
-         DEBUG = env_sparse_debug == '1'
-     if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
-         ATTN = env_sparse_attn
-
-     print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
-
-
- __from_env()
-
-
- def set_backend(backend: Literal['spconv', 'torchsparse']):
-     global BACKEND
-     BACKEND = backend
-
- def set_debug(debug: bool):
-     global DEBUG
-     DEBUG = debug
-
- def set_attn(attn: Literal['xformers', 'flash_attn']):
-     global ATTN
-     ATTN = attn
-
-
- import importlib
-
- __attributes = {
-     'SparseTensor': 'basic',
-     'sparse_batch_broadcast': 'basic',
-     'sparse_batch_op': 'basic',
-     'sparse_cat': 'basic',
-     'sparse_unbind': 'basic',
-     'SparseGroupNorm': 'norm',
-     'SparseLayerNorm': 'norm',
-     'SparseGroupNorm32': 'norm',
-     'SparseLayerNorm32': 'norm',
-     'SparseReLU': 'nonlinearity',
-     'SparseSiLU': 'nonlinearity',
-     'SparseGELU': 'nonlinearity',
-     'SparseActivation': 'nonlinearity',
-     'SparseLinear': 'linear',
-     'sparse_scaled_dot_product_attention': 'attention',
-     'SerializeMode': 'attention',
-     'sparse_serialized_scaled_dot_product_self_attention': 'attention',
-     'sparse_windowed_scaled_dot_product_self_attention': 'attention',
-     'SparseMultiHeadAttention': 'attention',
-     'SparseConv3d': 'conv',
-     'SparseInverseConv3d': 'conv',
-     'SparseDownsample': 'spatial',
-     'SparseUpsample': 'spatial',
-     'SparseSubdivide' : 'spatial'
- }
-
- __submodules = ['transformer']
-
- __all__ = list(__attributes.keys()) + __submodules
-
- def __getattr__(name):
-     if name not in globals():
-         if name in __attributes:
-             module_name = __attributes[name]
-             module = importlib.import_module(f".{module_name}", __name__)
-             globals()[name] = getattr(module, name)
-         elif name in __submodules:
-             module = importlib.import_module(f".{name}", __name__)
-             globals()[name] = module
-         else:
-             raise AttributeError(f"module {__name__} has no attribute {name}")
-     return globals()[name]
-
-
- # For Pylance
- if __name__ == '__main__':
-     from .basic import *
-     from .norm import *
-     from .nonlinearity import *
-     from .linear import *
-     from .attention import *
-     from .conv import *
-     from .spatial import *
-     import transformer
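
Like the dense attention package, the deleted sparse package fixes its convolution backend (spconv or torchsparse) and its attention backend at import time from environment variables, and loads submodules lazily through __getattr__. A configuration sketch, assuming an older checkout with one of the backends installed:

import os
os.environ['SPARSE_BACKEND'] = 'spconv'            # or 'torchsparse'
os.environ['SPARSE_ATTN_BACKEND'] = 'flash_attn'   # or 'xformers'

from trellis.modules import sparse as sp  # prints "[SPARSE] Backend: spconv, Attention: flash_attn"
linear = sp.SparseLinear(64, 128)         # first attribute access lazily imports .linear
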
trellis/modules/sparse/attention/__init__.py DELETED
@@ -1,4 +0,0 @@
- from .full_attn import *
- from .serialized_attn import *
- from .windowed_attn import *
- from .modules import *
trellis/modules/sparse/attention/full_attn.py DELETED
@@ -1,215 +0,0 @@
1
- from typing import *
2
- import torch
3
- from .. import SparseTensor
4
- from .. import DEBUG, ATTN
5
-
6
- if ATTN == 'xformers':
7
- import xformers.ops as xops
8
- elif ATTN == 'flash_attn':
9
- import flash_attn
10
- else:
11
- raise ValueError(f"Unknown attention module: {ATTN}")
12
-
13
-
14
- __all__ = [
15
- 'sparse_scaled_dot_product_attention',
16
- ]
17
-
18
-
19
- @overload
20
- def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
21
- """
22
- Apply scaled dot product attention to a sparse tensor.
23
-
24
- Args:
25
- qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
26
- """
27
- ...
28
-
29
- @overload
30
- def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
31
- """
32
- Apply scaled dot product attention to a sparse tensor.
33
-
34
- Args:
35
- q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
36
- kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
37
- """
38
- ...
39
-
40
- @overload
41
- def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
42
- """
43
- Apply scaled dot product attention to a sparse tensor.
44
-
45
- Args:
46
- q (SparseTensor): A [N, L, H, C] dense tensor containing Qs.
47
- kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
48
- """
49
- ...
50
-
51
- @overload
52
- def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
53
- """
54
- Apply scaled dot product attention to a sparse tensor.
55
-
56
- Args:
57
- q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
58
- k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
59
- v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
60
-
61
- Note:
62
- k and v are assumed to have the same coordinate map.
63
- """
64
- ...
65
-
66
- @overload
67
- def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
68
- """
69
- Apply scaled dot product attention to a sparse tensor.
70
-
71
- Args:
72
- q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
73
- k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
74
- v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
75
- """
76
- ...
77
-
78
- @overload
79
- def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
80
- """
81
- Apply scaled dot product attention to a sparse tensor.
82
-
83
- Args:
84
- q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
85
- k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
86
- v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
87
- """
88
- ...
89
-
90
- def sparse_scaled_dot_product_attention(*args, **kwargs):
91
- arg_names_dict = {
92
- 1: ['qkv'],
93
- 2: ['q', 'kv'],
94
- 3: ['q', 'k', 'v']
95
- }
96
- num_all_args = len(args) + len(kwargs)
97
- assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
98
- for key in arg_names_dict[num_all_args][len(args):]:
99
- assert key in kwargs, f"Missing argument {key}"
100
-
101
- if num_all_args == 1:
102
- qkv = args[0] if len(args) > 0 else kwargs['qkv']
103
- assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
104
- assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
105
- device = qkv.device
106
-
107
- s = qkv
108
- q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
109
- kv_seqlen = q_seqlen
110
- qkv = qkv.feats # [T, 3, H, C]
111
-
112
- elif num_all_args == 2:
113
- q = args[0] if len(args) > 0 else kwargs['q']
114
- kv = args[1] if len(args) > 1 else kwargs['kv']
115
- assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
116
- isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
117
- f"Invalid types, got {type(q)} and {type(kv)}"
118
- assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
119
- device = q.device
120
-
121
- if isinstance(q, SparseTensor):
122
- assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
123
- s = q
124
- q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
125
- q = q.feats # [T_Q, H, C]
126
- else:
127
- assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
128
- s = None
129
- N, L, H, C = q.shape
130
- q_seqlen = [L] * N
131
- q = q.reshape(N * L, H, C) # [T_Q, H, C]
132
-
133
- if isinstance(kv, SparseTensor):
134
- assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
135
- kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
136
- kv = kv.feats # [T_KV, 2, H, C]
137
- else:
138
- assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
139
- N, L, _, H, C = kv.shape
140
- kv_seqlen = [L] * N
141
- kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
142
-
143
- elif num_all_args == 3:
144
- q = args[0] if len(args) > 0 else kwargs['q']
145
- k = args[1] if len(args) > 1 else kwargs['k']
146
- v = args[2] if len(args) > 2 else kwargs['v']
147
- assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
148
- isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
149
- f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
150
- assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
151
- device = q.device
152
-
153
- if isinstance(q, SparseTensor):
154
- assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
155
- s = q
156
- q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
157
- q = q.feats # [T_Q, H, Ci]
158
- else:
159
- assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
160
- s = None
161
- N, L, H, CI = q.shape
162
- q_seqlen = [L] * N
163
- q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
164
-
165
- if isinstance(k, SparseTensor):
166
- assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
167
- assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
168
- kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
169
- k = k.feats # [T_KV, H, Ci]
170
- v = v.feats # [T_KV, H, Co]
171
- else:
172
- assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
173
- assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
174
- N, L, H, CI, CO = *k.shape, v.shape[-1]
175
- kv_seqlen = [L] * N
176
- k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
177
- v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
178
-
179
- if DEBUG:
180
- if s is not None:
181
- for i in range(s.shape[0]):
182
- assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
183
- if num_all_args in [2, 3]:
184
- assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch"
185
- if num_all_args == 3:
186
- assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch"
187
- assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch"
188
-
189
- if ATTN == 'xformers':
190
- if num_all_args == 1:
191
- q, k, v = qkv.unbind(dim=1)
192
- elif num_all_args == 2:
193
- k, v = kv.unbind(dim=1)
194
- q = q.unsqueeze(0)
195
- k = k.unsqueeze(0)
196
- v = v.unsqueeze(0)
197
- mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
198
- out = xops.memory_efficient_attention(q, k, v, mask)[0]
199
- elif ATTN == 'flash_attn':
200
- cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
201
- if num_all_args in [2, 3]:
202
- cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
203
- if num_all_args == 1:
204
- out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
205
- elif num_all_args == 2:
206
- out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
207
- elif num_all_args == 3:
208
- out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
209
- else:
210
- raise ValueError(f"Unknown attention module: {ATTN}")
211
-
212
- if s is not None:
213
- return s.replace(out)
214
- else:
215
- return out.reshape(N, L, H, -1)
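
On the flash_attn path, the dispatcher deleted above flattens every batch into a single token dimension and describes the ragged batch with cumulative sequence lengths derived from the per-batch layout slices. The bookkeeping step in isolation:

import torch

q_seqlen = [5, 3, 7]                       # tokens per batch element (from .layout)
cu_seqlens_q = torch.cat([torch.tensor([0]),
                          torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int()
# tensor([ 0,  5,  8, 15], dtype=torch.int32): start/end offsets of each sequence
max_seqlen_q = max(q_seqlen)               # 7, passed alongside cu_seqlens_q
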
trellis/modules/sparse/attention/modules.py DELETED
@@ -1,139 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from .. import SparseTensor
6
- from .full_attn import sparse_scaled_dot_product_attention
7
- from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
8
- from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
9
- from ...attention import RotaryPositionEmbedder
10
-
11
-
12
- class SparseMultiHeadRMSNorm(nn.Module):
13
- def __init__(self, dim: int, heads: int):
14
- super().__init__()
15
- self.scale = dim ** 0.5
16
- self.gamma = nn.Parameter(torch.ones(heads, dim))
17
-
18
- def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
19
- x_type = x.dtype
20
- x = x.float()
21
- if isinstance(x, SparseTensor):
22
- x = x.replace(F.normalize(x.feats, dim=-1))
23
- else:
24
- x = F.normalize(x, dim=-1)
25
- return (x * self.gamma * self.scale).to(x_type)
26
-
27
-
28
- class SparseMultiHeadAttention(nn.Module):
29
- def __init__(
30
- self,
31
- channels: int,
32
- num_heads: int,
33
- ctx_channels: Optional[int] = None,
34
- type: Literal["self", "cross"] = "self",
35
- attn_mode: Literal["full", "serialized", "windowed"] = "full",
36
- window_size: Optional[int] = None,
37
- shift_sequence: Optional[int] = None,
38
- shift_window: Optional[Tuple[int, int, int]] = None,
39
- serialize_mode: Optional[SerializeMode] = None,
40
- qkv_bias: bool = True,
41
- use_rope: bool = False,
42
- qk_rms_norm: bool = False,
43
- ):
44
- super().__init__()
45
- assert channels % num_heads == 0
46
- assert type in ["self", "cross"], f"Invalid attention type: {type}"
47
- assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
48
- assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
49
- assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
50
- self.channels = channels
51
- self.ctx_channels = ctx_channels if ctx_channels is not None else channels
52
- self.num_heads = num_heads
53
- self._type = type
54
- self.attn_mode = attn_mode
55
- self.window_size = window_size
56
- self.shift_sequence = shift_sequence
57
- self.shift_window = shift_window
58
- self.serialize_mode = serialize_mode
59
- self.use_rope = use_rope
60
- self.qk_rms_norm = qk_rms_norm
61
-
62
- if self._type == "self":
63
- self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
64
- else:
65
- self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
66
- self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
67
-
68
- if self.qk_rms_norm:
69
- self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
70
- self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
71
-
72
- self.to_out = nn.Linear(channels, channels)
73
-
74
- if use_rope:
75
- self.rope = RotaryPositionEmbedder(channels)
76
-
77
- @staticmethod
78
- def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
79
- if isinstance(x, SparseTensor):
80
- return x.replace(module(x.feats))
81
- else:
82
- return module(x)
83
-
84
- @staticmethod
85
- def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
86
- if isinstance(x, SparseTensor):
87
- return x.reshape(*shape)
88
- else:
89
- return x.reshape(*x.shape[:2], *shape)
90
-
91
- def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
92
- if isinstance(x, SparseTensor):
93
- x_feats = x.feats.unsqueeze(0)
94
- else:
95
- x_feats = x
96
- x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
97
- return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
98
-
99
- def _rope(self, qkv: SparseTensor) -> SparseTensor:
100
- q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
101
- q, k = self.rope(q, k, qkv.coords[:, 1:])
102
- qkv = qkv.replace(torch.stack([q, k, v], dim=1))
103
- return qkv
104
-
105
- def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
106
- if self._type == "self":
107
- qkv = self._linear(self.to_qkv, x)
108
- qkv = self._fused_pre(qkv, num_fused=3)
109
- if self.use_rope:
110
- qkv = self._rope(qkv)
111
- if self.qk_rms_norm:
112
- q, k, v = qkv.unbind(dim=1)
113
- q = self.q_rms_norm(q)
114
- k = self.k_rms_norm(k)
115
- qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
116
- if self.attn_mode == "full":
117
- h = sparse_scaled_dot_product_attention(qkv)
118
- elif self.attn_mode == "serialized":
119
- h = sparse_serialized_scaled_dot_product_self_attention(
120
- qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
121
- )
122
- elif self.attn_mode == "windowed":
123
- h = sparse_windowed_scaled_dot_product_self_attention(
124
- qkv, self.window_size, shift_window=self.shift_window
125
- )
126
- else:
127
- q = self._linear(self.to_q, x)
128
- q = self._reshape_chs(q, (self.num_heads, -1))
129
- kv = self._linear(self.to_kv, context)
130
- kv = self._fused_pre(kv, num_fused=2)
131
- if self.qk_rms_norm:
132
- q = self.q_rms_norm(q)
133
- k, v = kv.unbind(dim=1)
134
- k = self.k_rms_norm(k)
135
- kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
136
- h = sparse_scaled_dot_product_attention(q, kv)
137
- h = self._reshape_chs(h, (-1,))
138
- h = self._linear(self.to_out, h)
139
- return h
trellis/modules/sparse/attention/serialized_attn.py DELETED
@@ -1,193 +0,0 @@
1
- from typing import *
2
- from enum import Enum
3
- import torch
4
- import math
5
- from .. import SparseTensor
6
- from .. import DEBUG, ATTN
7
-
8
- if ATTN == 'xformers':
9
- import xformers.ops as xops
10
- elif ATTN == 'flash_attn':
11
- import flash_attn
12
- else:
13
- raise ValueError(f"Unknown attention module: {ATTN}")
14
-
15
-
16
- __all__ = [
17
- 'sparse_serialized_scaled_dot_product_self_attention',
18
- ]
19
-
20
-
21
- class SerializeMode(Enum):
22
- Z_ORDER = 0
23
- Z_ORDER_TRANSPOSED = 1
24
- HILBERT = 2
25
- HILBERT_TRANSPOSED = 3
26
-
27
-
28
- SerializeModes = [
29
- SerializeMode.Z_ORDER,
30
- SerializeMode.Z_ORDER_TRANSPOSED,
31
- SerializeMode.HILBERT,
32
- SerializeMode.HILBERT_TRANSPOSED
33
- ]
34
-
35
-
36
- def calc_serialization(
37
- tensor: SparseTensor,
38
- window_size: int,
39
- serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
40
- shift_sequence: int = 0,
41
- shift_window: Tuple[int, int, int] = (0, 0, 0)
42
- ) -> Tuple[torch.Tensor, torch.Tensor, List[int]]:
43
- """
44
- Calculate serialization and partitioning for a set of coordinates.
45
-
46
- Args:
47
- tensor (SparseTensor): The input tensor.
48
- window_size (int): The window size to use.
49
- serialize_mode (SerializeMode): The serialization mode to use.
50
- shift_sequence (int): The shift of serialized sequence.
51
- shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
52
-
53
- Returns:
54
- (torch.Tensor, torch.Tensor): Forwards and backwards indices.
55
- """
56
- fwd_indices = []
57
- bwd_indices = []
58
- seq_lens = []
59
- seq_batch_indices = []
60
- offsets = [0]
61
-
62
- if 'vox2seq' not in globals():
63
- import vox2seq
64
-
65
- # Serialize the input
66
- serialize_coords = tensor.coords[:, 1:].clone()
67
- serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
68
- if serialize_mode == SerializeMode.Z_ORDER:
69
- code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
70
- elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
71
- code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
72
- elif serialize_mode == SerializeMode.HILBERT:
73
- code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
74
- elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
75
- code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
76
- else:
77
- raise ValueError(f"Unknown serialize mode: {serialize_mode}")
78
-
79
- for bi, s in enumerate(tensor.layout):
80
- num_points = s.stop - s.start
81
- num_windows = (num_points + window_size - 1) // window_size
82
- valid_window_size = num_points / num_windows
83
- to_ordered = torch.argsort(code[s.start:s.stop])
84
- if num_windows == 1:
85
- fwd_indices.append(to_ordered)
86
- bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
87
- fwd_indices[-1] += s.start
88
- bwd_indices[-1] += offsets[-1]
89
- seq_lens.append(num_points)
90
- seq_batch_indices.append(bi)
91
- offsets.append(offsets[-1] + seq_lens[-1])
92
- else:
93
- # Partition the input
94
- offset = 0
95
- mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
96
- split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
97
- bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
98
- for i in range(num_windows):
99
- mid = mids[i]
100
- valid_start = split[i]
101
- valid_end = split[i + 1]
102
- padded_start = math.floor(mid - 0.5 * window_size)
103
- padded_end = padded_start + window_size
104
- fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
105
- offset += valid_start - padded_start
106
- bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
107
- offset += padded_end - valid_start
108
- fwd_indices[-1] += s.start
109
- seq_lens.extend([window_size] * num_windows)
110
- seq_batch_indices.extend([bi] * num_windows)
111
- bwd_indices.append(bwd_index + offsets[-1])
112
- offsets.append(offsets[-1] + num_windows * window_size)
113
-
114
- fwd_indices = torch.cat(fwd_indices)
115
- bwd_indices = torch.cat(bwd_indices)
116
-
117
- return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
118
-
119
-
120
- def sparse_serialized_scaled_dot_product_self_attention(
121
- qkv: SparseTensor,
122
- window_size: int,
123
- serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
124
- shift_sequence: int = 0,
125
- shift_window: Tuple[int, int, int] = (0, 0, 0)
126
- ) -> SparseTensor:
127
- """
128
- Apply serialized scaled dot product self attention to a sparse tensor.
129
-
130
- Args:
131
- qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
132
- window_size (int): The window size to use.
133
- serialize_mode (SerializeMode): The serialization mode to use.
134
- shift_sequence (int): The shift of serialized sequence.
135
- shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
136
- shift (int): The shift to use.
137
- """
138
- assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
139
-
140
- serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
141
- serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
142
- if serialization_spatial_cache is None:
143
- fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
144
- qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
145
- else:
146
- fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
147
-
148
- M = fwd_indices.shape[0]
149
- T = qkv.feats.shape[0]
150
- H = qkv.feats.shape[2]
151
- C = qkv.feats.shape[3]
152
-
153
- qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
154
-
155
- if DEBUG:
156
- start = 0
157
- qkv_coords = qkv.coords[fwd_indices]
158
- for i in range(len(seq_lens)):
159
- assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
160
- start += seq_lens[i]
161
-
162
- if all([seq_len == window_size for seq_len in seq_lens]):
163
- B = len(seq_lens)
164
- N = window_size
165
- qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
166
- if ATTN == 'xformers':
167
- q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
168
- out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
169
- elif ATTN == 'flash_attn':
170
- out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
171
- else:
172
- raise ValueError(f"Unknown attention module: {ATTN}")
173
- out = out.reshape(B * N, H, C) # [M, H, C]
174
- else:
175
- if ATTN == 'xformers':
176
- q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
177
- q = q.unsqueeze(0) # [1, M, H, C]
178
- k = k.unsqueeze(0) # [1, M, H, C]
179
- v = v.unsqueeze(0) # [1, M, H, C]
180
- mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
181
- out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
182
- elif ATTN == 'flash_attn':
183
- cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
184
- .to(qkv.device).int()
185
- out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
186
-
187
- out = out[bwd_indices] # [T, H, C]
188
-
189
- if DEBUG:
190
- qkv_coords = qkv_coords[bwd_indices]
191
- assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
192
-
193
- return qkv.replace(out)
trellis/modules/sparse/attention/windowed_attn.py DELETED
@@ -1,135 +0,0 @@
1
- from typing import *
2
- import torch
3
- import math
4
- from .. import SparseTensor
5
- from .. import DEBUG, ATTN
6
-
7
- if ATTN == 'xformers':
8
- import xformers.ops as xops
9
- elif ATTN == 'flash_attn':
10
- import flash_attn
11
- else:
12
- raise ValueError(f"Unknown attention module: {ATTN}")
13
-
14
-
15
- __all__ = [
16
- 'sparse_windowed_scaled_dot_product_self_attention',
17
- ]
18
-
19
-
20
- def calc_window_partition(
21
- tensor: SparseTensor,
22
- window_size: Union[int, Tuple[int, ...]],
23
- shift_window: Union[int, Tuple[int, ...]] = 0
24
- ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
25
- """
26
- Calculate serialization and partitioning for a set of coordinates.
27
-
28
- Args:
29
- tensor (SparseTensor): The input tensor.
30
- window_size (int): The window size to use.
31
- shift_window (Tuple[int, ...]): The shift of serialized coordinates.
32
-
33
- Returns:
34
- (torch.Tensor): Forwards indices.
35
- (torch.Tensor): Backwards indices.
36
- (List[int]): Sequence lengths.
37
- (List[int]): Sequence batch indices.
38
- """
39
- DIM = tensor.coords.shape[1] - 1
40
- shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
41
- window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
42
- shifted_coords = tensor.coords.clone().detach()
43
- shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
44
-
45
- MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
46
- NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
47
- OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
48
-
49
- shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
50
- shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
51
- fwd_indices = torch.argsort(shifted_indices)
52
- bwd_indices = torch.empty_like(fwd_indices)
53
- bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
54
- seq_lens = torch.bincount(shifted_indices)
55
- seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
56
- mask = seq_lens != 0
57
- seq_lens = seq_lens[mask].tolist()
58
- seq_batch_indices = seq_batch_indices[mask].tolist()
59
-
60
- return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
61
-
62
-
63
- def sparse_windowed_scaled_dot_product_self_attention(
64
- qkv: SparseTensor,
65
- window_size: int,
66
- shift_window: Tuple[int, int, int] = (0, 0, 0)
67
- ) -> SparseTensor:
68
- """
69
- Apply windowed scaled dot product self attention to a sparse tensor.
70
-
71
- Args:
72
- qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
73
- window_size (int): The window size to use.
74
- shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
75
- shift (int): The shift to use.
76
- """
77
- assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
78
-
79
- serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}'
80
- serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
81
- if serialization_spatial_cache is None:
82
- fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window)
83
- qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
84
- else:
85
- fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
86
-
87
- M = fwd_indices.shape[0]
88
- T = qkv.feats.shape[0]
89
- H = qkv.feats.shape[2]
90
- C = qkv.feats.shape[3]
91
-
92
- qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
93
-
94
- if DEBUG:
95
- start = 0
96
- qkv_coords = qkv.coords[fwd_indices]
97
- for i in range(len(seq_lens)):
98
- seq_coords = qkv_coords[start:start+seq_lens[i]]
99
- assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
100
- assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \
101
- f"SparseWindowedScaledDotProductSelfAttention: window size exceeded"
102
- start += seq_lens[i]
103
-
104
- if all([seq_len == window_size for seq_len in seq_lens]):
105
- B = len(seq_lens)
106
- N = window_size
107
- qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
108
- if ATTN == 'xformers':
109
- q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
110
- out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
111
- elif ATTN == 'flash_attn':
112
- out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
113
- else:
114
- raise ValueError(f"Unknown attention module: {ATTN}")
115
- out = out.reshape(B * N, H, C) # [M, H, C]
116
- else:
117
- if ATTN == 'xformers':
118
- q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
119
- q = q.unsqueeze(0) # [1, M, H, C]
120
- k = k.unsqueeze(0) # [1, M, H, C]
121
- v = v.unsqueeze(0) # [1, M, H, C]
122
- mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
123
- out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
124
- elif ATTN == 'flash_attn':
125
- cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
126
- .to(qkv.device).int()
127
- out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
128
-
129
- out = out[bwd_indices] # [T, H, C]
130
-
131
- if DEBUG:
132
- qkv_coords = qkv_coords[bwd_indices]
133
- assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
134
-
135
- return qkv.replace(out)
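
calc_window_partition, deleted above, groups voxels into fixed-size spatial windows: shift the coordinates, integer-divide by the window size, flatten the per-axis window indices (plus the batch index) into a single key, and argsort by that key; the argsort gives the gather order and its inverse restores the original order. A self-contained sketch of that keying step on made-up coordinates:

import math
import torch

coords = torch.randint(0, 64, (100, 4), dtype=torch.int32)   # [batch, x, y, z]
coords[:, 0] = 0                                             # single batch element
window_size, shift_window = (8, 8, 8), (0, 0, 0)

shifted = coords.clone()
shifted[:, 1:] += torch.tensor(shift_window, dtype=torch.int32)
num_windows = [math.ceil((shifted[:, 1 + i].max().item() + 1) / w)
               for i, w in enumerate(window_size)]
offset = torch.cumprod(torch.tensor([1] + num_windows[::-1]), dim=0).tolist()[::-1]

shifted[:, 1:] //= torch.tensor(window_size, dtype=torch.int32)
key = (shifted * torch.tensor(offset, dtype=torch.int32)).sum(dim=1)  # window id per voxel
fwd_indices = torch.argsort(key)                   # gather voxels into window order
bwd_indices = torch.empty_like(fwd_indices)
bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0])         # undo the gather
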
trellis/modules/sparse/basic.py DELETED
@@ -1,459 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from . import BACKEND, DEBUG
5
- SparseTensorData = None # Lazy import
6
-
7
-
8
- __all__ = [
9
- 'SparseTensor',
10
- 'sparse_batch_broadcast',
11
- 'sparse_batch_op',
12
- 'sparse_cat',
13
- 'sparse_unbind',
14
- ]
15
-
16
-
17
- class SparseTensor:
18
- """
19
- Sparse tensor with support for both torchsparse and spconv backends.
20
-
21
- Parameters:
22
- - feats (torch.Tensor): Features of the sparse tensor.
23
- - coords (torch.Tensor): Coordinates of the sparse tensor.
24
- - shape (torch.Size): Shape of the sparse tensor.
25
- - layout (List[slice]): Layout of the sparse tensor for each batch
26
- - data (SparseTensorData): Sparse tensor data used for convolusion
27
-
28
- NOTE:
29
- - Data corresponding to a same batch should be contiguous.
30
- - Coords should be in [0, 1023]
31
- """
32
- @overload
33
- def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
34
-
35
- @overload
36
- def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
37
-
38
- def __init__(self, *args, **kwargs):
39
- # Lazy import of sparse tensor backend
40
- global SparseTensorData
41
- if SparseTensorData is None:
42
- import importlib
43
- if BACKEND == 'torchsparse':
44
- SparseTensorData = importlib.import_module('torchsparse').SparseTensor
45
- elif BACKEND == 'spconv':
46
- SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor
47
-
48
- method_id = 0
49
- if len(args) != 0:
50
- method_id = 0 if isinstance(args[0], torch.Tensor) else 1
51
- else:
52
- method_id = 1 if 'data' in kwargs else 0
53
-
54
- if method_id == 0:
55
- feats, coords, shape, layout = args + (None,) * (4 - len(args))
56
- if 'feats' in kwargs:
57
- feats = kwargs['feats']
58
- del kwargs['feats']
59
- if 'coords' in kwargs:
60
- coords = kwargs['coords']
61
- del kwargs['coords']
62
- if 'shape' in kwargs:
63
- shape = kwargs['shape']
64
- del kwargs['shape']
65
- if 'layout' in kwargs:
66
- layout = kwargs['layout']
67
- del kwargs['layout']
68
-
69
- if shape is None:
70
- shape = self.__cal_shape(feats, coords)
71
- if layout is None:
72
- layout = self.__cal_layout(coords, shape[0])
73
- if BACKEND == 'torchsparse':
74
- self.data = SparseTensorData(feats, coords, **kwargs)
75
- elif BACKEND == 'spconv':
76
- spatial_shape = list(coords.max(0)[0] + 1)[1:]
77
- self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs)
78
- self.data._features = feats
79
- elif method_id == 1:
80
- data, shape, layout = args + (None,) * (3 - len(args))
81
- if 'data' in kwargs:
82
- data = kwargs['data']
83
- del kwargs['data']
84
- if 'shape' in kwargs:
85
- shape = kwargs['shape']
86
- del kwargs['shape']
87
- if 'layout' in kwargs:
88
- layout = kwargs['layout']
89
- del kwargs['layout']
90
-
91
- self.data = data
92
- if shape is None:
93
- shape = self.__cal_shape(self.feats, self.coords)
94
- if layout is None:
95
- layout = self.__cal_layout(self.coords, shape[0])
96
-
97
- self._shape = shape
98
- self._layout = layout
99
- self._scale = kwargs.get('scale', (1, 1, 1))
100
- self._spatial_cache = kwargs.get('spatial_cache', {})
101
-
102
- if DEBUG:
103
- try:
104
- assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}"
105
- assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}"
106
- assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}"
107
- for i in range(self.shape[0]):
108
- assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous"
109
- except Exception as e:
110
- print('Debugging information:')
111
- print(f"- Shape: {self.shape}")
112
- print(f"- Layout: {self.layout}")
113
- print(f"- Scale: {self._scale}")
114
- print(f"- Coords: {self.coords}")
115
- raise e
116
-
117
- def __cal_shape(self, feats, coords):
118
- shape = []
119
- shape.append(coords[:, 0].max().item() + 1)
120
- shape.extend([*feats.shape[1:]])
121
- return torch.Size(shape)
122
-
123
- def __cal_layout(self, coords, batch_size):
124
- seq_len = torch.bincount(coords[:, 0], minlength=batch_size)
125
- offset = torch.cumsum(seq_len, dim=0)
126
- layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
127
- return layout
128
-
129
- @property
130
- def shape(self) -> torch.Size:
131
- return self._shape
132
-
133
- def dim(self) -> int:
134
- return len(self.shape)
135
-
136
- @property
137
- def layout(self) -> List[slice]:
138
- return self._layout
139
-
140
- @property
141
- def feats(self) -> torch.Tensor:
142
- if BACKEND == 'torchsparse':
143
- return self.data.F
144
- elif BACKEND == 'spconv':
145
- return self.data.features
146
-
147
- @feats.setter
148
- def feats(self, value: torch.Tensor):
149
- if BACKEND == 'torchsparse':
150
- self.data.F = value
151
- elif BACKEND == 'spconv':
152
- self.data.features = value
153
-
154
- @property
155
- def coords(self) -> torch.Tensor:
156
- if BACKEND == 'torchsparse':
157
- return self.data.C
158
- elif BACKEND == 'spconv':
159
- return self.data.indices
160
-
161
- @coords.setter
162
- def coords(self, value: torch.Tensor):
163
- if BACKEND == 'torchsparse':
164
- self.data.C = value
165
- elif BACKEND == 'spconv':
166
- self.data.indices = value
167
-
168
- @property
169
- def dtype(self):
170
- return self.feats.dtype
171
-
172
- @property
173
- def device(self):
174
- return self.feats.device
175
-
176
- @overload
177
- def to(self, dtype: torch.dtype) -> 'SparseTensor': ...
178
-
179
- @overload
180
- def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ...
181
-
182
- def to(self, *args, **kwargs) -> 'SparseTensor':
183
- device = None
184
- dtype = None
185
- if len(args) == 2:
186
- device, dtype = args
187
- elif len(args) == 1:
188
- if isinstance(args[0], torch.dtype):
189
- dtype = args[0]
190
- else:
191
- device = args[0]
192
- if 'dtype' in kwargs:
193
- assert dtype is None, "to() received multiple values for argument 'dtype'"
194
- dtype = kwargs['dtype']
195
- if 'device' in kwargs:
196
- assert device is None, "to() received multiple values for argument 'device'"
197
- device = kwargs['device']
198
-
199
- new_feats = self.feats.to(device=device, dtype=dtype)
200
- new_coords = self.coords.to(device=device)
201
- return self.replace(new_feats, new_coords)
202
-
203
- def type(self, dtype):
204
- new_feats = self.feats.type(dtype)
205
- return self.replace(new_feats)
206
-
207
- def cpu(self) -> 'SparseTensor':
208
- new_feats = self.feats.cpu()
209
- new_coords = self.coords.cpu()
210
- return self.replace(new_feats, new_coords)
211
-
212
- def cuda(self) -> 'SparseTensor':
213
- new_feats = self.feats.cuda()
214
- new_coords = self.coords.cuda()
215
- return self.replace(new_feats, new_coords)
216
-
217
- def half(self) -> 'SparseTensor':
218
- new_feats = self.feats.half()
219
- return self.replace(new_feats)
220
-
221
- def float(self) -> 'SparseTensor':
222
- new_feats = self.feats.float()
223
- return self.replace(new_feats)
224
-
225
- def detach(self) -> 'SparseTensor':
226
- new_coords = self.coords.detach()
227
- new_feats = self.feats.detach()
228
- return self.replace(new_feats, new_coords)
229
-
230
- def dense(self) -> torch.Tensor:
231
- if BACKEND == 'torchsparse':
232
- return self.data.dense()
233
- elif BACKEND == 'spconv':
234
- return self.data.dense()
235
-
236
- def reshape(self, *shape) -> 'SparseTensor':
237
- new_feats = self.feats.reshape(self.feats.shape[0], *shape)
238
- return self.replace(new_feats)
239
-
240
- def unbind(self, dim: int) -> List['SparseTensor']:
241
- return sparse_unbind(self, dim)
242
-
243
- def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor':
244
- new_shape = [self.shape[0]]
245
- new_shape.extend(feats.shape[1:])
246
- if BACKEND == 'torchsparse':
247
- new_data = SparseTensorData(
248
- feats=feats,
249
- coords=self.data.coords if coords is None else coords,
250
- stride=self.data.stride,
251
- spatial_range=self.data.spatial_range,
252
- )
253
- new_data._caches = self.data._caches
254
- elif BACKEND == 'spconv':
255
- new_data = SparseTensorData(
256
- self.data.features.reshape(self.data.features.shape[0], -1),
257
- self.data.indices,
258
- self.data.spatial_shape,
259
- self.data.batch_size,
260
- self.data.grid,
261
- self.data.voxel_num,
262
- self.data.indice_dict
263
- )
264
- new_data._features = feats
265
- new_data.benchmark = self.data.benchmark
266
- new_data.benchmark_record = self.data.benchmark_record
267
- new_data.thrust_allocator = self.data.thrust_allocator
268
- new_data._timer = self.data._timer
269
- new_data.force_algo = self.data.force_algo
270
- new_data.int8_scale = self.data.int8_scale
271
- if coords is not None:
272
- new_data.indices = coords
273
- new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache)
274
- return new_tensor
275
-
276
- @staticmethod
277
- def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor':
278
- N, C = dim
279
- x = torch.arange(aabb[0], aabb[3] + 1)
280
- y = torch.arange(aabb[1], aabb[4] + 1)
281
- z = torch.arange(aabb[2], aabb[5] + 1)
282
- coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3)
283
- coords = torch.cat([
284
- torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1),
285
- coords.repeat(N, 1),
286
- ], dim=1).to(dtype=torch.int32, device=device)
287
- feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device)
288
- return SparseTensor(feats=feats, coords=coords)
289
-
290
- def __merge_sparse_cache(self, other: 'SparseTensor') -> dict:
291
- new_cache = {}
292
- for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())):
293
- if k in self._spatial_cache:
294
- new_cache[k] = self._spatial_cache[k]
295
- if k in other._spatial_cache:
296
- if k not in new_cache:
297
- new_cache[k] = other._spatial_cache[k]
298
- else:
299
- new_cache[k].update(other._spatial_cache[k])
300
- return new_cache
301
-
302
- def __neg__(self) -> 'SparseTensor':
303
- return self.replace(-self.feats)
304
-
305
- def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor':
306
- if isinstance(other, torch.Tensor):
307
- try:
308
- other = torch.broadcast_to(other, self.shape)
309
- other = sparse_batch_broadcast(self, other)
310
- except:
311
- pass
312
- if isinstance(other, SparseTensor):
313
- other = other.feats
314
- new_feats = op(self.feats, other)
315
- new_tensor = self.replace(new_feats)
316
- if isinstance(other, SparseTensor):
317
- new_tensor._spatial_cache = self.__merge_sparse_cache(other)
318
- return new_tensor
319
-
320
- def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
321
- return self.__elemwise__(other, torch.add)
322
-
323
- def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
324
- return self.__elemwise__(other, torch.add)
325
-
326
- def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
327
- return self.__elemwise__(other, torch.sub)
328
-
329
- def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
330
- return self.__elemwise__(other, lambda x, y: torch.sub(y, x))
331
-
332
- def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
333
- return self.__elemwise__(other, torch.mul)
334
-
335
- def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
336
- return self.__elemwise__(other, torch.mul)
337
-
338
- def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
339
- return self.__elemwise__(other, torch.div)
340
-
341
- def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
342
- return self.__elemwise__(other, lambda x, y: torch.div(y, x))
343
-
344
- def __getitem__(self, idx):
345
- if isinstance(idx, int):
346
- idx = [idx]
347
- elif isinstance(idx, slice):
348
- idx = range(*idx.indices(self.shape[0]))
349
- elif isinstance(idx, torch.Tensor):
350
- if idx.dtype == torch.bool:
351
- assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
352
- idx = idx.nonzero().squeeze(1)
353
- elif idx.dtype in [torch.int32, torch.int64]:
354
- assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
355
- else:
356
- raise ValueError(f"Unknown index type: {idx.dtype}")
357
- else:
358
- raise ValueError(f"Unknown index type: {type(idx)}")
359
-
360
- coords = []
361
- feats = []
362
- for new_idx, old_idx in enumerate(idx):
363
- coords.append(self.coords[self.layout[old_idx]].clone())
364
- coords[-1][:, 0] = new_idx
365
- feats.append(self.feats[self.layout[old_idx]])
366
- coords = torch.cat(coords, dim=0).contiguous()
367
- feats = torch.cat(feats, dim=0).contiguous()
368
- return SparseTensor(feats=feats, coords=coords)
369
-
370
- def register_spatial_cache(self, key, value) -> None:
371
- """
372
- Register a spatial cache.
373
- The spatial cache can be anything you want to cache.
374
- The registry and retrieval of the cache are based on the current scale.
375
- """
376
- scale_key = str(self._scale)
377
- if scale_key not in self._spatial_cache:
378
- self._spatial_cache[scale_key] = {}
379
- self._spatial_cache[scale_key][key] = value
380
-
381
- def get_spatial_cache(self, key=None):
382
- """
383
- Get a spatial cache.
384
- """
385
- scale_key = str(self._scale)
386
- cur_scale_cache = self._spatial_cache.get(scale_key, {})
387
- if key is None:
388
- return cur_scale_cache
389
- return cur_scale_cache.get(key, None)
390
-
391
-
392
- def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
393
- """
394
- Broadcast a 1D tensor to a sparse tensor along the batch dimension.
395
-
396
- Args:
397
- input (SparseTensor): Sparse tensor to broadcast to.
398
- other (torch.Tensor): 1D tensor to broadcast.
399
400
- """
401
- coords, feats = input.coords, input.feats
402
- broadcasted = torch.zeros_like(feats)
403
- for k in range(input.shape[0]):
404
- broadcasted[input.layout[k]] = other[k]
405
- return broadcasted
406
-
407
-
408
- def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
409
- """
410
- Broadcast a 1D tensor to a sparse tensor along the batch dimension, then perform an operation.
411
-
412
- Args:
413
- input (SparseTensor): Sparse tensor to broadcast to.
414
- other (torch.Tensor): 1D tensor to broadcast.
415
- op (callable): Operation to perform after broadcasting. Defaults to torch.add.
416
- """
417
- return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
418
-
419
-
420
- def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
421
- """
422
- Concatenate a list of sparse tensors.
423
-
424
- Args:
425
- inputs (List[SparseTensor]): List of sparse tensors to concatenate.
426
- """
427
- if dim == 0:
428
- start = 0
429
- coords = []
430
- for input in inputs:
431
- coords.append(input.coords.clone())
432
- coords[-1][:, 0] += start
433
- start += input.shape[0]
434
- coords = torch.cat(coords, dim=0)
435
- feats = torch.cat([input.feats for input in inputs], dim=0)
436
- output = SparseTensor(
437
- coords=coords,
438
- feats=feats,
439
- )
440
- else:
441
- feats = torch.cat([input.feats for input in inputs], dim=dim)
442
- output = inputs[0].replace(feats)
443
-
444
- return output
445
-
446
-
447
- def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
448
- """
449
- Unbind a sparse tensor along a dimension.
450
-
451
- Args:
452
- input (SparseTensor): Sparse tensor to unbind.
453
- dim (int): Dimension to unbind.
454
- """
455
- if dim == 0:
456
- return [input[i] for i in range(input.shape[0])]
457
- else:
458
- feats = input.feats.unbind(dim)
459
- return [input.replace(f) for f in feats]
 
trellis/modules/sparse/conv/__init__.py DELETED
@@ -1,21 +0,0 @@
1
- from .. import BACKEND
2
-
3
-
4
- SPCONV_ALGO = 'auto' # 'auto', 'implicit_gemm', 'native'
5
-
6
- def __from_env():
7
- import os
8
-
9
- global SPCONV_ALGO
10
- env_spconv_algo = os.environ.get('SPCONV_ALGO')
11
- if env_spconv_algo is not None and env_spconv_algo in ['auto', 'implicit_gemm', 'native']:
12
- SPCONV_ALGO = env_spconv_algo
13
- print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}")
14
-
15
-
16
- __from_env()
17
-
18
- if BACKEND == 'torchsparse':
19
- from .conv_torchsparse import *
20
- elif BACKEND == 'spconv':
21
- from .conv_spconv import *
 
trellis/modules/sparse/conv/conv_spconv.py DELETED
@@ -1,80 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from .. import SparseTensor
4
- from .. import DEBUG
5
- from . import SPCONV_ALGO
6
-
7
- class SparseConv3d(nn.Module):
8
- def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=None, bias=True, indice_key=None):
9
- super(SparseConv3d, self).__init__()
10
- if 'spconv' not in globals():
11
- import spconv.pytorch as spconv
12
- algo = None
13
- if SPCONV_ALGO == 'native':
14
- algo = spconv.ConvAlgo.Native
15
- elif SPCONV_ALGO == 'implicit_gemm':
16
- algo = spconv.ConvAlgo.MaskImplicitGemm
17
- if stride == 1 and (padding is None):
18
- self.conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, dilation=dilation, bias=bias, indice_key=indice_key, algo=algo)
19
- else:
20
- self.conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, indice_key=indice_key, algo=algo)
21
- self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
22
- self.padding = padding
23
-
24
- def forward(self, x: SparseTensor) -> SparseTensor:
25
- spatial_changed = any(s != 1 for s in self.stride) or (self.padding is not None)
26
- new_data = self.conv(x.data)
27
- new_shape = [x.shape[0], self.conv.out_channels]
28
- new_layout = None if spatial_changed else x.layout
29
-
30
- if spatial_changed and (x.shape[0] != 1):
31
- # spconv with a non-1 stride breaks the contiguity of the output tensor, so sort the output by coords
32
- fwd = new_data.indices[:, 0].argsort()
33
- bwd = torch.zeros_like(fwd).scatter_(0, fwd, torch.arange(fwd.shape[0], device=fwd.device))
34
- sorted_feats = new_data.features[fwd]
35
- sorted_coords = new_data.indices[fwd]
36
- unsorted_data = new_data
37
- new_data = spconv.SparseConvTensor(sorted_feats, sorted_coords, unsorted_data.spatial_shape, unsorted_data.batch_size) # type: ignore
38
-
39
- out = SparseTensor(
40
- new_data, shape=torch.Size(new_shape), layout=new_layout,
41
- scale=tuple([s * stride for s, stride in zip(x._scale, self.stride)]),
42
- spatial_cache=x._spatial_cache,
43
- )
44
-
45
- if spatial_changed and (x.shape[0] != 1):
46
- out.register_spatial_cache(f'conv_{self.stride}_unsorted_data', unsorted_data)
47
- out.register_spatial_cache(f'conv_{self.stride}_sort_bwd', bwd)
48
-
49
- return out
50
-
51
-
52
- class SparseInverseConv3d(nn.Module):
53
- def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
54
- super(SparseInverseConv3d, self).__init__()
55
- if 'spconv' not in globals():
56
- import spconv.pytorch as spconv
57
- self.conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, bias=bias, indice_key=indice_key)
58
- self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
59
-
60
- def forward(self, x: SparseTensor) -> SparseTensor:
61
- spatial_changed = any(s != 1 for s in self.stride)
62
- if spatial_changed:
63
- # recover the original spconv order
64
- data = x.get_spatial_cache(f'conv_{self.stride}_unsorted_data')
65
- bwd = x.get_spatial_cache(f'conv_{self.stride}_sort_bwd')
66
- data = data.replace_feature(x.feats[bwd])
67
- if DEBUG:
68
- assert torch.equal(data.indices, x.coords[bwd]), 'Recover the original order failed'
69
- else:
70
- data = x.data
71
-
72
- new_data = self.conv(data)
73
- new_shape = [x.shape[0], self.conv.out_channels]
74
- new_layout = None if spatial_changed else x.layout
75
- out = SparseTensor(
76
- new_data, shape=torch.Size(new_shape), layout=new_layout,
77
- scale=tuple([s // stride for s, stride in zip(x._scale, self.stride)]),
78
- spatial_cache=x._spatial_cache,
79
- )
80
- return out
 
trellis/modules/sparse/conv/conv_torchsparse.py DELETED
@@ -1,38 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from .. import SparseTensor
4
-
5
-
6
- class SparseConv3d(nn.Module):
7
- def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
8
- super(SparseConv3d, self).__init__()
9
- if 'torchsparse' not in globals():
10
- import torchsparse
11
- self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias)
12
-
13
- def forward(self, x: SparseTensor) -> SparseTensor:
14
- out = self.conv(x.data)
15
- new_shape = [x.shape[0], self.conv.out_channels]
16
- out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
17
- out._spatial_cache = x._spatial_cache
18
- out._scale = tuple([s * stride for s, stride in zip(x._scale, self.conv.stride)])
19
- return out
20
-
21
-
22
- class SparseInverseConv3d(nn.Module):
23
- def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
24
- super(SparseInverseConv3d, self).__init__()
25
- if 'torchsparse' not in globals():
26
- import torchsparse
27
- self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias, transposed=True)
28
-
29
- def forward(self, x: SparseTensor) -> SparseTensor:
30
- out = self.conv(x.data)
31
- new_shape = [x.shape[0], self.conv.out_channels]
32
- out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
33
- out._spatial_cache = x._spatial_cache
34
- out._scale = tuple([s // stride for s, stride in zip(x._scale, self.conv.stride)])
35
- return out
36
-
37
-
38
-
 
trellis/modules/sparse/linear.py DELETED
@@ -1,15 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from . import SparseTensor
4
-
5
- __all__ = [
6
- 'SparseLinear'
7
- ]
8
-
9
-
10
- class SparseLinear(nn.Linear):
11
- def __init__(self, in_features, out_features, bias=True):
12
- super(SparseLinear, self).__init__(in_features, out_features, bias)
13
-
14
- def forward(self, input: SparseTensor) -> SparseTensor:
15
- return input.replace(super().forward(input.feats))
 
trellis/modules/sparse/nonlinearity.py DELETED
@@ -1,35 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from . import SparseTensor
4
-
5
- __all__ = [
6
- 'SparseReLU',
7
- 'SparseSiLU',
8
- 'SparseGELU',
9
- 'SparseActivation'
10
- ]
11
-
12
-
13
- class SparseReLU(nn.ReLU):
14
- def forward(self, input: SparseTensor) -> SparseTensor:
15
- return input.replace(super().forward(input.feats))
16
-
17
-
18
- class SparseSiLU(nn.SiLU):
19
- def forward(self, input: SparseTensor) -> SparseTensor:
20
- return input.replace(super().forward(input.feats))
21
-
22
-
23
- class SparseGELU(nn.GELU):
24
- def forward(self, input: SparseTensor) -> SparseTensor:
25
- return input.replace(super().forward(input.feats))
26
-
27
-
28
- class SparseActivation(nn.Module):
29
- def __init__(self, activation: nn.Module):
30
- super().__init__()
31
- self.activation = activation
32
-
33
- def forward(self, input: SparseTensor) -> SparseTensor:
34
- return input.replace(self.activation(input.feats))
35
-
 
trellis/modules/sparse/norm.py DELETED
@@ -1,58 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from . import SparseTensor
4
- from . import DEBUG
5
-
6
- __all__ = [
7
- 'SparseGroupNorm',
8
- 'SparseLayerNorm',
9
- 'SparseGroupNorm32',
10
- 'SparseLayerNorm32',
11
- ]
12
-
13
-
14
- class SparseGroupNorm(nn.GroupNorm):
15
- def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
16
- super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine)
17
-
18
- def forward(self, input: SparseTensor) -> SparseTensor:
19
- nfeats = torch.zeros_like(input.feats)
20
- for k in range(input.shape[0]):
21
- if DEBUG:
22
- assert (input.coords[input.layout[k], 0] == k).all(), f"SparseGroupNorm: batch index mismatch"
23
- bfeats = input.feats[input.layout[k]]
24
- bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
25
- bfeats = super().forward(bfeats)
26
- bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
27
- nfeats[input.layout[k]] = bfeats
28
- return input.replace(nfeats)
29
-
30
-
31
- class SparseLayerNorm(nn.LayerNorm):
32
- def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
33
- super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
34
-
35
- def forward(self, input: SparseTensor) -> SparseTensor:
36
- nfeats = torch.zeros_like(input.feats)
37
- for k in range(input.shape[0]):
38
- bfeats = input.feats[input.layout[k]]
39
- bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
40
- bfeats = super().forward(bfeats)
41
- bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
42
- nfeats[input.layout[k]] = bfeats
43
- return input.replace(nfeats)
44
-
45
-
46
- class SparseGroupNorm32(SparseGroupNorm):
47
- """
48
- A GroupNorm layer that converts to float32 before the forward pass.
49
- """
50
- def forward(self, x: SparseTensor) -> SparseTensor:
51
- return super().forward(x.float()).type(x.dtype)
52
-
53
- class SparseLayerNorm32(SparseLayerNorm):
54
- """
55
- A LayerNorm layer that converts to float32 before the forward pass.
56
- """
57
- def forward(self, x: SparseTensor) -> SparseTensor:
58
- return super().forward(x.float()).type(x.dtype)
 
trellis/modules/sparse/spatial.py DELETED
@@ -1,110 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from . import SparseTensor
5
-
6
- __all__ = [
7
- 'SparseDownsample',
8
- 'SparseUpsample',
9
- 'SparseSubdivide'
10
- ]
11
-
12
-
13
- class SparseDownsample(nn.Module):
14
- """
15
- Downsample a sparse tensor by a factor of `factor`.
16
- Implemented as average pooling.
17
- """
18
- def __init__(self, factor: Union[int, Tuple[int, ...], List[int]]):
19
- super(SparseDownsample, self).__init__()
20
- self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
21
-
22
- def forward(self, input: SparseTensor) -> SparseTensor:
23
- DIM = input.coords.shape[-1] - 1
24
- factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
25
- assert DIM == len(factor), 'Input coordinates must have the same dimension as the downsample factor.'
26
-
27
- coord = list(input.coords.unbind(dim=-1))
28
- for i, f in enumerate(factor):
29
- coord[i+1] = coord[i+1] // f
30
-
31
- MAX = [coord[i+1].max().item() + 1 for i in range(DIM)]
32
- OFFSET = torch.cumprod(torch.tensor(MAX[::-1]), 0).tolist()[::-1] + [1]
33
- code = sum([c * o for c, o in zip(coord, OFFSET)])
34
- code, idx = code.unique(return_inverse=True)
35
-
36
- new_feats = torch.scatter_reduce(
37
- torch.zeros(code.shape[0], input.feats.shape[1], device=input.feats.device, dtype=input.feats.dtype),
38
- dim=0,
39
- index=idx.unsqueeze(1).expand(-1, input.feats.shape[1]),
40
- src=input.feats,
41
- reduce='mean'
42
- )
43
- new_coords = torch.stack(
44
- [code // OFFSET[0]] +
45
- [(code // OFFSET[i+1]) % MAX[i] for i in range(DIM)],
46
- dim=-1
47
- )
48
- out = SparseTensor(new_feats, new_coords, input.shape,)
49
- out._scale = tuple([s // f for s, f in zip(input._scale, factor)])
50
- out._spatial_cache = input._spatial_cache
51
-
52
- out.register_spatial_cache(f'upsample_{factor}_coords', input.coords)
53
- out.register_spatial_cache(f'upsample_{factor}_layout', input.layout)
54
- out.register_spatial_cache(f'upsample_{factor}_idx', idx)
55
-
56
- return out
57
-
58
-
59
- class SparseUpsample(nn.Module):
60
- """
61
- Upsample a sparse tensor by a factor of `factor`.
62
- Implemented as nearest neighbor interpolation.
63
- """
64
- def __init__(self, factor: Union[int, Tuple[int, int, int], List[int]]):
65
- super(SparseUpsample, self).__init__()
66
- self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
67
-
68
- def forward(self, input: SparseTensor) -> SparseTensor:
69
- DIM = input.coords.shape[-1] - 1
70
- factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
71
- assert DIM == len(factor), 'Input coordinates must have the same dimension as the upsample factor.'
72
-
73
- new_coords = input.get_spatial_cache(f'upsample_{factor}_coords')
74
- new_layout = input.get_spatial_cache(f'upsample_{factor}_layout')
75
- idx = input.get_spatial_cache(f'upsample_{factor}_idx')
76
- if any([x is None for x in [new_coords, new_layout, idx]]):
77
- raise ValueError('Upsample cache not found. SparseUpsample must be paired with SparseDownsample.')
78
- new_feats = input.feats[idx]
79
- out = SparseTensor(new_feats, new_coords, input.shape, new_layout)
80
- out._scale = tuple([s * f for s, f in zip(input._scale, factor)])
81
- out._spatial_cache = input._spatial_cache
82
- return out
83
-
84
- class SparseSubdivide(nn.Module):
85
- """
86
- Subdivide a sparse tensor, splitting each voxel into 2^D children along every spatial dimension.
87
- Each child voxel inherits its parent's features.
88
- """
89
- def __init__(self):
90
- super(SparseSubdivide, self).__init__()
91
-
92
- def forward(self, input: SparseTensor) -> SparseTensor:
93
- DIM = input.coords.shape[-1] - 1
94
- # upsample scale=2^DIM
95
- n_cube = torch.ones([2] * DIM, device=input.device, dtype=torch.int)
96
- n_coords = torch.nonzero(n_cube)
97
- n_coords = torch.cat([torch.zeros_like(n_coords[:, :1]), n_coords], dim=-1)
98
- factor = n_coords.shape[0]
99
- assert factor == 2 ** DIM
100
- # print(n_coords.shape)
101
- new_coords = input.coords.clone()
102
- new_coords[:, 1:] *= 2
103
- new_coords = new_coords.unsqueeze(1) + n_coords.unsqueeze(0).to(new_coords.dtype)
104
-
105
- new_feats = input.feats.unsqueeze(1).expand(input.feats.shape[0], factor, *input.feats.shape[1:])
106
- out = SparseTensor(new_feats.flatten(0, 1), new_coords.flatten(0, 1), input.shape)
107
- out._scale = input._scale * 2
108
- out._spatial_cache = input._spatial_cache
109
- return out
110
-
 
trellis/modules/sparse/transformer/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .blocks import *
2
- from .modulated import *
 
trellis/modules/sparse/transformer/blocks.py DELETED
@@ -1,151 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from ..basic import SparseTensor
5
- from ..linear import SparseLinear
6
- from ..nonlinearity import SparseGELU
7
- from ..attention import SparseMultiHeadAttention, SerializeMode
8
- from ...norm import LayerNorm32
9
-
10
-
11
- class SparseFeedForwardNet(nn.Module):
12
- def __init__(self, channels: int, mlp_ratio: float = 4.0):
13
- super().__init__()
14
- self.mlp = nn.Sequential(
15
- SparseLinear(channels, int(channels * mlp_ratio)),
16
- SparseGELU(approximate="tanh"),
17
- SparseLinear(int(channels * mlp_ratio), channels),
18
- )
19
-
20
- def forward(self, x: SparseTensor) -> SparseTensor:
21
- return self.mlp(x)
22
-
23
-
24
- class SparseTransformerBlock(nn.Module):
25
- """
26
- Sparse Transformer block (MSA + FFN).
27
- """
28
- def __init__(
29
- self,
30
- channels: int,
31
- num_heads: int,
32
- mlp_ratio: float = 4.0,
33
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
34
- window_size: Optional[int] = None,
35
- shift_sequence: Optional[int] = None,
36
- shift_window: Optional[Tuple[int, int, int]] = None,
37
- serialize_mode: Optional[SerializeMode] = None,
38
- use_checkpoint: bool = False,
39
- use_rope: bool = False,
40
- qk_rms_norm: bool = False,
41
- qkv_bias: bool = True,
42
- ln_affine: bool = False,
43
- ):
44
- super().__init__()
45
- self.use_checkpoint = use_checkpoint
46
- self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
47
- self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
48
- self.attn = SparseMultiHeadAttention(
49
- channels,
50
- num_heads=num_heads,
51
- attn_mode=attn_mode,
52
- window_size=window_size,
53
- shift_sequence=shift_sequence,
54
- shift_window=shift_window,
55
- serialize_mode=serialize_mode,
56
- qkv_bias=qkv_bias,
57
- use_rope=use_rope,
58
- qk_rms_norm=qk_rms_norm,
59
- )
60
- self.mlp = SparseFeedForwardNet(
61
- channels,
62
- mlp_ratio=mlp_ratio,
63
- )
64
-
65
- def _forward(self, x: SparseTensor) -> SparseTensor:
66
- h = x.replace(self.norm1(x.feats))
67
- h = self.attn(h)
68
- x = x + h
69
- h = x.replace(self.norm2(x.feats))
70
- h = self.mlp(h)
71
- x = x + h
72
- return x
73
-
74
- def forward(self, x: SparseTensor) -> SparseTensor:
75
- if self.use_checkpoint:
76
- return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
77
- else:
78
- return self._forward(x)
79
-
80
-
81
- class SparseTransformerCrossBlock(nn.Module):
82
- """
83
- Sparse Transformer cross-attention block (MSA + MCA + FFN).
84
- """
85
- def __init__(
86
- self,
87
- channels: int,
88
- ctx_channels: int,
89
- num_heads: int,
90
- mlp_ratio: float = 4.0,
91
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
- window_size: Optional[int] = None,
93
- shift_sequence: Optional[int] = None,
94
- shift_window: Optional[Tuple[int, int, int]] = None,
95
- serialize_mode: Optional[SerializeMode] = None,
96
- use_checkpoint: bool = False,
97
- use_rope: bool = False,
98
- qk_rms_norm: bool = False,
99
- qk_rms_norm_cross: bool = False,
100
- qkv_bias: bool = True,
101
- ln_affine: bool = False,
102
- ):
103
- super().__init__()
104
- self.use_checkpoint = use_checkpoint
105
- self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
106
- self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
107
- self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
108
- self.self_attn = SparseMultiHeadAttention(
109
- channels,
110
- num_heads=num_heads,
111
- type="self",
112
- attn_mode=attn_mode,
113
- window_size=window_size,
114
- shift_sequence=shift_sequence,
115
- shift_window=shift_window,
116
- serialize_mode=serialize_mode,
117
- qkv_bias=qkv_bias,
118
- use_rope=use_rope,
119
- qk_rms_norm=qk_rms_norm,
120
- )
121
- self.cross_attn = SparseMultiHeadAttention(
122
- channels,
123
- ctx_channels=ctx_channels,
124
- num_heads=num_heads,
125
- type="cross",
126
- attn_mode="full",
127
- qkv_bias=qkv_bias,
128
- qk_rms_norm=qk_rms_norm_cross,
129
- )
130
- self.mlp = SparseFeedForwardNet(
131
- channels,
132
- mlp_ratio=mlp_ratio,
133
- )
134
-
135
- def _forward(self, x: SparseTensor, context: torch.Tensor):
136
- h = x.replace(self.norm1(x.feats))
137
- h = self.self_attn(h)
138
- x = x + h
139
- h = x.replace(self.norm2(x.feats))
140
- h = self.cross_attn(h, context)
141
- x = x + h
142
- h = x.replace(self.norm3(x.feats))
143
- h = self.mlp(h)
144
- x = x + h
145
- return x
146
-
147
- def forward(self, x: SparseTensor, context: torch.Tensor):
148
- if self.use_checkpoint:
149
- return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
150
- else:
151
- return self._forward(x, context)
 
trellis/modules/sparse/transformer/modulated.py DELETED
@@ -1,166 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from ..basic import SparseTensor
5
- from ..attention import SparseMultiHeadAttention, SerializeMode
6
- from ...norm import LayerNorm32
7
- from .blocks import SparseFeedForwardNet
8
-
9
-
10
- class ModulatedSparseTransformerBlock(nn.Module):
11
- """
12
- Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning.
13
- """
14
- def __init__(
15
- self,
16
- channels: int,
17
- num_heads: int,
18
- mlp_ratio: float = 4.0,
19
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
20
- window_size: Optional[int] = None,
21
- shift_sequence: Optional[int] = None,
22
- shift_window: Optional[Tuple[int, int, int]] = None,
23
- serialize_mode: Optional[SerializeMode] = None,
24
- use_checkpoint: bool = False,
25
- use_rope: bool = False,
26
- qk_rms_norm: bool = False,
27
- qkv_bias: bool = True,
28
- share_mod: bool = False,
29
- ):
30
- super().__init__()
31
- self.use_checkpoint = use_checkpoint
32
- self.share_mod = share_mod
33
- self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
34
- self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
35
- self.attn = SparseMultiHeadAttention(
36
- channels,
37
- num_heads=num_heads,
38
- attn_mode=attn_mode,
39
- window_size=window_size,
40
- shift_sequence=shift_sequence,
41
- shift_window=shift_window,
42
- serialize_mode=serialize_mode,
43
- qkv_bias=qkv_bias,
44
- use_rope=use_rope,
45
- qk_rms_norm=qk_rms_norm,
46
- )
47
- self.mlp = SparseFeedForwardNet(
48
- channels,
49
- mlp_ratio=mlp_ratio,
50
- )
51
- if not share_mod:
52
- self.adaLN_modulation = nn.Sequential(
53
- nn.SiLU(),
54
- nn.Linear(channels, 6 * channels, bias=True)
55
- )
56
-
57
- def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
58
- if self.share_mod:
59
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
60
- else:
61
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
62
- h = x.replace(self.norm1(x.feats))
63
- h = h * (1 + scale_msa) + shift_msa
64
- h = self.attn(h)
65
- h = h * gate_msa
66
- x = x + h
67
- h = x.replace(self.norm2(x.feats))
68
- h = h * (1 + scale_mlp) + shift_mlp
69
- h = self.mlp(h)
70
- h = h * gate_mlp
71
- x = x + h
72
- return x
73
-
74
- def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
75
- if self.use_checkpoint:
76
- return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
77
- else:
78
- return self._forward(x, mod)
79
-
80
-
81
- class ModulatedSparseTransformerCrossBlock(nn.Module):
82
- """
83
- Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
84
- """
85
- def __init__(
86
- self,
87
- channels: int,
88
- ctx_channels: int,
89
- num_heads: int,
90
- mlp_ratio: float = 4.0,
91
- attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
- window_size: Optional[int] = None,
93
- shift_sequence: Optional[int] = None,
94
- shift_window: Optional[Tuple[int, int, int]] = None,
95
- serialize_mode: Optional[SerializeMode] = None,
96
- use_checkpoint: bool = False,
97
- use_rope: bool = False,
98
- qk_rms_norm: bool = False,
99
- qk_rms_norm_cross: bool = False,
100
- qkv_bias: bool = True,
101
- share_mod: bool = False,
102
-
103
- ):
104
- super().__init__()
105
- self.use_checkpoint = use_checkpoint
106
- self.share_mod = share_mod
107
- self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
108
- self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
109
- self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
110
- self.self_attn = SparseMultiHeadAttention(
111
- channels,
112
- num_heads=num_heads,
113
- type="self",
114
- attn_mode=attn_mode,
115
- window_size=window_size,
116
- shift_sequence=shift_sequence,
117
- shift_window=shift_window,
118
- serialize_mode=serialize_mode,
119
- qkv_bias=qkv_bias,
120
- use_rope=use_rope,
121
- qk_rms_norm=qk_rms_norm,
122
- )
123
- self.cross_attn = SparseMultiHeadAttention(
124
- channels,
125
- ctx_channels=ctx_channels,
126
- num_heads=num_heads,
127
- type="cross",
128
- attn_mode="full",
129
- qkv_bias=qkv_bias,
130
- qk_rms_norm=qk_rms_norm_cross,
131
- )
132
- self.mlp = SparseFeedForwardNet(
133
- channels,
134
- mlp_ratio=mlp_ratio,
135
- )
136
- if not share_mod:
137
- self.adaLN_modulation = nn.Sequential(
138
- nn.SiLU(),
139
- nn.Linear(channels, 6 * channels, bias=True)
140
- )
141
-
142
- def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
143
- if self.share_mod:
144
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
145
- else:
146
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
147
- h = x.replace(self.norm1(x.feats))
148
- h = h * (1 + scale_msa) + shift_msa
149
- h = self.self_attn(h)
150
- h = h * gate_msa
151
- x = x + h
152
- h = x.replace(self.norm2(x.feats))
153
- h = self.cross_attn(h, context)
154
- x = x + h
155
- h = x.replace(self.norm3(x.feats))
156
- h = h * (1 + scale_mlp) + shift_mlp
157
- h = self.mlp(h)
158
- h = h * gate_mlp
159
- x = x + h
160
- return x
161
-
162
- def forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
163
- if self.use_checkpoint:
164
- return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
165
- else:
166
- return self._forward(x, mod, context)
 
trellis/modules/spatial.py DELETED
@@ -1,48 +0,0 @@
1
- import torch
2
-
3
-
4
- def pixel_shuffle_3d(x: torch.Tensor, scale_factor: int) -> torch.Tensor:
5
- """
6
- 3D pixel shuffle.
7
- """
8
- B, C, H, W, D = x.shape
9
- C_ = C // scale_factor**3
10
- x = x.reshape(B, C_, scale_factor, scale_factor, scale_factor, H, W, D)
11
- x = x.permute(0, 1, 5, 2, 6, 3, 7, 4)
12
- x = x.reshape(B, C_, H*scale_factor, W*scale_factor, D*scale_factor)
13
- return x
14
-
15
-
16
- def patchify(x: torch.Tensor, patch_size: int):
17
- """
18
- Patchify a tensor.
19
-
20
- Args:
21
- x (torch.Tensor): (N, C, *spatial) tensor
22
- patch_size (int): Patch size
23
- """
24
- DIM = x.dim() - 2
25
- for d in range(2, DIM + 2):
26
- assert x.shape[d] % patch_size == 0, f"Dimension {d} of input tensor must be divisible by patch size, got {x.shape[d]} and {patch_size}"
27
-
28
- x = x.reshape(*x.shape[:2], *sum([[x.shape[d] // patch_size, patch_size] for d in range(2, DIM + 2)], []))
29
- x = x.permute(0, 1, *([2 * i + 3 for i in range(DIM)] + [2 * i + 2 for i in range(DIM)]))
30
- x = x.reshape(x.shape[0], x.shape[1] * (patch_size ** DIM), *(x.shape[-DIM:]))
31
- return x
32
-
33
-
34
- def unpatchify(x: torch.Tensor, patch_size: int):
35
- """
36
- Unpatchify a tensor.
37
-
38
- Args:
39
- x (torch.Tensor): (N, C, *spatial) tensor
40
- patch_size (int): Patch size
41
- """
42
- DIM = x.dim() - 2
43
- assert x.shape[1] % (patch_size ** DIM) == 0, f"Second dimension of input tensor must be divisible by patch size to unpatchify, got {x.shape[1]} and {patch_size ** DIM}"
44
-
45
- x = x.reshape(x.shape[0], x.shape[1] // (patch_size ** DIM), *([patch_size] * DIM), *(x.shape[-DIM:]))
46
- x = x.permute(0, 1, *(sum([[2 + DIM + i, 2 + i] for i in range(DIM)], [])))
47
- x = x.reshape(x.shape[0], x.shape[1], *[x.shape[2 + 2 * i] * patch_size for i in range(DIM)])
48
- return x
 
trellis/modules/transformer/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .blocks import *
2
- from .modulated import *
 
trellis/modules/transformer/blocks.py DELETED
@@ -1,182 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from ..attention import MultiHeadAttention
5
- from ..norm import LayerNorm32
6
-
7
-
8
- class AbsolutePositionEmbedder(nn.Module):
9
- """
10
- Embeds spatial positions into vector representations.
11
- """
12
- def __init__(self, channels: int, in_channels: int = 3):
13
- super().__init__()
14
- self.channels = channels
15
- self.in_channels = in_channels
16
- self.freq_dim = channels // in_channels // 2
17
- self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
18
- self.freqs = 1.0 / (10000 ** self.freqs)
19
-
20
- def _sin_cos_embedding(self, x: torch.Tensor) -> torch.Tensor:
21
- """
22
- Create sinusoidal position embeddings.
23
-
24
- Args:
25
- x: a 1-D Tensor of N indices
26
-
27
- Returns:
28
- an (N, D) Tensor of positional embeddings.
29
- """
30
- self.freqs = self.freqs.to(x.device)
31
- out = torch.outer(x, self.freqs)
32
- out = torch.cat([torch.sin(out), torch.cos(out)], dim=-1)
33
- return out
34
-
35
- def forward(self, x: torch.Tensor) -> torch.Tensor:
36
- """
37
- Args:
38
- x (torch.Tensor): (N, D) tensor of spatial positions
39
- """
40
- N, D = x.shape
41
- assert D == self.in_channels, "Input dimension must match number of input channels"
42
- embed = self._sin_cos_embedding(x.reshape(-1))
43
- embed = embed.reshape(N, -1)
44
- if embed.shape[1] < self.channels:
45
- embed = torch.cat([embed, torch.zeros(N, self.channels - embed.shape[1], device=embed.device)], dim=-1)
46
- return embed
47
-
48
-
49
- class FeedForwardNet(nn.Module):
50
- def __init__(self, channels: int, mlp_ratio: float = 4.0):
51
- super().__init__()
52
- self.mlp = nn.Sequential(
53
- nn.Linear(channels, int(channels * mlp_ratio)),
54
- nn.GELU(approximate="tanh"),
55
- nn.Linear(int(channels * mlp_ratio), channels),
56
- )
57
-
58
- def forward(self, x: torch.Tensor) -> torch.Tensor:
59
- return self.mlp(x)
60
-
61
-
62
- class TransformerBlock(nn.Module):
63
- """
64
- Transformer block (MSA + FFN).
65
- """
66
- def __init__(
67
- self,
68
- channels: int,
69
- num_heads: int,
70
- mlp_ratio: float = 4.0,
71
- attn_mode: Literal["full", "windowed"] = "full",
72
- window_size: Optional[int] = None,
73
- shift_window: Optional[int] = None,
74
- use_checkpoint: bool = False,
75
- use_rope: bool = False,
76
- qk_rms_norm: bool = False,
77
- qkv_bias: bool = True,
78
- ln_affine: bool = False,
79
- ):
80
- super().__init__()
81
- self.use_checkpoint = use_checkpoint
82
- self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
83
- self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
84
- self.attn = MultiHeadAttention(
85
- channels,
86
- num_heads=num_heads,
87
- attn_mode=attn_mode,
88
- window_size=window_size,
89
- shift_window=shift_window,
90
- qkv_bias=qkv_bias,
91
- use_rope=use_rope,
92
- qk_rms_norm=qk_rms_norm,
93
- )
94
- self.mlp = FeedForwardNet(
95
- channels,
96
- mlp_ratio=mlp_ratio,
97
- )
98
-
99
- def _forward(self, x: torch.Tensor) -> torch.Tensor:
100
- h = self.norm1(x)
101
- h = self.attn(h)
102
- x = x + h
103
- h = self.norm2(x)
104
- h = self.mlp(h)
105
- x = x + h
106
- return x
107
-
108
- def forward(self, x: torch.Tensor) -> torch.Tensor:
109
- if self.use_checkpoint:
110
- return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
111
- else:
112
- return self._forward(x)
113
-
114
-
115
- class TransformerCrossBlock(nn.Module):
116
- """
117
- Transformer cross-attention block (MSA + MCA + FFN).
118
- """
119
- def __init__(
120
- self,
121
- channels: int,
122
- ctx_channels: int,
123
- num_heads: int,
124
- mlp_ratio: float = 4.0,
125
- attn_mode: Literal["full", "windowed"] = "full",
126
- window_size: Optional[int] = None,
127
- shift_window: Optional[Tuple[int, int, int]] = None,
128
- use_checkpoint: bool = False,
129
- use_rope: bool = False,
130
- qk_rms_norm: bool = False,
131
- qk_rms_norm_cross: bool = False,
132
- qkv_bias: bool = True,
133
- ln_affine: bool = False,
134
- ):
135
- super().__init__()
136
- self.use_checkpoint = use_checkpoint
137
- self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
138
- self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
139
- self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
140
- self.self_attn = MultiHeadAttention(
141
- channels,
142
- num_heads=num_heads,
143
- type="self",
144
- attn_mode=attn_mode,
145
- window_size=window_size,
146
- shift_window=shift_window,
147
- qkv_bias=qkv_bias,
148
- use_rope=use_rope,
149
- qk_rms_norm=qk_rms_norm,
150
- )
151
- self.cross_attn = MultiHeadAttention(
152
- channels,
153
- ctx_channels=ctx_channels,
154
- num_heads=num_heads,
155
- type="cross",
156
- attn_mode="full",
157
- qkv_bias=qkv_bias,
158
- qk_rms_norm=qk_rms_norm_cross,
159
- )
160
- self.mlp = FeedForwardNet(
161
- channels,
162
- mlp_ratio=mlp_ratio,
163
- )
164
-
165
- def _forward(self, x: torch.Tensor, context: torch.Tensor):
166
- h = self.norm1(x)
167
- h = self.self_attn(h)
168
- x = x + h
169
- h = self.norm2(x)
170
- h = self.cross_attn(h, context)
171
- x = x + h
172
- h = self.norm3(x)
173
- h = self.mlp(h)
174
- x = x + h
175
- return x
176
-
177
- def forward(self, x: torch.Tensor, context: torch.Tensor):
178
- if self.use_checkpoint:
179
- return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
180
- else:
181
- return self._forward(x, context)
182
-
 
trellis/modules/transformer/modulated.py DELETED
@@ -1,157 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from ..attention import MultiHeadAttention
5
- from ..norm import LayerNorm32
6
- from .blocks import FeedForwardNet
7
-
8
-
9
- class ModulatedTransformerBlock(nn.Module):
10
- """
11
- Transformer block (MSA + FFN) with adaptive layer norm conditioning.
12
- """
13
- def __init__(
14
- self,
15
- channels: int,
16
- num_heads: int,
17
- mlp_ratio: float = 4.0,
18
- attn_mode: Literal["full", "windowed"] = "full",
19
- window_size: Optional[int] = None,
20
- shift_window: Optional[Tuple[int, int, int]] = None,
21
- use_checkpoint: bool = False,
22
- use_rope: bool = False,
23
- qk_rms_norm: bool = False,
24
- qkv_bias: bool = True,
25
- share_mod: bool = False,
26
- ):
27
- super().__init__()
28
- self.use_checkpoint = use_checkpoint
29
- self.share_mod = share_mod
30
- self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
31
- self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
32
- self.attn = MultiHeadAttention(
33
- channels,
34
- num_heads=num_heads,
35
- attn_mode=attn_mode,
36
- window_size=window_size,
37
- shift_window=shift_window,
38
- qkv_bias=qkv_bias,
39
- use_rope=use_rope,
40
- qk_rms_norm=qk_rms_norm,
41
- )
42
- self.mlp = FeedForwardNet(
43
- channels,
44
- mlp_ratio=mlp_ratio,
45
- )
46
- if not share_mod:
47
- self.adaLN_modulation = nn.Sequential(
48
- nn.SiLU(),
49
- nn.Linear(channels, 6 * channels, bias=True)
50
- )
51
-
52
- def _forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
53
- if self.share_mod:
54
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
55
- else:
56
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
57
- h = self.norm1(x)
58
- h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
59
- h = self.attn(h)
60
- h = h * gate_msa.unsqueeze(1)
61
- x = x + h
62
- h = self.norm2(x)
63
- h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
64
- h = self.mlp(h)
65
- h = h * gate_mlp.unsqueeze(1)
66
- x = x + h
67
- return x
68
-
69
- def forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
70
- if self.use_checkpoint:
71
- return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
72
- else:
73
- return self._forward(x, mod)
74
-
75
-
76
- class ModulatedTransformerCrossBlock(nn.Module):
77
- """
78
- Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
79
- """
80
- def __init__(
81
- self,
82
- channels: int,
83
- ctx_channels: int,
84
- num_heads: int,
85
- mlp_ratio: float = 4.0,
86
- attn_mode: Literal["full", "windowed"] = "full",
87
- window_size: Optional[int] = None,
88
- shift_window: Optional[Tuple[int, int, int]] = None,
89
- use_checkpoint: bool = False,
90
- use_rope: bool = False,
91
- qk_rms_norm: bool = False,
92
- qk_rms_norm_cross: bool = False,
93
- qkv_bias: bool = True,
94
- share_mod: bool = False,
95
- ):
96
- super().__init__()
97
- self.use_checkpoint = use_checkpoint
98
- self.share_mod = share_mod
99
- self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
100
- self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
101
- self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
102
- self.self_attn = MultiHeadAttention(
103
- channels,
104
- num_heads=num_heads,
105
- type="self",
106
- attn_mode=attn_mode,
107
- window_size=window_size,
108
- shift_window=shift_window,
109
- qkv_bias=qkv_bias,
110
- use_rope=use_rope,
111
- qk_rms_norm=qk_rms_norm,
112
- )
113
- self.cross_attn = MultiHeadAttention(
114
- channels,
115
- ctx_channels=ctx_channels,
116
- num_heads=num_heads,
117
- type="cross",
118
- attn_mode="full",
119
- qkv_bias=qkv_bias,
120
- qk_rms_norm=qk_rms_norm_cross,
121
- )
122
- self.mlp = FeedForwardNet(
123
- channels,
124
- mlp_ratio=mlp_ratio,
125
- )
126
- if not share_mod:
127
- self.adaLN_modulation = nn.Sequential(
128
- nn.SiLU(),
129
- nn.Linear(channels, 6 * channels, bias=True)
130
- )
131
-
132
- def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
133
- if self.share_mod:
134
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
135
- else:
136
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
137
- h = self.norm1(x)
138
- h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
139
- h = self.self_attn(h)
140
- h = h * gate_msa.unsqueeze(1)
141
- x = x + h
142
- h = self.norm2(x)
143
- h = self.cross_attn(h, context)
144
- x = x + h
145
- h = self.norm3(x)
146
- h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
147
- h = self.mlp(h)
148
- h = h * gate_mlp.unsqueeze(1)
149
- x = x + h
150
- return x
151
-
152
- def forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
153
- if self.use_checkpoint:
154
- return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
155
- else:
156
- return self._forward(x, mod, context)
157
-
 
trellis/modules/utils.py DELETED
@@ -1,54 +0,0 @@
1
- import torch.nn as nn
2
- from ..modules import sparse as sp
3
-
4
- FP16_MODULES = (
5
- nn.Conv1d,
6
- nn.Conv2d,
7
- nn.Conv3d,
8
- nn.ConvTranspose1d,
9
- nn.ConvTranspose2d,
10
- nn.ConvTranspose3d,
11
- nn.Linear,
12
- sp.SparseConv3d,
13
- sp.SparseInverseConv3d,
14
- sp.SparseLinear,
15
- )
16
-
17
- def convert_module_to_f16(l):
18
- """
19
- Convert primitive modules to float16.
20
- """
21
- if isinstance(l, FP16_MODULES):
22
- for p in l.parameters():
23
- p.data = p.data.half()
24
-
25
-
26
- def convert_module_to_f32(l):
27
- """
28
- Convert primitive modules to float32, undoing convert_module_to_f16().
29
- """
30
- if isinstance(l, FP16_MODULES):
31
- for p in l.parameters():
32
- p.data = p.data.float()
33
-
34
-
35
- def zero_module(module):
36
- """
37
- Zero out the parameters of a module and return it.
38
- """
39
- for p in module.parameters():
40
- p.detach().zero_()
41
- return module
42
-
43
-
44
- def scale_module(module, scale):
45
- """
46
- Scale the parameters of a module and return it.
47
- """
48
- for p in module.parameters():
49
- p.detach().mul_(scale)
50
- return module
51
-
52
-
53
- def modulate(x, shift, scale):
54
- return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
 
trellis/pipelines/__init__.py DELETED
@@ -1,24 +0,0 @@
1
- from . import samplers
2
- from .trellis_image_to_3d import TrellisImageTo3DPipeline
3
-
4
-
5
- def from_pretrained(path: str):
6
- """
7
- Load a pipeline from a model folder or a Hugging Face model hub.
8
-
9
- Args:
10
- path: The path to the model. Can be either a local path or a Hugging Face model name.
11
- """
12
- import os
13
- import json
14
- is_local = os.path.exists(f"{path}/pipeline.json")
15
-
16
- if is_local:
17
- config_file = f"{path}/pipeline.json"
18
- else:
19
- from huggingface_hub import hf_hub_download
20
- config_file = hf_hub_download(path, "pipeline.json")
21
-
22
- with open(config_file, 'r') as f:
23
- config = json.load(f)
24
- return globals()[config['name']].from_pretrained(path)
 
trellis/pipelines/base.py DELETED
@@ -1,66 +0,0 @@
1
- from typing import *
2
- import torch
3
- import torch.nn as nn
4
- from .. import models
5
-
6
-
7
- class Pipeline:
8
- """
9
- A base class for pipelines.
10
- """
11
- def __init__(
12
- self,
13
- models: dict[str, nn.Module] = None,
14
- ):
15
- if models is None:
16
- return
17
- self.models = models
18
- for model in self.models.values():
19
- model.eval()
20
-
21
- @staticmethod
22
- def from_pretrained(path: str) -> "Pipeline":
23
- """
24
- Load a pretrained model.
25
- """
26
- import os
27
- import json
28
- is_local = os.path.exists(f"{path}/pipeline.json")
29
-
30
- if is_local:
31
- config_file = f"{path}/pipeline.json"
32
- else:
33
- from huggingface_hub import hf_hub_download
34
- config_file = hf_hub_download(path, "pipeline.json")
35
-
36
- with open(config_file, 'r') as f:
37
- args = json.load(f)['args']
38
-
39
- _models = {
40
- k: models.from_pretrained(f"{path}/{v}")
41
- for k, v in args['models'].items()
42
- }
43
-
44
- new_pipeline = Pipeline(_models)
45
- new_pipeline._pretrained_args = args
46
- return new_pipeline
47
-
48
- @property
49
- def device(self) -> torch.device:
50
- for model in self.models.values():
51
- if hasattr(model, 'device'):
52
- return model.device
53
- for model in self.models.values():
54
- if hasattr(model, 'parameters'):
55
- return next(model.parameters()).device
56
- raise RuntimeError("No device found.")
57
-
58
- def to(self, device: torch.device) -> None:
59
- for model in self.models.values():
60
- model.to(device)
61
-
62
- def cuda(self) -> None:
63
- self.to(torch.device("cuda"))
64
-
65
- def cpu(self) -> None:
66
- self.to(torch.device("cpu"))
 
trellis/pipelines/samplers/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .base import Sampler
2
- from .flow_euler import FlowEulerSampler, FlowEulerCfgSampler, FlowEulerGuidanceIntervalSampler
 
trellis/pipelines/samplers/base.py DELETED
@@ -1,20 +0,0 @@
1
- from typing import *
2
- from abc import ABC, abstractmethod
3
-
4
-
5
- class Sampler(ABC):
6
- """
7
- A base class for samplers.
8
- """
9
-
10
- @abstractmethod
11
- def sample(
12
- self,
13
- model,
14
- **kwargs
15
- ):
16
- """
17
- Sample from a model.
18
- """
19
- pass
20
-
 
trellis/pipelines/samplers/classifier_free_guidance_mixin.py DELETED
@@ -1,12 +0,0 @@
1
- from typing import *
2
-
3
-
4
- class ClassifierFreeGuidanceSamplerMixin:
5
- """
6
- A mixin class for samplers that apply classifier-free guidance.
7
- """
8
-
9
- def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, **kwargs):
10
- pred = super()._inference_model(model, x_t, t, cond, **kwargs)
11
- neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
12
- return (1 + cfg_strength) * pred - cfg_strength * neg_pred
 
trellis/pipelines/samplers/flow_euler.py DELETED
@@ -1,199 +0,0 @@
1
- from typing import *
2
- import torch
3
- import numpy as np
4
- from tqdm import tqdm
5
- from easydict import EasyDict as edict
6
- from .base import Sampler
7
- from .classifier_free_guidance_mixin import ClassifierFreeGuidanceSamplerMixin
8
- from .guidance_interval_mixin import GuidanceIntervalSamplerMixin
9
-
10
-
11
- class FlowEulerSampler(Sampler):
12
- """
13
- Generate samples from a flow-matching model using Euler sampling.
14
-
15
- Args:
16
- sigma_min: The minimum scale of noise in flow.
17
- """
18
- def __init__(
19
- self,
20
- sigma_min: float,
21
- ):
22
- self.sigma_min = sigma_min
23
-
24
- def _eps_to_xstart(self, x_t, t, eps):
25
- assert x_t.shape == eps.shape
26
- return (x_t - (self.sigma_min + (1 - self.sigma_min) * t) * eps) / (1 - t)
27
-
28
- def _xstart_to_eps(self, x_t, t, x_0):
29
- assert x_t.shape == x_0.shape
30
- return (x_t - (1 - t) * x_0) / (self.sigma_min + (1 - self.sigma_min) * t)
31
-
32
- def _v_to_xstart_eps(self, x_t, t, v):
33
- assert x_t.shape == v.shape
34
- eps = (1 - t) * v + x_t
35
- x_0 = (1 - self.sigma_min) * x_t - (self.sigma_min + (1 - self.sigma_min) * t) * v
36
- return x_0, eps
37
-
38
- def _inference_model(self, model, x_t, t, cond=None, **kwargs):
39
- t = torch.tensor([1000 * t] * x_t.shape[0], device=x_t.device, dtype=torch.float32)
40
- return model(x_t, t, cond, **kwargs)
41
-
42
- def _get_model_prediction(self, model, x_t, t, cond=None, **kwargs):
43
- pred_v = self._inference_model(model, x_t, t, cond, **kwargs)
44
- pred_x_0, pred_eps = self._v_to_xstart_eps(x_t=x_t, t=t, v=pred_v)
45
- return pred_x_0, pred_eps, pred_v
46
-
47
- @torch.no_grad()
48
- def sample_once(
49
- self,
50
- model,
51
- x_t,
52
- t: float,
53
- t_prev: float,
54
- cond: Optional[Any] = None,
55
- **kwargs
56
- ):
57
- """
58
- Sample x_{t-1} from the model using Euler method.
59
-
60
- Args:
61
- model: The model to sample from.
62
- x_t: The [N x C x ...] tensor of noisy inputs at time t.
63
- t: The current timestep.
64
- t_prev: The previous timestep.
65
- cond: conditional information.
66
- **kwargs: Additional arguments for model inference.
67
-
68
- Returns:
69
- a dict containing the following
70
- - 'pred_x_prev': x_{t-1}.
71
- - 'pred_x_0': a prediction of x_0.
72
- """
73
- pred_x_0, pred_eps, pred_v = self._get_model_prediction(model, x_t, t, cond, **kwargs)
74
- pred_x_prev = x_t - (t - t_prev) * pred_v
75
- return edict({"pred_x_prev": pred_x_prev, "pred_x_0": pred_x_0})
76
-
77
- @torch.no_grad()
78
- def sample(
79
- self,
80
- model,
81
- noise,
82
- cond: Optional[Any] = None,
83
- steps: int = 50,
84
- rescale_t: float = 1.0,
85
- verbose: bool = True,
86
- **kwargs
87
- ):
88
- """
89
- Generate samples from the model using Euler method.
90
-
91
- Args:
92
- model: The model to sample from.
93
- noise: The initial noise tensor.
94
- cond: conditional information.
95
- steps: The number of steps to sample.
96
- rescale_t: The rescale factor for t.
97
- verbose: If True, show a progress bar.
98
- **kwargs: Additional arguments for model_inference.
99
-
100
- Returns:
101
- a dict containing the following
102
- - 'samples': the model samples.
103
- - 'pred_x_t': a list of predictions of x_t.
104
- - 'pred_x_0': a list of predictions of x_0.
105
- """
106
- sample = noise
107
- t_seq = np.linspace(1, 0, steps + 1)
108
- t_seq = rescale_t * t_seq / (1 + (rescale_t - 1) * t_seq)
109
- t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(steps))
110
- ret = edict({"samples": None, "pred_x_t": [], "pred_x_0": []})
111
- for t, t_prev in tqdm(t_pairs, desc="Sampling", disable=not verbose):
112
- out = self.sample_once(model, sample, t, t_prev, cond, **kwargs)
113
- sample = out.pred_x_prev
114
- ret.pred_x_t.append(out.pred_x_prev)
115
- ret.pred_x_0.append(out.pred_x_0)
116
- ret.samples = sample
117
- return ret
118
-
119
-
120
- class FlowEulerCfgSampler(ClassifierFreeGuidanceSamplerMixin, FlowEulerSampler):
121
- """
122
- Generate samples from a flow-matching model using Euler sampling with classifier-free guidance.
123
- """
124
- @torch.no_grad()
125
- def sample(
126
- self,
127
- model,
128
- noise,
129
- cond,
130
- neg_cond,
131
- steps: int = 50,
132
- rescale_t: float = 1.0,
133
- cfg_strength: float = 3.0,
134
- verbose: bool = True,
135
- **kwargs
136
- ):
137
- """
138
- Generate samples from the model using Euler method.
139
-
140
- Args:
141
- model: The model to sample from.
142
- noise: The initial noise tensor.
143
- cond: conditional information.
144
- neg_cond: negative conditional information.
145
- steps: The number of steps to sample.
146
- rescale_t: The rescale factor for t.
147
- cfg_strength: The strength of classifier-free guidance.
148
- verbose: If True, show a progress bar.
149
- **kwargs: Additional arguments for model_inference.
150
-
151
- Returns:
152
- a dict containing the following
153
- - 'samples': the model samples.
154
- - 'pred_x_t': a list of predictions of x_t.
155
- - 'pred_x_0': a list of predictions of x_0.
156
- """
157
- return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, **kwargs)
158
-
159
-
160
- class FlowEulerGuidanceIntervalSampler(GuidanceIntervalSamplerMixin, FlowEulerSampler):
161
- """
162
- Generate samples from a flow-matching model using Euler sampling with classifier-free guidance and interval.
163
- """
164
- @torch.no_grad()
165
- def sample(
166
- self,
167
- model,
168
- noise,
169
- cond,
170
- neg_cond,
171
- steps: int = 50,
172
- rescale_t: float = 1.0,
173
- cfg_strength: float = 3.0,
174
- cfg_interval: Tuple[float, float] = (0.0, 1.0),
175
- verbose: bool = True,
176
- **kwargs
177
- ):
178
- """
179
- Generate samples from the model using Euler method.
180
-
181
- Args:
182
- model: The model to sample from.
183
- noise: The initial noise tensor.
184
- cond: conditional information.
185
- neg_cond: negative conditional information.
186
- steps: The number of steps to sample.
187
- rescale_t: The rescale factor for t.
188
- cfg_strength: The strength of classifier-free guidance.
189
- cfg_interval: The interval for classifier-free guidance.
190
- verbose: If True, show a progress bar.
191
- **kwargs: Additional arguments for model_inference.
192
-
193
- Returns:
194
- a dict containing the following
195
- - 'samples': the model samples.
196
- - 'pred_x_t': a list of predictions of x_t.
197
- - 'pred_x_0': a list of predictions of x_0.
198
- """
199
- return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, cfg_interval=cfg_interval, **kwargs)
trellis/pipelines/samplers/guidance_interval_mixin.py DELETED
@@ -1,15 +0,0 @@
1
- from typing import *
2
-
3
-
4
- class GuidanceIntervalSamplerMixin:
5
- """
6
- A mixin class for samplers that apply classifier-free guidance with interval.
7
- """
8
-
9
- def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
10
- if cfg_interval[0] <= t <= cfg_interval[1]:
11
- pred = super()._inference_model(model, x_t, t, cond, **kwargs)
12
- neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
13
- return (1 + cfg_strength) * pred - cfg_strength * neg_pred
14
- else:
15
- return super()._inference_model(model, x_t, t, cond, **kwargs)
trellis/pipelines/trellis_image_to_3d.py DELETED
@@ -1,376 +0,0 @@
1
- from typing import *
2
- from contextlib import contextmanager
3
- import torch
4
- import torch.nn as nn
5
- import torch.nn.functional as F
6
- import numpy as np
7
- from tqdm import tqdm
8
- from easydict import EasyDict as edict
9
- from torchvision import transforms
10
- from PIL import Image
11
- import rembg
12
- from .base import Pipeline
13
- from . import samplers
14
- from ..modules import sparse as sp
15
- from ..representations import Gaussian, Strivec, MeshExtractResult
16
-
17
-
18
- class TrellisImageTo3DPipeline(Pipeline):
19
- """
20
- Pipeline for inferring Trellis image-to-3D models.
21
-
22
- Args:
23
- models (dict[str, nn.Module]): The models to use in the pipeline.
24
- sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure.
25
- slat_sampler (samplers.Sampler): The sampler for the structured latent.
26
- slat_normalization (dict): The normalization parameters for the structured latent.
27
- image_cond_model (str): The name of the image conditioning model.
28
- """
29
- def __init__(
30
- self,
31
- models: dict[str, nn.Module] = None,
32
- sparse_structure_sampler: samplers.Sampler = None,
33
- slat_sampler: samplers.Sampler = None,
34
- slat_normalization: dict = None,
35
- image_cond_model: str = None,
36
- ):
37
- if models is None:
38
- return
39
- super().__init__(models)
40
- self.sparse_structure_sampler = sparse_structure_sampler
41
- self.slat_sampler = slat_sampler
42
- self.sparse_structure_sampler_params = {}
43
- self.slat_sampler_params = {}
44
- self.slat_normalization = slat_normalization
45
- self.rembg_session = None
46
- self._init_image_cond_model(image_cond_model)
47
-
48
- @staticmethod
49
- def from_pretrained(path: str) -> "TrellisImageTo3DPipeline":
50
- """
51
- Load a pretrained model.
52
-
53
- Args:
54
- path (str): The path to the model. Can be either local path or a Hugging Face repository.
55
- """
56
- pipeline = super(TrellisImageTo3DPipeline, TrellisImageTo3DPipeline).from_pretrained(path)
57
- new_pipeline = TrellisImageTo3DPipeline()
58
- new_pipeline.__dict__ = pipeline.__dict__
59
- args = pipeline._pretrained_args
60
-
61
- new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler']['args'])
62
- new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler']['params']
63
-
64
- new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler']['args'])
65
- new_pipeline.slat_sampler_params = args['slat_sampler']['params']
66
-
67
- new_pipeline.slat_normalization = args['slat_normalization']
68
-
69
- new_pipeline._init_image_cond_model(args['image_cond_model'])
70
-
71
- return new_pipeline
72
-
73
- def _init_image_cond_model(self, name: str):
74
- """
75
- Initialize the image conditioning model.
76
- """
77
- dinov2_model = torch.hub.load('facebookresearch/dinov2', name, pretrained=True)
78
- dinov2_model.eval()
79
- self.models['image_cond_model'] = dinov2_model
80
- transform = transforms.Compose([
81
- transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
82
- ])
83
- self.image_cond_model_transform = transform
84
-
85
- def preprocess_image(self, input: Image.Image) -> Image.Image:
86
- """
87
- Preprocess the input image.
88
- """
89
- # if has alpha channel, use it directly; otherwise, remove background
90
- has_alpha = False
91
- if input.mode == 'RGBA':
92
- alpha = np.array(input)[:, :, 3]
93
- if not np.all(alpha == 255):
94
- has_alpha = True
95
- if has_alpha:
96
- output = input
97
- else:
98
- input = input.convert('RGB')
99
- max_size = max(input.size)
100
- scale = min(1, 1024 / max_size)
101
- if scale < 1:
102
- input = input.resize((int(input.width * scale), int(input.height * scale)), Image.Resampling.LANCZOS)
103
- if getattr(self, 'rembg_session', None) is None:
104
- self.rembg_session = rembg.new_session('u2net')
105
- output = rembg.remove(input, session=self.rembg_session)
106
- output_np = np.array(output)
107
- alpha = output_np[:, :, 3]
108
- bbox = np.argwhere(alpha > 0.8 * 255)
109
- bbox = np.min(bbox[:, 1]), np.min(bbox[:, 0]), np.max(bbox[:, 1]), np.max(bbox[:, 0])
110
- center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
111
- size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
112
- size = int(size * 1.2)
113
- bbox = center[0] - size // 2, center[1] - size // 2, center[0] + size // 2, center[1] + size // 2
114
- output = output.crop(bbox) # type: ignore
115
- output = output.resize((518, 518), Image.Resampling.LANCZOS)
116
- output = np.array(output).astype(np.float32) / 255
117
- output = output[:, :, :3] * output[:, :, 3:4]
118
- output = Image.fromarray((output * 255).astype(np.uint8))
119
- return output
120
-
121
- @torch.no_grad()
122
- def encode_image(self, image: Union[torch.Tensor, list[Image.Image]]) -> torch.Tensor:
123
- """
124
- Encode the image.
125
-
126
- Args:
127
- image (Union[torch.Tensor, list[Image.Image]]): The image to encode
128
-
129
- Returns:
130
- torch.Tensor: The encoded features.
131
- """
132
- if isinstance(image, torch.Tensor):
133
- assert image.ndim == 4, "Image tensor should be batched (B, C, H, W)"
134
- elif isinstance(image, list):
135
- assert all(isinstance(i, Image.Image) for i in image), "Image list should be list of PIL images"
136
- image = [i.resize((518, 518), Image.LANCZOS) for i in image]
137
- image = [np.array(i.convert('RGB')).astype(np.float32) / 255 for i in image]
138
- image = [torch.from_numpy(i).permute(2, 0, 1).float() for i in image]
139
- image = torch.stack(image).to(self.device)
140
- else:
141
- raise ValueError(f"Unsupported type of image: {type(image)}")
142
-
143
- image = self.image_cond_model_transform(image).to(self.device)
144
- features = self.models['image_cond_model'](image, is_training=True)['x_prenorm']
145
- patchtokens = F.layer_norm(features, features.shape[-1:])
146
- return patchtokens
147
-
148
- def get_cond(self, image: Union[torch.Tensor, list[Image.Image]]) -> dict:
149
- """
150
- Get the conditioning information for the model.
151
-
152
- Args:
153
- image (Union[torch.Tensor, list[Image.Image]]): The image prompts.
154
-
155
- Returns:
156
- dict: The conditioning information
157
- """
158
- cond = self.encode_image(image)
159
- neg_cond = torch.zeros_like(cond)
160
- return {
161
- 'cond': cond,
162
- 'neg_cond': neg_cond,
163
- }
164
-
165
- def sample_sparse_structure(
166
- self,
167
- cond: dict,
168
- num_samples: int = 1,
169
- sampler_params: dict = {},
170
- ) -> torch.Tensor:
171
- """
172
- Sample sparse structures with the given conditioning.
173
-
174
- Args:
175
- cond (dict): The conditioning information.
176
- num_samples (int): The number of samples to generate.
177
- sampler_params (dict): Additional parameters for the sampler.
178
- """
179
- # Sample occupancy latent
180
- flow_model = self.models['sparse_structure_flow_model']
181
- reso = flow_model.resolution
182
- noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso).to(self.device)
183
- sampler_params = {**self.sparse_structure_sampler_params, **sampler_params}
184
- z_s = self.sparse_structure_sampler.sample(
185
- flow_model,
186
- noise,
187
- **cond,
188
- **sampler_params,
189
- verbose=True
190
- ).samples
191
-
192
- # Decode occupancy latent
193
- decoder = self.models['sparse_structure_decoder']
194
- coords = torch.argwhere(decoder(z_s)>0)[:, [0, 2, 3, 4]].int()
195
-
196
- return coords
197
-
198
- def decode_slat(
199
- self,
200
- slat: sp.SparseTensor,
201
- formats: List[str] = ['mesh', 'gaussian', 'radiance_field'],
202
- ) -> dict:
203
- """
204
- Decode the structured latent.
205
-
206
- Args:
207
- slat (sp.SparseTensor): The structured latent.
208
- formats (List[str]): The formats to decode the structured latent to.
209
-
210
- Returns:
211
- dict: The decoded structured latent.
212
- """
213
- ret = {}
214
- if 'mesh' in formats:
215
- ret['mesh'] = self.models['slat_decoder_mesh'](slat)
216
- if 'gaussian' in formats:
217
- ret['gaussian'] = self.models['slat_decoder_gs'](slat)
218
- if 'radiance_field' in formats:
219
- ret['radiance_field'] = self.models['slat_decoder_rf'](slat)
220
- return ret
221
-
222
- def sample_slat(
223
- self,
224
- cond: dict,
225
- coords: torch.Tensor,
226
- sampler_params: dict = {},
227
- ) -> sp.SparseTensor:
228
- """
229
- Sample structured latent with the given conditioning.
230
-
231
- Args:
232
- cond (dict): The conditioning information.
233
- coords (torch.Tensor): The coordinates of the sparse structure.
234
- sampler_params (dict): Additional parameters for the sampler.
235
- """
236
- # Sample structured latent
237
- flow_model = self.models['slat_flow_model']
238
- noise = sp.SparseTensor(
239
- feats=torch.randn(coords.shape[0], flow_model.in_channels).to(self.device),
240
- coords=coords,
241
- )
242
- sampler_params = {**self.slat_sampler_params, **sampler_params}
243
- slat = self.slat_sampler.sample(
244
- flow_model,
245
- noise,
246
- **cond,
247
- **sampler_params,
248
- verbose=True
249
- ).samples
250
-
251
- std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device)
252
- mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device)
253
- slat = slat * std + mean
254
-
255
- return slat
256
-
257
- @torch.no_grad()
258
- def run(
259
- self,
260
- image: Image.Image,
261
- num_samples: int = 1,
262
- seed: int = 42,
263
- sparse_structure_sampler_params: dict = {},
264
- slat_sampler_params: dict = {},
265
- formats: List[str] = ['mesh', 'gaussian', 'radiance_field'],
266
- preprocess_image: bool = True,
267
- ) -> dict:
268
- """
269
- Run the pipeline.
270
-
271
- Args:
272
- image (Image.Image): The image prompt.
273
- num_samples (int): The number of samples to generate.
274
- sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler.
275
- slat_sampler_params (dict): Additional parameters for the structured latent sampler.
276
- preprocess_image (bool): Whether to preprocess the image.
277
- """
278
- if preprocess_image:
279
- image = self.preprocess_image(image)
280
- cond = self.get_cond([image])
281
- torch.manual_seed(seed)
282
- coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params)
283
- slat = self.sample_slat(cond, coords, slat_sampler_params)
284
- return self.decode_slat(slat, formats)
285
-
286
- @contextmanager
287
- def inject_sampler_multi_image(
288
- self,
289
- sampler_name: str,
290
- num_images: int,
291
- num_steps: int,
292
- mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
293
- ):
294
- """
295
- Inject a sampler with multiple images as condition.
296
-
297
- Args:
298
- sampler_name (str): The name of the sampler to inject.
299
- num_images (int): The number of images to condition on.
300
- num_steps (int): The number of steps to run the sampler for.
301
- """
302
- sampler = getattr(self, sampler_name)
303
- setattr(sampler, f'_old_inference_model', sampler._inference_model)
304
-
305
- if mode == 'stochastic':
306
- if num_images > num_steps:
307
- print(f"\033[93mWarning: number of conditioning images is greater than number of steps for {sampler_name}. "
308
- "This may lead to performance degradation.\033[0m")
309
-
310
- cond_indices = (np.arange(num_steps) % num_images).tolist()
311
- def _new_inference_model(self, model, x_t, t, cond, **kwargs):
312
- cond_idx = cond_indices.pop(0)
313
- cond_i = cond[cond_idx:cond_idx+1]
314
- return self._old_inference_model(model, x_t, t, cond=cond_i, **kwargs)
315
-
316
- elif mode == 'multidiffusion':
317
- from .samplers import FlowEulerSampler
318
- def _new_inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
319
- if cfg_interval[0] <= t <= cfg_interval[1]:
320
- preds = []
321
- for i in range(len(cond)):
322
- preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
323
- pred = sum(preds) / len(preds)
324
- neg_pred = FlowEulerSampler._inference_model(self, model, x_t, t, neg_cond, **kwargs)
325
- return (1 + cfg_strength) * pred - cfg_strength * neg_pred
326
- else:
327
- preds = []
328
- for i in range(len(cond)):
329
- preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
330
- pred = sum(preds) / len(preds)
331
- return pred
332
-
333
- else:
334
- raise ValueError(f"Unsupported mode: {mode}")
335
-
336
- sampler._inference_model = _new_inference_model.__get__(sampler, type(sampler))
337
-
338
- yield
339
-
340
- sampler._inference_model = sampler._old_inference_model
341
- delattr(sampler, f'_old_inference_model')
342
-
343
- @torch.no_grad()
344
- def run_multi_image(
345
- self,
346
- images: List[Image.Image],
347
- num_samples: int = 1,
348
- seed: int = 42,
349
- sparse_structure_sampler_params: dict = {},
350
- slat_sampler_params: dict = {},
351
- formats: List[str] = ['mesh', 'gaussian', 'radiance_field'],
352
- preprocess_image: bool = True,
353
- mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
354
- ) -> dict:
355
- """
356
- Run the pipeline with multiple images as conditions.
357
-
358
- Args:
359
- images (List[Image.Image]): The multi-view images of the assets
360
- num_samples (int): The number of samples to generate.
361
- sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler.
362
- slat_sampler_params (dict): Additional parameters for the structured latent sampler.
363
- preprocess_image (bool): Whether to preprocess the image.
364
- """
365
- if preprocess_image:
366
- images = [self.preprocess_image(image) for image in images]
367
- cond = self.get_cond(images)
368
- cond['neg_cond'] = cond['neg_cond'][:1]
369
- torch.manual_seed(seed)
370
- ss_steps = {**self.sparse_structure_sampler_params, **sparse_structure_sampler_params}.get('steps')
371
- with self.inject_sampler_multi_image('sparse_structure_sampler', len(images), ss_steps, mode=mode):
372
- coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params)
373
- slat_steps = {**self.slat_sampler_params, **slat_sampler_params}.get('steps')
374
- with self.inject_sampler_multi_image('slat_sampler', len(images), slat_steps, mode=mode):
375
- slat = self.sample_slat(cond, coords, slat_sampler_params)
376
- return self.decode_slat(slat, formats)
trellis/renderers/__init__.py DELETED
@@ -1,31 +0,0 @@
1
- import importlib
2
-
3
- __attributes = {
4
- 'OctreeRenderer': 'octree_renderer',
5
- 'GaussianRenderer': 'gaussian_render',
6
- 'MeshRenderer': 'mesh_renderer',
7
- }
8
-
9
- __submodules = []
10
-
11
- __all__ = list(__attributes.keys()) + __submodules
12
-
13
- def __getattr__(name):
14
- if name not in globals():
15
- if name in __attributes:
16
- module_name = __attributes[name]
17
- module = importlib.import_module(f".{module_name}", __name__)
18
- globals()[name] = getattr(module, name)
19
- elif name in __submodules:
20
- module = importlib.import_module(f".{name}", __name__)
21
- globals()[name] = module
22
- else:
23
- raise AttributeError(f"module {__name__} has no attribute {name}")
24
- return globals()[name]
25
-
26
-
27
- # For Pylance
28
- if __name__ == '__main__':
29
- from .octree_renderer import OctreeRenderer
30
- from .gaussian_render import GaussianRenderer
31
- from .mesh_renderer import MeshRenderer
trellis/renderers/gaussian_render.py DELETED
@@ -1,231 +0,0 @@
1
- #
2
- # Copyright (C) 2023, Inria
3
- # GRAPHDECO research group, https://team.inria.fr/graphdeco
4
- # All rights reserved.
5
- #
6
- # This software is free for non-commercial, research and evaluation use
7
- # under the terms of the LICENSE.md file.
8
- #
9
- # For inquiries contact [email protected]
10
- #
11
-
12
- import torch
13
- import math
14
- from easydict import EasyDict as edict
15
- import numpy as np
16
- from ..representations.gaussian import Gaussian
17
- from .sh_utils import eval_sh
18
- import torch.nn.functional as F
19
- from easydict import EasyDict as edict
20
-
21
-
22
- def intrinsics_to_projection(
23
- intrinsics: torch.Tensor,
24
- near: float,
25
- far: float,
26
- ) -> torch.Tensor:
27
- """
28
- OpenCV intrinsics to OpenGL perspective matrix
29
-
30
- Args:
31
- intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
32
- near (float): near plane to clip
33
- far (float): far plane to clip
34
- Returns:
35
- (torch.Tensor): [4, 4] OpenGL perspective matrix
36
- """
37
- fx, fy = intrinsics[0, 0], intrinsics[1, 1]
38
- cx, cy = intrinsics[0, 2], intrinsics[1, 2]
39
- ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
40
- ret[0, 0] = 2 * fx
41
- ret[1, 1] = 2 * fy
42
- ret[0, 2] = 2 * cx - 1
43
- ret[1, 2] = - 2 * cy + 1
44
- ret[2, 2] = far / (far - near)
45
- ret[2, 3] = near * far / (near - far)
46
- ret[3, 2] = 1.
47
- return ret
48
-
49
-
50
- def render(viewpoint_camera, pc : Gaussian, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
51
- """
52
- Render the scene.
53
-
54
- Background tensor (bg_color) must be on GPU!
55
- """
56
- # lazy import
57
- if 'GaussianRasterizer' not in globals():
58
- from diff_gaussian_rasterization import GaussianRasterizer, GaussianRasterizationSettings
59
-
60
- # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
61
- screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
62
- try:
63
- screenspace_points.retain_grad()
64
- except:
65
- pass
66
- # Set up rasterization configuration
67
- tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
68
- tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
69
-
70
- kernel_size = pipe.kernel_size
71
- subpixel_offset = torch.zeros((int(viewpoint_camera.image_height), int(viewpoint_camera.image_width), 2), dtype=torch.float32, device="cuda")
72
-
73
- raster_settings = GaussianRasterizationSettings(
74
- image_height=int(viewpoint_camera.image_height),
75
- image_width=int(viewpoint_camera.image_width),
76
- tanfovx=tanfovx,
77
- tanfovy=tanfovy,
78
- kernel_size=kernel_size,
79
- subpixel_offset=subpixel_offset,
80
- bg=bg_color,
81
- scale_modifier=scaling_modifier,
82
- viewmatrix=viewpoint_camera.world_view_transform,
83
- projmatrix=viewpoint_camera.full_proj_transform,
84
- sh_degree=pc.active_sh_degree,
85
- campos=viewpoint_camera.camera_center,
86
- prefiltered=False,
87
- debug=pipe.debug
88
- )
89
-
90
- rasterizer = GaussianRasterizer(raster_settings=raster_settings)
91
-
92
- means3D = pc.get_xyz
93
- means2D = screenspace_points
94
- opacity = pc.get_opacity
95
-
96
- # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
97
- # scaling / rotation by the rasterizer.
98
- scales = None
99
- rotations = None
100
- cov3D_precomp = None
101
- if pipe.compute_cov3D_python:
102
- cov3D_precomp = pc.get_covariance(scaling_modifier)
103
- else:
104
- scales = pc.get_scaling
105
- rotations = pc.get_rotation
106
-
107
- # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
108
- # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
109
- shs = None
110
- colors_precomp = None
111
- if override_color is None:
112
- if pipe.convert_SHs_python:
113
- shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
114
- dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
115
- dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
116
- sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
117
- colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
118
- else:
119
- shs = pc.get_features
120
- else:
121
- colors_precomp = override_color
122
-
123
- # Rasterize visible Gaussians to image, obtain their radii (on screen).
124
- rendered_image, radii = rasterizer(
125
- means3D = means3D,
126
- means2D = means2D,
127
- shs = shs,
128
- colors_precomp = colors_precomp,
129
- opacities = opacity,
130
- scales = scales,
131
- rotations = rotations,
132
- cov3D_precomp = cov3D_precomp
133
- )
134
-
135
- # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
136
- # They will be excluded from value updates used in the splitting criteria.
137
- return edict({"render": rendered_image,
138
- "viewspace_points": screenspace_points,
139
- "visibility_filter" : radii > 0,
140
- "radii": radii})
141
-
142
-
143
- class GaussianRenderer:
144
- """
145
- Renderer for the Gaussian representation.
146
-
147
- Args:
148
- rendering_options (dict): Rendering options.
149
- """
150
-
151
- def __init__(self, rendering_options={}) -> None:
152
- self.pipe = edict({
153
- "kernel_size": 0.1,
154
- "convert_SHs_python": False,
155
- "compute_cov3D_python": False,
156
- "scale_modifier": 1.0,
157
- "debug": False
158
- })
159
- self.rendering_options = edict({
160
- "resolution": None,
161
- "near": None,
162
- "far": None,
163
- "ssaa": 1,
164
- "bg_color": 'random',
165
- })
166
- self.rendering_options.update(rendering_options)
167
- self.bg_color = None
168
-
169
- def render(
170
- self,
171
- gaussian: Gaussian,
172
- extrinsics: torch.Tensor,
173
- intrinsics: torch.Tensor,
174
- colors_overwrite: torch.Tensor = None
175
- ) -> edict:
176
- """
177
- Render the Gaussian.
178
-
179
- Args:
180
- gaussian (Gaussian): Gaussian model to render
181
- extrinsics (torch.Tensor): (4, 4) camera extrinsics
182
- intrinsics (torch.Tensor): (3, 3) camera intrinsics
183
- colors_overwrite (torch.Tensor): (N, 3) override color
184
-
185
- Returns:
186
- edict containing:
187
- color (torch.Tensor): (3, H, W) rendered color image
188
- """
189
- resolution = self.rendering_options["resolution"]
190
- near = self.rendering_options["near"]
191
- far = self.rendering_options["far"]
192
- ssaa = self.rendering_options["ssaa"]
193
-
194
- if self.rendering_options["bg_color"] == 'random':
195
- self.bg_color = torch.zeros(3, dtype=torch.float32, device="cuda")
196
- if np.random.rand() < 0.5:
197
- self.bg_color += 1
198
- else:
199
- self.bg_color = torch.tensor(self.rendering_options["bg_color"], dtype=torch.float32, device="cuda")
200
-
201
- view = extrinsics
202
- perspective = intrinsics_to_projection(intrinsics, near, far)
203
- camera = torch.inverse(view)[:3, 3]
204
- focalx = intrinsics[0, 0]
205
- focaly = intrinsics[1, 1]
206
- fovx = 2 * torch.atan(0.5 / focalx)
207
- fovy = 2 * torch.atan(0.5 / focaly)
208
-
209
- camera_dict = edict({
210
- "image_height": resolution * ssaa,
211
- "image_width": resolution * ssaa,
212
- "FoVx": fovx,
213
- "FoVy": fovy,
214
- "znear": near,
215
- "zfar": far,
216
- "world_view_transform": view.T.contiguous(),
217
- "projection_matrix": perspective.T.contiguous(),
218
- "full_proj_transform": (perspective @ view).T.contiguous(),
219
- "camera_center": camera
220
- })
221
-
222
- # Render
223
- render_ret = render(camera_dict, gaussian, self.pipe, self.bg_color, override_color=colors_overwrite, scaling_modifier=self.pipe.scale_modifier)
224
-
225
- if ssaa > 1:
226
- render_ret.render = F.interpolate(render_ret.render[None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
227
-
228
- ret = edict({
229
- 'color': render_ret['render']
230
- })
231
- return ret
trellis/renderers/mesh_renderer.py DELETED
@@ -1,140 +0,0 @@
1
- # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
8
- import torch
9
- import nvdiffrast.torch as dr
10
- from easydict import EasyDict as edict
11
- from ..representations.mesh import MeshExtractResult
12
- import torch.nn.functional as F
13
-
14
-
15
- def intrinsics_to_projection(
16
- intrinsics: torch.Tensor,
17
- near: float,
18
- far: float,
19
- ) -> torch.Tensor:
20
- """
21
- OpenCV intrinsics to OpenGL perspective matrix
22
-
23
- Args:
24
- intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
25
- near (float): near plane to clip
26
- far (float): far plane to clip
27
- Returns:
28
- (torch.Tensor): [4, 4] OpenGL perspective matrix
29
- """
30
- fx, fy = intrinsics[0, 0], intrinsics[1, 1]
31
- cx, cy = intrinsics[0, 2], intrinsics[1, 2]
32
- ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
33
- ret[0, 0] = 2 * fx
34
- ret[1, 1] = 2 * fy
35
- ret[0, 2] = 2 * cx - 1
36
- ret[1, 2] = - 2 * cy + 1
37
- ret[2, 2] = far / (far - near)
38
- ret[2, 3] = near * far / (near - far)
39
- ret[3, 2] = 1.
40
- return ret
41
-
42
-
43
- class MeshRenderer:
44
- """
45
- Renderer for the Mesh representation.
46
-
47
- Args:
48
- rendering_options (dict): Rendering options.
49
- glctx (nvdiffrast.torch.RasterizeGLContext): RasterizeGLContext object for CUDA/OpenGL interop.
50
- """
51
- def __init__(self, rendering_options={}, device='cuda'):
52
- self.rendering_options = edict({
53
- "resolution": None,
54
- "near": None,
55
- "far": None,
56
- "ssaa": 1
57
- })
58
- self.rendering_options.update(rendering_options)
59
- self.glctx = dr.RasterizeCudaContext(device=device)
60
- self.device=device
61
-
62
- def render(
63
- self,
64
- mesh : MeshExtractResult,
65
- extrinsics: torch.Tensor,
66
- intrinsics: torch.Tensor,
67
- return_types = ["mask", "normal", "depth"]
68
- ) -> edict:
69
- """
70
- Render the mesh.
71
-
72
- Args:
73
- mesh : meshmodel
74
- extrinsics (torch.Tensor): (4, 4) camera extrinsics
75
- intrinsics (torch.Tensor): (3, 3) camera intrinsics
76
- return_types (list): list of return types, can be "mask", "depth", "normal_map", "normal", "color"
77
-
78
- Returns:
79
- edict based on return_types containing:
80
- color (torch.Tensor): [3, H, W] rendered color image
81
- depth (torch.Tensor): [H, W] rendered depth image
82
- normal (torch.Tensor): [3, H, W] rendered normal image
83
- normal_map (torch.Tensor): [3, H, W] rendered normal map image
84
- mask (torch.Tensor): [H, W] rendered mask image
85
- """
86
- resolution = self.rendering_options["resolution"]
87
- near = self.rendering_options["near"]
88
- far = self.rendering_options["far"]
89
- ssaa = self.rendering_options["ssaa"]
90
-
91
- if mesh.vertices.shape[0] == 0 or mesh.faces.shape[0] == 0:
92
- default_img = torch.zeros((1, resolution, resolution, 3), dtype=torch.float32, device=self.device)
93
- ret_dict = {k : default_img if k in ['normal', 'normal_map', 'color'] else default_img[..., :1] for k in return_types}
94
- return ret_dict
95
-
96
- perspective = intrinsics_to_projection(intrinsics, near, far)
97
-
98
- RT = extrinsics.unsqueeze(0)
99
- full_proj = (perspective @ extrinsics).unsqueeze(0)
100
-
101
- vertices = mesh.vertices.unsqueeze(0)
102
-
103
- vertices_homo = torch.cat([vertices, torch.ones_like(vertices[..., :1])], dim=-1)
104
- vertices_camera = torch.bmm(vertices_homo, RT.transpose(-1, -2))
105
- vertices_clip = torch.bmm(vertices_homo, full_proj.transpose(-1, -2))
106
- faces_int = mesh.faces.int()
107
- rast, _ = dr.rasterize(
108
- self.glctx, vertices_clip, faces_int, (resolution * ssaa, resolution * ssaa))
109
-
110
- out_dict = edict()
111
- for type in return_types:
112
- img = None
113
- if type == "mask" :
114
- img = dr.antialias((rast[..., -1:] > 0).float(), rast, vertices_clip, faces_int)
115
- elif type == "depth":
116
- img = dr.interpolate(vertices_camera[..., 2:3].contiguous(), rast, faces_int)[0]
117
- img = dr.antialias(img, rast, vertices_clip, faces_int)
118
- elif type == "normal" :
119
- img = dr.interpolate(
120
- mesh.face_normal.reshape(1, -1, 3), rast,
121
- torch.arange(mesh.faces.shape[0] * 3, device=self.device, dtype=torch.int).reshape(-1, 3)
122
- )[0]
123
- img = dr.antialias(img, rast, vertices_clip, faces_int)
124
- # map normals from [-1, 1] to [0, 1]
125
- img = (img + 1) / 2
126
- elif type == "normal_map" :
127
- img = dr.interpolate(mesh.vertex_attrs[:, 3:].contiguous(), rast, faces_int)[0]
128
- img = dr.antialias(img, rast, vertices_clip, faces_int)
129
- elif type == "color" :
130
- img = dr.interpolate(mesh.vertex_attrs[:, :3].contiguous(), rast, faces_int)[0]
131
- img = dr.antialias(img, rast, vertices_clip, faces_int)
132
-
133
- if ssaa > 1:
134
- img = F.interpolate(img.permute(0, 3, 1, 2), (resolution, resolution), mode='bilinear', align_corners=False, antialias=True)
135
- img = img.squeeze()
136
- else:
137
- img = img.permute(0, 3, 1, 2).squeeze()
138
- out_dict[type] = img
139
-
140
- return out_dict
trellis/renderers/octree_renderer.py DELETED
@@ -1,300 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn.functional as F
4
- import math
5
- import cv2
6
- from scipy.stats import qmc
7
- from easydict import EasyDict as edict
8
- from ..representations.octree import DfsOctree
9
-
10
-
11
- def intrinsics_to_projection(
12
- intrinsics: torch.Tensor,
13
- near: float,
14
- far: float,
15
- ) -> torch.Tensor:
16
- """
17
- OpenCV intrinsics to OpenGL perspective matrix
18
-
19
- Args:
20
- intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
21
- near (float): near plane to clip
22
- far (float): far plane to clip
23
- Returns:
24
- (torch.Tensor): [4, 4] OpenGL perspective matrix
25
- """
26
- fx, fy = intrinsics[0, 0], intrinsics[1, 1]
27
- cx, cy = intrinsics[0, 2], intrinsics[1, 2]
28
- ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
29
- ret[0, 0] = 2 * fx
30
- ret[1, 1] = 2 * fy
31
- ret[0, 2] = 2 * cx - 1
32
- ret[1, 2] = - 2 * cy + 1
33
- ret[2, 2] = far / (far - near)
34
- ret[2, 3] = near * far / (near - far)
35
- ret[3, 2] = 1.
36
- return ret
37
-
38
-
39
- def render(viewpoint_camera, octree : DfsOctree, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, used_rank = None, colors_overwrite = None, aux=None, halton_sampler=None):
40
- """
41
- Render the scene.
42
-
43
- Background tensor (bg_color) must be on GPU!
44
- """
45
- # lazy import
46
- if 'OctreeTrivecRasterizer' not in globals():
47
- from diffoctreerast import OctreeVoxelRasterizer, OctreeGaussianRasterizer, OctreeTrivecRasterizer, OctreeDecoupolyRasterizer
48
-
49
- # Set up rasterization configuration
50
- tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
51
- tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
52
-
53
- raster_settings = edict(
54
- image_height=int(viewpoint_camera.image_height),
55
- image_width=int(viewpoint_camera.image_width),
56
- tanfovx=tanfovx,
57
- tanfovy=tanfovy,
58
- bg=bg_color,
59
- scale_modifier=scaling_modifier,
60
- viewmatrix=viewpoint_camera.world_view_transform,
61
- projmatrix=viewpoint_camera.full_proj_transform,
62
- sh_degree=octree.active_sh_degree,
63
- campos=viewpoint_camera.camera_center,
64
- with_distloss=pipe.with_distloss,
65
- jitter=pipe.jitter,
66
- debug=pipe.debug,
67
- )
68
-
69
- positions = octree.get_xyz
70
- if octree.primitive == "voxel":
71
- densities = octree.get_density
72
- elif octree.primitive == "gaussian":
73
- opacities = octree.get_opacity
74
- elif octree.primitive == "trivec":
75
- trivecs = octree.get_trivec
76
- densities = octree.get_density
77
- raster_settings.density_shift = octree.density_shift
78
- elif octree.primitive == "decoupoly":
79
- decoupolys_V, decoupolys_g = octree.get_decoupoly
80
- densities = octree.get_density
81
- raster_settings.density_shift = octree.density_shift
82
- else:
83
- raise ValueError(f"Unknown primitive {octree.primitive}")
84
- depths = octree.get_depth
85
-
86
- # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
87
- # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
88
- colors_precomp = None
89
- shs = octree.get_features
90
- if octree.primitive in ["voxel", "gaussian"] and colors_overwrite is not None:
91
- colors_precomp = colors_overwrite
92
- shs = None
93
-
94
- ret = edict()
95
-
96
- if octree.primitive == "voxel":
97
- renderer = OctreeVoxelRasterizer(raster_settings=raster_settings)
98
- rgb, depth, alpha, distloss = renderer(
99
- positions = positions,
100
- densities = densities,
101
- shs = shs,
102
- colors_precomp = colors_precomp,
103
- depths = depths,
104
- aabb = octree.aabb,
105
- aux = aux,
106
- )
107
- ret['rgb'] = rgb
108
- ret['depth'] = depth
109
- ret['alpha'] = alpha
110
- ret['distloss'] = distloss
111
- elif octree.primitive == "gaussian":
112
- renderer = OctreeGaussianRasterizer(raster_settings=raster_settings)
113
- rgb, depth, alpha = renderer(
114
- positions = positions,
115
- opacities = opacities,
116
- shs = shs,
117
- colors_precomp = colors_precomp,
118
- depths = depths,
119
- aabb = octree.aabb,
120
- aux = aux,
121
- )
122
- ret['rgb'] = rgb
123
- ret['depth'] = depth
124
- ret['alpha'] = alpha
125
- elif octree.primitive == "trivec":
126
- raster_settings.used_rank = used_rank if used_rank is not None else trivecs.shape[1]
127
- renderer = OctreeTrivecRasterizer(raster_settings=raster_settings)
128
- rgb, depth, alpha, percent_depth = renderer(
129
- positions = positions,
130
- trivecs = trivecs,
131
- densities = densities,
132
- shs = shs,
133
- colors_precomp = colors_precomp,
134
- colors_overwrite = colors_overwrite,
135
- depths = depths,
136
- aabb = octree.aabb,
137
- aux = aux,
138
- halton_sampler = halton_sampler,
139
- )
140
- ret['percent_depth'] = percent_depth
141
- ret['rgb'] = rgb
142
- ret['depth'] = depth
143
- ret['alpha'] = alpha
144
- elif octree.primitive == "decoupoly":
145
- raster_settings.used_rank = used_rank if used_rank is not None else decoupolys_V.shape[1]
146
- renderer = OctreeDecoupolyRasterizer(raster_settings=raster_settings)
147
- rgb, depth, alpha = renderer(
148
- positions = positions,
149
- decoupolys_V = decoupolys_V,
150
- decoupolys_g = decoupolys_g,
151
- densities = densities,
152
- shs = shs,
153
- colors_precomp = colors_precomp,
154
- depths = depths,
155
- aabb = octree.aabb,
156
- aux = aux,
157
- )
158
- ret['rgb'] = rgb
159
- ret['depth'] = depth
160
- ret['alpha'] = alpha
161
-
162
- return ret
163
-
164
-
165
- class OctreeRenderer:
166
- """
167
- Renderer for the Octree representation.
168
-
169
- Args:
170
- rendering_options (dict): Rendering options.
171
- """
172
-
173
- def __init__(self, rendering_options={}) -> None:
174
- try:
175
- import diffoctreerast
176
- except ImportError:
177
- print("\033[93m[WARNING] diffoctreerast is not installed. The renderer will be disabled.\033[0m")
178
- self.unsupported = True
179
- else:
180
- self.unsupported = False
181
-
182
- self.pipe = edict({
183
- "with_distloss": False,
184
- "with_aux": False,
185
- "scale_modifier": 1.0,
186
- "used_rank": None,
187
- "jitter": False,
188
- "debug": False,
189
- })
190
- self.rendering_options = edict({
191
- "resolution": None,
192
- "near": None,
193
- "far": None,
194
- "ssaa": 1,
195
- "bg_color": 'random',
196
- })
197
- self.halton_sampler = qmc.Halton(2, scramble=False)
198
- self.rendering_options.update(rendering_options)
199
- self.bg_color = None
200
-
201
- def render(
202
- self,
203
- octree: DfsOctree,
204
- extrinsics: torch.Tensor,
205
- intrinsics: torch.Tensor,
206
- colors_overwrite: torch.Tensor = None,
207
- ) -> edict:
208
- """
209
- Render the octree.
210
-
211
- Args:
212
- octree (Octree): octree
213
- extrinsics (torch.Tensor): (4, 4) camera extrinsics
214
- intrinsics (torch.Tensor): (3, 3) camera intrinsics
215
- colors_overwrite (torch.Tensor): (N, 3) override color
216
-
217
- Returns:
218
- edict containing:
219
- color (torch.Tensor): (3, H, W) rendered color
220
- depth (torch.Tensor): (H, W) rendered depth
221
- alpha (torch.Tensor): (H, W) rendered alpha
222
- distloss (Optional[torch.Tensor]): (H, W) rendered distance loss
223
- percent_depth (Optional[torch.Tensor]): (H, W) rendered percent depth
224
- aux (Optional[edict]): auxiliary tensors
225
- """
226
- resolution = self.rendering_options["resolution"]
227
- near = self.rendering_options["near"]
228
- far = self.rendering_options["far"]
229
- ssaa = self.rendering_options["ssaa"]
230
-
231
- if self.unsupported:
232
- image = np.zeros((512, 512, 3), dtype=np.uint8)
233
- text_bbox = cv2.getTextSize("Unsupported", cv2.FONT_HERSHEY_SIMPLEX, 2, 3)[0]
234
- origin = (512 - text_bbox[0]) // 2, (512 - text_bbox[1]) // 2
235
- image = cv2.putText(image, "Unsupported", origin, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3, cv2.LINE_AA)
236
- return {
237
- 'color': torch.tensor(image, dtype=torch.float32).permute(2, 0, 1) / 255,
238
- }
239
-
240
- if self.rendering_options["bg_color"] == 'random':
241
- self.bg_color = torch.zeros(3, dtype=torch.float32, device="cuda")
242
- if np.random.rand() < 0.5:
243
- self.bg_color += 1
244
- else:
245
- self.bg_color = torch.tensor(self.rendering_options["bg_color"], dtype=torch.float32, device="cuda")
246
-
247
- if self.pipe["with_aux"]:
248
- aux = {
249
- 'grad_color2': torch.zeros((octree.num_leaf_nodes, 3), dtype=torch.float32, requires_grad=True, device="cuda") + 0,
250
- 'contributions': torch.zeros((octree.num_leaf_nodes, 1), dtype=torch.float32, requires_grad=True, device="cuda") + 0,
251
- }
252
- for k in aux.keys():
253
- aux[k].requires_grad_()
254
- aux[k].retain_grad()
255
- else:
256
- aux = None
257
-
258
- view = extrinsics
259
- perspective = intrinsics_to_projection(intrinsics, near, far)
260
- camera = torch.inverse(view)[:3, 3]
261
- focalx = intrinsics[0, 0]
262
- focaly = intrinsics[1, 1]
263
- fovx = 2 * torch.atan(0.5 / focalx)
264
- fovy = 2 * torch.atan(0.5 / focaly)
265
-
266
- camera_dict = edict({
267
- "image_height": resolution * ssaa,
268
- "image_width": resolution * ssaa,
269
- "FoVx": fovx,
270
- "FoVy": fovy,
271
- "znear": near,
272
- "zfar": far,
273
- "world_view_transform": view.T.contiguous(),
274
- "projection_matrix": perspective.T.contiguous(),
275
- "full_proj_transform": (perspective @ view).T.contiguous(),
276
- "camera_center": camera
277
- })
278
-
279
- # Render
280
- render_ret = render(camera_dict, octree, self.pipe, self.bg_color, aux=aux, colors_overwrite=colors_overwrite, scaling_modifier=self.pipe.scale_modifier, used_rank=self.pipe.used_rank, halton_sampler=self.halton_sampler)
281
-
282
- if ssaa > 1:
283
- render_ret.rgb = F.interpolate(render_ret.rgb[None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
284
- render_ret.depth = F.interpolate(render_ret.depth[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
285
- render_ret.alpha = F.interpolate(render_ret.alpha[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
286
- if hasattr(render_ret, 'percent_depth'):
287
- render_ret.percent_depth = F.interpolate(render_ret.percent_depth[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
288
-
289
- ret = edict({
290
- 'color': render_ret.rgb,
291
- 'depth': render_ret.depth,
292
- 'alpha': render_ret.alpha,
293
- })
294
- if self.pipe["with_distloss"] and 'distloss' in render_ret:
295
- ret['distloss'] = render_ret.distloss
296
- if self.pipe["with_aux"]:
297
- ret['aux'] = aux
298
- if hasattr(render_ret, 'percent_depth'):
299
- ret['percent_depth'] = render_ret.percent_depth
300
- return ret
trellis/renderers/sh_utils.py DELETED
@@ -1,118 +0,0 @@
1
- # Copyright 2021 The PlenOctree Authors.
2
- # Redistribution and use in source and binary forms, with or without
3
- # modification, are permitted provided that the following conditions are met:
4
- #
5
- # 1. Redistributions of source code must retain the above copyright notice,
6
- # this list of conditions and the following disclaimer.
7
- #
8
- # 2. Redistributions in binary form must reproduce the above copyright notice,
9
- # this list of conditions and the following disclaimer in the documentation
10
- # and/or other materials provided with the distribution.
11
- #
12
- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13
- # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14
- # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15
- # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
16
- # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17
- # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18
- # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19
- # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20
- # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21
- # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22
- # POSSIBILITY OF SUCH DAMAGE.
23
-
24
- import torch
25
-
26
- C0 = 0.28209479177387814
27
- C1 = 0.4886025119029199
28
- C2 = [
29
- 1.0925484305920792,
30
- -1.0925484305920792,
31
- 0.31539156525252005,
32
- -1.0925484305920792,
33
- 0.5462742152960396
34
- ]
35
- C3 = [
36
- -0.5900435899266435,
37
- 2.890611442640554,
38
- -0.4570457994644658,
39
- 0.3731763325901154,
40
- -0.4570457994644658,
41
- 1.445305721320277,
42
- -0.5900435899266435
43
- ]
44
- C4 = [
45
- 2.5033429417967046,
46
- -1.7701307697799304,
47
- 0.9461746957575601,
48
- -0.6690465435572892,
49
- 0.10578554691520431,
50
- -0.6690465435572892,
51
- 0.47308734787878004,
52
- -1.7701307697799304,
53
- 0.6258357354491761,
54
- ]
55
-
56
-
57
- def eval_sh(deg, sh, dirs):
58
- """
59
- Evaluate spherical harmonics at unit directions
60
- using hardcoded SH polynomials.
61
- Works with torch/np/jnp.
62
- ... Can be 0 or more batch dimensions.
63
- Args:
64
- deg: int SH deg. Currently, 0-4 supported
65
- sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
66
- dirs: jnp.ndarray unit directions [..., 3]
67
- Returns:
68
- [..., C]
69
- """
70
- assert deg <= 4 and deg >= 0
71
- coeff = (deg + 1) ** 2
72
- assert sh.shape[-1] >= coeff
73
-
74
- result = C0 * sh[..., 0]
75
- if deg > 0:
76
- x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
77
- result = (result -
78
- C1 * y * sh[..., 1] +
79
- C1 * z * sh[..., 2] -
80
- C1 * x * sh[..., 3])
81
-
82
- if deg > 1:
83
- xx, yy, zz = x * x, y * y, z * z
84
- xy, yz, xz = x * y, y * z, x * z
85
- result = (result +
86
- C2[0] * xy * sh[..., 4] +
87
- C2[1] * yz * sh[..., 5] +
88
- C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
89
- C2[3] * xz * sh[..., 7] +
90
- C2[4] * (xx - yy) * sh[..., 8])
91
-
92
- if deg > 2:
93
- result = (result +
94
- C3[0] * y * (3 * xx - yy) * sh[..., 9] +
95
- C3[1] * xy * z * sh[..., 10] +
96
- C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
97
- C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
98
- C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
99
- C3[5] * z * (xx - yy) * sh[..., 14] +
100
- C3[6] * x * (xx - 3 * yy) * sh[..., 15])
101
-
102
- if deg > 3:
103
- result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
104
- C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
105
- C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
106
- C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
107
- C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
108
- C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
109
- C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
110
- C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
111
- C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
112
- return result
113
-
114
- def RGB2SH(rgb):
115
- return (rgb - 0.5) / C0
116
-
117
- def SH2RGB(sh):
118
- return sh * C0 + 0.5