import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, repeat
from einops.layers.torch import Rearrange


class Residual(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x


class SA_PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)


class SA_FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)


class SA_Attention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

    def forward(self, x):
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        # standard scaled dot-product attention over all tokens
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = dots.softmax(dim = -1)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out


class ReAttention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.scale = dim_head ** -0.5

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        # learned head-to-head mixing matrix for re-attention
        self.reattn_weights = nn.Parameter(torch.randn(heads, heads))

        self.reattn_norm = nn.Sequential(
            Rearrange('b h i j -> b i j h'),
            nn.LayerNorm(heads),
            Rearrange('b i j h -> b h i j')
        )

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        # attention
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = dots.softmax(dim = -1)

        # re-attention: mix the per-head attention maps with the learned matrix, then normalize
        attn = einsum('b h i j, h g -> b g i j', attn, self.reattn_weights)
        attn = self.reattn_norm(attn)

        # aggregate values and project out
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out


class LeFF(nn.Module):
    # locally-enhanced feed-forward: linear up-projection, depthwise conv over the
    # 14 x 14 token grid, then linear down-projection (expects 196 patch tokens)
    def __init__(self, dim = 192, scale = 4, depth_kernel = 3):
        super().__init__()
        scale_dim = dim * scale

        self.up_proj = nn.Sequential(
            nn.Linear(dim, scale_dim),
            Rearrange('b n c -> b c n'),
            nn.BatchNorm1d(scale_dim),
            nn.GELU(),
            Rearrange('b c (h w) -> b c h w', h = 14, w = 14)
        )

        self.depth_conv = nn.Sequential(
            nn.Conv2d(scale_dim, scale_dim, kernel_size = depth_kernel, padding = depth_kernel // 2, groups = scale_dim, bias = False),
            nn.BatchNorm2d(scale_dim),
            nn.GELU(),
            Rearrange('b c h w -> b (h w) c', h = 14, w = 14)
        )

        self.down_proj = nn.Sequential(
            nn.Linear(scale_dim, dim),
            Rearrange('b n c -> b c n'),
            nn.BatchNorm1d(dim),
            nn.GELU(),
            Rearrange('b c n -> b n c')
        )

    def forward(self, x):
        x = self.up_proj(x)
        x = self.depth_conv(x)
        x = self.down_proj(x)
        return x


class LCAttention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

    def forward(self, x):
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        q = q[:, :, -1, :].unsqueeze(2)  # only the last token is used as the query

        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = dots.softmax(dim = -1)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out


class SA_Transformer(nn.Module):
    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        self.norm = nn.LayerNorm(dim)
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                SA_PreNorm(dim, SA_Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
                SA_PreNorm(dim, SA_FeedForward(dim, mlp_dim, dropout = dropout))
            ]))

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return self.norm(x)
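
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the batch size, token
# count, and hyperparameters below are illustrative assumptions only).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tokens = torch.randn(2, 197, 192)  # (batch, 1 CLS token + 14*14 patch tokens, dim) -- assumed shape

    # stack of pre-norm self-attention / feed-forward blocks
    sa_transformer = SA_Transformer(dim = 192, depth = 4, heads = 8, dim_head = 64, mlp_dim = 384)
    print(sa_transformer(tokens).shape)   # torch.Size([2, 197, 192])

    # re-attention variant of multi-head attention
    reattn = ReAttention(dim = 192, heads = 8, dim_head = 64)
    print(reattn(tokens).shape)           # torch.Size([2, 197, 192])

    # LeFF operates on the 196 patch tokens only (it reshapes them to a 14 x 14 grid)
    leff = LeFF(dim = 192, scale = 4, depth_kernel = 3)
    print(leff(tokens[:, 1:, :]).shape)   # torch.Size([2, 196, 192])

    # LCAttention attends from the last token only, so it returns a single token
    lc_attn = LCAttention(dim = 192, heads = 8, dim_head = 64)
    print(lc_attn(tokens).shape)          # torch.Size([2, 1, 192])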