"""Encodec SEANet-based encoder and decoder implementation.""" |

import typing as tp

import numpy as np
import torch.nn as nn

from .conv import SConv1d
from .lstm import SLSTM


class SEANetResnetBlock(nn.Module):
    """Residual block from the SEANet model.

    Args:
        dim (int): Dimension of the input/output.
        kernel_sizes (list): List of kernel sizes for the convolutions.
        dilations (list): List of dilations for the convolutions.
        activation (str): Activation function.
        activation_params (dict): Parameters to provide to the activation function.
        norm (str): Normalization method.
        norm_params (dict): Parameters to provide to the underlying normalization used with the convolution.
        causal (bool): Whether to use fully causal convolution.
        pad_mode (str): Padding mode for the convolutions.
        compress (int): Reduced dimensionality in the residual branches.
        true_skip (bool): Whether to use a true skip connection or a 1x1 convolution as the skip connection.
    """
    def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
                 activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
                 norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False,
                 pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
        super().__init__()
        assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
        act = getattr(nn, activation)
        hidden = dim // compress
        block = []
        for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
            # Only the first conv takes `dim` channels in, and only the last gives `dim` channels out;
            # the intermediate convolutions operate at the compressed `hidden` width.
            in_chs = dim if i == 0 else hidden
            out_chs = dim if i == len(kernel_sizes) - 1 else hidden
            block += [
                act(**activation_params),
                SConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
                        norm=norm, norm_kwargs=norm_params,
                        causal=causal, pad_mode=pad_mode),
            ]
        self.block = nn.Sequential(*block)
        self.shortcut: nn.Module
        if true_skip:
            self.shortcut = nn.Identity()
        else:
            self.shortcut = SConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
                                    causal=causal, pad_mode=pad_mode)

    def forward(self, x):
        return self.shortcut(x) + self.block(x)
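
# Example (minimal sketch): assuming SConv1d pads to preserve the time axis,
# as in Encodec's streaming convolutions, the residual block maps
# (batch, dim, time) -> (batch, dim, time):
#
#   import torch
#   block = SEANetResnetBlock(dim=64)
#   y = block(torch.randn(2, 64, 100))  # -> torch.Size([2, 64, 100])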


class SEANetEncoder(nn.Module):
    """SEANet encoder.

    Args:
        channels (int): Audio channels.
        dimension (int): Intermediate representation dimension.
        n_filters (int): Base width for the model.
        n_residual_layers (int): Number of residual layers per downsampling block.
        ratios (list): Downsampling ratios; the encoder applies them in reverse order,
            so they should be given in the (matching) decoder's upsampling order.
        activation (str): Activation function.
        activation_params (dict): Parameters to provide to the activation function.
        norm (str): Normalization method.
        norm_params (dict): Parameters to provide to the underlying normalization used with the convolution.
        kernel_size (int): Kernel size for the initial convolution.
        last_kernel_size (int): Kernel size for the final convolution.
        residual_kernel_size (int): Kernel size for the residual layers.
        dilation_base (int): How much to increase the dilation with each residual layer.
        causal (bool): Whether to use fully causal convolution.
        pad_mode (str): Padding mode for the convolutions.
        true_skip (bool): Whether to use true skip connections in the residual blocks.
        compress (int): Reduced dimensionality in the residual branches.
        lstm (int): Number of LSTM layers at the end of the encoder (0 to disable).
    """
    def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 1,
                 ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
                 norm: str = 'weight_norm', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
                 last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
                 pad_mode: str = 'reflect', true_skip: bool = False, compress: int = 2, lstm: int = 2):
        super().__init__()
        self.channels = channels
        self.dimension = dimension
        self.n_filters = n_filters
        self.ratios = list(reversed(ratios))
        del ratios
        self.n_residual_layers = n_residual_layers
        # Total downsampling factor from waveform samples to latent frames.
        self.hop_length = np.prod(self.ratios)

        act = getattr(nn, activation)
        mult = 1
        # Initial convolution, from `channels` audio channels to the base width.
        model: tp.List[nn.Module] = [
            SConv1d(channels, mult * n_filters, kernel_size, norm=norm, norm_kwargs=norm_params,
                    causal=causal, pad_mode=pad_mode)
        ]
        # Downsampling blocks: residual layers followed by a strided convolution,
        # doubling the channel width at each stage.
        for i, ratio in enumerate(self.ratios):
            for j in range(n_residual_layers):
                model += [
                    SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
                                      dilations=[dilation_base ** j, 1],
                                      norm=norm, norm_params=norm_params,
                                      activation=activation, activation_params=activation_params,
                                      causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)]

            model += [
                act(**activation_params),
                SConv1d(mult * n_filters, mult * n_filters * 2,
                        kernel_size=ratio * 2, stride=ratio,
                        norm=norm, norm_kwargs=norm_params,
                        causal=causal, pad_mode=pad_mode),
            ]
            mult *= 2

        if lstm:
            model += [SLSTM(mult * n_filters, num_layers=lstm)]

        # Final convolution, projecting to the latent dimension.
        model += [
            act(**activation_params),
            SConv1d(mult * n_filters, dimension, last_kernel_size, norm=norm, norm_kwargs=norm_params,
                    causal=causal, pad_mode=pad_mode)
        ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x)
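

if __name__ == '__main__':
    # Minimal smoke-test sketch. It assumes this file lives inside a package
    # (SConv1d and SLSTM are relative imports), so run it as a module, e.g.
    # `python -m mypackage.seanet` (the package name is illustrative).
    import torch

    encoder = SEANetEncoder()
    wav = torch.randn(1, 1, 24000)  # 1 second of mono audio at 24 kHz
    emb = encoder(wav)
    # With the default ratios [8, 5, 4, 2], hop_length = 8 * 5 * 4 * 2 = 320,
    # so 24000 samples yield 24000 / 320 = 75 latent frames of dimension 128.
    print(emb.shape)  # expected: torch.Size([1, 128, 75])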