instruction | response
---|---|
Calculate funnel ratio. | def calc_funnel_ratio(keys_len, queries_len):
"""Calculate funnel ratio."""
if queries_len > keys_len: # Upsampling
assert queries_len % keys_len == 0
funnel_factor = queries_len // keys_len
is_upsampling = True
else: # Downsampling
assert keys_len % queries_len == 0
funnel_factor = keys_len // queries_len
is_upsampling = False
return funnel_factor, is_upsampling |
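A quick check of both branches (a small sketch, assuming the calc_funnel_ratio above is in scope):
assert calc_funnel_ratio(keys_len=16, queries_len=8) == (2, False)  # downsampling by 2
assert calc_funnel_ratio(keys_len=8, queries_len=16) == (2, True)   # upsampling by 2
assert calc_funnel_ratio(keys_len=8, queries_len=8) == (1, False)   # no resampling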
Fast matrix shift. | def _fast_matrix_shift(x, funnel_factor=1, is_upsampling=False):
"""Fast matrix shift."""
if funnel_factor == 1 and not is_upsampling:
shift = 1
batch_size, n_head = x.shape[0], x.shape[1]
queries_len, keys_len = x.shape[2], x.shape[3]
zero_pad = jnp.zeros((batch_size, n_head, queries_len, shift))
x = jnp.concatenate([zero_pad, x], axis=3)
x = x.reshape(batch_size, n_head, keys_len + shift, queries_len)
x = x[:, :, shift:, :]
return x
if is_upsampling:
k = funnel_factor
shift = 1
else:
k = 1
shift = funnel_factor
bsz, n_head = x.shape[0], x.shape[1]
qlen, klen = x.shape[2], (x.shape[3] + 1) // 2
zero_pad = jnp.zeros((bsz, n_head, qlen, shift))
x = jnp.concatenate([zero_pad, x], axis=3)
x = x.reshape(bsz, n_head, 2 * klen - 1 + shift, qlen)
x = x[:, :, shift:, :]
x = x.reshape(bsz, n_head, qlen, klen * 2 - 1)
x = x[:, :, :, shift - 1:shift - 1 + klen:k]
return x |
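A shape-only sketch of the three branches above (assuming jax.numpy and _fast_matrix_shift are in scope; only output shapes are checked, not values):
import jax.numpy as jnp
b, h, qlen, klen = 1, 2, 4, 8
# Plain relative-attention shift (funnel_factor == 1), square qlen == klen case.
assert _fast_matrix_shift(jnp.zeros((b, h, 4, 4))).shape == (b, h, 4, 4)
# Downsampling by 2: the last axis holds 2 * klen - 1 relative positions.
x_down = jnp.zeros((b, h, qlen, 2 * klen - 1))
assert _fast_matrix_shift(x_down, funnel_factor=2).shape == (b, h, qlen, klen)
# Upsampling by 2: keeps every funnel_factor-th relative position.
x_up = jnp.zeros((b, h, qlen, 2 * klen - 1))
assert _fast_matrix_shift(x_up, funnel_factor=2, is_upsampling=True).shape == (b, h, qlen, 4)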
Creates attention mask layer.
Returns a layer that, based on queries, keys and the accumulated pool size of
keys/values up to this layer, calculates the attention mask for causal
relative attention.
Takes as input q, k, v and appends the proper mask at the end.
Causal attention uses masking to prevent a given sequence position from
attending to positions greater than / following it. This is used, for
example, when training autoregressive sequence models, or when decoding a
sequence symbol by symbol.
Returns:
an attention mask layer. | def CreateAttentionMaskLayer():
"""Creates attention mask layer.
Returns a layer that, based on queries, keys and the accumulated pool size of
keys/values up to this layer, calculates the attention mask for causal
relative attention.
Takes as input q, k, v and appends the proper mask at the end.
Causal attention uses masking to prevent a given sequence position from
attending to positions greater than / following it. This is used, for
example, when training autoregressive sequence models, or when decoding a
sequence symbol by symbol.
Returns:
an attention mask layer.
"""
def calculate_mask(queries, keys):
batch_size = queries.shape[0]
keys_len, queries_len = keys.shape[-2], queries.shape[-2]
funnel_factor, is_upsampling = calc_funnel_ratio(keys_len, queries_len)
return _funnel_mask(batch_size, keys_len, queries_len, funnel_factor,
is_upsampling)
def _funnel_mask(batch_size, keys_len, queries_len, funnel_factor,
is_upsampling):
"""Funnel mask.
Args:
batch_size: batch size.
keys_len: keys length.
queries_len: queries length.
funnel_factor: funnel factor.
is_upsampling: True or False.
Returns:
funnel mask.
Based on the keys/queries lengths, this function creates a triangular mask
that prevents tokens from attending to positions following them.
If funnel_factor is not equal to 1 due to funnel upsampling or
downsampling, the created mask is adjusted for funnel attention
by repeating each element funnel_factor times.
This is because after a funnel layer, in downsampling, one token attends
to funnel_factor different tokens; during upsampling, on the other hand,
funnel_factor tokens attend to a single token from before upsampling.
"""
if funnel_factor != 1:
if not is_upsampling:
mask = jnp.tril(jnp.ones((queries_len, queries_len), dtype=jnp.bool_))
mask = jnp.repeat(mask, funnel_factor, axis=-1)
else:
mask = jnp.tril(jnp.ones((keys_len, keys_len), dtype=jnp.bool_))
mask = jnp.repeat(mask, funnel_factor, axis=-2)
else:
mask = jnp.tril(jnp.ones((queries_len, queries_len), dtype=jnp.bool_))
return jnp.repeat(mask[None, None, :, :], batch_size, axis=0)
return cb.Branch(
cb.Select([0]), cb.Select([1]), cb.Select([2]),
cb.Fn('create attention mask layer', calculate_mask, n_out=1)) |
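The repeat trick inside _funnel_mask can be seen with plain jax.numpy (a minimal sketch, independent of the layer machinery): for downsampling with funnel_factor 2, each row of the causal mask is stretched so a pooled query can attend to every key in its group and earlier.
import jax.numpy as jnp
queries_len, funnel_factor = 3, 2
mask = jnp.tril(jnp.ones((queries_len, queries_len), dtype=jnp.bool_))
mask = jnp.repeat(mask, funnel_factor, axis=-1)  # shape (3, 6): queries x keys
# Row 0 attends to keys 0-1, row 1 to keys 0-3, row 2 to keys 0-5.
assert mask.shape == (queries_len, queries_len * funnel_factor)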
Shifts right and inserts cls.
Returns a layer that shifts input tokens to the right by one and inserts
a cls token at the beginning, as in the BERT paper.
Args:
cls_id: id of the cls token in the embedding dictionary.
Returns:
layer shifting to the right and inserting cls. | def ShiftRightCls(cls_id):
"""Shifts right and insert cls.
Args:
cls_id: id of the cls token in embedding dictionary. Returns a layer that
shifts input tokens to the right by one and inserts an cls token to the
beginning like in BERT paper.
Returns:
layer shifting to right and inserting cls.
"""
def shift_right(x):
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[1] = (1, 0)
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(cls_id))
return padded[:, :-1]
return cb.Fn('ShiftRightCls()', shift_right) |
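The padding trick inside shift_right can be checked directly with jax.numpy (a small sketch; cls_id = 0 is just an illustrative value):
import jax.numpy as jnp
cls_id = 0
x = jnp.array([[11, 12, 13]], dtype=jnp.int32)
padded = jnp.pad(x, [(0, 0), (1, 0)], mode='constant', constant_values=x.dtype.type(cls_id))
shifted = padded[:, :-1]
assert (shifted == jnp.array([[0, 11, 12]])).all()  # cls prepended, last token dropped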
Attention resampling. | def AttentionResampling(shorten_factor, d_model, is_upsampling, d_ff, n_heads,
dropout, dropout_shared_axes, mode, ff_activation,
context_bias_layer, location_bias_layer, total_pooling,
resampling_fn):
"""Attention resampling."""
attention = RelativeAttentionLMLayer(
d_model, context_bias_layer, location_bias_layer,
total_pooling, n_heads=n_heads, dropout=dropout,
mode=mode)
feed_forward = FeedForwardBlock(
d_model, d_ff, dropout, dropout_shared_axes, mode, ff_activation)
resampling = resampling_fn(shorten_factor, d_model,
mode=mode)
def _Dropout():
return core.Dropout(rate=dropout, shared_axes=dropout_shared_axes,
mode=mode)
return [
LayerNorm(), # h
cb.Branch(cb.Serial(
resampling,
LayerNorm(),
), None), # h', h
cb.Serial( # pylint: disable=g-long-ternary
cb.Select([0, 2, 1, 2]),
cb.Add(),
) if is_upsampling else [],
cb.Residual(
cb.Select([0, 1, 1]), # h', h, h
attention,
_Dropout(),
),
cb.Residual(
LayerNorm(),
feed_forward,
_Dropout(),
),
] |
Rotate function. | def rotate(x):
"""Rotate function."""
_, l, d = x.shape
inv_freq = jnp.exp(jnp.arange(0, d, 2) * -(jnp.log(10000.0) / d))
positions = jnp.arange(l)
freqs = jnp.einsum('i,j->ij', positions, inv_freq)
emb = jnp.concatenate((freqs, freqs), axis=-1)
cos = jnp.cos(emb)
sin = jnp.sin(emb)
def mul(vecs, pos_emb):
return jnp.einsum('bld,ld->bld', vecs, pos_emb)
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return jnp.concatenate((-x2, x1), axis=x1.ndim - 1)
return mul(x, cos) + mul(rotate_half(x), sin) |
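A quick sanity check for rotate (assuming jax.numpy and the function above are in scope): each position's feature pairs are rotated by a position-dependent angle, so both the shape and the per-position norm are preserved.
import jax.numpy as jnp
x = jnp.ones((2, 5, 8))  # (batch, length, depth); depth must be even
y = rotate(x)
assert y.shape == x.shape
assert jnp.allclose(jnp.linalg.norm(y, axis=-1), jnp.linalg.norm(x, axis=-1), atol=1e-5)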
Layer using LocallyConnected1d for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
LocallyConnected1d (grouped convolution) on all those modules, and
concatenates their results. It is essentially a locally-sensitive
approximation of Dense layer, with number of parameters smaller by the factor
of `n_modules / kernel_size`.
Args:
n_modules: Indicates how many modules (pixels) the input and output should
be split into for processing.
n_units: how many outputs (filters) each module should generate.
kernel_size: The size of the kernel to be used.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
use_bias: If `True`, compute an affine map `y = Wx + b`; else compute
a linear map `y = Wx`.
Returns:
LocallyConnectedDense base.Layer. | def LocallyConnectedDense(n_modules, n_units, kernel_size=1,
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
use_bias=True):
"""Layer using LocallyConnected1d for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
LocallyConnected1d (grouped convolution) on all those modules, and
concatenates their results. It is essentially a locally-sensitive
approximation of Dense layer, with number of parameters smaller by the factor
of `n_modules / kernel_size`.
Args:
n_modules: Indicates how many modules (pixels) the input and output should
be split into for processing.
n_units: how many outputs (filters) each module should generate.
kernel_size: The size of the kernel to be used.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
use_bias: If `True`, compute an affine map `y = Wx + b`; else compute
a linear map `y = Wx`.
Returns:
LocallyConnectedDense base.Layer.
"""
if n_modules == 1:
return tl.Dense(n_units, kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer, use_bias=use_bias)
return tl.Serial(
tl.SplitLastAxis(n_modules),
tl.LocallyConnected1d(
n_units, kernel_size, kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer, use_bias=use_bias, padding='WRAP'),
tl.MergeLastTwoAxes()) |
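For a sense of the parameter saving (back-of-the-envelope arithmetic, not exact trax weight shapes): splitting d_in features into n_modules groups, each with its own kernel over d_in / n_modules inputs, needs roughly kernel_size * d_in * n_units weights, versus d_in * n_modules * n_units for a Dense layer of the same total output width, i.e. smaller by about n_modules / kernel_size as stated above.
d_in, n_modules, n_units, kernel_size = 512, 16, 32, 1
dense_params = d_in * (n_modules * n_units)  # Dense producing the same total width
local_params = n_modules * kernel_size * (d_in // n_modules) * n_units
assert dense_params // local_params == n_modules // kernel_size  # 16x fewer weights here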
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConnectedDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConnectedDense.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
kernel_size: Kernel size used in LocallyConnectedDense.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def ModularCausalAttention(d_feature, n_heads=1, sparsity=None, dropout=0.0,
max_inference_length=2048,
kernel_size=1, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConnectedDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConnectedDense.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
kernel_size: Kernel size used in LocallyConnectedDense.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
n_modules = n_heads if sparsity is None else sparsity
@assert_shape('...a->...b')
def ProcessingLayer():
assert d_feature % n_modules == 0
return LocallyConnectedDense(n_modules, d_feature // n_modules,
kernel_size=kernel_size)
return tl.ConfigurableAttention(
ProcessingLayer(), ProcessingLayer(), ProcessingLayer(),
ProcessingLayer(), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
Layer using local convolutions for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
a convolution on all those modules, and concatenates their results.
It is similar to LocallyConnectedDense above, but shares weights.
Args:
n_modules: Indicates how many modules (pixels) the input and output should
be split into for processing.
n_units: how many outputs (filters) each module should generate.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_size: The size of the kernel to be used.
length_kernel_size: If > 1, also do causal convolution on the previous axis,
which is often the sentence length in sequence models.
Returns:
LocallyConvDense base.Layer. | def LocallyConvDense(n_modules, n_units, mode, kernel_size=1,
length_kernel_size=1):
"""Layer using local convolutions for approximation of Dense layer.
The layer splits the last axis of a tensor into `n_modules`, then runs
a convolution on all those modules, and concatenates their results.
It is similar to LocallyConnectedDense above, but shares weights.
Args:
n_modules: Indicates how many modules (pixels) the input and output should
be split into for processing.
n_units: how many outputs (filters) each module should generate.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_size: The size of the kernel to be used.
length_kernel_size: If > 1, also do causal convolution on the previous axis,
which is often the sentence length in sequence models.
Returns:
LocallyConvDense base.Layer.
"""
if n_modules == 1:
return tl.Dense(n_units)
if kernel_size % 2 != 1:
raise ValueError('Currently we only handle odd kernel sizes.')
half = (kernel_size - 1) // 2
pad_widths = [[0, 0], [0, 0], [half, half], [0, 0]]
return tl.Serial(
tl.SplitLastAxis(n_modules),
tl.Fn('Pad', lambda x: jnp.pad(x, pad_width=pad_widths, mode='constant')),
_RememberPad(length_kernel_size-1, mode=mode),
tl.Conv(n_units, kernel_size=(length_kernel_size, kernel_size)),
tl.MergeLastTwoAxes()
) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConvDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConvDense.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
kernel_size: Kernel size used in LocallyConnectedDense.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def ConvCausalAttention(d_feature, n_heads=1, sparsity=None, dropout=0.0,
max_inference_length=2048,
kernel_size=1, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses LocallyConvDense instead of Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: Number of modules used in LocallyConvDense.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
kernel_size: Kernel size used in LocallyConnectedDense.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
n_modules = n_heads if sparsity is None else sparsity
@assert_shape('...a->...b')
def ProcessingLayer():
assert d_feature % n_modules == 0
return LocallyConvDense(n_modules, d_feature // n_modules, mode=mode,
kernel_size=kernel_size)
return tl.ConfigurableAttention(
ProcessingLayer(), ProcessingLayer(), ProcessingLayer(),
ProcessingLayer(), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
Returns a reimplementation of Dense layer, using einsum.
While this is equivalent to a Dense layer, it seems to be faster in decoding
when used with bias (see decoding_timing_test.py).
This layer can be removed when we understand better the reason for the
difference in decoding speed.
Args:
d_input: Dimensionality of the input tensor.
d_output: Dimensionality of the output tensor.
use_bias: Whether to use bias. | def EinsumDense(d_input, d_output, use_bias):
"""Returns a reimplementation of Dense layer, using einsum.
While this is equivalent to a Dense layer, it seems to be faster in decoding
when used with bias (see decoding_timing_test.py).
This layer can be removed when we understand better the reason for the
difference in decoding speed.
Args:
d_input: Dimensionality of the input tensor.
d_output: Dimensionality of the output tensor.
use_bias: Whether to use bias.
"""
layers = [
tl.Weights(init.GlorotUniformInitializer(), [d_output, d_input]),
tl.Fn('EinsumDense',
(lambda kernel, embeds: # pylint: disable=g-long-lambda
jnp.einsum('xd,...d->...x', kernel, embeds)))
]
if use_bias:
layers.extend([
tl.Weights(init.RandomNormalInitializer(1e-6), [d_output]),
tl.Add()
])
return tl.Serial(layers) |
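The einsum above is just a transposed matrix multiply; a minimal numerical check of the contraction (assuming jax.numpy):
import jax.numpy as jnp
kernel = jnp.arange(12, dtype=jnp.float32).reshape(3, 4)  # (d_output, d_input)
embeds = jnp.ones((2, 5, 4), dtype=jnp.float32)           # (..., d_input)
out = jnp.einsum('xd,...d->...x', kernel, embeds)
assert out.shape == (2, 5, 3)
assert jnp.allclose(out, embeds @ kernel.T)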
Runs `layer_a` with probability `prob_a`, otherwise runs `layer_b`. | def RandomLayer(layer_a, layer_b, prob_a):
"""Runs `layer_a` with probability `prob_a`, otherwise runs `layer_b`."""
condition = tl.Serial(
tl.RandomUniform(),
tl.Fn('SmallerThan', lambda x: x < prob_a)
)
return tl.Cond(condition, layer_a, layer_b) |
Configurable sparse version of Dense layer. | def SparseDenseWithOptions(n_units, d_input=None, sparsity_type=None,
sparsity=0, d_lowrank=None, prob_sparse=None,
mode=None, use_bias=True, use_bfloat16=False):
"""Configurable sparse version of Dense layer."""
if prob_sparse is not None:
if mode is not None and mode != 'train':
# For non-training modes, we want to use a sparse variant.
# This is different than simply prob_sparse being None, as the weights of
# the model are different.
prob_sparse = 1.0
return RandomLayer(
SparseDenseWithOptions(n_units, d_input, sparsity_type, sparsity,
d_lowrank, use_bias=use_bias,
use_bfloat16=use_bfloat16),
tl.Dense(n_units, use_bias=use_bias, use_bfloat16=use_bfloat16),
prob_sparse)
if sparsity_type is None or sparsity_type == 'None' or sparsity == 0:
return tl.Dense(n_units, use_bias=use_bias, use_bfloat16=use_bfloat16)
if sparsity_type == 'mult':
return FactoredDense(sparsity, d_input, n_units, use_bias=use_bias,
use_bfloat16=use_bfloat16)
assert not use_bfloat16 # use_bfloat16 is unsupported for other variants
if sparsity_type == 'lowrank':
assert use_bias # use_bias=False is unsupported
return LowRankDense(n_units, d_lowrank)
if sparsity_type == 'einsum':
return EinsumDense(d_input, n_units, use_bias=use_bias)
if sparsity_type == 'local':
assert use_bias # use_bias = False is unsupported
assert n_units % sparsity == 0
return LocallyConnectedDense(sparsity, n_units // sparsity)
if sparsity_type == 'local3':
assert use_bias # use_bias = False is unsupported
assert n_units % sparsity == 0
return LocallyConnectedDense(sparsity, n_units // sparsity, kernel_size=3)
raise ValueError('Unknown sparsity type: {}'.format(sparsity_type)) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses low-rank approximation of kernel in Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
lowrank: The rank of low-rank approximation.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def LowRankCausalAttention(d_feature, n_heads=1, dropout=0.0,
max_inference_length=2048, lowrank=64,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
it uses low-rank approximation of kernel in Dense layer for computing Q/K/V.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
lowrank: The rank of low-rank approximation.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
return tl.ConfigurableAttention(
LowRankDense(d_feature, lowrank), LowRankDense(d_feature, lowrank),
LowRankDense(d_feature, lowrank), LowRankDense(d_feature, lowrank),
n_heads=n_heads, qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
Returns a Dense-like layer, internally factored to use fewer parameters.
This layer treats an activation vector as if divided into :math:`M`
subvectors (``n_modules`` 'modules'). It uses this factored view to compute
a :py:class:`Dense`-like mapping with high mixing/connectivity, but using
approximately :math:`1/M` the number of weights of a similarly dimensioned
:py:class:`Dense` layer.
More specifically, each activation vector of dimensionality ``d_in`` is
multiplied element-wise (a generalized form of gating) with ``n_modules``
vectors also of dimensionality ``d_in``. The resulting vectors are projected
to the subvector/module dimensionality ``d_out / n_modules`` via a matrix
multiply, and finally reshaped back to a single vector of dimensionality
``d_out``. Optionally, a bias vector of dimensionality ``d_out`` is added at
the end. All the above-mentioned non-input objects -- gating vectors,
projection matrix, and optional bias -- are trainable weights.
Args:
n_modules: Number by which an activation vector is divided into subvectors
(modules) for the factored computation.
d_in: Last/innermost dimension of input array.
d_out: Last/innermost dimension of output array.
use_bias: If True, add bias vectors at the end of the layer; else end the
layer with the matrix multiply.
use_bfloat16: If True, use bfloat16 weights; else use float32 weights. | def FactoredDense(n_modules, d_in, d_out, use_bias=True, use_bfloat16=False):
r"""Returns a Dense-like layer, internally factored to use fewer parameters.
This layer treats an activation vector as if divided into :math:`M`
subvectors (``n_modules`` 'modules'). It uses this factored view to compute
a :py:class:`Dense`-like mapping with high mixing/connectivity, but using
approximately :math:`1/M` the number of weights of a similarly dimensioned
:py:class:`Dense` layer.
More specifically, each activation vector of dimensionality ``d_in`` is
multiplied element-wise (a generalized form of gating) with ``n_modules``
vectors also of dimensionality ``d_in``. The resulting vectors are projected
to the subvector/module dimensionality ``d_out / n_modules`` via a matrix
multiply, and finally reshaped back to a single vector of dimensionality
``d_out``. Optionally, a bias vector of dimensionality ``d_out`` is added at
the end. All the above-mentioned non-input objects -- gating vectors,
projection matrix, and optional bias -- are trainable weights.
Args:
n_modules: Number by which an activation vector is divided into subvectors
(modules) for the factored computation.
d_in: Last/innermost dimension of input array.
d_out: Last/innermost dimension of output array.
use_bias: If True, add bias vectors at the end of the layer; else end the
layer with the matrix multiply.
use_bfloat16: If True, use bfloat16 weights; else use float32 weights.
"""
if d_out % n_modules != 0:
raise ValueError(f'Value d_out ({d_out}) must be a multiple of arg '
f'n_modules ({n_modules}).')
d_module = d_out // n_modules
def GatingVectors():
return tl.Weights(init.RandomNormalInitializer(stddev=0.5),
shape=[n_modules, d_in],
use_bfloat16=use_bfloat16)
def ProjectionMatrix():
return tl.Weights(init.GlorotUniformInitializer(),
shape=[d_in, d_module],
use_bfloat16=use_bfloat16)
def Bias():
return tl.Weights(init.RandomNormalInitializer(1e-6),
shape=[d_out],
use_bfloat16=use_bfloat16)
layers = [
GatingVectors(),
ProjectionMatrix(),
_GateAndProject(),
MergeLastTwoAxes(),
]
if use_bias:
layers += [Bias(), tl.Add()]
return tl.Serial(layers) |
Returns a combined gating+projection layer that saves on memory. | def _GateAndProject():
"""Returns a combined gating+projection layer that saves on memory."""
def f(projection, gating, x):
# Args arrive in reverse order because of how they were put on the stack.
# Einsum indices: d (d_in), n (n_modules), m (d_module = d_out/n_modules)
return jnp.einsum('...d,nd,dm->...nm', x, gating, projection)
return tl.Fn('_GateAndProject', f) |
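Shape flow of the combined einsum (a sketch with jax.numpy; the weights are constant stand-ins, not trained values): gating is (n_modules, d_in), the projection is (d_in, d_module), and the input is (..., d_in), giving (..., n_modules, d_module), which MergeLastTwoAxes then flattens to (..., d_out).
import jax.numpy as jnp
n_modules, d_in, d_out = 4, 8, 16
d_module = d_out // n_modules
x = jnp.ones((2, 3, d_in))
gating = jnp.ones((n_modules, d_in))
projection = jnp.ones((d_in, d_module))
y = jnp.einsum('...d,nd,dm->...nm', x, gating, projection)
assert y.shape == (2, 3, n_modules, d_module)
assert y.reshape(2, 3, d_out).shape == (2, 3, d_out)  # what MergeLastTwoAxes does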
Returns a replacement for the Dense layer which uses fewer parameters.
The layer uses number of modules equal to `sparsity`. It is a combination of
multiplicative dense and locally connected dense layers.
Args:
sparsity: The sparsity of the layer; the output vector is divided into this
number of modules.
d_feature: Dimensionality of input and output tensor. | def MultiplicativeModularSparseDense(sparsity, d_feature):
"""Returns a replacement of Dense layer which uses less parameters.
The layer uses number of modules equal to `sparsity`. It is a combination of
multiplicative dense and locally connected dense layers.
Args:
sparsity: The sparsity of the layer; the output vector is divided into this
number of modules.
d_feature: Dimensionality of input and output tensor.
"""
assert d_feature % sparsity == 0
d_module = d_feature // sparsity
return tl.Serial(
# Weight below is used for per-head preprocessing of an embedding.
tl.Weights(init.RandomNormalInitializer(stddev=0.5),
shape=[sparsity, d_feature]),
# Weight below is a kernel of multiplicative dense, shared across heads.
tl.Weights(init.GlorotUniformInitializer(), [d_feature, d_module]),
# Weight below is a kernel of modular dense.
tl.Weights(functools.partial(init.GlorotUniformInitializer(),
nonreceptive_dims=[0]),
[sparsity, d_module, d_module]),
# To save memory the per-head preprocessing and multiplying by
# kernels is done in a single einsum.
tl.Fn('SparseDenseEinsum',
(lambda kmod, kmult, multiplier, embeds: # pylint: disable=g-long-lambda
jnp.einsum('hxo,dx,hd,...d->...ho', kmod, kmult, multiplier, embeds
))),
MergeLastTwoAxes(),
# Weight below is bias after dense, per-head.
tl.Weights(init.RandomNormalInitializer(1e-6), [d_feature]),
tl.Add(),
) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it multiplies each embedding
dimension by a scalar specific to each dimension and each head; then it
produces Q/K/V by applying the same dense layer to each head. In comparison
to a standard dense layer for computing Q/K/V, this layer uses fewer parameters
while still being able to express many functions, like a permutation.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def MultiplicativeCausalAttention(d_feature, n_heads=1, sparsity=None,
dropout=0.0, max_inference_length=2048,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it multiplies each embedding
dimension by a scalar specific to each dimension and each head; then it
produces Q/K/V by applying the same dense layer to each head. In comparison
to a standard dense layer for computing Q/K/V, this layer uses fewer parameters
while still being able to express many functions, like a permutation.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
sparsity = n_heads if sparsity is None else sparsity
return tl.ConfigurableAttention(
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
FactoredDense(sparsity, d_feature, d_feature),
n_heads=n_heads, qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
FactoredDense layer with LocallyConnectedLayer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def MultiplicativeModularCausalAttention(
d_feature, n_heads=1, sparsity=None, dropout=0.0, max_inference_length=2048,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
FactoredDense layer with LocallyConnectedLayer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
max_inference_length: maximum length for inference.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
sparsity = n_heads if sparsity is None else sparsity
return tl.ConfigurableAttention(
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature),
MultiplicativeModularSparseDense(sparsity, d_feature), n_heads=n_heads,
qkv_attention_layer=tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
FactoredDense layer with LocallyConvLayer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
length_kernel_size: Size of convolution kernel on the length dimension.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
force_no_dropout: If True, force dropout to be 0.0 independent of the above
value; used to override some configurations.
max_inference_length: maximum length for inference.
share_qk: if True, average Q and K embeddings and share for both Q and K.
output_layer_type: Which sparse layers to use for processing output from the
attention mechanism. One of `'none'`, `'mult'`, `'conv'`,
or `'multconv'`.
v_concat_type: What kind of concatenation to use when computing V tensor.
One of `'original'`, `'fixed'`, or `'none'`. `'none'` means using just
output from the multiplicative layer shared by Q, K, V. `'fixed'` means
using output from multiplicative layer concatenated, for each module,
with the layer input. `'original'` means using concatenation without
properly taking modules into account; this method was used in
experiments previously, so it is included for backwards-compatibility.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def MultiplicativeConvCausalAttention(
d_feature, n_heads=1, sparsity=None, length_kernel_size=3, dropout=0.0,
force_no_dropout=False, max_inference_length=2048, share_qk=False,
output_layer_type='none', v_concat_type='none', mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
self-attention with causal masking rather than padding-based masking. However,
for computing Q/K/V instead of a Dense layer it combines
FactoredDense layer with LocallyConvLayer.
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
sparsity: The sparsity of the layer; usually it should be equal to n_heads.
length_kernel_size: Size of convolution kernel on the length dimension.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
force_no_dropout: If True, force dropout to be 0.0 independent of the above
value; used to override some configurations.
max_inference_length: maximum length for inference.
share_qk: if True, average Q and K embeddings and share for both Q and K.
output_layer_type: Which sparse layers to use for processing output from the
attention mechanism. One of `'none'`, `'mult'`, `'conv'`,
or `'multconv'`.
v_concat_type: What kind of concatenation to use when computing V tensor.
One of `'original'`, `'fixed'`, or `'none'`. `'none'` means using just
output from the multiplicative layer shared by Q, K, V. `'fixed'` means
using output from multiplicative layer concatenated, for each module,
with the layer input. `'original'` means using concatenation without
properly taking modules into account; this method was used in
experiments previously, so it is included for backwards-compatibility.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
assert output_layer_type in ['none', 'mult', 'conv', 'multconv']
assert v_concat_type in ['original', 'fixed', 'none']
dropout = 0.0 if force_no_dropout else dropout
sparsity = n_heads if sparsity is None else sparsity
d_module = d_feature // sparsity
output_layers = []
if 'mult' in output_layer_type:
output_layers.append(FactoredDense(
sparsity, d_feature, d_feature))
if 'conv' in output_layer_type:
output_layers.append(LocallyConvDense(
sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size))
if v_concat_type == 'original':
# `'original'` uses concatenation without properly taking modules into
# account; this method was used in experiments previously, so it is included
# for backwards-compatibility.
concat_layers = [tl.Concatenate()] # use permuted and original for v
elif v_concat_type == 'fixed':
# `'fixed'` uses the output from multiplicative layer concatenated, for each
# module, with the layer input. This means that every module in Conv layer
# has access both to parts of embeddings which were used to compute Q/K of
this particular module, and it has access to parts of the embedding which
# will be modified by this module.
concat_layers = [
tl.Parallel(
tl.Fn('Reshape1', lambda x: jnp.reshape( # pylint: disable=g-long-lambda
x, (x.shape[0], x.shape[1], sparsity, d_module))),
tl.Fn('Reshape2', lambda x: jnp.reshape( # pylint: disable=g-long-lambda
x, (x.shape[0], x.shape[1], sparsity, d_module)))),
tl.Concatenate(),
tl.Fn('Reshape3',
lambda x: jnp.reshape(x, (x.shape[0], x.shape[1], 2*d_feature))),
]
elif v_concat_type == 'none':
# `'none'` doesn't use concatenation: we throw away the original layer
# input and pass to Conv only output of shared Multiplicative layer.
concat_layers = [tl.Select([0], n_in=2)]
if share_qk:
return tl.Serial(
tl.Select([0, 0]), # pre-qkv, pre-v-for-concat
FactoredDense(sparsity, d_feature, d_feature), # shared q k
tl.Select([0, 0]), # pre-qk, pre-v, pre-v-for-concat
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads),
tl.Select([0, 0]), # use for q and k
tl.Parallel(
[],
[],
[concat_layers,
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
),
tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode),
tl.MergeHeads(n_heads),
output_layers,
)
return tl.Serial(
tl.Select([0, 0]), # duplicate activations
FactoredDense(sparsity, d_feature, d_feature), # shared q, k
tl.Select([0, 0, 0]), # use for q, k, v
tl.Parallel(
[LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
[LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
[concat_layers,
LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
length_kernel_size=length_kernel_size),
tl.SplitIntoHeads(n_heads)],
),
tl.DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode),
tl.MergeHeads(n_heads),
output_layers,
) |
Returns a layer that maps (activations, mask) to (new_activations, mask).
See the FAVOR paper for details: https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
n_random_features: Free dimension size for the orthogonal random matrix.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
use_approximate_softmax: Bool, if True uses approximate softmax, otherwise
Relu.
scale_by_norm: Boolean; whether to scale orthogonal random matrix.
normalize_data: predicate indicating whether data should be normalized.
epsilon: numerical stabilizer.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def Favor(d_feature, n_heads=1, n_random_features=256, dropout=0.0,
numerical_stabilizer=0.001, use_approximate_softmax=False,
scale_by_norm=0, normalize_data=False, epsilon=0.0001, mode='train'):
"""Returns a layer that maps (activations, mask) to (new_activations, mask).
See the FAVOR paper for details: https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
n_random_features: Free dimension size for the orthogonal random matrix.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
use_approximate_softmax: Bool, if True uses approximate softmax, otherwise
Relu.
scale_by_norm: Boolean; whether to scale orthogonal random matrix.
normalize_data: predicate indicating whether data should be normalized.
epsilon: numerical stabilizer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
del dropout # not implemented yet but needed in the API
return tl.ConfigurableAttention(
tl.Dense(d_feature), tl.Dense(d_feature), tl.Dense(d_feature),
tl.Dense(d_feature),
tl.FavorAttention(d_feature, n_heads, n_random_features,
numerical_stabilizer, use_approximate_softmax,
scale_by_norm, normalize_data, epsilon, mode),
n_heads=n_heads) |
Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
causal attention, but using FAVOR fast attention as in the following paper:
https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
mode: One of `'train'`, `'eval'`, or `'predict'`. | def CausalFavor(d_feature, n_heads=1, dropout=0.0,
numerical_stabilizer=0.001, mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like `CausalAttention`, this layer type represents one pass of multi-head
causal attention, but using FAVOR fast attention as in the following paper:
https://arxiv.org/abs/2006.03555
Args:
d_feature: Depth/dimensionality of feature embedding.
n_heads: Number of attention heads.
dropout: Probabilistic rate for internal dropout applied to attention
activations (based on query-key pairs) before dotting them with values.
numerical_stabilizer: float, small number used for numerical stability.
mode: One of `'train'`, `'eval'`, or `'predict'`.
"""
del dropout
return tl.ConfigurableAttention(
core.Dense(d_feature), core.Dense(d_feature), core.Dense(d_feature),
core.Dense(d_feature), n_heads=n_heads,
qkv_attention_layer=tl.CausalFavorAttention(numerical_stabilizer,
mode)) |
Returns Feed-forward block with sparsity.
The original (non-sparse) FF block is a triple Dense(d_ff)-Relu-Dense
that takes an input, makes it of size d_ff (usually larger than it was) and
then brings it back to the original size after Relu. It is commonly used in
Transformer models where it often accounts for most of the trainable weights.
The original block can be slow in decoding due to the need to fetch a lot of
weights from memory. This sparse block only allows one non-zero element
in a block of a specified size. This is trained with straight-through Gumbel
softmax trick.
Args:
d_ff: Depth/dimensionality of FeedForward layer.
n_elements_in_block: The sparsity level. The layer is divided into blocks of
this size, and each block has only a single element active.
d_lowrank: The dimensionality of low-rank controller.
temperature: The temperature of the controller during training.
quant_prob: During training this proportion of blocks will have quantized
mask (i.e. a single element active). The rest will use a soft mask.
use_bfloat16: Whether to use bfloat16 for weights.
big_weights_in_bfloat16: Whether to use bfloat16 for main weights of the
FeedForward layer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
dropout_rate: Probability for dropping an activation value.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks.
multiply_by_controller_output: whether to multiply the middle activation
layer of FF by controller output (i.e. softmax).
kernel_scaling: Whether to scale the kernel matrix (during init) to keep the
variance of the layer output regardless of n_elements_in_block. | def SparseFF(
d_ff, n_elements_in_block=32, d_lowrank=64, temperature=0.1, quant_prob=0.3,
use_bfloat16=False, big_weights_in_bfloat16=False, mode='train',
kernel_initializer=init.GlorotUniformInitializer(),
bias_initializer=init.RandomNormalInitializer(1e-6),
dropout_rate=0.0, dropout_shared_axes=None, ff_chunk_size=0,
multiply_by_controller_output=False, kernel_scaling=False):
"""Returns Feed-forward block with sparsity.
The original (non-sparse) FF block is a triple Dense(d_ff)-Relu-Dense
that takes an input, makes it of size d_ff (usually larger than it was) and
then brings it back to the original size after Relu. It is commonly used in
Transformer models where it often accounts for most of the trainable weights.
The original block can be slow in decoding due to the need to fetch a lot of
weights from memory. This sparse block only allows one non-zero element
in a block of a specified size. This is trained with straight-through Gumbel
softmax trick.
Args:
d_ff: Depth/dimensionality of FeedForward layer.
n_elements_in_block: The sparsity level. The layer is divided into blocks of
this size, and each block has only a single element active.
d_lowrank: The dimensionality of low-rank controller.
temperature: The temperature of the controller during training.
quant_prob: During training this proportion of blocks will have quantized
mask (i.e. a single element active). The rest will use a soft mask.
use_bfloat16: Whether to use bfloat16 for weights.
big_weights_in_bfloat16: Whether to use bfloat16 for main weights of the
FeedForward layer.
mode: One of `'train'`, `'eval'`, or `'predict'`.
kernel_initializer: Function that creates a matrix of (random) initial
connection weights `W` for the layer.
bias_initializer: Function that creates a vector of (random) initial
bias weights `b` for the layer.
dropout_rate: Probability for dropping an activation value.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks.
multiply_by_controller_output: whether to multiply the middle activation
layer of FF by controller output (i.e. softmax).
kernel_scaling: Whether to scale the kernel matrix (during init) to keep the
variance of the layer output regardless of n_elements_in_block.
"""
if mode == 'train' or multiply_by_controller_output:
also_return_nondiscrete_output = True
else:
also_return_nondiscrete_output = False
controller = _SparseFFController(
d_ff=d_ff, n_elements_in_block=n_elements_in_block,
d_lowrank=d_lowrank, temperature=temperature,
use_bfloat16=use_bfloat16, mode=mode,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
also_return_nondiscrete_output=also_return_nondiscrete_output)
main = [
_SparseFFMain(
d_ff=d_ff, n_elements_in_block=n_elements_in_block,
d_lowrank=d_lowrank, quant_prob=quant_prob, use_bfloat16=use_bfloat16,
big_weights_in_bfloat16=big_weights_in_bfloat16, mode=mode,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
multiply_by_controller_output=multiply_by_controller_output,
kernel_scaling=kernel_scaling),
# quant_mask, emb
tl.Select([1, 0]),
# emb, quant_mask
tl.Dropout(rate=dropout_rate, shared_axes=dropout_shared_axes, mode=mode),
tl.Select([1, 0]),
# quant_mask, emb
]
# We will "remember" quant_mask _after_ chunking, and "recall" this same
# quant_mask during reverse_and_grad _before_ chunking.
remembering = _RememberInReverse(output=False)
recalling = _RecallQuantMaskInReverse(
remember_layer=remembering, elements=d_ff//n_elements_in_block)
return tl.BatchLeadingAxes(tl.Serial(
recalling, # emb, quant_mask
tl.Chunk(chunk_size=ff_chunk_size, layer=tl.Serial(
# emb, quant_mask
tl.Select((0, 1, 0)), # emb, quant_mask, emb
controller, # quant_mask, mask, emb
main, # quant_mask, emb/output
)),
remembering, # emb/output
)) |
Stacks successive game frames along their last dimension. | def _FrameStack(n_frames):
"""Stacks successive game frames along their last dimension."""
# Input shape: (B, T, ..., C).
# Output shape: (B, T, ..., C * n_frames).
assert n_frames >= 1
if n_frames == 1:
return [] # No-op; just let the data flow through.
return [
# Create copies of input sequence, shift right by [0, ..., n_frames - 1]
# frames, and concatenate along the channel dimension.
tl.Branch(*map(_shift_right, range(n_frames))),
tl.Concatenate(n_items=n_frames, axis=-1)
] |
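The intended shape transformation, sketched with plain jax.numpy (here _shift_right is assumed to pad n zero frames at the start of the time axis and drop the same number at the end):
import jax.numpy as jnp
def shift_right_in_time(x, n):
  pad = [(0, 0), (n, 0)] + [(0, 0)] * (x.ndim - 2)
  return jnp.pad(x, pad)[:, :x.shape[1]]
frames = jnp.ones((2, 7, 84, 84, 3))  # (B, T, H, W, C)
stacked = jnp.concatenate(
    [shift_right_in_time(frames, n) for n in range(4)], axis=-1)
assert stacked.shape == (2, 7, 84, 84, 12)  # C * n_frames channels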
Layer that converts unsigned bytes to floats. | def _BytesToFloats():
"""Layer that converts unsigned bytes to floats."""
return tl.Fn('BytesToFloats', lambda x: x / 255.0) |
An Atari CNN. | def AtariCnn(n_frames=4, hidden_sizes=(32, 32), output_size=128, mode='train'):
"""An Atari CNN."""
del mode
# TODO(jonni): Include link to paper?
# Input shape: (B, T, H, W, C)
# Output shape: (B, T, output_size)
return tl.Serial(
_BytesToFloats(),
_FrameStack(n_frames=n_frames), # (B, T, H, W, 4C)
tl.Conv(hidden_sizes[0], (5, 5), (2, 2), 'SAME'),
tl.Relu(),
tl.Conv(hidden_sizes[1], (5, 5), (2, 2), 'SAME'),
tl.Relu(),
tl.Flatten(n_axes_to_keep=2), # B, T and rest.
tl.Dense(output_size),
tl.Relu(),
) |
An Atari CNN. | def AtariCnnBody(n_frames=4, hidden_sizes=(32, 64, 64),
output_size=512, mode='train',
kernel_initializer=None, padding='VALID'):
"""An Atari CNN."""
del mode
# TODO(jonni): Include link to paper?
# Input shape: (B, T, H, W, C)
# Output shape: (B, T, output_size)
return tl.Serial(
_BytesToFloats(),
_FrameStack(n_frames=n_frames), # (B, T, H, W, 4C)
tl.Conv(hidden_sizes[0], (8, 8), (4, 4), padding=padding,
kernel_initializer=kernel_initializer),
tl.Relu(),
tl.Conv(hidden_sizes[1], (4, 4), (2, 2), padding=padding,
kernel_initializer=kernel_initializer),
tl.Relu(),
tl.Conv(hidden_sizes[2], (3, 3), (1, 1), padding=padding,
kernel_initializer=kernel_initializer),
tl.Relu(),
tl.Flatten(n_axes_to_keep=2), # B, T and rest.
tl.Dense(output_size),
tl.Relu(),
) |
MLP operating on a fixed number of last frames. | def FrameStackMLP(n_frames=4, hidden_sizes=(64,), output_size=64,
mode='train'):
"""MLP operating on a fixed number of last frames."""
del mode
return tl.Serial(
_FrameStack(n_frames=n_frames),
[[tl.Dense(d_hidden), tl.Relu()] for d_hidden in hidden_sizes],
tl.Dense(output_size),
) |
A "multilayer perceptron" (MLP) network.
This is a classic fully connected feedforward network, with one or more
layers and a (nonlinear) activation function between each layer. For
historical reasons, such networks are often called multilayer perceptrons;
but they are more accurately described as multilayer networks, where
each layer + activation function is a perceptron-like unit (see, e.g.,
[https://en.wikipedia.org/wiki/Multilayer_perceptron#Terminology]).
Args:
layer_widths: Tuple of ints telling the number of layers and the width of
each layer. For example, setting `layer_widths=(128, 64, 32)` would
yield 3 layers with successive widths of 128, 64, and 32.
activation_fn: Type of activation function between pairs of fully connected
layers; must be an activation-type subclass of `Layer`.
out_activation: If True, include a copy of the activation function as the
last layer in the network.
flatten: If True, insert a layer at the head of the network to flatten the
input tensor into a matrix of shape (batch_size, -1).
mode: Ignored.
Returns:
An assembled MLP network with the specified layers. This network can either
be initialized and trained as a full model, or can be used as a building
block in a larger network. | def MLP(
layer_widths=(128, 64),
activation_fn=tl.Relu,
out_activation=False,
flatten=True,
mode='train'):
"""A "multilayer perceptron" (MLP) network.
This is a classic fully connected feedforward network, with one or more
layers and a (nonlinear) activation function between each layer. For
historical reasons, such networks are often called multilayer perceptrons;
but they are more accurately described as multilayer networks, where
each layer + activation function is a perceptron-like unit (see, e.g.,
[https://en.wikipedia.org/wiki/Multilayer_perceptron#Terminology]).
Args:
layer_widths: Tuple of ints telling the number of layers and the width of
each layer. For example, setting `layer_widths=(128, 64, 32)` would
yield 3 layers with successive widths of 128, 64, and 32.
activation_fn: Type of activation function between pairs of fully connected
layers; must be an activation-type subclass of `Layer`.
out_activation: If True, include a copy of the activation function as the
last layer in the network.
flatten: If True, insert a layer at the head of the network to flatten the
input tensor into a matrix of shape (batch_size, -1).
mode: Ignored.
Returns:
An assembled MLP network with the specified layers. This network can either
be initialized and trained as a full model, or can be used as a building
block in a larger network.
"""
del mode
layers = []
for width in layer_widths:
layers.append(tl.Dense(width))
layers.append(activation_fn())
if not out_activation:
# Don't need the last activation.
layers.pop()
return tl.Serial(
[tl.Flatten()] if flatten else [],
layers,
) |
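For example (hypothetical usage, assuming trax is installed and the MLP above is importable), layer_widths=(128, 64, 32) unrolls into Dense(128)-Relu-Dense(64)-Relu-Dense(32), with the trailing activation dropped because out_activation=False:
model = MLP(layer_widths=(128, 64, 32), flatten=False)
# Roughly: tl.Serial(tl.Dense(128), tl.Relu(), tl.Dense(64), tl.Relu(), tl.Dense(32))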
Split channels in 3 parts. Shifts 1st and 3rd sections to left/right. | def DiagonalGate():
"""Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
def f(x): # pylint: disable=invalid-name
# x : [batch, 1, length, depth]
x = jnp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)],
mode='constant', constant_values=0.0)
depth = x.shape[-1] // 3
assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
x.shape)
xs = [
x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],
x[:, :, 2:, 2 * depth:3 * depth]
]
return jnp.concatenate(xs, axis=3)
return tl.Fn('DiagonalGate', f) |
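The gating can be reproduced with plain jax.numpy (a minimal sketch; the three channel groups are filled with distinct constants so the shifts are visible):
import jax.numpy as jnp
depth = 2
x = jnp.concatenate([jnp.full((1, 1, 4, depth), v) for v in (1., 2., 3.)], axis=-1)
x_pad = jnp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant')
out = jnp.concatenate([
    x_pad[:, :, :-2, :depth],            # group 1: position i sees position i - 1
    x_pad[:, :, 1:-1, depth:2 * depth],  # group 2: unshifted
    x_pad[:, :, 2:, 2 * depth:],         # group 3: position i sees position i + 1
], axis=3)
assert out.shape == x.shape  # (1, 1, 4, 3 * depth)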
Build convolutional GRU with diagonal gating as in ImprovedNGPU. | def ConvDiagonalGRU(units, kernel_size=(3, 3)):
"""Build convolutional GRU with diagonal gating as in ImprovedNGPU."""
def BuildConv():
return tl.Conv(filters=units, kernel_size=kernel_size, padding='SAME')
return tl.GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform_fn=DiagonalGate,
gate_nonlinearity=tl.HardSigmoid,
candidate_nonlinearity=tl.HardTanh) |
Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
d_feature: Number of memory channels (dimensionality of feature embedding).
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
mode: Whether we are training or evaluating or doing inference.
Returns:
A NeuralGPU Stax model. | def NeuralGPU(d_feature=96, steps=16, vocab_size=2, mode='train'):
"""Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
d_feature: Number of memory channels (dimensionality of feature embedding).
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
mode: Whether we are training or evaluating or doing inference.
Returns:
A NeuralGPU Stax model.
"""
del mode
core = ConvDiagonalGRU(units=d_feature)
return tl.Serial(
tl.Embedding(vocab_size=vocab_size, d_feature=d_feature),
[core] * steps,
tl.Dense(vocab_size),
) |
ResNet convolutional striding block. | def ConvBlock(kernel_size, filters, strides, norm, non_linearity,
mode='train'):
"""ResNet convolutional striding block."""
ks = kernel_size
filters1, filters2, filters3 = filters
main = [
tl.Conv(filters1, (1, 1), strides),
norm(mode=mode),
non_linearity(),
tl.Conv(filters2, (ks, ks), padding='SAME'),
norm(mode=mode),
non_linearity(),
tl.Conv(filters3, (1, 1)),
norm(mode=mode),
]
shortcut = [
tl.Conv(filters3, (1, 1), strides),
norm(mode=mode),
]
return [
tl.Residual(main, shortcut=shortcut),
non_linearity()
] |
ResNet identical size block. | def IdentityBlock(kernel_size, filters, norm, non_linearity,
mode='train'):
"""ResNet identical size block."""
ks = kernel_size
filters1, filters2, filters3 = filters
main = [
tl.Conv(filters1, (1, 1)),
norm(mode=mode),
non_linearity(),
tl.Conv(filters2, (ks, ks), padding='SAME'),
norm(mode=mode),
non_linearity(),
tl.Conv(filters3, (1, 1)),
norm(mode=mode),
]
return [
tl.Residual(main),
non_linearity(),
] |
ResNet.
Args:
d_hidden: Dimensionality of the first hidden layer (multiplied later).
n_output_classes: Number of distinct output classes.
mode: Whether we are training or evaluating or doing inference.
norm: `Layer` used for normalization, Ex: BatchNorm or
FilterResponseNorm.
non_linearity: `Layer` used as a non-linearity, Ex: If norm is
BatchNorm then this is a Relu, otherwise for FilterResponseNorm this
should be ThresholdedLinearUnit.
Returns:
The list of layers comprising a ResNet model with the given parameters. | def Resnet50(d_hidden=64, n_output_classes=1001, mode='train',
norm=tl.BatchNorm,
non_linearity=tl.Relu):
"""ResNet.
Args:
d_hidden: Dimensionality of the first hidden layer (multiplied later).
n_output_classes: Number of distinct output classes.
mode: Whether we are training or evaluating or doing inference.
norm: `Layer` used for normalization, Ex: BatchNorm or
FilterResponseNorm.
non_linearity: `Layer` used as a non-linearity, Ex: If norm is
BatchNorm then this is a Relu, otherwise for FilterResponseNorm this
should be ThresholdedLinearUnit.
Returns:
The list of layers comprising a ResNet model with the given parameters.
"""
# A ConvBlock configured with the given norm, non-linearity and mode.
def Resnet50ConvBlock(filter_multiplier=1, strides=(2, 2)):
filters = (
[filter_multiplier * dim for dim in [d_hidden, d_hidden, 4 * d_hidden]])
return ConvBlock(3, filters, strides, norm, non_linearity, mode)
# Same as above for IdentityBlock.
def Resnet50IdentityBlock(filter_multiplier=1):
filters = (
[filter_multiplier * dim for dim in [d_hidden, d_hidden, 4 * d_hidden]])
return IdentityBlock(3, filters, norm, non_linearity, mode)
return tl.Serial(
tl.ToFloat(),
tl.Conv(d_hidden, (7, 7), (2, 2), 'SAME'),
norm(mode=mode),
non_linearity(),
tl.MaxPool(pool_size=(3, 3), strides=(2, 2)),
Resnet50ConvBlock(strides=(1, 1)),
[Resnet50IdentityBlock() for _ in range(2)],
Resnet50ConvBlock(2),
[Resnet50IdentityBlock(2) for _ in range(3)],
Resnet50ConvBlock(4),
[Resnet50IdentityBlock(4) for _ in range(5)],
Resnet50ConvBlock(8),
[Resnet50IdentityBlock(8) for _ in range(2)],
tl.AvgPool(pool_size=(7, 7)),
tl.Flatten(),
tl.Dense(n_output_classes),
) |
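A minimal usage sketch (not part of the original module), assuming `trax` and `numpy` are installed and `Resnet50` is in scope; the shapes follow the docstring above.
import numpy as np
from trax import shapes

model = Resnet50(n_output_classes=1001)
images = np.zeros((2, 224, 224, 3), dtype=np.float32)  # (batch, height, width, channels)
model.init(shapes.signature(images))  # initializes weights and BatchNorm state
logits = model(images)  # shape (2, 1001)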
WideResnet convolutional block. | def WideResnetBlock(channels, strides=(1, 1), bn_momentum=0.9, mode='train'):
"""WideResnet convolutional block."""
return [
tl.BatchNorm(momentum=bn_momentum, mode=mode),
tl.Relu(),
tl.Conv(channels, (3, 3), strides, padding='SAME'),
tl.BatchNorm(momentum=bn_momentum, mode=mode),
tl.Relu(),
tl.Conv(channels, (3, 3), padding='SAME'),
] |
WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
n_blocks: int, number of blocks in a group. total layers = 6n + 4.
widen_factor: int, widening factor of each group. k=1 is vanilla resnet.
n_output_classes: int, number of distinct output classes.
bn_momentum: float, momentum in BatchNorm.
mode: Whether we are training or evaluating or doing inference.
Returns:
The list of layers comprising a WideResnet model with the given parameters. | def WideResnet(n_blocks=3, widen_factor=1, n_output_classes=10, bn_momentum=0.9,
mode='train'):
"""WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
n_blocks: int, number of blocks in a group. total layers = 6n + 4.
widen_factor: int, widening factor of each group. k=1 is vanilla resnet.
n_output_classes: int, number of distinct output classes.
bn_momentum: float, momentum in BatchNorm.
mode: Whether we are training or evaluating or doing inference.
Returns:
The list of layers comprising a WideResnet model with the given parameters.
"""
return tl.Serial(
tl.ToFloat(),
tl.Conv(16, (3, 3), padding='SAME'),
WideResnetGroup(n_blocks, 16 * widen_factor, bn_momentum=bn_momentum,
mode=mode),
WideResnetGroup(n_blocks, 32 * widen_factor, (2, 2),
bn_momentum=bn_momentum, mode=mode),
WideResnetGroup(n_blocks, 64 * widen_factor, (2, 2),
bn_momentum=bn_momentum, mode=mode),
tl.BatchNorm(momentum=bn_momentum, mode=mode),
tl.Relu(),
tl.AvgPool(pool_size=(8, 8)),
tl.Flatten(),
tl.Dense(n_output_classes),
) |
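A small usage sketch (illustrative only, assuming CIFAR-10-sized inputs); the 8x8 final AvgPool matches 32x32 images downsampled twice by the strided groups.
import numpy as np
from trax import shapes

model = WideResnet(n_blocks=3, widen_factor=1, n_output_classes=10)
images = np.zeros((4, 32, 32, 3), dtype=np.float32)  # CIFAR-10-sized batch
model.init(shapes.signature(images))
logits = model(images)  # shape (4, 10)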
Combine all axes except the last `batch_axes` axes into the batch axis to avoid shape problems. | def _Batch(x, batch_axes):
"""Combine all axes except the last `batch_axes` axes into the batch axis."""
if batch_axes is None:
return x
if isinstance(x, list) and not x:
return []
return tl.BatchLeadingAxes(x, n_last_axes_to_keep=batch_axes) |
Attaches a policy head to a model body. | def Policy(
policy_distribution,
body=None,
normalizer=None,
head_init_range=None,
batch_axes=None,
mode='train',
):
"""Attaches a policy head to a model body."""
if body is None:
body = lambda mode: []
if normalizer is None:
normalizer = lambda mode: []
head_kwargs = {}
if head_init_range is not None:
head_kwargs['kernel_initializer'] = tl.RandomUniformInitializer(
lim=head_init_range
)
return tl.Serial(
_Batch(normalizer(mode=mode), batch_axes),
_Batch(body(mode=mode), batch_axes),
tl.Dense(policy_distribution.n_inputs, **head_kwargs),
) |
Attaches a value head to a model body. | def Value(
body=None,
normalizer=None,
inject_actions=False,
inject_actions_n_layers=1,
inject_actions_dim=64,
batch_axes=None,
mode='train',
is_discrete=False,
vocab_size=2,
multiplicative_action_injection=False,
head_init_range=None,
):
"""Attaches a value head to a model body."""
if body is None:
body = lambda mode: []
if normalizer is None:
normalizer = lambda mode: []
def ActionInjector(mode):
if inject_actions:
if is_discrete:
action_encoder = tl.Embedding(vocab_size, inject_actions_dim)
else:
action_encoder = tl.Dense(inject_actions_dim)
encoders = tl.Parallel(
tl.Dense(inject_actions_dim),
action_encoder,
)
if multiplicative_action_injection:
action_injector = tl.Serial(
tl.Fn('TanhMulGate', lambda x, a: x * jnp.tanh(a)),
tl.LayerNorm() # compensate for reduced variance
)
else:
action_injector = tl.Add()
return tl.Serial(
# Input: (body output, actions).
encoders,
action_injector,
models.MLP(
layer_widths=(inject_actions_dim,) * inject_actions_n_layers,
out_activation=True,
flatten=False,
mode=mode,
)
)
else:
return []
head_kwargs = {}
if head_init_range is not None:
head_kwargs['kernel_initializer'] = tl.RandomUniformInitializer(
lim=head_init_range
)
return tl.Serial(
_Batch(normalizer(mode=mode), batch_axes),
_Batch(body(mode=mode), batch_axes),
ActionInjector(mode=mode),
tl.Dense(1, **head_kwargs),
) |
Attaches policy and value heads to a model body. | def PolicyAndValue(
policy_distribution,
body=None,
policy_top=Policy,
value_top=Value,
normalizer=None,
joint=True,
mode='train',
):
"""Attaches policy and value heads to a model body."""
if normalizer is None:
normalizer = lambda mode: []
if body is None:
body = lambda mode: []
common_kwargs = {'body': None, 'normalizer': None, 'mode': mode}
policy_top = policy_top(
policy_distribution=policy_distribution, **common_kwargs
)
value_top = value_top(**common_kwargs)
layers = [normalizer(mode=mode)]
if joint:
layers.extend([
body(mode=mode),
tl.Branch(policy_top, value_top),
])
else:
layers.append(tl.Branch(
tl.Serial(body(mode=mode), policy_top),
tl.Serial(body(mode=mode), value_top),
))
return tl.Serial(layers) |
The network takes as input an observation and outputs values of actions. | def Quality(
body=None,
normalizer=None,
batch_axes=None,
mode='train',
n_actions=2,
head_init_range=None,
):
"""The network takes as input an observation and outputs values of actions."""
if body is None:
body = lambda mode: []
if normalizer is None:
normalizer = lambda mode: []
head_kwargs = {}
if head_init_range is not None:
head_kwargs['kernel_initializer'] = tl.RandomUniformInitializer(
lim=head_init_range
)
return tl.Serial(
_Batch(normalizer(mode=mode), batch_axes),
_Batch(body(mode=mode), batch_axes),
tl.Dense(n_actions, **head_kwargs),
) |
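A quick sketch of calling this head on its own (hypothetical shapes; in practice a `body` network is usually supplied). With the default empty body and normalizer it reduces to a Dense layer over observation features.
import numpy as np
from trax import shapes

model = Quality(n_actions=4)  # default body/normalizer are empty
observations = np.zeros((8, 6), dtype=np.float32)  # (batch, observation features)
model.init(shapes.signature(observations))
q_values = model(observations)  # shape (8, 4), one value per action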
Returns an RNN language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of raw (non-normalized)
activations for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of RNN layers.
rnn_cell: Type of RNN cell; must be a subclass of `Layer`.
rnn_cell_d_state_multiplier: Multiplier for feature depth of RNN cell
state.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout.
mode: If `'predict'`, use fast inference; if `'train'` apply dropout.
Returns:
An RNN language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def RNNLM(vocab_size,
d_model=512,
n_layers=2,
rnn_cell=tl.LSTMCell,
rnn_cell_d_state_multiplier=2,
dropout=0.1,
mode='train'):
"""Returns an RNN language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of raw (non-normalized)
activations for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of RNN layers.
rnn_cell: Type of RNN cell; must be a subclass of `Layer`.
rnn_cell_d_state_multiplier: Multiplier for feature depth of RNN cell
state.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout.
mode: If `'predict'`, use fast inference; if `'train'` apply dropout.
Returns:
An RNN language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
if n_layers != 2: # TODO(jonni): Remove n_layers arg, if it can't vary?
raise ValueError(f'Number of layers must be set to 2; instead got'
f' {n_layers}.')
def MultiRNNCell():
"""Multi-layer RNN cell."""
return tl.Serial(
tl.Parallel([], tl.Split(n_items=n_layers)),
tl.SerialWithSideOutputs(
[rnn_cell(n_units=d_model) for _ in range(n_layers)]),
tl.Parallel([], tl.Concatenate(n_items=n_layers))
)
zero_state = tl.MakeZeroState( # pylint: disable=no-value-for-parameter
depth_multiplier=n_layers * rnn_cell_d_state_multiplier
)
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.Branch([], zero_state),
tl.Scan(MultiRNNCell(), axis=1, mode=mode),
tl.Select([0], n_in=2), # Drop RNN state.
tl.Dense(vocab_size),
) |
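A brief usage sketch (illustrative values, `mode='eval'` to disable dropout); note the model ends with a Dense layer, so the outputs are unnormalized activations over the vocabulary.
import numpy as np
from trax import shapes

model = RNNLM(vocab_size=256, d_model=128, mode='eval')
tokens = np.zeros((2, 16), dtype=np.int32)  # (batch, sequence_length)
model.init(shapes.signature(tokens))
activations = model(tokens)  # shape (2, 16, 256)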
Returns a GRU (gated recurrent unit) language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of raw (non-normalized)
activations for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of GRU layers.
mode: If `'predict'`, use fast inference (and omit the right shift).
Returns:
A GRU language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def GRULM(vocab_size=256,
d_model=512,
n_layers=1,
mode='train'):
"""Returns a GRU (gated recurrent unit) language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of raw (non-normalized)
activations for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Embedding depth throughout the model.
n_layers: Number of GRU layers.
mode: If `'predict'`, use fast inference (and omit the right shift).
Returns:
A GRU language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
[tl.GRU(d_model, mode=mode) for _ in range(n_layers)],
tl.Dense(vocab_size),
) |
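The same calling pattern applies to the GRU variant; a short sketch with assumed toy sizes:
import numpy as np
from trax import shapes

model = GRULM(vocab_size=128, d_model=64, n_layers=2, mode='eval')
tokens = np.zeros((2, 8), dtype=np.int32)
model.init(shapes.signature(tokens))
activations = model(tokens)  # shape (2, 8, 128)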
Returns an LSTM sequence-to-sequence model with attention.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(target_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `target_vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
The model works as follows:
* Input encoder runs on the input tokens and creates activations that
are used as both keys and values in attention.
* Pre-attention decoder runs on the targets and creates
activations that are used as queries in attention.
* Attention runs on the queries, keys and values masking out input padding.
* Decoder runs on the result, followed by a cross-entropy loss.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
target_vocab_size: Target vocabulary size.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
n_encoder_layers: Number of LSTM layers in the encoder.
n_decoder_layers: Number of LSTM layers in the decoder after attention.
n_attention_heads: Number of attention heads.
attention_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout within an attention block.
mode: If `'predict'`, use fast inference. If `'train'`, each attention block
will include dropout; else, it will pass all values through unaltered.
Returns:
An LSTM sequence-to-sequence model as a layer that maps from a
source-target tokenized text pair to activations over a vocab set. | def LSTMSeq2SeqAttn(input_vocab_size=256,
target_vocab_size=256,
d_model=512,
n_encoder_layers=2,
n_decoder_layers=2,
n_attention_heads=1,
attention_dropout=0.0,
mode='train'):
"""Returns an LSTM sequence-to-sequence model with attention.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(target_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `target_vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
The model works as follows:
* Input encoder runs on the input tokens and creates activations that
are used as both keys and values in attention.
* Pre-attention decoder runs on the targets and creates
activations that are used as queries in attention.
* Attention runs on the queries, keys and values masking out input padding.
* Decoder runs on the result, followed by a cross-entropy loss.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
target_vocab_size: Target vocabulary size.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
n_encoder_layers: Number of LSTM layers in the encoder.
n_decoder_layers: Number of LSTM layers in the decoder after attention.
n_attention_heads: Number of attention heads.
attention_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout within an attention block.
mode: If `'predict'`, use fast inference. If `'train'`, each attention block
will include dropout; else, it will pass all values through unaltered.
Returns:
An LSTM sequence-to-sequence model as a layer that maps from a
source-target tokenized text pair to activations over a vocab set.
"""
input_encoder = tl.Serial(
tl.Embedding(input_vocab_size, d_model),
[tl.LSTM(d_model) for _ in range(n_encoder_layers)],
)
pre_attention_decoder = tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(target_vocab_size, d_model),
tl.LSTM(d_model, mode=mode),
)
def PrepareAttentionInputs():
"""Layer that prepares queries, keys, values and mask for attention."""
def F(encoder_activations, decoder_activations, input_tokens):
keys = values = encoder_activations
queries = decoder_activations
# Mask is 1 where inputs are not padding (0) and 0 where they are padding.
mask = (input_tokens != 0)
# We need to add axes to the mask for attention heads and decoder length.
mask = jnp.reshape(mask, (mask.shape[0], 1, 1, mask.shape[1]))
# Broadcast so mask is [batch, 1 for heads, decoder-len, encoder-len].
mask = mask + jnp.zeros((1, 1, decoder_activations.shape[1], 1))
mask = mask.astype(jnp.float32)
return queries, keys, values, mask
return tl.Fn('PrepareAttentionInputs', F, n_out=4)
return tl.Serial( # in-toks, target-toks
tl.Select([0, 1, 0, 1]), # in-toks, target-toks, in-toks, target-toks
tl.Parallel(input_encoder, pre_attention_decoder),
PrepareAttentionInputs(), # q, k, v, mask, target-toks
tl.Residual(
tl.AttentionQKV(d_model, n_heads=n_attention_heads,
dropout=attention_dropout, mode=mode,
cache_KV_in_predict=True)
), # decoder-vecs, mask, target-toks
tl.Select([0, 2]), # decoder-vecs, target-toks
[tl.LSTM(d_model, mode=mode) for _ in range(n_decoder_layers)],
tl.Dense(target_vocab_size),
tl.LogSoftmax()
) |
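A usage sketch with assumed toy sizes; the model takes a (source, target) pair and, besides the per-position log-probabilities, passes the target tokens through for a downstream loss.
import numpy as np
from trax import shapes

model = LSTMSeq2SeqAttn(input_vocab_size=128, target_vocab_size=128,
                        d_model=64, mode='eval')
src = np.zeros((2, 10), dtype=np.int32)  # source token IDs
tgt = np.zeros((2, 12), dtype=np.int32)  # target token IDs
model.init(shapes.signature((src, tgt)))
outputs = model((src, tgt))
log_probs = outputs[0]  # shape (2, 12, 128)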
Returns a Transformer encoder suitable for N-way classification.
This model maps tokenized text to N-way (``n_classes``) activations:
- input: Array representing a batch of text strings via token IDs plus
padding markers; shape is (batch_size, sequence_length), where
sequence_length <= ``max_len``. Array elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- output: Array representing a batch of raw (non-normalized) activations
over ``n_classes`` categories; shape is (batch_size, ``n_classes``).
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
n_classes: Last/innermost dimension of output arrays, suitable for N-way
classification.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also
used for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each encoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer model that maps strings (conveyed by token IDs) to
raw (non-normalized) activations over a range of output classes. | def TransformerEncoder(vocab_size,
n_classes=10,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer encoder suitable for N-way classification.
This model maps tokenized text to N-way (``n_classes``) activations:
- input: Array representing a batch of text strings via token IDs plus
padding markers; shape is (batch_size, sequence_length), where
sequence_length <= ``max_len``. Array elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- output: Array representing a batch of raw (non-normalized) activations
over ``n_classes`` categories; shape is (batch_size, ``n_classes``).
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
n_classes: Last/innermost dimension of output arrays, suitable for N-way
classification.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also
used for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each encoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer model that maps strings (conveyed by token IDs) to
raw (non-normalized) activations over a range of output classes.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _EncBlock():
return _EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
tl.Branch([], tl.PaddingMask()), # Creates masks from copy of the tokens.
tl.Embedding(vocab_size, d_model),
_Dropout(),
tl.PositionalEncoding(max_len=max_len),
[_EncBlock() for _ in range(n_layers)],
tl.Select([0], n_in=2), # Drops the masks.
tl.LayerNorm(),
tl.Mean(axis=1),
tl.Dense(n_classes),
) |
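A minimal classification sketch (toy hyperparameters chosen for illustration, `mode='eval'` to make the run deterministic):
import numpy as np
from trax import shapes

model = TransformerEncoder(vocab_size=128, n_classes=10, d_model=64, d_ff=128,
                           n_layers=2, n_heads=2, max_len=64, mode='eval')
tokens = np.zeros((2, 16), dtype=np.int32)  # 0 marks padding
model.init(shapes.signature(tokens))
logits = model(tokens)  # shape (2, 10)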
Returns a Transformer decoder.
This model maps sequential inputs to sequential outputs:
- input if ``vocab_size`` is specified: array representing a batch
of text strings via token IDs plus padding markers; shape is
(batch_size, sequence_length). The tensor elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- input if ``vocab_size`` is ``None``: 3-D array representing a batch of
sequences of activation vectors; shape is (batch_size, sequence_length,
``d_model``).
- output: 3-D array with shape (batch_size, sequence_length, ``d_model``).
The model uses causal attention and does *not* shift the input to the right.
Thus, the output for position `t` is based on inputs up to and including
position `t`.
Args:
vocab_size: If specified, gives the input vocabulary size -- each element
of the input tensor should be an integer in ``range(vocab_size)``.
If ``None``, indicates that the model expects as input sequences of
floating point vectors, each with ``d_model`` components.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each decoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each decoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each decoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
If ``vocab_size`` is defined: a Transformer model that maps strings
(conveyed by token IDs) to sequences of activation vectors.
If ``vocab_size`` is ``None``: a Transformer model that maps sequences of
activation vectors to sequences of activation vectors. | def TransformerDecoder(vocab_size=None,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer decoder.
This model maps sequential inputs to sequential outputs:
- input if ``vocab_size`` is specified: array representing a batch
of text strings via token IDs plus padding markers; shape is
(batch_size, sequence_length). The tensor elements are integers in
``range(vocab_size)``, and 0 values mark padding positions.
- input if ``vocab_size`` is ``None``: 3-D array representing a batch of
sequences of activation vectors; shape is (batch_size, sequence_length,
``d_model``).
- output: 3-D array with shape (batch_size, sequence_length, ``d_model``).
The model uses causal attention and does *not* shift the input to the right.
Thus, the output for position `t` is based on inputs up to and including
position `t`.
Args:
vocab_size: If specified, gives the input vocabulary size -- each element
of the input tensor should be an integer in ``range(vocab_size)``.
If ``None``, indicates that the model expects as input sequences of
floating point vectors, each with ``d_model`` components.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each decoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each decoder block will include dropout; else, it
will pass all values through unaltered.
ff_activation: Type of activation function at the end of each decoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
If ``vocab_size`` is defined: a Transformer model that maps strings
(conveyed by token IDs) to sequences of activation vectors.
If ``vocab_size`` is ``None``: a Transformer model that maps sequences of
activation vectors to sequences of activation vectors.
"""
def _EmbeddingOrDense():
return (tl.Embedding(vocab_size, d_model) if vocab_size is not None
else tl.Dense(d_model))
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _DecBlock():
return _DecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
_EmbeddingOrDense(),
_Dropout(),
tl.PositionalEncoding(max_len=max_len),
[_DecBlock() for _ in range(n_layers)],
tl.LayerNorm(),
) |
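A sketch of the `vocab_size=None` path, where the model consumes float vectors instead of token IDs (sizes are illustrative):
import numpy as np
from trax import shapes

model = TransformerDecoder(vocab_size=None, d_model=64, d_ff=128,
                           n_layers=2, n_heads=2, max_len=64, mode='eval')
vectors = np.zeros((2, 16, 32), dtype=np.float32)  # Dense maps depth 32 -> d_model
model.init(shapes.signature(vectors))
out = model(vectors)  # shape (2, 16, 64)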
Returns a Transformer language model.
This model performs autoregressive language modeling:
- input: Array representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). Array
elements are integers in ``range(vocab_size)``, and 0 values mark padding
positions.
- output: 3-D array of raw activations with last/innermost dimension of
``vocab_size``, suitable for decoding into a batch of token strings;
shape is (batch_size, sequence_length, ``vocab_size``).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each decoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each decoder
block will include dropout; else, it will pass all values through
unaltered.
ff_activation: Type of activation function at the end of each decoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer language model that maps strings (represented as token ID
sequences) to sequences of raw (non-normalized) activation vectors; each
vector in the sequence can be mapped (e.g., by `argmax`) to a token ID. | def TransformerLM(vocab_size,
d_model=D_MODEL,
d_ff=D_FF,
n_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a Transformer language model.
This model performs autoregressive language modeling:
- input: Array representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). Array
elements are integers in ``range(vocab_size)``, and 0 values mark padding
positions.
- output: 3-D array of raw activations with last/innermost dimension of
``vocab_size``, suitable for decoding into a batch of token strings;
shape is (batch_size, sequence_length, ``vocab_size``).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input array
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each decoder block.
n_layers: Number of decoder blocks. Each block includes attention, dropout,
residual, layer-norm, feedforward (:py:class:`Dense`), and activation
layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also
used for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each decoder
block will include dropout; else, it will pass all values through
unaltered.
ff_activation: Type of activation function at the end of each decoder
block; must be an activation-type subclass of :py:class:`Layer`.
Returns:
A Transformer language model that maps strings (represented as token ID
sequences) to sequences of raw (non-normalized) activation vectors; each
vector in the sequence can be mapped (e.g., by `argmax`) to a token ID.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _DecBlock():
return _DecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
return tl.Serial(
tl.ShiftRight(mode=mode),
tl.Embedding(vocab_size, d_model),
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=mode),
[_DecBlock() for _ in range(n_layers)],
tl.LayerNorm(),
tl.Dense(vocab_size),
) |
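A short usage sketch (toy sizes; for step-by-step decoding one would instead build the model with `mode='predict'`):
import numpy as np
from trax import shapes

model = TransformerLM(vocab_size=128, d_model=64, d_ff=128,
                      n_layers=2, n_heads=2, max_len=64, mode='eval')
tokens = np.zeros((2, 16), dtype=np.int32)
model.init(shapes.signature(tokens))
logits = model(tokens)  # shape (2, 16, 128)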
Returns a full Transformer model.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(input_vocab_size)``, and 0 values mark padding positions.
- target: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(output_vocab_size)``, and 0 values mark padding positions.
- output: 3-D array of raw activations with last/innermost dimension of
``output_vocab_size``, suitable for decoding into a batch of token
strings; shape is (batch_size, sequence_length, ``vocab_size``).
An example use would be to translate (tokenized) sentences from English to
German.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if ``None``, then input and target integers (token IDs) are assumed to
come from the same vocabulary.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder/decoder block.
n_encoder_layers: Number of encoder blocks.
n_decoder_layers: Number of decoder blocks.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each
encoder/decoder block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each
encoder/decoder block; must be an activation-type subclass of
:py:class:`Layer`.
Returns:
A Transformer model as a layer that maps from a source-target tokenized
text pair to activations over a vocab set. | def Transformer(input_vocab_size,
output_vocab_size=None,
d_model=D_MODEL,
d_ff=D_FF,
n_encoder_layers=N_LAYERS,
n_decoder_layers=N_LAYERS,
n_heads=N_HEADS,
max_len=MAX_SEQUENCE_LENGTH,
dropout=DROPOUT_RATE,
dropout_shared_axes=DROPOUT_SHARED_AXES,
mode=MODE,
ff_activation=FF_ACTIVATION_TYPE):
"""Returns a full Transformer model.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(input_vocab_size)``, and 0 values mark padding positions.
- target: Array representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length),
where sequence_length <= ``max_len``. Array elements are integers in
``range(output_vocab_size)``, and 0 values mark padding positions.
- output: 3-D array of raw activations with last/innermost dimension of
``output_vocab_size``, suitable for decoding into a batch of token
strings; shape is (batch_size, sequence_length, ``vocab_size``).
An example use would be to translate (tokenized) sentences from English to
German.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in ``range(vocab_size)``. These integers typically
represent token IDs from a vocabulary-based tokenizer.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if ``None``, then input and target integers (token IDs) are assumed to
come from the same vocabulary.
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each encoder/decoder block.
n_encoder_layers: Number of encoder blocks.
n_decoder_layers: Number of decoder blocks.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'predict'``, use fast inference. If ``'train'``, each
encoder/decoder block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each
encoder/decoder block; must be an activation-type subclass of
:py:class:`Layer`.
Returns:
A Transformer model as a layer that maps from a source-target tokenized
text pair to activations over a vocab set.
"""
# Avoid 'predict' mode in encoder, since encoder doesn't run stepwise.
encoder_mode = 'eval' if mode == 'predict' else mode
# Share embedding weights if no separate output vocab size.
in_embedder = tl.Embedding(input_vocab_size, d_model)
if output_vocab_size is None:
out_embedder = in_embedder
output_vocab_size = input_vocab_size
else:
out_embedder = tl.Embedding(output_vocab_size, d_model)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _EncBlock():
return _EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
def _Encoder():
encoder = tl.Serial(
in_embedder,
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=encoder_mode),
[_EncBlock() for _ in range(n_encoder_layers)],
tl.LayerNorm(),
)
return tl.Cache(encoder) if mode == 'predict' else encoder
def _EncDecBlock():
return _EncoderDecoderBlock(d_model, d_ff, n_heads, dropout,
dropout_shared_axes, mode, ff_activation)
# Input to model is encoder-side tokens and decoder-side tokens: tok_e, tok_d
# Model output is decoder-side vectors and decoder-side tokens: vec_d, tok_d
return tl.Serial(
tl.Select([0, 1, 1]), # Copies decoder tokens for use in loss.
# Encode.
tl.Branch([], tl.PaddingMask()), # tok_e masks tok_d tok_d
_Encoder(),
# Decode.
tl.Select([2, 1, 0]), # Re-orders inputs: tok_d masks vec_e .....
tl.ShiftRight(mode=mode),
out_embedder,
_Dropout(),
tl.PositionalEncoding(max_len=max_len, mode=mode),
tl.Branch([], tl.EncoderDecoderMask()), # vec_d masks ..... .....
[_EncDecBlock() for _ in range(n_decoder_layers)],
tl.LayerNorm(),
tl.Select([0], n_in=3), # Drops masks and encoding vectors.
# Map vectors to match output vocab size.
tl.Dense(output_vocab_size),
) |
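A sketch of calling the full encoder-decoder on a (source, target) pair; per the stack comments above, the model returns decoder logits plus a copy of the target tokens (sizes below are assumptions for illustration).
import numpy as np
from trax import shapes

model = Transformer(input_vocab_size=128, d_model=64, d_ff=128,
                    n_encoder_layers=2, n_decoder_layers=2,
                    n_heads=2, max_len=64, mode='eval')
src = np.zeros((2, 10), dtype=np.int32)
tgt = np.zeros((2, 12), dtype=np.int32)
model.init(shapes.signature((src, tgt)))
logits, tgt_copy = model((src, tgt))  # logits shape (2, 12, 128)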
Returns a list of layers that implements a Transformer encoder block.
The input to the block is a pair (activations, mask) where the mask was
created from the original source tokens to prevent attending to the padding
part of the input. The block's outputs are the same type/shape as its inputs,
so that multiple blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also used
for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder block. | def _EncoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers that implements a Transformer encoder block.
The input to the block is a pair (activations, mask) where the mask was
created from the original source tokens to prevent attending to the padding
part of the input. The block's outputs are the same type/shape as its inputs,
so that multiple blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder blocks. The same rate is also used
for attention dropout in encoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder block.
"""
def _Attention():
return tl.Attention(d_model, n_heads=n_heads, dropout=dropout, mode=mode)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
return [
tl.Residual(
tl.LayerNorm(),
_Attention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
] |
Returns a list of layers that implements a Transformer decoder block.
The input to the block is a pair (activations, mask) where the mask encodes
causal connections, preventing attention to future positions in the sequence.
The block's outputs are the same type/shape as its inputs, so that multiple
blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also used
for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) decoder block. | def _DecoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers that implements a Transformer decoder block.
The input to the block is a pair (activations, mask) where the mask encodes
causal connections, preventing attention to future positions in the sequence.
The block's outputs are the same type/shape as its inputs, so that multiple
blocks can be chained together.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within decoder blocks. The same rate is also used
for attention dropout in decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) decoder block.
"""
def _CausalAttention():
return tl.CausalAttention(d_model, n_heads=n_heads, dropout=dropout,
mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Residual(
tl.LayerNorm(),
_CausalAttention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
] |
Returns a list of layers implementing a Transformer encoder-decoder block.
The block input is a triple (decoder_activations, mask, encoder_activations)
where the mask was created from the original input token IDs to prevent
attending to padding positions for that input.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder-decoder
block. | def _EncoderDecoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation):
"""Returns a list of layers implementing a Transformer encoder-decoder block.
The block input is a triple (decoder_activations, mask, encoder_activations)
where the mask was created from the original input token IDs to prevent
attending to padding positions for that input.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within encoder/decoder blocks. The same rate is
also used for attention dropout in encoder/decoder blocks.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that act in series as a (repeatable) encoder-decoder
block.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
def _AttentionQKV():
return tl.AttentionQKV(d_model, n_heads=n_heads, dropout=dropout,
mode=mode, cache_KV_in_predict=True)
def _CausalAttention():
return tl.CausalAttention(d_model, n_heads=n_heads, mode=mode)
def _FFBlock():
return _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes, mode,
ff_activation)
return [ # vec_d masks vec_e
tl.Residual(
tl.LayerNorm(),
_CausalAttention(),
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
tl.Select([0, 2, 2, 1, 2]), # vec_d vec_e vec_e masks vec_e
_AttentionQKV(), # vec_d masks vec_e
_Dropout(),
),
tl.Residual(
tl.LayerNorm(),
_FFBlock(),
_Dropout(),
),
] |
Returns a list of layers that implements a feedforward block.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that maps vectors to vectors. | def _FeedForwardBlock(d_model,
d_ff,
dropout,
dropout_shared_axes,
mode,
activation):
"""Returns a list of layers that implements a feedforward block.
Args:
d_model: Last/innermost dimension of activation arrays at most points in
the model, including the initial embedding output.
d_ff: Last/innermost dimension of special (typically wider)
:py:class:`Dense` layer in the feedforward part of each block.
dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask.
Sharing along batch and sequence axes (``dropout_shared_axes=(0,1)``)
is a useful way to save memory and apply consistent masks to activation
vectors at different sequence positions.
mode: If ``'train'``, each block will include dropout; else, it will
pass all values through unaltered.
activation: Type of activation function at the end of each block; must
be an activation-type subclass of :py:class:`Layer`.
Returns:
A list of layers that maps vectors to vectors.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Dense(d_ff),
activation(),
_Dropout(),
tl.Dense(d_model),
] |
Reversible transformer decoder layer.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_heads: int: number of attention heads
attention_type: subclass of tl.BaseCausalAttention: attention class to use
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: the dropout rate in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
n_attention_layers: how many residual causal attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
center_layernorm: whether to use centering in LayerNorm (default) or if
to skip it, which is known as RMS normalization.
use_bfloat16: whether to use bfloat16 for weights (default: False).
mode: str: 'train' or 'eval'
Returns:
the layer. | def DecoderBlock(d_model, d_ff, d_attention_key, d_attention_value,
n_heads, attention_type, dropout, ff_activation,
ff_dropout, ff_use_sru, ff_chunk_size, ff_sparsity,
attention_chunk_size, n_attention_layers=1,
n_feedforward_layers=1, center_layernorm=True,
use_bfloat16=False, mode='train'):
"""Reversible transformer decoder layer.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_heads: int: number of attention heads
attention_type: subclass of tl.BaseCausalAttention: attention class to use
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: the dropout rate in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
n_attention_layers: how many residual causal attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
center_layernorm: whether to use centering in LayerNorm (default) or if
to skip it, which is known as RMS normalization.
use_bfloat16: whether to use bfloat16 for weights (default: False).
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
# pylint: disable=g-complex-comprehension
def _Attn():
return ct.ApplyAttentionLayer(
attention_type, d_model, n_heads, d_attention_key,
d_attention_value, True, False, dropout, dropout,
attention_chunk_size, mode)
def _FF():
return ct.FeedForwardWithOptions(
d_model, d_ff, dropout, [-2], ff_activation, ff_dropout,
ff_chunk_size, ff_use_sru, ff_sparsity, center_layernorm,
mode, use_bfloat16)
def _attention_half_residual():
return [
tl.ReversibleHalfResidual(tl.LayerNorm(center=center_layernorm),
attention_layer=_Attn(),
name='ReversibleHalfResidualDecoderAttn'),
tl.ReversibleSwap()
]
def _feed_forward():
return [
tl.ReversibleHalfResidual(_FF(),
name='ReversibleHalfResidualDecoderFF'),
tl.ReversibleSwap()
]
return ([_attention_half_residual() for _ in range(n_attention_layers)]
+ [_feed_forward() for _ in range(n_feedforward_layers)]) |
Reversible transformer language model (only uses a decoder, no encoder).
Args:
vocab_size: int: vocab size
d_model: int: depth of *each half* of the two-part features
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
attention_type: class: attention class to use, such as SelfAttention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
ff_activation: the non-linearity in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
loss_sparsity_type: str, type of sparsity to be used in the loss layer. See
SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
mode: str: 'train', 'eval', or 'predict'
Returns:
the layer. | def ReformerLM(vocab_size,
d_model=512,
d_ff=2048,
d_attention_key=64,
d_attention_value=64,
n_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
attention_type=tl.SelfAttention,
pos_type=None,
pos_axial_shape=(),
pos_d_axial_embs=None,
pos_start_from_zero_prob=1.0,
pos_max_offset_to_add=0,
ff_activation=tl.FastGelu,
ff_use_sru=0,
ff_chunk_size=0,
ff_sparsity=0,
loss_sparsity_type='mult',
loss_sparsity=0,
loss_d_lowrank=0,
loss_sparsity_prob=None,
attention_chunk_size=0,
mode='train'):
"""Reversible transformer language model (only uses a decoder, no encoder).
Args:
vocab_size: int: vocab size
d_model: int: depth of *each half* of the two-part features
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
attention_type: class: attention class to use, such as SelfAttention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
ff_activation: the non-linearity in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
loss_sparsity_type: str, type of sparsity to be used in the loss layer. See
SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
mode: str: 'train', 'eval', or 'predict'
Returns:
the layer.
"""
positional_encoding = ct.PositionalEncoder(
mode, dropout, max_len, pos_type, pos_axial_shape, pos_d_axial_embs,
pos_start_from_zero_prob, pos_max_offset_to_add)
positional_embedder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=[-2], mode=mode), # pylint: disable=no-value-for-parameter
positional_encoding,
]
decoder_blocks = []
if isinstance(attention_type, (tuple, list)):
assert n_layers % len(attention_type) == 0
else:
attention_type = [attention_type]
for layer_idx in range(n_layers):
layer_attention_type = attention_type[layer_idx % len(attention_type)]
decoder_block = DecoderBlock(
d_model, d_ff, d_attention_key, d_attention_value, n_heads,
attention_type=layer_attention_type,
dropout=dropout,
ff_activation=ff_activation,
ff_dropout=dropout,
ff_use_sru=ff_use_sru,
ff_chunk_size=ff_chunk_size,
ff_sparsity=ff_sparsity,
attention_chunk_size=attention_chunk_size,
mode=mode)
decoder_blocks.append(decoder_block)
dense_loss_layer = tl.SparseDenseWithOptions(
vocab_size,
d_input=d_model,
sparsity_type=loss_sparsity_type,
sparsity=loss_sparsity,
d_lowrank=loss_d_lowrank,
prob_sparse=loss_sparsity_prob,
mode=mode)
return tl.Serial(
tl.ShiftRight(mode=mode),
positional_embedder,
tl.Dup(),
tl.ReversibleSerial(decoder_blocks),
tl.Concatenate(),
# TODO(kitaev): Test whether dropout should go before or after the
# LayerNorm, and whether dropout broadcasting is needed here.
tl.LayerNorm(),
tl.Dropout(rate=dropout, shared_axes=[-2], mode=mode), # pylint: disable=no-value-for-parameter
dense_loss_layer,
) |
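# A minimal usage sketch with small, hypothetical sizes: build the LM,
# initialize it from an input signature and run it on a batch of token IDs.
# The output is per-position logits of shape (batch, length, vocab_size).
from trax import shapes
example_lm = ReformerLM(vocab_size=320, d_model=64, d_ff=128, n_layers=2,
                        n_heads=2, max_len=128, mode='eval')
example_lm.init(shapes.ShapeDtype((1, 128), dtype=jnp.int32))
example_logits = example_lm(jnp.zeros((1, 128), dtype=jnp.int32))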
Reversible transformer language model with shortening.
When shorten_factor is F and processing an input of shape [batch, length],
we embed the (shifted-right) input and then group each F elements (on length)
into a single vector -- so that in the end we process a tensor of shape ::
[batch, length // F, d_model]
almost until the end -- at the end it's un-shortened and an SRU is applied.
This reduces the length processed inside the main model body, effectively
making the model faster but possibly slightly less accurate.
Args:
vocab_size: int: vocab size
shorten_factor: by how much to shorten, see above
d_embedding: the depth of the embedding layer and final logits
d_model: int: depth of *each half* of the two-part features
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
attention_type: class: attention class to use, such as SelfAttention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, values must sum to d_embedding.
ff_activation: the non-linearity in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
mode: str: 'train' or 'eval'
Returns:
the layer. | def ReformerShortenLM(vocab_size,
shorten_factor=1,
d_embedding=256,
d_model=512,
d_ff=2048,
d_attention_key=64,
d_attention_value=64,
n_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
attention_type=tl.SelfAttention,
pos_type=None,
pos_axial_shape=(),
pos_d_axial_embs=None,
ff_activation=tl.FastGelu,
ff_use_sru=0,
ff_chunk_size=0,
ff_sparsity=0,
attention_chunk_size=0,
mode='train'):
"""Reversible transformer language model with shortening.
When shorten_factor is F and processing an input of shape [batch, length],
we embed the (shifted-right) input and then group each F elements (on length)
into a single vector -- so that in the end we process a tensor of shape ::
[batch, length // F, d_model]
almost until the end -- at the end it's un-shortened and an SRU is applied.
This reduces the length processed inside the main model body, effectively
making the model faster but possibly slightly less accurate.
Args:
vocab_size: int: vocab size
shorten_factor: by how much to shorten, see above
d_embedding: the depth of the embedding layer and final logits
d_model: int: depth of *each half* of the two-part features
d_ff: int: depth of feed-forward layer
d_attention_key: int: depth of key vector for each attention head
d_attention_value: int: depth of value vector for each attention head
n_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
attention_type: class: attention class to use, such as SelfAttention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, values must sum to d_embedding.
ff_activation: the non-linearity in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
assert mode != 'predict' # TODO(lukaszkaiser,kitaev): fast inference
positional_encoding = ct.PositionalEncoder(
mode, dropout, max_len, pos_type, pos_axial_shape, pos_d_axial_embs)
positional_embedder = [
tl.Embedding(vocab_size, d_embedding),
tl.Dropout(rate=dropout, shared_axes=[-2], mode=mode), # pylint: disable=no-value-for-parameter
positional_encoding,
]
decoder_blocks = []
if isinstance(attention_type, (tuple, list)):
assert n_layers % len(attention_type) == 0
else:
attention_type = [attention_type]
for layer_idx in range(n_layers):
layer_attention_type = attention_type[layer_idx % len(attention_type)]
decoder_block = DecoderBlock(
d_model, d_ff, d_attention_key, d_attention_value, n_heads,
attention_type=layer_attention_type,
dropout=dropout,
ff_activation=ff_activation,
ff_dropout=dropout,
ff_use_sru=ff_use_sru,
ff_chunk_size=ff_chunk_size,
ff_sparsity=ff_sparsity,
attention_chunk_size=attention_chunk_size,
mode=mode)
decoder_blocks.append(decoder_block)
# pylint: disable=g-long-lambda
return tl.Serial(
tl.ShiftRight(),
positional_embedder,
tl.Dup(), # Stack has (x, x), the first will be shortened
# Before shortening, we need to pad by the shorten factor so as not to leak
# information into the future. To understand why, imagine a shorten factor
# of 2 and a sequence of length 4, so ABCD. If we shift just by 1, we
# would have 0ABC, which gets grouped to [0A][BC] on input while
# predicting ABCD as targets. The problem is that [0A] has access to A
# and [BC] has access to C -- the model would learn to copy them, peeking
# into the future. Shifting twice to [00][AB] solves the problem, as the
# first "big" symbol becomes all-zero and the rest is shifted enough.
tl.ShiftRight(n_positions=shorten_factor - 1),
tl.Fn('Shorten', lambda x: jnp.reshape( # Shorten -- move to depth.
x, (x.shape[0], x.shape[1] // shorten_factor, -1)), n_out=1),
tl.Dense(d_model),
tl.Dup(), # Stack has (short_x, short_x, x)
tl.ReversibleSerial(decoder_blocks),
tl.Select([0], n_in=2),
tl.LayerNorm(),
tl.Dropout(rate=dropout, shared_axes=[-2], mode=mode), # pylint: disable=no-value-for-parameter
tl.Dense(shorten_factor * d_embedding),
tl.Fn('ProlongBack', lambda x: jnp.reshape( # Prolong back.
x, (x.shape[0], x.shape[1] * shorten_factor, -1)), n_out=1),
tl.Concatenate(), # Concatenate with just the embeddings.
tl.CausalConv(d_embedding),
tl.Relu(),
tl.SRU(d_embedding), # One RNN layer for conditional dependence.
tl.Dense(vocab_size),
) |
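# A small standalone sketch of the Shorten / ProlongBack arithmetic above,
# with hypothetical sizes: a shorten factor of 4 folds groups of 4 positions
# into the depth dimension, and the prolong step undoes the fold.
example_x = jnp.zeros((2, 8, 16))                          # (batch, length, d_embedding)
example_short = jnp.reshape(example_x, (2, 8 // 4, -1))    # -> (2, 2, 64)
example_back = jnp.reshape(example_short, (2, 2 * 4, -1))  # -> (2, 8, 16)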
Returns a list of layers that implements a Reformer encoder block.
The input to the layer is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
attention_type: subclass of tl.BaseCausalAttention: attention class to use
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: the dropout rate in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
center_layernorm: whether to use centering in LayerNorm (default) or to
skip it, which is known as RMS normalization.
use_bfloat16: whether to use bfloat16 for weights (default: False)
use_two_swaps_per_block: bool, if True use two reversible swaps in Encoder
block, otherwise use only one swap.
mode: str: 'train' or 'eval'
Returns:
A list of layers that maps (activations, mask) to (activations, mask). | def EncoderBlock(d_model, d_ff, n_heads, attention_type, dropout, ff_activation,
ff_dropout, ff_use_sru=0, ff_chunk_size=0, ff_sparsity=0,
attention_chunk_size=0, center_layernorm=True,
use_bfloat16=False, use_two_swaps_per_block=True,
mode='train'):
"""Returns a list of layers that implements a Reformer encoder block.
The input to the layer is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
attention_type: subclass of tl.BaseCausalAttention: attention class to use
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: the dropout rate in feed-forward layer
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
attention_chunk_size: int, if > 0 run attention chunked at this size
center_layernorm: whether to use centering in LayerNorm (default) or to
skip it, which is known as RMS normalization.
use_bfloat16: whether to use bfloat16 for weights (default: False)
use_two_swaps_per_block: bool, if True use two reversible swaps in Encoder
block, otherwise use only one swap.
mode: str: 'train' or 'eval'
Returns:
A list of layers that maps (activations, mask) to (activations, mask).
"""
if mode == 'predict':
# Mode 'predict' means that the decoder should be run one token at a time.
# The encoder only ever runs over full sequences, which is why it's switched
# to 'eval' mode instead.
mode = 'eval'
def _Attn():
return ct.ApplyAttentionLayer(
attention_type=attention_type, d_model=d_model, n_heads=n_heads,
d_qk=d_model//n_heads, d_v=d_model//n_heads, masked=True, causal=False,
attention_dropout=dropout, output_dropout=dropout,
attention_chunk_size=attention_chunk_size, mode=mode)
def _FF():
return ct.FeedForwardWithOptions(
d_model, d_ff, dropout, [-2], ff_activation, ff_dropout,
ff_chunk_size, ff_use_sru, ff_sparsity, center_layernorm,
mode, use_bfloat16)
# TODO(lukaszkaiser): refactor efficient attention layers to unify the API
# If we're using standard attention, we need to pass a reshaped mask and
# not return the mask, to stay compatible with the EfficientAttention API.
attention = _Attn()
if attention.n_out == 2:
attention = tl.Serial(
tl.Parallel([], _InsertAxes12()),
attention,
tl.Select([0], n_in=2)
)
def _attention_half_residual():
return [
tl.ReversibleHalfResidual(tl.LayerNorm(center=center_layernorm),
attention_layer=attention,
name='ReversibleHalfResidualEncoderAttn'),
tl.ReversibleSwap()
]
def _feed_forward():
layers = [
tl.ReversibleHalfResidual(_FF(),
name='ReversibleHalfResidualEncoderFF')
]
if use_two_swaps_per_block:
layers.append(tl.ReversibleSwap())
return layers
return _attention_half_residual() + _feed_forward() |
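# A minimal construction sketch (hypothetical sizes). The returned list of
# reversible layers operates on the two-stream reversible format plus a
# padding mask, so it is normally wrapped in tl.ReversibleSerial as in
# Reformer() below.
example_encoder_block = EncoderBlock(
    d_model=64, d_ff=128, n_heads=2, attention_type=tl.SelfAttention,
    dropout=0.1, ff_activation=tl.Relu, ff_dropout=0.1, mode='train')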
Reversible transformer decoder layer.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: float: (optional) separate dropout rate for feed-forward layer
mode: str: 'train' or 'eval'
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
Returns:
the layer. | def EncoderDecoderBlock(d_model, d_ff, n_heads, dropout, ff_activation,
ff_dropout, mode, ff_use_sru=0, ff_chunk_size=0,
ff_sparsity=0):
"""Reversible transformer decoder layer.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
ff_activation: the non-linearity in feed-forward layer
ff_dropout: float: (optional) separate dropout rate for feed-forward layer
mode: str: 'train' or 'eval'
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
Returns:
the layer.
"""
enc_dec_attention = tl.EncDecAttention(
n_heads=n_heads, d_qk=d_model//n_heads, d_v=d_model//n_heads,
attention_dropout=dropout, output_dropout=dropout,
mode=mode)
enc_dec_attention_half_residual = tl.ReversibleHalfResidual(
tl.LayerNorm(),
attention_layer=enc_dec_attention,
)
causal_attention = tl.SelfAttention(
n_heads=n_heads, d_qk=d_model//n_heads, d_v=d_model//n_heads,
causal=True,
attention_dropout=dropout, output_dropout=dropout,
mode=mode)
causal_attention_half_residual = tl.ReversibleHalfResidual(
tl.LayerNorm(),
attention_layer=causal_attention,
)
feed_forward = ct.FeedForwardWithOptions(
d_model, d_ff, dropout, [-2], ff_activation, ff_dropout,
ff_chunk_size, ff_use_sru, ff_sparsity, True, mode)
return [ # vec_d1 vec_d2 vec_e masks
causal_attention_half_residual,
tl.ReversibleSwap(),
enc_dec_attention_half_residual,
tl.ReversibleSwap(),
tl.ReversibleHalfResidual(feed_forward),
tl.ReversibleSwap(),
] |
Reversible transformer encoder-decoder model.
This model expects an input pair: target, source.
At the moment, this model supports dot-product attention only. For the
attention types in the Reformer paper, see ReformerLM.
Args:
input_vocab_size: int: vocab size of the source.
output_vocab_size: int (optional): vocab size of the target. If None, the
source and target are assumed to have the same vocab.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_encoder_layers: int: number of encoder layers
n_decoder_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
ff_activation: the non-linearity in feed-forward layer
ff_dropout: float: (optional) separate dropout rate at feed-forward
nonlinearity. This is called relu_dropout in T2T.
mode: str: 'train' or 'eval'
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
Returns:
A Reformer model as a layer that maps from a target, source pair to
activations over a vocab set. | def Reformer(input_vocab_size,
output_vocab_size=None,
d_model=512,
d_ff=2048,
n_encoder_layers=6,
n_decoder_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
ff_activation=tl.Relu,
ff_dropout=None,
mode='train',
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None,
ff_use_sru=0,
ff_chunk_size=0,
ff_sparsity=0):
"""Reversible transformer encoder-decoder model.
This model expects an input pair: target, source.
At the moment, this model supports dot-product attention only. For the
attention types in the Reformer paper, see ReformerLM.
Args:
input_vocab_size: int: vocab size of the source.
output_vocab_size: int (optional): vocab size of the target. If None, the
source and target are assumed to have the same vocab.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_encoder_layers: int: number of encoder layers
n_decoder_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
ff_activation: the non-linearity in feed-forward layer
ff_dropout: float: (optional) separate dropout rate at feed-forward
nonlinearity. This is called relu_dropout in T2T.
mode: str: 'train' or 'eval'
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
ff_use_sru: int; if > 0, we use this many SRU layers instead of feed-forward
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
Returns:
A Reformer model as a layer that maps from a target, source pair to
activations over a vocab set.
"""
in_encoder, out_encoder, output_vocab_size = (
ct.EmbeddingAndPositionalEncodings(
input_vocab_size,
d_model,
mode,
dropout,
[-2], # dropout_shared_axes
max_len,
output_vocab_size=output_vocab_size,
pos_type=pos_type,
pos_axial_shape=pos_axial_shape,
pos_d_axial_embs=pos_d_axial_embs)
)
# pylint: disable=g-complex-comprehension
encoder_blocks = [
EncoderBlock(
d_model, d_ff, n_heads, tl.SelfAttention, dropout, ff_activation,
ff_dropout, mode=mode, ff_use_sru=ff_use_sru,
ff_chunk_size=ff_chunk_size, ff_sparsity=ff_sparsity)
for _ in range(n_encoder_layers)]
# pylint: enable=g-complex-comprehension
encoder = tl.Serial([
in_encoder,
tl.Dup(),
tl.ReversibleSerial(encoder_blocks),
_XYAvg(),
tl.LayerNorm(),
])
if mode == 'predict':
encoder = tl.Cache(encoder)
# pylint: disable=g-complex-comprehension
encoder_decoder_blocks = [
EncoderDecoderBlock(
d_model, d_ff, n_heads, dropout, ff_activation, ff_dropout, mode,
ff_use_sru=ff_use_sru, ff_chunk_size=ff_chunk_size,
ff_sparsity=ff_sparsity)
for _ in range(n_decoder_layers)]
# pylint: enable=g-complex-comprehension
# Assemble and return the model.
return tl.Serial(
# Input: encoder_side_tokens, decoder_side_tokens
# Copy decoder tokens for use in loss.
tl.Select([0, 1, 1]), # tok_e tok_d tok_d
tl.Branch([], [tl.PaddingMask(),
_RemoveAxes12()]), # tok_e mask tok_d .....
# Encode.
encoder, # vec_e mask tok_d .....
# Decode.
tl.Select([2, 0, 1]), # tok_d vec_e mask .....
tl.ShiftRight(mode=mode), # tok_d vec_e mask .....
out_encoder, # vec_d vec_e mask .....
tl.Dup(), # vec_d1 vec_d2 vec_e mask .....
tl.ReversibleSerial(encoder_decoder_blocks),
_XYAvg(), # vec_d vec_e mask .....
tl.LayerNorm(), # vec_d vec_e mask .....
# Map to output vocab.
tl.Select([0], n_in=3), # vec_d .....
tl.Dense(output_vocab_size), # vec_d .....
) |
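# A minimal initialization sketch (hypothetical sizes): the model takes a
# pair of token tensors (see the docstring above for the expected order)
# and is initialized from a pair of input signatures.
from trax import shapes
example_reformer = Reformer(input_vocab_size=320, d_model=64, d_ff=128,
                            n_encoder_layers=1, n_decoder_layers=1,
                            n_heads=2, max_len=64, mode='eval')
example_reformer.init((shapes.ShapeDtype((1, 64), dtype=jnp.int32),
                       shapes.ShapeDtype((1, 64), dtype=jnp.int32)))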
Returns a layer that inserts two internal size-1 axes into an array. | def _InsertAxes12():
"""Returns a layer that inserts two internal size-1 axes into an array."""
return tl.Fn('InsertAxes12',
lambda x: jnp.reshape(x, (x.shape[0], 1, 1, x.shape[1]))) |
Returns a layer that removes two internal size-1 axes from an array. | def _RemoveAxes12():
"""Returns a layer that removes two internal size-1 axes from an array."""
return tl.Fn('RemoveAxes12', lambda x: jnp.squeeze(x, (1, 2))) |
Returns a layer that makes mask values look like token ID ints. | def _AsTokenIDs():
"""Returns a layer that makes mask values look like token ID ints."""
return tl.Fn('AsTokenIDs', lambda x: x.astype(jnp.int32)) |
Returns a layer that computes the element-wise average of two arrays. | def _XYAvg():
"""Returns a layer that computes the element-wise average of two arrays."""
return tl.Fn('XYAvg', lambda x, y: (x + y) / 2.0) |
ReversibleSerial but with a forgetting block every n_layers. | def _ReversibleSerialForget(layers, d_model, n_layers, forget_dense=True):
"""ReversibleSerial but with a forgetting block every n_layers."""
if not n_layers or len(layers) <= n_layers + 1:
return tl.ReversibleSerial(layers)
layers1, layers2 = layers[:n_layers], layers[n_layers:]
if forget_dense:
forgetting_layer = tl.Serial(
_XYAvg(),
tl.Dense(d_model),
tl.Dup(),
)
else:
forgetting_layer = tl.Select([0, 1])
return tl.Serial(
tl.ReversibleSerial(layers1),
forgetting_layer,
_ReversibleSerialForget(layers2, d_model, n_layers, forget_dense)
) |
BERT (default hparams are for bert-base-uncased). | def BERT(
d_model=768,
vocab_size=30522,
max_len=512,
type_vocab_size=2,
n_heads=12,
d_ff=3072,
n_layers=12,
head=None,
init_checkpoint=None,
mode='eval',
):
"""BERT (default hparams are for bert-base-uncased)."""
# TODO(piotrekp1): loading config from model_name
layer_norm_eps = 1e-12
d_head = d_model // n_heads
word_embeddings = tl.Embedding(vocab_size, d_model)
type_embeddings = tl.Embedding(type_vocab_size, d_model)
position_embeddings = tl.PositionalEncoding(max_len, mode=mode)
embeddings = [
tl.Select([0, 1, 0], n_in=3), # Drops 'idx' input.
tl.Parallel(word_embeddings, type_embeddings, [
tl.PaddingMask(),
tl.Fn('Squeeze', lambda x: np.squeeze(x, (1, 2)), n_out=1)
]),
tl.Add(),
position_embeddings,
tl.LayerNorm(epsilon=layer_norm_eps),
]
encoder = []
for _ in range(n_layers):
attn = tl.SelfAttention(
n_heads=n_heads,
d_qk=d_head,
d_v=d_head,
bias=True,
masked=True,
mode=mode)
feed_forward = [tl.Dense(d_ff), tl.Gelu(), tl.Dense(d_model)]
encoder += [
tl.Select([0, 1, 1]), # Save a copy of the mask
tl.Residual(attn, AddBias()), # pylint: disable=no-value-for-parameter
tl.LayerNorm(epsilon=layer_norm_eps),
tl.Residual(*feed_forward),
tl.LayerNorm(epsilon=layer_norm_eps),
]
encoder += [tl.Select([0], n_in=2)] # Drop the mask
pooler = [
tl.Fn('', lambda x: (x[:, 0, :], x), n_out=2),
tl.Dense(d_model),
tl.Tanh(),
]
init_checkpoint = init_checkpoint if mode == 'train' else None
bert = PretrainedBERT(
embeddings + encoder + pooler, init_checkpoint=init_checkpoint)
if head is not None:
bert = tl.Serial(bert, head())
return bert |
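# A sketch of attaching a hypothetical classification head: the model above
# leaves (pooled_vector, full_sequence) on the stack, so a head can select
# the pooled vector and map it to class log-probabilities. This 2-way head
# is an illustrative assumption, not the library's own head layer.
def _ExampleTwoWayHead():
  return tl.Serial(tl.Select([0], n_in=2), tl.Dense(2), tl.LogSoftmax())

example_bert_classifier = BERT(head=_ExampleTwoWayHead, mode='eval')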
Feed-forward block with layer normalization at start. | def _FeedForward(d_model, d_ff, dropout, activation, act_dropout,
use_bfloat16, mode):
"""Feed-forward block with layer normalization at start."""
if act_dropout is None:
act_dropout = dropout
return [
tl.Dense(d_ff, use_bfloat16=use_bfloat16),
tl.Dropout(rate=act_dropout, shared_axes=[-2], mode=mode),
activation(),
tl.Dense(d_model, use_bfloat16=use_bfloat16),
] |
Feed-Forward block with all the options.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, tuple or string; if not 0, use sparse feed-forward block
with this sparsity
center_layernorm: whether to use centering in LayerNorm (default) or to
skip it, which is known as RMS normalization.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
use_bfloat16: whether to use bfloat16 for weights (default: False).
ff_sparsity_type: string; if ff_sparsity > 0,
use SparseFF if ff_sparsity_type=`'1inN'`,
BlockSparseFF if ff_sparsity_type=`'Block'`,
and SwitchSparseFF if ff_sparsity_type=`'Switch'`.
Returns:
A list of layers which maps vectors to vectors. | def FeedForwardWithOptions(d_model,
d_ff,
dropout,
dropout_shared_axes,
ff_activation,
ff_dropout,
ff_chunk_size,
ff_use_sru,
ff_sparsity,
center_layernorm,
mode,
use_bfloat16=False,
ff_sparsity_type='1inN'):
"""Feed-Forward block with all the options.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, tuple or string; if not 0, use sparse feed-forward block
with this sparsity
center_layernorm: whether to use centering in LayerNorm (default) or to
skip it, which is known as RMS normalization.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
use_bfloat16: whether to use bfloat16 for weights (default: False).
ff_sparsity_type: string; if ff_sparsity > 0,
use SparseFF if ff_sparsity_type=`'1inN'`,
BlockSparseFF if ff_sparsity_type=`'Block'`,
and SwitchSparseFF if ff_sparsity_type=`'Switch'`.
Returns:
A list of layers which maps vectors to vectors.
"""
if ff_sparsity and ff_sparsity_type == '1inN':
temperature, quant_prob = 0.1, 0.3
if isinstance(ff_sparsity, str):
# This is hacky but used to pass ff_sparsity in yaml sweep files.
ff_sparsity = [(float(x) if '.' in x else int(x))
for x in ff_sparsity.split()]
if isinstance(ff_sparsity, (list, tuple)):
if len(ff_sparsity) == 2:
n_elements_in_block, d_lowrank = ff_sparsity
else:
n_elements_in_block, d_lowrank, temperature, quant_prob = ff_sparsity
else:
assert isinstance(ff_sparsity, int)
n_elements_in_block, d_lowrank = ff_sparsity, d_ff // ff_sparsity
ff = tl.SparseFF(
d_ff,
n_elements_in_block=n_elements_in_block,
d_lowrank=d_lowrank,
temperature=temperature,
quant_prob=quant_prob,
use_bfloat16=use_bfloat16,
mode=mode,
dropout_rate=dropout,
dropout_shared_axes=dropout_shared_axes,
ff_chunk_size=ff_chunk_size)
elif ff_sparsity and ff_sparsity_type == 'Block':
ff = tl.BlockSparseFF(d_ff, n_experts=ff_sparsity, mode=mode)
elif ff_sparsity and ff_sparsity_type == 'Switch':
ff = tl.SwitchSparseFF(d_ff, n_experts=ff_sparsity, mode=mode)
else:
ff = _FeedForward(d_model, d_ff, dropout, ff_activation, ff_dropout,
use_bfloat16, mode)
res = [tl.LayerNorm(center=center_layernorm), ff]
if ff_sparsity_type != '1inN' or ff_sparsity == 0:
# SparseFF has Dropout and BatchLeadingAxes built-in.
res.append(tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes,
mode=mode))
if ff_chunk_size > 0:
res = tl.BatchLeadingAxes(tl.Chunk(tl.Serial(res), ff_chunk_size))
if ff_use_sru:
if isinstance(ff_use_sru, (list, tuple)):
sru_n_layers, sru_n_units = ff_use_sru
else:
sru_n_layers, sru_n_units = ff_use_sru, 32
sru = [tl.SRU(sru_n_units, mode=mode) for _ in range(sru_n_layers)]
block = [tl.LayerNorm(center=center_layernorm), tl.Dense(sru_n_units)
] + sru + [tl.Dense(d_model)]
res = tl.Residual(block, shortcut=res)
return [res] |
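# A small standalone sketch of how a string-valued `ff_sparsity` is parsed
# above (hypothetical value): space-separated ints/floats become
# (n_elements_in_block, d_lowrank[, temperature, quant_prob]).
example_raw = '32 64 0.1 0.3'
example_parsed = [(float(x) if '.' in x else int(x))
                  for x in example_raw.split()]
# example_parsed == [32, 64, 0.1, 0.3]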
Runs the supplied attention layer. | def ApplyAttentionLayer(attention_type, d_model, n_heads, d_qk, d_v, causal,
masked, attention_dropout, output_dropout,
attention_chunk_size, mode):
"""Runs the supplied attention layer."""
try:
attention = attention_type(
n_heads=n_heads,
d_qk=d_qk,
d_v=d_v,
causal=causal,
masked=masked,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
mode=mode)
except TypeError: # No d_qk arguments in less advanced layers.
attention = attention_type(
d_model, n_heads=n_heads, dropout=attention_dropout, mode=mode)
return tl.Chunk(attention, attention_chunk_size) |
Returns the positional encoding layer depending on the arguments.
Args:
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout after the embedding block.
max_len: Maximum symbol length for positional encoding.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
use_bfloat16: If `True`, use bfloat16 weights instead of the default
float32; this can save memory but may (rarely) lead to numerical issues.
Returns:
A layer that will do the positional encoding. | def PositionalEncoder(mode,
dropout=None,
max_len=None,
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None,
pos_start_from_zero_prob=1.0,
pos_max_offset_to_add=0,
use_bfloat16=False):
"""Returns the positional encoding layer depending on the arguments.
Args:
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout after the embedding block.
max_len: Maximum symbol length for positional encoding.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
use_bfloat16: If `True`, use bfloat16 weights instead of the default
float32; this can save memory but may (rarely) lead to numerical issues.
Returns:
A layer that will do the positional encoding.
"""
if not pos_type:
positional_encoding = tl.PositionalEncoding(
max_len=max_len, dropout=dropout, use_bfloat16=use_bfloat16,
start_from_zero_prob=pos_start_from_zero_prob,
max_offset_to_add=pos_max_offset_to_add, mode=mode)
elif pos_type == 'sin-cos':
positional_encoding = tl.SinCosPositionalEncoding(mode=mode)
elif pos_type == 'fixed-base':
positional_encoding = tl.FixedBasePositionalEncoding(mode=mode)
elif pos_type == 'infinite':
positional_encoding = tl.InfinitePositionalEncoding(affine=False)
elif pos_type == 'infinite-affine':
positional_encoding = tl.InfinitePositionalEncoding()
elif pos_type == 'time-bin':
positional_encoding = tl.TimeBinPositionalEncoding()
elif pos_type == 'no':
positional_encoding = tl.Serial() # no positional encoding at all
else: # TODO(lukaszkaiser): name this type and check for the correct name
assert pos_d_axial_embs is not None
positional_encoding = tl.AxialPositionalEncoding(
shape=pos_axial_shape, d_embs=pos_d_axial_embs,
dropout_broadcast_dims=tuple(range(1, len(pos_axial_shape) + 1)),
dropout=dropout, mode=mode)
return positional_encoding |
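# A minimal sketch of selecting axial positional encoding (hypothetical
# values): any `pos_type` string not matched above falls through to the
# axial branch; the axial shape typically factors the maximum length
# (32 * 64 = 2048) and the per-axis depths must sum to the model dimension
# (here 64 + 448 = 512).
example_axial_pos = PositionalEncoder(
    mode='train', dropout=0.1, max_len=2048, pos_type='axial',
    pos_axial_shape=(32, 64), pos_d_axial_embs=(64, 448))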
Returns the embedder and positional encoder.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
embedding_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout after the embedding block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
max_len: Maximum symbol length for positional encoding.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if None, then input and target integers (token IDs) are assumed to come
from the same vocabulary.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
use_bfloat16: If `True`, use bfloat16 weights instead of the default
float32; this can save memory but may (rarely) lead to numerical issues.
Returns:
A tuple of (input encoder, output encoder, output vocab size used). | def EmbeddingAndPositionalEncodings(input_vocab_size,
d_model,
mode,
embedding_dropout,
dropout_shared_axes,
max_len,
output_vocab_size=None,
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None,
pos_start_from_zero_prob=1.0,
pos_max_offset_to_add=0,
use_bfloat16=False):
"""Returns the embedder and positional encoder.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
embedding_dropout: Stochastic rate (probability) for dropping an activation
value when applying dropout after the embedding block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
max_len: Maximum symbol length for positional encoding.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if None, then input and target integers (token IDs) are assumed to come
from the same vocabulary.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
use_bfloat16: If `True`, use bfloat16 weights instead of the default
float32; this can save memory but may (rarely) lead to numerical issues.
Returns:
A tuple of (input encoder, output encoder, output vocab size used).
"""
# tokens --> vectors
def Embedder(vocab_size, embedding_mode):
if vocab_size is not None:
embedding = tl.Embedding(vocab_size, d_model, use_bfloat16=use_bfloat16)
else:
embedding = tl.Dense(d_model, use_bfloat16=use_bfloat16)
return [
embedding,
tl.Dropout(rate=embedding_dropout,
shared_axes=dropout_shared_axes,
mode=embedding_mode),
]
# NOTE: Positional encodings are not shared between encoder and decoder.
# Since encoder doesn't run stepwise, we do not use predict mode there.
encoder_mode = 'eval' if mode == 'predict' else mode
in_embedder = Embedder(input_vocab_size, encoder_mode)
in_encoder = in_embedder + [
PositionalEncoder(encoder_mode,
dropout=embedding_dropout,
max_len=max_len,
pos_type=pos_type,
pos_axial_shape=pos_axial_shape,
pos_d_axial_embs=pos_d_axial_embs,
pos_start_from_zero_prob=pos_start_from_zero_prob,
pos_max_offset_to_add=pos_max_offset_to_add,
use_bfloat16=use_bfloat16)
]
# If output_vocab_size is None, we reuse the same embedding matrix, otherwise
# we initialize one.
assert input_vocab_size or output_vocab_size
if output_vocab_size is None:
out_embedder = in_embedder
else:
out_embedder = Embedder(output_vocab_size, mode)
out_encoder = out_embedder + [
PositionalEncoder(mode,
dropout=embedding_dropout,
max_len=max_len,
pos_type=pos_type,
pos_axial_shape=pos_axial_shape,
pos_d_axial_embs=pos_d_axial_embs,
pos_start_from_zero_prob=pos_start_from_zero_prob,
pos_max_offset_to_add=pos_max_offset_to_add,
use_bfloat16=use_bfloat16)
]
# Set this to the value actually used.
if output_vocab_size is None:
output_vocab_size = input_vocab_size
if input_vocab_size is None:
in_encoder = tl.AssertFunction('...a->...b', in_encoder)
else:
in_encoder = tl.AssertFunction('...->...d', in_encoder)
out_encoder = tl.AssertFunction('...->...d', out_encoder)
return in_encoder, out_encoder, output_vocab_size |
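# A minimal usage sketch (hypothetical sizes): with output_vocab_size=None
# the output-side encoder reuses the input embedding and the returned vocab
# size falls back to the input one (here 320).
example_in_enc, example_out_enc, example_vocab = (
    EmbeddingAndPositionalEncodings(
        input_vocab_size=320, d_model=64, mode='train',
        embedding_dropout=0.1, dropout_shared_axes=[-2], max_len=512))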
Returns a Transformer encoder merged with an N-way categorization head.
This model performs text categorization:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 2 tensor representing a batch of log-probability
distributions over N categories; shape is (batch_size, `n_classes`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
n_classes: Final dimension of the output tensors, representing N-way
classification.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, feed-forward (`Dense`), and activation layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each encoder block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder block;
must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use for the encoder part.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
Returns:
A Transformer model that maps strings (conveyed via token IDs) to
probability-like activations over a range of output classes. | def ConfigurableTransformerEncoder(vocab_size,
n_classes=10,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
max_len=2048,
dropout=0.1,
dropout_shared_axes=None,
mode='train',
ff_activation=tl.Relu,
ff_dropout=0.1,
ff_chunk_size=0,
ff_use_sru=0,
ff_sparsity=0,
ff_sparsity_type='1inN',
attention_chunk_size=0,
attention_type=tl.Attention,
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None):
"""Returns a Transformer encoder merged with an N-way categorization head.
This model performs text categorization:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 2 tensor representing a batch of log-probability
distributions over N categories; shape is (batch_size, `n_classes`).
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
n_classes: Final dimension of the output tensors, representing N-way
classification.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, feed-forward (`Dense`), and activation layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each encoder block will include dropout; else, it will
pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder block;
must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use for the encoder part.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
Returns:
A Transformer model that maps strings (conveyed via token IDs) to
probability-like activations over a range of output classes.
"""
positional_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
PositionalEncoder(
mode, dropout, max_len, pos_type, pos_axial_shape, pos_d_axial_embs)
]
positional_encoder = tl.AssertFunction('...->...d', positional_encoder)
# pylint: disable=g-complex-comprehension
encoder_blocks = [
EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes, mode,
ff_activation, ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, ff_sparsity_type,
attention_chunk_size, attention_type)
for i in range(n_layers)
]
# pylint: enable=g-complex-comprehension
# Assemble and return the model.
return tl.Serial( # toks
# Encode.
tl.Branch(
positional_encoder, tl.PaddingMask()), # vecs masks
encoder_blocks, # vecs masks
tl.Select([0], n_in=2), # vecs
tl.LayerNorm(), # vecs
# Map to output categories.
tl.Mean(axis=1), # vecs
tl.Dense(n_classes), # vecs
) |
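# A minimal usage sketch (hypothetical sizes): a small classifier over token
# IDs; after initialization the output activations have shape
# (batch, n_classes). Note that the EncoderBlock used here is this module's
# own block, not the Reformer EncoderBlock above.
from trax import shapes
example_classifier = ConfigurableTransformerEncoder(
    vocab_size=320, n_classes=4, d_model=64, d_ff=128, n_layers=2,
    n_heads=2, max_len=64, mode='eval')
example_classifier.init(shapes.ShapeDtype((1, 64), dtype=jnp.int32))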
Returns a Transformer language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, feed-forward (`Dense`), and activation layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'predict'`, use fast inference. If `'train'`, each encoder block
will include dropout; else, it will pass all values through unaltered.
ff_activation: Type of activation function at the end of each encoder block;
must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
loss_sparsity_type: string, type of sparsity to be used in the loss layer.
See SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use for the decoder part.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def ConfigurableTransformerLM(vocab_size,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
max_len=2048,
dropout=0.1,
dropout_shared_axes=None,
mode='train',
ff_activation=tl.Relu,
ff_dropout=0.1,
ff_chunk_size=0,
ff_use_sru=0,
ff_sparsity=0,
ff_sparsity_type='1inN',
loss_sparsity_type='mult',
loss_sparsity=0,
loss_d_lowrank=0,
loss_sparsity_prob=None,
attention_chunk_size=0,
attention_type=tl.CausalAttention,
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None,
pos_start_from_zero_prob=1.0,
pos_max_offset_to_add=0):
"""Returns a Transformer language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
block.
n_layers: Number of encoder blocks. Each block includes attention, dropout,
residual, feed-forward (`Dense`), and activation layers.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'predict'`, use fast inference. If `'train'`, each decoder block
will include dropout; else, it will pass all values through unaltered.
ff_activation: Type of activation function at the end of each decoder block;
must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
loss_sparsity_type: string, type of sparsity to use in the loss layer. See
SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use for the decoder part.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
pos_start_from_zero_prob: how often to start from 0 during training
(if 1.0, we always start from position 0; if less, we randomize).
pos_max_offset_to_add: maximum offset to add to positions during training
when randomizing; this offset plus input length must still be less than
max_len for all training examples.
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
positional_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
PositionalEncoder(
mode, dropout, max_len, pos_type, pos_axial_shape, pos_d_axial_embs,
pos_start_from_zero_prob, pos_max_offset_to_add)
]
# pylint: disable=g-complex-comprehension
decoder_blocks = [
DecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes, mode,
ff_activation, ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, ff_sparsity_type,
attention_chunk_size, attention_type)
for i in range(n_layers)
]
# pylint: enable=g-complex-comprehension
# Assemble and return the model.
return tl.Serial( # tokens (or chunked tuple of tokens)
tl.ShiftRight(mode=mode), # toks
positional_encoder, # vecs
decoder_blocks, # vecs
tl.LayerNorm(), # vecs
tl.SparseDenseWithOptions( # vecs
vocab_size, d_input=d_model, sparsity_type=loss_sparsity_type,
sparsity=loss_sparsity, d_lowrank=loss_d_lowrank,
prob_sparse=loss_sparsity_prob, mode=mode),
) |
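# Illustrative usage sketch (not part of the original module): build a tiny
# ConfigurableTransformerLM and run it on a dummy batch. Assumes the module's
# usual imports plus numpy; the small hyperparameter values are arbitrary.
import numpy as np
from trax import shapes
example_lm = ConfigurableTransformerLM(vocab_size=256, d_model=64, d_ff=128,
                                       n_layers=2, n_heads=2, max_len=64,
                                       mode='eval')
example_tokens = np.ones((1, 16), dtype=np.int32)   # (batch, sequence_length)
example_lm.init(shapes.signature(example_tokens))   # initialize weights
example_logits = example_lm(example_tokens)         # (1, 16, 256) log-probs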
Returns a full Transformer model.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(output_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if None, then input and target integers (token IDs) are assumed to come
from the same vocabulary.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
and decoder block.
n_encoder_layers: Number of encoder blocks.
n_decoder_layers: Number of decoder blocks.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder/decoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
ff_activation: Type of activation function at the end of each
encoder/decoder block; must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
loss_sparsity_type: str, type of sparsity to use in the loss layer. See
SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
encoder_attention_type: The attention layer to use for the encoder part.
encoder_decoder_attention_type: The attention layer to use for the
encoder-decoder attention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
enc_dec_attention_sparsity: int, if > 0 use this sparsity in attention.
Returns:
A Transformer model as a layer that maps from a source-target tokenized
text pair to activations over a vocab set. | def ConfigurableTransformer(input_vocab_size,
output_vocab_size=None,
d_model=512,
d_ff=2048,
n_encoder_layers=6,
n_decoder_layers=6,
n_heads=8,
max_len=2048,
dropout=0.1,
dropout_shared_axes=None,
mode='train',
ff_activation=tl.Relu,
ff_dropout=0.1,
ff_chunk_size=0,
ff_use_sru=0,
ff_sparsity=0,
ff_sparsity_type='1inN',
loss_sparsity_type='mult',
loss_sparsity=0,
loss_d_lowrank=0,
loss_sparsity_prob=None,
attention_chunk_size=0,
encoder_attention_type=tl.Attention,
encoder_decoder_attention_type=tl.CausalAttention,
pos_type=None,
pos_axial_shape=None,
pos_d_axial_embs=None,
enc_dec_attention_sparsity=0):
"""Returns a full Transformer model.
This model is an encoder-decoder that performs tokenized string-to-string
("source"-to-"target") transduction:
- inputs (2):
- source: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(input_vocab_size)`, and `0`
values mark padding positions.
- target: rank 2 tensor representing a batch of text strings via token
IDs plus padding markers; shape is (batch_size, sequence_length). The
tensor elements are integers in `range(output_vocab_size)`, and `0`
values mark padding positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
An example use would be to translate (tokenized) sentences from English to
German.
Args:
input_vocab_size: Input vocabulary size -- each element of the input tensor
should be an integer in `range(vocab_size)`. These integers typically
represent token IDs from a vocabulary-based tokenizer.
output_vocab_size: If specified, gives the vocabulary size for the targets;
if None, then input and target integers (token IDs) are assumed to come
from the same vocabulary.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each encoder
and decoder block.
n_encoder_layers: Number of encoder blocks.
n_decoder_layers: Number of decoder blocks.
n_heads: Number of attention heads.
max_len: Maximum symbol length for positional encoding.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within an encoder/decoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'predict'`, use fast inference. If `'train'`, each encoder/decoder
block will include dropout; else, it will pass all values through
unaltered.
ff_activation: Type of activation function at the end of each
encoder/decoder block; must be an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
loss_sparsity_type: str, type of sparsity to use in the loss layer. See
SparseDenseWithOptions for options. None if no sparsity should be used.
loss_sparsity: int, the sparsity for loss layer (if used)
loss_d_lowrank: int, the dimensions for intermediate layer (if used)
loss_sparsity_prob: float, the probability for sparse version of loss to be
used. If None, only sparse version is used.
attention_chunk_size: int, if > 0 run attention chunked at this size
encoder_attention_type: The attention layer to use for the encoder part.
encoder_decoder_attention_type: The attention layer to use for the
encoder-decoder attention.
pos_type: string, the type of positional embeddings to use.
pos_axial_shape: tuple of ints: input shape to use for the axial position
encoding. If unset, axial position encoding is disabled.
pos_d_axial_embs: tuple of ints: depth of position embedding for each axis.
Tuple length must match pos_axial_shape, and values must sum to d_model.
enc_dec_attention_sparsity: int, if > 0 use this sparsity in attention.
Returns:
A Transformer model as a layer that maps from a source-target tokenized
text pair to activations over a vocab set.
"""
in_encoder, out_encoder, output_vocab_size = (
EmbeddingAndPositionalEncodings(
input_vocab_size,
d_model,
mode,
dropout,
dropout_shared_axes,
max_len,
output_vocab_size=output_vocab_size,
pos_type=pos_type,
pos_axial_shape=pos_axial_shape,
pos_d_axial_embs=pos_d_axial_embs)
)
# pylint: disable=g-complex-comprehension
encoder_blocks = [
EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes, mode,
ff_activation, ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, ff_sparsity_type,
attention_chunk_size, encoder_attention_type)
for i in range(n_encoder_layers)
]
# pylint: enable=g-complex-comprehension
encoder = tl.Serial(in_encoder, encoder_blocks, tl.LayerNorm())
if mode == 'predict':
encoder = tl.Cache(encoder)
# pylint: disable=g-complex-comprehension
encoder_decoder_blocks = [
EncoderDecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation, ff_dropout, ff_chunk_size,
ff_use_sru, ff_sparsity, ff_sparsity_type,
attention_chunk_size, encoder_decoder_attention_type,
enc_dec_attention_sparsity)
for i in range(n_decoder_layers)
]
# pylint: enable=g-complex-comprehension
# Assemble and return the model.
return tl.Serial(
# Input: encoder_side_tokens, decoder_side_tokens
# Copy decoder tokens for use in loss.
tl.Select([0, 1, 1]), # tok_e tok_d tok_d
# Encode.
tl.Branch([], tl.PaddingMask()), # tok_e masks ..... .....
encoder, # vec_e ..... ..... .....
# Decode.
tl.Select([2, 1, 0]), # tok_d masks vec_e .....
tl.ShiftRight(mode=mode), # tok_d ..... ..... .....
out_encoder, # vec_d ..... ..... .....
tl.Branch(
[], tl.EncoderDecoderMask()), # vec_d masks ..... .....
encoder_decoder_blocks, # vec_d masks ..... .....
tl.LayerNorm(), # vec_d ..... ..... .....
# Map to output vocab.
tl.Select([0], n_in=3), # vec_d tok_d
tl.SparseDenseWithOptions( # vec_d .....
output_vocab_size, d_input=d_model, sparsity_type=loss_sparsity_type,
sparsity=loss_sparsity, d_lowrank=loss_d_lowrank,
prob_sparse=loss_sparsity_prob, mode=mode),
) |
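# Illustrative usage sketch (not part of the original module): the
# encoder-decoder takes a (source, target) token pair; it returns logits over
# the output vocabulary together with a copy of the targets for the loss.
import numpy as np
from trax import shapes
example_mt = ConfigurableTransformer(input_vocab_size=256, d_model=64,
                                     d_ff=128, n_encoder_layers=2,
                                     n_decoder_layers=2, n_heads=2,
                                     max_len=32, mode='eval')
example_src = np.ones((1, 8), dtype=np.int32)       # source token IDs
example_tgt = np.ones((1, 8), dtype=np.int32)       # target token IDs
example_mt.init(shapes.signature((example_src, example_tgt)))
example_logits, _ = example_mt((example_src, example_tgt))  # (1, 8, 256)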
Returns a list of layers that implements a Transformer encoder block.
The input to the block is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
n_attention_layers: how many residual attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
Returns:
A list of layers that maps (activations, mask) to (activations, mask). | def EncoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation,
ff_dropout,
ff_chunk_size,
ff_use_sru,
ff_sparsity,
ff_sparsity_type,
attention_chunk_size,
attention_type,
n_attention_layers=1,
n_feedforward_layers=1):
"""Returns a list of layers that implements a Transformer encoder block.
The input to the block is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
n_attention_layers: how many residual attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
Returns:
A list of layers that maps (activations, mask) to (activations, mask).
"""
# `n_attention_layers` number of residuals of attention layer + dropout.
# pylint: disable=g-complex-comprehension
residual_attentions = [
tl.Residual(tl.LayerNorm(),
ApplyAttentionLayer(attention_type,
d_model,
n_heads,
d_model // n_heads,
d_model // n_heads,
causal=False,
masked=True,
attention_dropout=dropout,
output_dropout=dropout,
attention_chunk_size=attention_chunk_size,
mode=mode),
tl.Dropout(rate=dropout,
shared_axes=dropout_shared_axes,
mode=mode)
)
for _ in range(n_attention_layers)
]
feed_forwards = [
tl.Residual(
FeedForwardWithOptions(d_model, d_ff, dropout,
dropout_shared_axes, ff_activation,
ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, True, mode, False,
ff_sparsity_type)
)
for _ in range(n_feedforward_layers)
]
# pylint: enable=g-complex-comprehension
return residual_attentions + feed_forwards |
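# Illustrative sketch (not part of the original module): a single encoder block
# maps an (activations, mask) pair to an (activations, mask) pair; the mask is
# produced with tl.PaddingMask, as in ConfigurableTransformer above.
import numpy as np
from trax import shapes
example_block = tl.Serial(EncoderBlock(
    d_model=64, d_ff=128, n_heads=2, dropout=0.1, dropout_shared_axes=None,
    mode='eval', ff_activation=tl.Relu, ff_dropout=0.1, ff_chunk_size=0,
    ff_use_sru=0, ff_sparsity=0, ff_sparsity_type='1inN',
    attention_chunk_size=0, attention_type=tl.Attention))
example_vecs = np.ones((1, 8, 64), dtype=np.float32)
example_mask = tl.PaddingMask()(np.ones((1, 8), dtype=np.int32))
example_block.init(shapes.signature((example_vecs, example_mask)))
example_out, _ = example_block((example_vecs, example_mask))  # (1, 8, 64)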
Returns a list of layers that implements a Transformer decoder block.
The input is an activation tensor.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
n_attention_layers: how many residual causal attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
Returns:
A list of layers that maps an activation tensor to an activation tensor. | def DecoderBlock(d_model,
d_ff,
n_heads,
dropout,
dropout_shared_axes,
mode,
ff_activation,
ff_dropout,
ff_chunk_size,
ff_use_sru,
ff_sparsity,
ff_sparsity_type,
attention_chunk_size,
attention_type,
n_attention_layers=1,
n_feedforward_layers=1):
"""Returns a list of layers that implements a Transformer decoder block.
The input is an activation tensor.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
n_attention_layers: how many residual causal attention layers should we
have before the feed-forward block (default: 1, the standard block)
n_feedforward_layers: how many FFNN layers should we have (default 1).
Returns:
A list of layers that maps an activation tensor to an activation tensor.
"""
# pylint: disable=g-complex-comprehension
causal_attentions = [ApplyAttentionLayer(
attention_type,
d_model,
n_heads,
d_model // n_heads,
d_model // n_heads,
causal=True,
masked=False,
attention_dropout=dropout,
output_dropout=dropout,
attention_chunk_size=attention_chunk_size,
mode=mode) for _ in range(n_attention_layers)]
residual_attentions = [
tl.Residual(
tl.LayerNorm(),
causal_attentions[i],
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
) for i in range(n_attention_layers)]
feed_forwards = [
tl.Residual(
FeedForwardWithOptions(d_model, d_ff, dropout,
dropout_shared_axes, ff_activation,
ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, True, mode, False,
ff_sparsity_type)
)
for _ in range(n_feedforward_layers)
]
# pylint: enable=g-complex-comprehension
return residual_attentions + feed_forwards |
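# Illustrative sketch (not part of the original module): a single decoder block
# with causal attention maps an activation tensor to one of the same shape.
import numpy as np
from trax import shapes
example_block = tl.Serial(DecoderBlock(
    d_model=64, d_ff=128, n_heads=2, dropout=0.1, dropout_shared_axes=None,
    mode='eval', ff_activation=tl.Relu, ff_dropout=0.1, ff_chunk_size=0,
    ff_use_sru=0, ff_sparsity=0, ff_sparsity_type='1inN',
    attention_chunk_size=0, attention_type=tl.CausalAttention))
example_x = np.ones((1, 8, 64), dtype=np.float32)
example_block.init(shapes.signature(example_x))
example_y = example_block(example_x)                 # (1, 8, 64)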
Returns a list of layers implementing a Transformer encoder-decoder block.
The input is a triple (decoder_activations, mask, encoder_activations) where
the mask is created from the original input token IDs to prevent attending to
the padding part of the encoder.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
enc_dec_attention_sparsity: Sparsity to use in encoder-decoder attention.
Returns:
A list of layers which maps triples (decoder_activations, mask,
encoder_activations) to triples of the same sort. | def EncoderDecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation, ff_dropout, ff_chunk_size,
ff_use_sru, ff_sparsity, ff_sparsity_type,
attention_chunk_size, attention_type,
enc_dec_attention_sparsity=0):
"""Returns a list of layers implementing a Transformer encoder-decoder block.
The input is a triple (decoder_activations, mask, encoder_activations) where
the mask is created from the original input token IDs to prevent attending to
the padding part of the encoder.
Args:
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
ff_dropout: Stochastic rate (probability) for dropping an activation value
when applying dropout after the FF dense layer.
ff_chunk_size: int; if > 0, chunk feed-forward into this-sized chunks
ff_use_sru: int or pair of ints; if > 0, we use this many SRU layers
in addition to the feed-forward block (second int specifies sru size)
ff_sparsity: int, if > 0 use sparse feed-forward block with this sparsity
ff_sparsity_type: string, if ff_sparsity >0,
use SparseFF if ff_sparsity_type=`'1inN'` and
use BlockSparseFF if ff_sparsity_type=`'Block'`
attention_chunk_size: int, if > 0 run attention chunked at this size
attention_type: The attention layer to use.
enc_dec_attention_sparsity: Sparsity to use in encoder-decoder attention.
Returns:
A list of layers which maps triples (decoder_activations, mask,
encoder_activations) to triples of the same sort.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
# TODO(afrozm): This layer isn't configurable because we currently don't have
# any alternative for it (LSH fundamentally cannot do it, which is why we have
# NoEncDec models, and local attention doesn't make sense in the general
# setting where we don't know what in the input is local to what in the
# output; some variants of FAVOR can do it, so maybe in the future, but we
# don't have them yet).
if isinstance(enc_dec_attention_sparsity, tuple):
q_sparsity, result_sparsity = enc_dec_attention_sparsity
elif enc_dec_attention_sparsity > 0:
q_sparsity = enc_dec_attention_sparsity
result_sparsity = 'noop' # We simply skip Dense layer after attention.
else:
q_sparsity = None
result_sparsity = None
attention_qkv = tl.AttentionQKV(
d_model, n_heads=n_heads, dropout=dropout, mode=mode,
cache_KV_in_predict=True,
q_sparsity=q_sparsity, result_sparsity=result_sparsity)
causal_attention = ApplyAttentionLayer(
attention_type,
d_model,
n_heads,
d_model // n_heads,
d_model // n_heads,
causal=True,
masked=True,
attention_dropout=dropout,
output_dropout=dropout,
attention_chunk_size=attention_chunk_size,
mode=mode)
feed_forward = FeedForwardWithOptions(d_model, d_ff, dropout,
dropout_shared_axes, ff_activation,
ff_dropout, ff_chunk_size, ff_use_sru,
ff_sparsity, True, mode, False,
ff_sparsity_type)
return [ # vec_d masks vec_e
tl.Residual(
tl.LayerNorm(), # vec_d ..... .....
causal_attention, # vec_d ..... .....
_Dropout(), # vec_d ..... .....
),
tl.Residual(
tl.LayerNorm(), # vec_d ..... .....
tl.Select([0, 2, 2, 1, 2]), # vec_d vec_e vec_e masks vec_e
attention_qkv, # vec_d masks vec_e
_Dropout(), # vec_d masks vec_e
),
tl.Residual(
feed_forward # vec_d masks vec_e
),
] |
Returns a list of layers.
The layers implement a Transformer decoder block with relative attention
parametrization.
The input to the block is an activation tensor; causal masking is handled
internally by the relative attention layer, so no separate mask input is
needed.
Args:
attention_type: attention type.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
context_bias_layer: context bias layer.
location_bias_layer: location bias layer.
total_pooling: The combined pool size of previously used funnel blocks.
Returns:
A list of layers that maps an activation tensor to an activation
tensor. | def _RelativeDecoderBlock(attention_type, d_model, d_ff, n_heads, dropout,
dropout_shared_axes, mode, ff_activation,
context_bias_layer, location_bias_layer,
total_pooling):
"""Returns a list of layers.
The layers implement a Transformer decoder block with relative attention
parametrization.
The input to the block is an activation tensor; causal masking is handled
internally by the relative attention layer, so no separate mask input is
needed.
Args:
attention_type: attention type.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each block.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: If `'train'`, each block will include dropout; else, it will pass all
values through unaltered.
ff_activation: Type of activation function at the end of each block; must be
an activation-type subclass of `Layer`.
context_bias_layer: context bias layer.
location_bias_layer: location bias layer.
total_pooling: The combined pool size of previously used funnel blocks.
Returns:
A list of layers that maps an activation tensor to an activation tensor.
"""
if attention_type == RelativeAttentionWrapper:
attention = RelativeAttentionWrapper(
d_model,
n_heads,
dropout,
mode=mode,
context_bias_layer=context_bias_layer,
location_bias_layer=location_bias_layer,
total_pooling=total_pooling)
else:
attention = ApplyAttentionLayer(
attention_type,
d_model,
n_heads,
d_model // n_heads,
d_model // n_heads,
causal=True,
masked=False,
attention_dropout=dropout,
output_dropout=dropout,
attention_chunk_size=0, # Disables tl.Chunk in ApplyAttentionLayer.
mode=mode,
)
feed_forward = FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes,
mode, ff_activation)
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Residual( # vecs
tl.LayerNorm(),
attention,
_Dropout(),
), # vecs
tl.Residual(
tl.LayerNorm(),
feed_forward,
_Dropout(),
), # vecs
] |
Parse hierarchy for Hourglass definition. | def _parse_hierarchy(hierarchy_str): # pylint: disable = invalid-name
"""Parse hierarchy for Hourglass definition."""
levels = hierarchy_str.split(' ')
if levels != levels[::-1]:
raise ValueError('Hierarchy is not a palindrome')
layer_level_pairs = [(x.split('@')) for x in levels[:1 + (len(levels) // 2)]]
hierarchy_n_layers = [int(x[0]) for x in layer_level_pairs]
total_sf_per_level = [int(x[1]) for x in layer_level_pairs]
hierarchy_shorten_factors = []
for current_sf, prev_sf in zip(total_sf_per_level,
[1] + total_sf_per_level[:-1]):
if current_sf % prev_sf != 0:
raise ValueError(
f'Hierarchy not divisible by previous level: {current_sf}, {prev_sf}')
hierarchy_shorten_factors.append(current_sf // prev_sf)
return hierarchy_n_layers, hierarchy_shorten_factors |
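# Worked example (illustrative): only the first half of the palindrome,
# including the middle level, is parsed, and each shortening factor is taken
# relative to the previous level, e.g.
#   _parse_hierarchy('1@2 2@6 1@2') == ([1, 2], [2, 3])
# i.e. 1 layer shortened by 2, then 2 layers shortened by a further 6 // 2 = 3.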
Returns a hierarchical Transformer language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each decoder
block.
vanilla_layers: (pre_layers, post_layers) tuple - number of full token-level
Transformer decoder layers before and after shortening.
hierarchy: string - shortening hierarchy, as described in the paper.
Hierarchy levels must form a palindrome, e.g. '1@2 2@6 1@2'.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a decoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: str: 'train' or 'eval'.
ff_activation: Type of activation function at the end of each decoder block;
must be an activation-type subclass of `Layer`.
vanilla_attn_type: class: attention class such as SelfAttention to use in
the layers before and after shortening (vanilla layers).
middle_attn_type: class: attention class to use in the middle layers (those
operating on the shortened sequence).
downsampling_fn: function that takes full token-level vectors of length `l`
and transforms them into `l` / `k` vectors, where `k` denotes
`shorten_factor` parameter.
upsampling_fn: function that takes shortened representations of a sequence,
consisting of `l` / `k` vectors and transforms them into full token-level
representations of length `l`.
attention_downsampling_fn: Downsampling function that transforms token-level
vectors into query vectors with reduced length. Necessary only when
AttentionResampling is used as `downsampling_fn`.
attention_upsampling_fn: Upsampling function for AttentionResampling. Valid
only when AttentionResampling is used as the `upsampling_fn`.
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def HourglassLM(vocab_size,
d_model=512,
d_ff=2048,
vanilla_layers=(1, 1),
hierarchy='6@3',
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
mode='train',
ff_activation=tl.FastGelu,
vanilla_attn_type=RelativeAttentionWrapper,
middle_attn_type=RelativeAttentionWrapper,
downsampling_fn=AttentionResampling,
upsampling_fn=AttentionResampling,
attention_downsampling_fn=AveragePooling,
attention_upsampling_fn=LinearUpsampling):
"""Returns a hierarchical Transformer language model.
This model performs autoregressive language modeling:
- input: rank 2 tensor representing a batch of text strings via token IDs
plus padding markers; shape is (batch_size, sequence_length). The tensor
elements are integers in `range(vocab_size)`, and `0` values mark padding
positions.
- output: rank 3 tensor representing a batch of log-probability
distributions for each sequence position over possible token IDs;
shape is (batch_size, sequence_length, `vocab_size`).
This model uses only the decoder part of the overall Transformer.
Args:
vocab_size: Input vocabulary size -- each element of the input tensor should
be an integer in `range(vocab_size)`. These integers typically represent
token IDs from a vocabulary-based tokenizer.
d_model: Final dimension of tensors at most points in the model, including
the initial embedding output.
d_ff: Size of special dense layer in the feed-forward part of each decoder
block.
vanilla_layers: (pre_layers, post_layers) tuple - number of full token-level
Transformer decoder layers before and after shortening.
hierarchy: string - shortening hierarchy, as described in the paper.
Hierarchy levels must form a palindrome, e.g. '1@2 2@6 1@2'.
n_heads: Number of attention heads.
dropout: Stochastic rate (probability) for dropping an activation value when
applying dropout within a decoder block.
dropout_shared_axes: Tensor axes on which to share a dropout mask. Sharing
along batch and sequence axes (`dropout_shared_axes=(0,1)`) is a useful
way to save memory and apply consistent masks to activation vectors at
different sequence positions.
mode: str: 'train' or 'eval'.
ff_activation: Type of activation function at the end of each decoder block;
must be an activation-type subclass of `Layer`.
vanilla_attn_type: class: attention class such as SelfAttention to use in
the layers before and after shortening (vanilla layers).
middle_attn_type: class: attention class to use in the middle layers (those
operating on the shortened sequence).
downsampling_fn: function that takes full token-level vectors of length `l`
and transforms them into `l` / `k` vectors, where `k` denotes
`shorten_factor` parameter.
upsampling_fn: function that takes shortened representations of a sequence,
consisting of `l` / `k` vectors and transforms them into full token-level
representations of length `l`.
attention_downsampling_fn: Downsampling function that transforms token-level
vectors into query vectors with reduced length. Necessary only when
AttentionResampling is used as `downsampling_fn`.
attention_upsampling_fn: Upsampling function for AttentionResampling. Valid
only when AttentionResampling is used as the `upsampling_fn`.
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
assert mode != 'predict' # For now, 'predict' mode is unsupported.
hierarchy_n_layers, hierarchy_shorten_factors = _parse_hierarchy(hierarchy)
token_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
]
context_bias_layer, location_bias_layer = get_rel_att_inputs(d_model, n_heads)
n_pre_decoder_blocks, n_post_decoder_blocks = vanilla_layers
def create_decoder_blocks(n_layers, total_pooling, # pylint: disable = invalid-name
attention_type):
decoder_blocks = [
# pylint: disable=g-complex-comprehension
_RelativeDecoderBlock(attention_type, d_model, d_ff, n_heads, dropout,
dropout_shared_axes, mode, ff_activation,
context_bias_layer, location_bias_layer,
total_pooling) for _ in range(n_layers)
]
return decoder_blocks + [tl.LayerNorm()]
def create_hourglass_valley(rest_shorten_factors, rest_n_funnel_blocks, # pylint: disable = invalid-name
current_total_pooling):
assert rest_shorten_factors
assert len(rest_shorten_factors) == len(rest_n_funnel_blocks)
current_sf = rest_shorten_factors[0]
current_n_layers = rest_n_funnel_blocks[0]
shortening_layer = downsampling_fn(
current_sf,
d_model,
is_upsampling=False,
d_ff=d_ff,
n_heads=n_heads,
dropout=dropout,
dropout_shared_axes=dropout_shared_axes,
mode=mode,
ff_activation=ff_activation,
context_bias_layer=context_bias_layer,
location_bias_layer=location_bias_layer,
total_pooling=current_total_pooling,
resampling_fn=attention_downsampling_fn)
upsampling_layer = upsampling_fn(
current_sf,
d_model=d_model,
is_upsampling=True,
d_ff=d_ff,
n_heads=n_heads,
dropout=dropout,
dropout_shared_axes=dropout_shared_axes,
mode=mode,
ff_activation=ff_activation,
context_bias_layer=context_bias_layer,
location_bias_layer=location_bias_layer,
total_pooling=current_total_pooling,
resampling_fn=attention_upsampling_fn)
if len(rest_shorten_factors) > 1: # we need to go deeper again
pre_stage_blocks = create_decoder_blocks(
current_n_layers, current_total_pooling * current_sf,
middle_attn_type)
post_stage_blocks = create_decoder_blocks(
current_n_layers, current_total_pooling * current_sf,
middle_attn_type)
return [
tl.Dup(),
tl.ShiftRight(current_sf - 1, mode=mode), shortening_layer,
pre_stage_blocks, *create_hourglass_valley(
rest_shorten_factors[1:], rest_n_funnel_blocks[1:],
current_total_pooling * current_sf), post_stage_blocks,
upsampling_layer,
tl.LayerNorm(),
tl.Add()
]
else:
blocks = create_decoder_blocks(current_n_layers,
current_total_pooling * current_sf,
middle_attn_type)
return [
tl.Dup(),
tl.ShiftRight(current_sf - 1), shortening_layer, blocks,
upsampling_layer,
tl.LayerNorm(),
tl.Add()
]
pre_decoder_blocks = create_decoder_blocks(n_pre_decoder_blocks, 1,
vanilla_attn_type)
post_decoder_blocks = create_decoder_blocks(n_post_decoder_blocks, 1,
vanilla_attn_type)
valley = create_hourglass_valley(hierarchy_shorten_factors,
hierarchy_n_layers, 1)
# Assemble and return the model.
return tl.Serial( # tokens (or chunked tuple of tokens)
tl.ShiftRight(mode=mode), # toks
token_encoder, # vecs
pre_decoder_blocks, # vecs
valley, # shortened vecs
post_decoder_blocks, # vecs
tl.Dense(vocab_size), # vecs
) |
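# Illustrative usage sketch (not part of the original module): a tiny hourglass
# LM with one vanilla layer before and after the valley, and a single middle
# level of 2 layers shortened by 3. The sequence length is chosen to be
# divisible by the shortening factor.
import numpy as np
from trax import shapes
example_hourglass = HourglassLM(vocab_size=256, d_model=64, d_ff=128,
                                vanilla_layers=(1, 1), hierarchy='2@3',
                                n_heads=2, mode='eval')
example_tokens = np.ones((1, 12), dtype=np.int32)    # 12 is divisible by 3
example_hourglass.init(shapes.signature(example_tokens))
example_logits = example_hourglass(example_tokens)   # (1, 12, 256)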
Checks if the input is larger than a certain value. | def LargerThan(val):
"""Checks if the input is larger than a certain value."""
return tl.Fn('LargerThan', lambda x: x > val) |
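# Example (illustrative): the returned layer is a pure function of its input,
# e.g. LargerThan(2.0)(np.array([1.0, 3.0])) evaluates to array([False, True]).
# Below it is used as the predicate passed to tl.Cond.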
Returns a Skipping Transformer language model.
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_fraction: fraction of times to skip some layers
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def SkippingTransformerLM(vocab_size,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
mode='train',
ff_activation=tl.Relu,
skip_fraction=0.4):
"""Returns a Skipping Transformer language model.
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_fraction: fraction of times to skip some layers
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
embedder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.PositionalEncoding(max_len=max_len, mode=mode),
]
@assert_shape('...sd,->...sd,')
def ConditionedBlock(current_layer_num):
return tl.Serial(
# stack: embedding, n_layers_to_keep
tl.Select([1, 0, 1]), # n_layers_to_keep, embedding, n_layers_to_keep
tl.Cond(
# if n_layers_to_keep > current_layer_num
LargerThan(float(current_layer_num)),
# then: run block
tl.Serial(transformer._DecoderBlock( # pylint: disable=g-complex-comprehension,protected-access
d_model, d_ff, n_heads, dropout, [], mode, ff_activation)),
# else: run noop
tl.Serial()
)
# stack: embedding, n_layers_to_keep
)
if mode == 'train':
if skip_fraction == 0.0:
minimum_layers = float(n_layers)
maximum_layers = float(n_layers)
else:
minimum_layers = 0.0
maximum_layers = float(n_layers) / skip_fraction
else:
minimum_layers = maximum_layers = float(n_layers)
return tl.Serial(
tl.ShiftRight(mode=mode),
embedder,
# stack: embedding
tl.RandomUniform(minimum_layers, maximum_layers, sync=True),
# stack: n_layers_to_keep, embedding
tl.Swap(),
# stack: embedding, n_layers_to_keep
[ConditionedBlock(i) for i in range(n_layers)],
# stack: embedding, n_layers_to_keep
tl.AssertShape('...sd,'),
tl.Select([0], n_in=2), # stack: embedding
tl.AssertShape('...sd'),
tl.LayerNorm(),
tl.Dense(vocab_size),
) |
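# Illustrative usage sketch (not part of the original module). In 'train' mode
# the number of executed layers is sampled once per step (driven by
# skip_fraction); in 'eval' mode all n_layers blocks always run.
import numpy as np
from trax import shapes
example_skipping_lm = SkippingTransformerLM(vocab_size=256, d_model=64,
                                            d_ff=128, n_layers=2, n_heads=2,
                                            max_len=32, mode='eval')
example_tokens = np.ones((1, 16), dtype=np.int32)
example_skipping_lm.init(shapes.signature(example_tokens))
example_logits = example_skipping_lm(example_tokens)  # (1, 16, 256)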
Returns an "EveryOther" LayerDrop Transformer language model.
During each training step it either runs all layers, or skips a subset of
layers. This subset is the same every time, and it is specified by
"skip_mode".
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_mode: which layers to skip when skipping: even/odd/1half/2half.
skip_fraction: fraction of times to skip layers
eval_skip_fraction: fraction of times to skip layers during eval
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def EveryOtherLayerDropTransformerLM(vocab_size,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
mode='train',
ff_activation=tl.Relu,
skip_mode='even',
skip_fraction=0.5,
eval_skip_fraction=0.0):
"""Returns an "EveryOther" LayerDrop Transformer language model.
During each training step it either runs all layers, or skips a subset of
layers. This subset is the same every time, and it is specified by
"skip_mode".
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_mode: which layers to skip when skipping: even/odd/1half/2half.
skip_fraction: fraction of times to skip layers
eval_skip_fraction: fraction of times to skip layers during eval
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
embedder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.PositionalEncoding(max_len=max_len, mode=mode),
]
if mode == 'train':
pass
else:
skip_fraction = eval_skip_fraction
skip_mode_funs = { # which layers should be skipped?
'even': (lambda num: num%2 == 0), # 0th layer is even
'odd': (lambda num: num%2 == 1),
'1half': (lambda num: num < (n_layers/2)),
'2half': (lambda num: num >= (n_layers/2)),
}
skip_mode_fun = skip_mode_funs[skip_mode]
@assert_shape('...sd,->...sd,')
def ConditionedBlock(current_layer_num):
return tl.Serial(
# stack: embedding, n_layers_to_keep
tl.Select([1, 0, 1]), # n_layers_to_keep, embedding, n_layers_to_keep
tl.Cond(
# if random() > skip_fraction OR layer not in skip_mode ...
LargerThan(skip_fraction if skip_mode_fun(current_layer_num)
else 0.0),
# then: run block
tl.Serial(transformer._DecoderBlock( # pylint: disable=g-complex-comprehension,protected-access
d_model, d_ff, n_heads, dropout, [], mode, ff_activation))
# else: noop (implicit)
)
# stack: embedding, n_layers_to_keep
)
return tl.Serial(
tl.ShiftRight(mode=mode),
embedder,
# stack: embedding
tl.RandomUniform(0., 1., sync=True),
# stack: n_layers_to_keep, embedding
tl.Swap(),
# stack: embedding, n_layers_to_keep
[ConditionedBlock(i) for i in range(n_layers)],
# stack: embedding, n_layers_to_keep
tl.Select([0], n_in=2), # stack: embedding
tl.LayerNorm(),
tl.Dense(vocab_size),
) |
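# Illustrative sketch (not part of the original module): with skip_mode='even'
# and skip_fraction=0.5, roughly half of the training steps skip all the
# even-numbered layers together, since a single random draw (sync=True) is
# shared by every ConditionedBlock.
example_model = EveryOtherLayerDropTransformerLM(
    vocab_size=256, d_model=64, d_ff=128, n_layers=4, n_heads=2,
    mode='train', skip_mode='even', skip_fraction=0.5)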
Returns a LayerDrop Transformer language model.
Based on Fan, Grave, Joulin 2019, https://arxiv.org/abs/1909.11556 .
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_fraction: probability of skipping a layer; it can be a single
probability or a list of probabilities different for each layer
eval_skip_fraction: probability of skipping a layer during eval; it can be a
single probability, or a list of probabilities different for each layer,
or the string "every_other", implementing the strategy from the original paper
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set. | def LayerDropTransformerLM(vocab_size,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
max_len=2048,
mode='train',
ff_activation=tl.Relu,
skip_fraction=0.4,
eval_skip_fraction='every_other'):
"""Returns a LayerDrop Transformer language model.
Based on Fan, Grave, Joulin 2019, https://arxiv.org/abs/1909.11556 .
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall Transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
skip_fraction: probability of skipping a layer; it can be a single
probability or a list of probabilities different for each layer
eval_skip_fraction: probability of skipping a layer during eval; it can be a
single probability, or a list of probabilities different for each layer,
or the string "every_other", implementing the strategy from the original paper
Returns:
A Transformer language model as a layer that maps from a tensor of tokens
to activations over a vocab set.
"""
embedder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, mode=mode),
tl.PositionalEncoding(max_len=max_len, mode=mode),
]
if not isinstance(skip_fraction, (list, tuple)):
# If we don't get a list of skip_fractions we use the same skip_fraction
# for each layer.
skip_fraction = [skip_fraction for i in range(n_layers)]
if len(skip_fraction) != n_layers:
raise ValueError('n_layers ({}) must be equal to len(skip_fraction) ({})'
.format(n_layers, len(skip_fraction)))
if eval_skip_fraction == 'every_other':
# Skip layer i with probability 1.0 when i is a multiple of
# int(1 / skip_fraction[i]), and 0.0 otherwise; with skip_fraction 0.5 this
# skips every other (even-numbered) layer.
eval_skip_fraction = [(1.0 if i % int(1./skip_fraction[i]) == 0 else 0.0)
if skip_fraction[i] != 0 else 0.0
for i in range(n_layers)]
if eval_skip_fraction == 'same':
# Same skip_fraction as in training.
eval_skip_fraction = skip_fraction
if not isinstance(eval_skip_fraction, (list, tuple)):
# If we don't get a list of eval_skip_fractions we use the same
# eval_skip_fraction for each layer.
eval_skip_fraction = [eval_skip_fraction for i in range(n_layers)]
if len(eval_skip_fraction) != n_layers:
raise ValueError(
'n_layers ({}) must be equal to len(eval_skip_fraction) ({})'
.format(n_layers, len(eval_skip_fraction)))
@assert_shape('...sd->...sd')
def ConditionedBlock(current_layer_num):
return tl.Serial(
# stack: embedding
tl.RandomUniform(0., 1, sync=True),
# stack: random_uniform, embedding
tl.Cond(
# if random_uniform > skip_fraction
LargerThan(skip_fraction[current_layer_num] if mode == 'train'
else eval_skip_fraction[current_layer_num]),
# then: run block
tl.Serial(transformer._DecoderBlock( # pylint: disable=g-complex-comprehension,protected-access
d_model, d_ff, n_heads, dropout, [], mode, ff_activation)),
# else: run noop
tl.Serial()
)
# stack: embedding
)
return tl.Serial(
tl.ShiftRight(mode=mode),
embedder,
[ConditionedBlock(i) for i in range(n_layers)],
tl.LayerNorm(),
tl.Dense(vocab_size),
) |
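# Illustrative sketch (not part of the original module): skip_fraction can be
# one probability per layer; by default eval skipping uses the 'every_other'
# strategy derived from these per-layer training probabilities.
example_model = LayerDropTransformerLM(vocab_size=256, d_model=64, d_ff=128,
                                       n_layers=4, n_heads=2, mode='train',
                                       skip_fraction=[0.1, 0.2, 0.3, 0.4])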
Wraps a series of layers with a ReZero-style residual connection.
Instead of computing `(shortcut) + (output of layers)`, like in classical
Residual connection, ResidualZero computes
`(shortcut) + alpha * (output of layers)`, where `alpha` is a learnable scalar
initialized with zero.
Args:
*layers: One or more layers, to be applied in series.
shortcut: If None (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If specified, the `shortcut` layer applies to a copy of the
inputs and (elementwise) adds its output to the output from the main
layer series.
Returns:
A layer representing a residual connection paired with a layer series. | def ResidualZero(*layers, shortcut=None):
"""Wraps a series of layers with a ReZero-style residual connection.
Instead of computing `(shortcut) + (output of layers)`, like in classical
Residual connection, ResidualZero computes
`(shortcut) + alpha * (output of layers)`, where `alpha` is a learnable scalar
initialized with zero.
Args:
*layers: One or more layers, to be applied in series.
shortcut: If None (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If specified, the `shortcut` layer applies to a copy of the
inputs and (elementwise) adds its output to the output from the main
layer series.
Returns:
A layer representing a residual connection paired with a layer series.
"""
layers = _ensure_flat(layers)
layer = layers[0] if len(layers) == 1 else tl.Serial(layers)
# TODO(jaszczur): perhaps change inner Serial to Branch?
return tl.Serial(
tl.Branch(shortcut, tl.Serial(
layer,
tl.Weights(lambda shape, rng: jnp.zeros(shape, dtype=jnp.float32)),
tl.Multiply()
)),
tl.Add(), # pylint: disable=no-value-for-parameter
) |
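# Illustrative sketch (not part of the original module): wrapping a small
# feed-forward stack with ResidualZero. Because the learned scalar starts at
# zero, the wrapped block acts as the identity right after initialization.
import numpy as np
from trax import shapes
example_block = ResidualZero(tl.LayerNorm(), tl.Dense(64), tl.Relu(),
                             tl.Dense(64))
example_x = np.ones((2, 8, 64), dtype=np.float32)
example_block.init(shapes.signature(example_x))
example_y = example_block(example_x)   # equal to example_x at initialization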
Returns a ReZero transformer encoder model.
The input to the model is a tensor of tokens.
Args:
vocab_size: int: vocab size
n_classes: how many classes on output
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer model as a layer that maps from a tensor of tokens to
activations over a set of output classes. | def ReZeroTransformerEncoder(vocab_size,
n_classes=10,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
max_len=2048,
mode='train',
ff_activation=tl.Relu):
"""Returns a ReZero transformer encoder model.
The input to the model is a tensor of tokens.
Args:
vocab_size: int: vocab size
n_classes: how many classes on output
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer model as a layer that maps from a tensor of tokens to
activations over a set of output classes.
"""
positional_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
tl.PositionalEncoding(max_len=max_len)]
encoder_blocks = [
_EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
for i in range(n_layers)]
# Assemble and return the model.
return tl.Serial( # toks
# Encode.
tl.Branch(
positional_encoder, tl.PaddingMask()), # vecs masks
encoder_blocks, # vecs masks
tl.Select([0], n_in=2), # vecs
tl.LayerNorm(), # vecs
# Map to output categories.
tl.Mean(axis=1), # vecs
tl.Dense(n_classes), # vecs
) |
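# A usage sketch, assuming the standard Trax init/call pattern; the
# hyperparameters below are arbitrary small values chosen for illustration.
import numpy as np
from trax import shapes

enc = ReZeroTransformerEncoder(vocab_size=128, n_classes=10, d_model=32,
                               d_ff=64, n_layers=2, n_heads=2, max_len=64,
                               mode='eval')
tokens = np.ones((1, 16), dtype=np.int32)   # token id 0 is treated as padding
enc.init(shapes.signature(tokens))
print(enc(tokens).shape)                    # (1, 10): one score per class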
Returns a ReZero transformer decoder model.
The input to the model is either continuous or discrete - controlled by
vocab_size. Does not shift the input to the right, i.e. the output for
timestep t is based on inputs up to timestep t inclusively.
Args:
vocab_size: int or None: vocab size if running on discrete input, None
otherwise.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer decoder as a layer that maps from a continuous or
discrete tensor to a continuous tensor. | def ReZeroTransformerDecoder(vocab_size=None,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
max_len=2048,
mode='train',
ff_activation=tl.Relu):
"""Returns a ReZero transformer decoder model.
The input to the model is either continuous or discrete - controlled by
vocab_size. Does not shift the input to the right, i.e. the output for
timestep t is based on inputs up to timestep t inclusively.
Args:
vocab_size: int or None: vocab size if running on discrete input, None
otherwise.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer decoder as a layer that maps from a continuous or
discrete tensor to a continuous tensor.
"""
positional_encoder = [
(tl.Embedding(vocab_size, d_model) if vocab_size is not None
else tl.Dense(d_model)),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
tl.PositionalEncoding(max_len=max_len)]
decoder_blocks = [
# pylint: disable=g-complex-comprehension
_DecoderBlock(d_model, d_ff, n_heads,
dropout, dropout_shared_axes, mode, ff_activation)
for i in range(n_layers)]
# Assemble and return the model.
return tl.Serial( # toks
positional_encoder, # vecs
decoder_blocks, # vecs
tl.LayerNorm(), # vecs
) |
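# When vocab_size is None the decoder consumes continuous vectors directly
# (a Dense projection replaces the embedding). A shape sketch, assuming the
# usual Trax init/call pattern:
import numpy as np
from trax import shapes

dec = ReZeroTransformerDecoder(vocab_size=None, d_model=32, d_ff=64,
                               n_layers=1, n_heads=2, max_len=32, mode='eval')
features = np.zeros((1, 10, 16), dtype=np.float32)
dec.init(shapes.signature(features))
print(dec(features).shape)   # (1, 10, 32): activations, not logits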
Returns a ReZero transformer language model.
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall ReZero transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer language model as a layer that maps from a tensor of
tokens to activations over a vocab set. | def ReZeroTransformerLM(vocab_size,
d_model=512,
d_ff=2048,
n_layers=6,
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
max_len=2048,
mode='train',
ff_activation=tl.Relu):
"""Returns a ReZero transformer language model.
The input to the model is a tensor of tokens. (This model uses only the
decoder part of the overall ReZero transformer.)
Args:
vocab_size: int: vocab size
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_layers: int: number of encoder/decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train', 'eval' or 'predict', predict mode is for fast inference
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer language model as a layer that maps from a tensor of
tokens to activations over a vocab set.
"""
positional_encoder = [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
tl.PositionalEncoding(max_len=max_len, mode=mode)]
decoder_blocks = [
# pylint: disable=g-complex-comprehension
_DecoderBlock(d_model, d_ff, n_heads,
dropout, dropout_shared_axes, mode, ff_activation)
for i in range(n_layers)]
# Assemble and return the model.
return tl.Serial( # tokens (or chunked tuple of tokens)
tl.ShiftRight(mode=mode), # toks
positional_encoder, # vecs
decoder_blocks, # vecs
tl.LayerNorm(), # vecs
tl.Dense(vocab_size), # vecs
) |
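# Because of ShiftRight plus causal attention, logits at position t depend
# only on tokens at positions < t. A quick sanity-check sketch ('eval' mode,
# arbitrary small hyperparameters):
import numpy as np
from trax import shapes

lm = ReZeroTransformerLM(vocab_size=64, d_model=32, d_ff=64, n_layers=1,
                         n_heads=2, max_len=32, mode='eval')
a = np.ones((1, 8), dtype=np.int32)
lm.init(shapes.signature(a))
b = a.copy()
b[0, 4] = 5   # change only the token at position 4
# Positions 0..4 see the same prefix, so their logits are unchanged.
np.testing.assert_allclose(lm(a)[0, :5], lm(b)[0, :5], atol=1e-5)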
Returns a ReZero transformer model.
This model expects an input pair: source, target.
Args:
input_vocab_size: int: vocab size of the source.
output_vocab_size: int (optional): vocab size of the target. If None, the
source and target are assumed to have the same vocab.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_encoder_layers: int: number of encoder layers
n_decoder_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer model as a layer that maps from a source, target pair
to activations over a vocab set. | def ReZeroTransformer(input_vocab_size,
output_vocab_size=None,
d_model=512,
d_ff=2048,
n_encoder_layers=6,
n_decoder_layers=6,
n_heads=8,
dropout=0.1,
dropout_shared_axes=None,
max_len=2048,
mode='train',
ff_activation=tl.Relu):
"""Returns a ReZero transformer model.
This model expects an input pair: source, target.
Args:
input_vocab_size: int: vocab size of the source.
output_vocab_size: int (optional): vocab size of the target. If None, the
source and target are assumed to have the same vocab.
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_encoder_layers: int: number of encoder layers
n_decoder_layers: int: number of decoder layers
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A ReZero transformer model as a layer that maps from a source, target pair
to activations over a vocab set.
"""
def Embedder(vocab_size): # tokens --> vectors
return [
tl.Embedding(vocab_size, d_model),
tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode),
]
in_embedder = Embedder(input_vocab_size)
out_embedder = (in_embedder if output_vocab_size is None
else Embedder(output_vocab_size))
# Positional encodings are not shared between the encoder and the decoder.
# Since the encoder doesn't run stepwise, we do not use predict mode there.
encoder_mode = 'eval' if mode == 'predict' else mode
in_encoder = in_embedder + [
tl.PositionalEncoding(max_len=max_len, mode=encoder_mode)
]
out_encoder = out_embedder + [
tl.PositionalEncoding(max_len=max_len, mode=mode)
]
if output_vocab_size is None:
output_vocab_size = input_vocab_size
encoder_blocks = [
_EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
for i in range(n_encoder_layers)]
encoder = tl.Serial(
in_encoder,
encoder_blocks,
tl.LayerNorm()
)
if mode == 'predict':
encoder = tl.Cache(encoder)
encoder_decoder_blocks = [
_EncoderDecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation)
for i in range(n_decoder_layers)]
# Assemble and return the model.
return tl.Serial(
# Input: encoder_side_tokens, decoder_side_tokens
# Copy decoder tokens for use in loss.
tl.Select([0, 1, 1]), # tok_e tok_d tok_d
# Encode.
tl.Branch([], tl.PaddingMask()), # tok_e masks ..... .....
encoder, # vec_e ..... ..... .....
# Decode.
tl.Select([2, 1, 0]), # tok_d masks vec_e .....
tl.ShiftRight(mode=mode), # tok_d ..... ..... .....
out_encoder, # vec_d ..... ..... .....
tl.Branch(
[], tl.EncoderDecoderMask()), # vec_d masks ..... .....
encoder_decoder_blocks, # vec_d masks ..... .....
tl.LayerNorm(), # vec_d ..... ..... .....
# Map to output vocab.
tl.Select([0], n_in=3), # vec_d tok_d
tl.Dense(output_vocab_size), # vec_d .....
) |
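# The model takes a (source_tokens, target_tokens) pair and returns the
# target logits along with a copy of the target tokens (kept for the loss).
# A usage sketch, assuming the usual Trax init/call pattern:
import numpy as np
from trax import shapes

model = ReZeroTransformer(input_vocab_size=64, d_model=32, d_ff=64,
                          n_encoder_layers=1, n_decoder_layers=1, n_heads=2,
                          max_len=32, mode='eval')
src = np.ones((1, 10), dtype=np.int32)
tgt = np.ones((1, 7), dtype=np.int32)
model.init(shapes.signature((src, tgt)))
logits, tgt_copy = model((src, tgt))
print(logits.shape)   # (1, 7, 64)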
Returns a list of layers that implements a Transformer encoder block.
The input to the layer is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers that maps (activations, mask) to (activations, mask). | def _EncoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation):
"""Returns a list of layers that implements a Transformer encoder block.
The input to the layer is a pair, (activations, mask), where the mask was
created from the original source tokens to prevent attending to the padding
part of the input.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers that maps (activations, mask) to (activations, mask).
"""
attention = tl.Attention(
d_model, n_heads=n_heads, dropout=dropout, mode=mode)
feed_forward = _FeedForwardBlock(
d_model, d_ff, dropout, dropout_shared_axes, mode, ff_activation)
dropout_ = tl.Dropout(
rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
ResidualZero(
tl.LayerNorm(),
attention,
dropout_,
),
ResidualZero(
tl.LayerNorm(),
feed_forward,
dropout_,
),
] |
Returns a list of layers that implements a Transformer decoder block.
The input is an activation tensor.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers that maps an activation tensor to an activation tensor. | def _DecoderBlock(d_model, d_ff, n_heads,
dropout, dropout_shared_axes, mode, ff_activation):
"""Returns a list of layers that implements a Transformer decoder block.
The input is an activation tensor.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers that maps an activation tensor to an activation tensor.
"""
causal_attention = tl.CausalAttention(
    d_model, n_heads=n_heads, dropout=dropout, mode=mode)
feed_forward = _FeedForwardBlock(
d_model, d_ff, dropout, dropout_shared_axes, mode, ff_activation)
dropout_ = tl.Dropout(
rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
ResidualZero(
tl.LayerNorm(),
causal_attention,
dropout_,
),
ResidualZero(
tl.LayerNorm(),
feed_forward,
dropout_,
),
] |
Returns a list of layers implementing a Transformer encoder-decoder block.
The input is a triple (decoder_input, mask, encoder) where the mask is
created from the original source to prevent attending to the padding part
of the encoder.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers which maps triples (decoder_activations, mask,
encoder_activations) to triples of the same sort. | def _EncoderDecoderBlock(d_model, d_ff, n_heads, dropout, dropout_shared_axes,
mode, ff_activation):
"""Returns a list of layers implementing a Transformer encoder-decoder block.
The input is a triple (decoder_input, mask, encoder) where the mask is
created from the original source to prevent attending to the padding part
of the encoder.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
n_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: axes on which to share dropout mask
mode: str: 'train' or 'eval'
ff_activation: the non-linearity in feed-forward layer
Returns:
A list of layers which maps triples (decoder_activations, mask,
encoder_activations) to triples of the same sort.
"""
def _Dropout():
return tl.Dropout(rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
attention_qkv = tl.AttentionQKV(
d_model, n_heads=n_heads, dropout=dropout, mode=mode,
cache_KV_in_predict=True)
causal_attention = tl.CausalAttention(
d_model, n_heads=n_heads, mode=mode)
feed_forward = _FeedForwardBlock(
d_model, d_ff, dropout, dropout_shared_axes, mode, ff_activation)
return [ # vec_d masks vec_e
ResidualZero(
tl.LayerNorm(), # vec_d ..... .....
causal_attention, # vec_d ..... .....
_Dropout(), # vec_d ..... .....
),
ResidualZero(
tl.LayerNorm(), # vec_d ..... .....
tl.Select([0, 2, 2, 1, 2]), # vec_d vec_e vec_e masks vec_e
attention_qkv, # vec_d masks vec_e
_Dropout(), # vec_d masks vec_e
),
ResidualZero(
tl.LayerNorm(),
feed_forward, # vec_d masks vec_e
_Dropout(),
),
] |
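# The Select([0, 2, 2, 1, 2]) above routes decoder activations as queries and
# encoder activations as keys/values for AttentionQKV, keeping one extra copy
# of the encoder activations on the stack for the next block. The index
# bookkeeping, spelled out on plain lists:
stack = ['vec_d', 'masks', 'vec_e']                 # top of stack first
selected = [stack[i] for i in [0, 2, 2, 1, 2]]
assert selected == ['vec_d', 'vec_e', 'vec_e', 'masks', 'vec_e']
# AttentionQKV then consumes (q=vec_d, k=vec_e, v=vec_e, masks) and returns
# (vec_d, masks), leaving the trailing vec_e in place.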
Returns a list of layers implementing a feed-forward block.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: list of integers, axes to share dropout mask
mode: str: 'train' or 'eval'
activation: the non-linearity in feed-forward layer
Returns:
A list of layers which maps vectors to vectors. | def _FeedForwardBlock(d_model, d_ff, dropout, dropout_shared_axes,
mode, activation):
"""Returns a list of layers implementing a feed-forward block.
Args:
d_model: int: depth of embedding
d_ff: int: depth of feed-forward layer
dropout: float: dropout rate (how much to drop out)
dropout_shared_axes: list of integers, axes to share dropout mask
mode: str: 'train' or 'eval'
activation: the non-linearity in feed-forward layer
Returns:
A list of layers which maps vectors to vectors.
"""
dropout = tl.Dropout(
rate=dropout, shared_axes=dropout_shared_axes, mode=mode)
return [
tl.Dense(d_ff),
activation(),
dropout,
tl.Dense(d_model),
] |
Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects. | def _deep_flatten(items):
"""Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects.
"""
def _flat_gen(xs):
for x in xs:
if isinstance(x, (list, tuple)):
for y in _flat_gen(x):
yield y
else:
yield x
return list(_flat_gen(items)) |
Ensures that layers is a single flat list of Layer instances. | def _ensure_flat(layers):
"""Ensures that layers is a single flat list of Layer instances."""
if len(layers) == 1 and layers[0] is None:
layers = ()
else:
layers = _deep_flatten(layers)
for obj in layers:
if not isinstance(obj, tl.Layer):
raise ValueError(
f'Found nonlayer object ({obj}) in layers: {layers}')
return layers |
RSU (Residual Switch Unit) layer as in https://arxiv.org/pdf/2004.04662.pdf.
As defined in the paper:
.. math::
i &= [i_1, i_2] \\
g &= GELU(LayerNorm(Z i)) \\
c &= W g + B \\
[o_1, o_2] &= \sigma(S) \bigodot i + h \bigodot c
where Z, W, B, S are learnable parameters with sizes 2m × 4m, 4m × 2m, 2m, 2m.
We assume that both i_1 and i_2 have size m. h is a scalar value.
We assume the input is of shape [batch, length, depth].
Args:
d_model: output depth of the SRU layer
dropout: dropout rate used in 'train' mode
mode: mode for dropout layer
residual_weight: value used in initializing vector S and constant h
Returns:
The RSU layer. | def ResidualSwitchUnit(
d_model, dropout=0.1, mode='train', residual_weight=0.9):
r"""RSU (Residual Switch Unit) layer as in https://arxiv.org/pdf/2004.04662.pdf.
As defined in the paper:
.. math::
i &= [i_1, i_2] \\
g &= GELU(LayerNorm(Z i)) \\
c &= W g + B \\
[o_1, o_2] &= \sigma(S) \bigodot i + h \bigodot c
where Z, W, B, S are learnable parameters with sizes 2m × 4m, 4m × 2m, 2m, 2m.
We assume that both i_1 and i_2 have size m. h is a scalar value.
We assume the input is of shape [batch, length, depth].
Args:
d_model: output depth of the SRU layer
dropout: dropout rate used in 'train' mode
mode: mode for dropout layer
residual_weight: value used in initializing vector S and constant h
Returns:
The RSU layer.
"""
return tl.Serial(
tl.Fn(
'Reshape2Pairs',
lambda x: jnp.reshape(x, (x.shape[0], x.shape[1] // 2, -1)),
n_out=1),
tl.Residual(
tl.Dense(4 * d_model, use_bias=False),
tl.LayerNorm(),
tl.Gelu(),
tl.Dense(2 * d_model),
tl.Fn('Scaling',
lambda x: x * np.sqrt(1 - residual_weight**2) * 0.25,
n_out=1),
shortcut=_ClippedScaling(residual_weight)),
tl.Fn(
'UnPair',
lambda x: jnp.reshape(x, (x.shape[0], x.shape[1] * 2, -1)),
n_out=1),
tl.Dropout(rate=dropout, mode=mode)
) |
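# Shape sketch for the RSU: neighbouring positions are paired, so an input of
# shape [batch, length, d_model] becomes [batch, length // 2, 2 * d_model]
# inside the unit and is unpaired again on the way out (length must be even).
# Assumes the usual Trax init/call pattern:
import numpy as np
from trax import shapes

rsu = ResidualSwitchUnit(d_model=8, dropout=0.0, mode='eval')
x = np.zeros((2, 16, 8), dtype=np.float32)
rsu.init(shapes.signature(x))
print(rsu(x).shape)   # (2, 16, 8)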
Bitwise right rotation.
Args:
x: np.array
n: Bit count to represent each value of x
p: Bit positions to shift
Returns:
np.array: x with all values rotated right by p positions in n-bit representation
"""Bitwise right rotation.
Args:
x: np.array
n: Bit count to represent each value of x
p: Bit positions to shift
Returns:
np.array: x with all values rotated right by p positions in n-bit representation
"""
a = np.right_shift(x, p)
b = np.left_shift(1, p) - 1
c = np.bitwise_and(x, b)
d = np.left_shift(c, n - p)
return a + d |
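# Worked example: in a 3-bit register, rotating right by one position maps
# 0b001 -> 0b100, 0b010 -> 0b001 and 0b011 -> 0b101.
import numpy as np
print(_ror(np.array([1, 2, 3]), n=3))   # [4 1 5]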
Bitwise left rotation.
Args:
x: np.array
n: Bit count to represent each value of x
p: Bit positions to shift
Returns:
np.array: x with all values rotated left by p positions in n-bit representation
"""Bitwise left rotation.
Args:
x: np.array
n: Bit count to represent each value of x
p: Bit positions to shift
Returns:
np.array: x with all values rotated left by p positions in n-bit representation
"""
a = np.left_shift(x, p)
b = np.left_shift(1, n) - 1
c = np.bitwise_and(a, b)
d = np.right_shift(x, n - p)
return np.bitwise_or(c, d) |
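# _rol is the inverse rotation of _ror: rotating right and then left by the
# same single position in the same bit width recovers the original values.
import numpy as np
x = np.arange(8)
assert np.array_equal(_rol(_ror(x, n=3), n=3), x)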
Shuffles the elements according to bitwise left or right rotation.
Args:
inputs: Tensor input from previous layer
shuffle_fn: Rotation function (_rol or _ror) applied to the position indices
Returns:
jnp.ndarray: Inputs shuffled along the length axis according to shuffle_fn
"""Shuffles the elements according to bitwise left or right rotation.
Args:
inputs: Tensor input from previous layer
shuffle_fn: Shift function rol or ror
Returns:
tf.Tensor: Inputs shifted according to shuffle_fn
"""
seq_length = inputs.shape[1]
n_bits = np.int32(np.log(seq_length - 1) / np.log(2.0)) + 1
indices = np.arange(0, seq_length).astype('int32')
rev_indices = shuffle_fn(indices, n_bits)
return jnp.take(inputs, rev_indices, axis=1, mode='clip') |
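# For a length-8 sequence (3 address bits) the left rotation gives the
# "perfect shuffle" permutation: output position j reads input position
# _rol(j, 3), i.e. [0, 2, 4, 6, 1, 3, 5, 7].
import jax.numpy as jnp
x = jnp.arange(8).reshape(1, 8, 1)            # [batch, length, depth]
print(_shuffle_layer(x, _rol)[0, :, 0])       # [0 2 4 6 1 3 5 7]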
Takes (n_layer, state) and returns (n_layer, shuffle_layer(rsu(state))). | def _ForwardStep(d_model, dropout, mode):
"""Takes (n_layer, state) and returns (n_layer, shuffle_layer(rsu(state)))."""
return tl.Parallel([], tl.Serial(
ResidualSwitchUnit(d_model, dropout, mode),
ShuffleLayer(),
)) |
Takes (n_layer, state) and returns (n_layer, reverse_shuffle_layer(rsu(state))). | def _BackwardStep(d_model, dropout, mode):
"""Takes (n_layer, state) and returns (n_layer, reverse_shuffle_layer(rsu(state)))."""
return tl.Parallel([], tl.Serial(
ResidualSwitchUnit(d_model, dropout, mode),
ReverseShuffleLayer(),
)) |