uuid (string, 36 chars) | file_name (string, 5-50 chars) | repo_name (string, 110 classes) | file_path (string, 7-112 chars) | commit_hash (string, 110 classes) | starcount (int64, always 0) | input (string, 39-33.8k chars) | category (dict) | licenses (sequence, 1-2 items) | github_url (string, 94-193 chars) |
---|---|---|---|---|---|---|---|---|---|
f4315a08-a5f0-461d-8704-8c3177daca71 | attn_qk_int8_per_block_hd128_causal.py | rodjjo/editorium | editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py | 7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694 | 0 | @triton.jit
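# FlashAttention-style forward with per-block INT8 Q/K scales: each program owns one
# BLOCK_M query tile and streams K/V through _attn_fwd_inner, first over the
# off-diagonal blocks (stage 4 - STAGE), then over the masked diagonal blocks (stage 2).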
def _attn_fwd(Q, K, V, Q_scale, K_scale, Out,
              stride_qz, stride_qh, stride_qm, stride_qk,
              stride_kz, stride_kh, stride_kn, stride_kk,
              stride_vz, stride_vh, stride_vk, stride_vn,
              stride_oz, stride_oh, stride_om, stride_on,
              Z, H, N_CTX, HEAD_DIM: tl.constexpr, BLOCK_M: tl.constexpr,
              BLOCK_N: tl.constexpr, STAGE: tl.constexpr):
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
    vk_offset = qvk_offset // stride_qm
    q_scale_offset = off_hz * tl.cdiv(N_CTX, BLOCK_M)
    k_scale_offset = off_hz * tl.cdiv(N_CTX, BLOCK_N)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_k = tl.arange(0, HEAD_DIM)
    Q_ptrs = Q + qvk_offset + offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk
    Q_scale_ptr = Q_scale + q_scale_offset + start_m
    K_ptrs = K + qvk_offset + offs_k[:, None] + offs_n[None, :] * stride_kn
    K_scale_ptr = K_scale + k_scale_offset
    V_ptrs = V + qvk_offset + offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk
    O_block_ptr = Out + qvk_offset + offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    q = tl.load(Q_ptrs, mask=offs_m[:, None] < N_CTX)
    q_scale = tl.load(Q_scale_ptr)
    acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs,
                                    K_scale_ptr, V_ptrs, start_m, BLOCK_M,
                                    HEAD_DIM, BLOCK_N, 4 - STAGE,
                                    offs_m, offs_n, N_CTX)
    acc, l_i, _ = _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs,
                                  K_scale_ptr, V_ptrs, start_m, BLOCK_M,
                                  HEAD_DIM, BLOCK_N, 2,
                                  offs_m, offs_n, N_CTX)
    acc = acc / l_i[:, None]
    tl.store(O_block_ptr, acc.to(Out.type.element_ty), mask=offs_m[:, None] < N_CTX)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py |
c29a2e86-f522-4cb9-9e23-8953c5a58d34 | GELUglu.py | huyz2023/2by4-pretrain | sparse/GELUglu.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
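# Backward of the GELU-GLU activation: the lower half of the input columns holds x and
# the upper half holds the gate; writes grad_x and grad_gate using the tanh-based
# GELU derivative.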
def _gelu_glu_bwd_kernel_(grad_output_ptr, grad_input_ptr, input_ptr,
                          grad_output_row_stride, grad_input_row_stride, input_row_stride,
                          grad_output_col_stride, grad_input_col_stride, input_col_stride,
                          n_rows, n_cols, BLOCK_SIZE: tl.constexpr):
    col_idx = tl.program_id(0)
    row_idx = tl.arange(0, BLOCK_SIZE)
    grad_output = tl.load(grad_output_ptr + row_idx * grad_output_row_stride +
                          col_idx * grad_output_col_stride,
                          mask=tl.arange(0, BLOCK_SIZE) < n_rows, other=-float('inf'))
    x = tl.load(input_ptr + row_idx * input_row_stride + col_idx * input_col_stride,
                mask=tl.arange(0, BLOCK_SIZE) < n_rows, other=-float('inf'))
    gate = tl.load(input_ptr + row_idx * input_row_stride +
                   (col_idx + n_cols // 2) * input_col_stride,
                   mask=tl.arange(0, BLOCK_SIZE) < n_rows, other=-float('inf'))
    gate_cube = gate * gate * gate
    beta = 0.7978845608028654
    kappa = 0.044715
    inner = beta * (gate + kappa * gate_cube)
    inner_tanh = tanh(inner)
    gate_gelu = 0.5 * gate * (inner_tanh + 1)
    grad_x = grad_output * gate_gelu
    grad_gelu = grad_output * x
    grad_gate = grad_gelu * (0.5 * (1 + inner_tanh) + 0.5 * gate *
                             (1 - inner_tanh * inner_tanh) * beta *
                             (1 + kappa * 3 * gate * gate))
    tl.store(grad_input_ptr + row_idx * grad_input_row_stride +
             col_idx * grad_input_col_stride, grad_x,
             mask=tl.arange(0, BLOCK_SIZE) < n_rows)
    tl.store(grad_input_ptr + row_idx * grad_input_row_stride +
             (col_idx + n_cols // 2) * grad_input_col_stride, grad_gate,
             mask=tl.arange(0, BLOCK_SIZE) < n_rows)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py |
aed16229-3b7a-4b1c-8e82-17e1c95bb947 | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
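# Scatters a dense tensor back into jagged values tile by tile; per-row extents come
# from jagged_offsets, and an optional fused elementwise add/mul with a second jagged
# operand is applied before the store.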
def triton_dense_to_jagged(jagged_value_ptr, jagged_offsets_ptr,
                           jagged_value_row_stride: int, output_dense_ptr, dense_indices_ptr,
                           dense_col_stride, dense_row_stride: int, dense_matrix_stride,
                           JAGGED_DIM: tl.constexpr, thread_block_row_size: tl.constexpr,
                           thread_block_col_size: tl.constexpr, operation_function: tl.constexpr,
                           operation_jagged_value_ptr) -> None:
    pid = tl.program_id(0)
    begin = tl.load(jagged_offsets_ptr + pid)
    end = tl.load(jagged_offsets_ptr + (pid + 1))
    N = jagged_value_row_stride
    M = end - begin
    dense_boundary_col = dense_row_stride
    if N < dense_row_stride:
        dense_boundary_col = N
    dense_boundary_row = tl.minimum(dense_matrix_stride // dense_row_stride, M)
    jagged_value_ptr += begin * jagged_value_row_stride
    if JAGGED_DIM > 2:
        dense_indice = tl.load(dense_indices_ptr + pid)
        if dense_indice == -1:
            dense_boundary_col = -1
        else:
            output_dense_ptr += dense_indice
    else:
        output_dense_ptr += pid * dense_matrix_stride
    if operation_function is not None:
        operation_jagged_value_ptr += begin * jagged_value_row_stride
    offset_row = tl.arange(0, thread_block_row_size)
    for _i in range(begin, end, thread_block_row_size):
        offset_col = tl.arange(0, thread_block_col_size)
        block_offset = offset_row[:, None] * dense_row_stride + offset_col[None, :] * dense_col_stride
        for _j in range(0, N, thread_block_col_size):
            dense_mask = (offset_row[:, None] < dense_boundary_row) & (
                offset_col[None, :] < dense_boundary_col)
            jagged_mask = (offset_row[:, None] < M) & (offset_col[None, :] < N)
            dense_values = tl.load(output_dense_ptr + block_offset, mask=dense_mask, other=0)
            if operation_function is not None:
                operation_jagged_value = tl.load(operation_jagged_value_ptr + block_offset,
                                                 mask=jagged_mask, other=0)
                if operation_function == 'add':
                    dense_values = tensor_elementwise_add(dense_values, operation_jagged_value)
                else:
                    dense_values = tensor_elementwise_mul(dense_values, operation_jagged_value)
            tl.store(jagged_value_ptr + block_offset, dense_values, mask=jagged_mask)
            offset_col += thread_block_col_size
            block_offset += thread_block_col_size
        offset_row += thread_block_row_size
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
935a82ae-0233-49d1-bc2a-756762601b08 | triton_implicit_gemm.py | l1351868270/implicit_gemm.triton | triton_implicit_gemm.py | 64eb8548ccf4576883c928f6315be8b24680a455 | 0 | @triton.autotune(configs=get_autotune_config(), key=['N', 'C', 'H', 'W',
'K', 'P', 'Q', 'R', 'S', 'U', 'V', 'pad_h', 'pad_w', 'dila_h', 'dila_w'])
@triton.jit
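# Implicit-GEMM 2D convolution: output coordinates (n, p, q) x k are mapped onto a
# grouped GEMM grid, and x/w tiles are gathered with bounds masks before tl.dot.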
def conv2d_kernel(x_ptr, w_ptr, y_ptr, N, C, H, W, K, P, Q, R, S, U, V,
                  pad_h, pad_w, dila_h, dila_w, GEMM_M, GEMM_N, GEMM_K,
                  BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                  BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(GEMM_M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(GEMM_N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
    pid_n = pid % num_pid_in_group // group_size_m
    gemm_i = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % GEMM_M
    gemm_j = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % GEMM_N
    n = gemm_i // (P * Q)
    npq_residual = gemm_i % (P * Q)
    p = npq_residual // Q
    q = npq_residual % Q
    k = gemm_j
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for idx_k in range(0, tl.cdiv(GEMM_K, BLOCK_SIZE_K)):
        gemm_k = idx_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
        r = gemm_k // (S * C)
        rsc_residual = gemm_k % (S * C)
        s = rsc_residual // C
        c = rsc_residual % C
        h = p[:, None] * U + r[None, :] * dila_h - pad_h
        w = q[:, None] * V + s[None, :] * dila_w - pad_w
        mask_x = (h >= 0) & (h < H) & (w >= 0) & (w < W)
        mask_w = (r < R) & (s < S) & (c < C)
        offs_x = n[:, None] * H * W * C + h * W * C + w * C + c
        offs_w = k[None, :] * R * S * C + r[:, None] * S * C + s[:, None] * C + c[:, None]
        x_ptrs = x_ptr + offs_x
        w_ptrs = w_ptr + offs_w
        x_data = tl.load(x_ptrs, mask=mask_x, other=0.0)
        w_data = tl.load(w_ptrs, mask=mask_w[:, None], other=0.0)
        accumulator = tl.dot(x_data, w_data, accumulator)
    c_data = accumulator.to(tl.float16)
    offs_y = gemm_i[:, None] * GEMM_N + gemm_j[None, :]
    mask_y = (gemm_i[:, None] < GEMM_M) & (gemm_j[None, :] < GEMM_N)
    y_ptrs = y_ptr + offs_y
    tl.store(y_ptrs, c_data, mask=mask_y)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_implicit_gemm.py |
cd190af4-66fc-4636-9bc4-2a8357d80887 | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
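# Builds a causal ALiBi bias tile: entries with positive bias (future positions) and,
# when shapes are uneven, out-of-bounds entries are sent to -inf.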
def causal_alibi_mask(slope, offs_m, offs_n, M, N, EVEN_M: tl.constexpr,
                      EVEN_N: tl.constexpr):
    shift = N - M
    alibi = (offs_n[None, :] - offs_m[:, None] - shift) * slope
    mask = alibi <= 0
    if not EVEN_M & EVEN_N:
        mask = mask & make_bounds(offs_m, offs_n, M, N, EVEN_M, EVEN_N)
    return tl.where(mask, alibi, float('-inf'))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
0a360c43-f9f2-46b3-bcbc-051ade3b0924 | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.jit
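# Shared fused-attention forward body for the TMA and non-TMA paths; STAGE selects
# which _attn_fwd_inner passes run, and the per-row log-sum-exp statistic lands in M.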
def _attn_fwd_compute(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o,
                      stride_qz, stride_qh, stride_qm, stride_qk,
                      stride_kz, stride_kh, stride_kn, stride_kk,
                      stride_vz, stride_vh, stride_vk, stride_vn,
                      stride_oz, stride_oh, stride_om, stride_on,
                      off_hz, pid, Z, H, N_CTX,
                      BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
                      HEAD_DIM: tl.constexpr, STAGE: tl.constexpr,
                      ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr):
    start_m = pid
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
    K_block_ptr = None
    V_block_ptr = None
    Q_block_ptr = None
    O_block_ptr = None
    if not ENABLE_TMA:
        Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX, HEAD_DIM),
                                        strides=(stride_qm, stride_qk),
                                        offsets=(start_m * BLOCK_M, 0),
                                        block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0))
        v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
        V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX, HEAD_DIM),
                                        strides=(stride_vk, stride_vn), offsets=(0, 0),
                                        block_shape=(BLOCK_N, HEAD_DIM), order=v_order)
        K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=(HEAD_DIM, N_CTX),
                                        strides=(stride_kk, stride_kn), offsets=(0, 0),
                                        block_shape=(HEAD_DIM, BLOCK_N), order=(0, 1))
        O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX, HEAD_DIM),
                                        strides=(stride_om, stride_on),
                                        offsets=(start_m * BLOCK_M, 0),
                                        block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0))
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    qk_scale = sm_scale
    qk_scale *= 1.44269504
    if ENABLE_TMA:
        q = tl._experimental_descriptor_load(
            desc_q, [(qvk_offset // stride_qm + start_m * BLOCK_M).to(tl.int32), 0],
            [BLOCK_M, HEAD_DIM], Q.dtype.element_ty)
    else:
        q = tl.load(Q_block_ptr)
    if STAGE & 1:
        acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr,
                                        desc_k, desc_v, Q, qvk_offset, stride_kn,
                                        stride_vn, stride_vk, start_m, qk_scale,
                                        BLOCK_M, HEAD_DIM, BLOCK_N, 4 - STAGE,
                                        offs_m, offs_n, N_CTX,
                                        V.dtype.element_ty == tl.float8e5,
                                        ENABLE_TMA, LOOP_SCHEDULE)
    if STAGE & 2:
        acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr,
                                        desc_k, desc_v, Q, qvk_offset, stride_kn,
                                        stride_vn, stride_vk, start_m, qk_scale,
                                        BLOCK_M, HEAD_DIM, BLOCK_N, 2,
                                        offs_m, offs_n, N_CTX,
                                        V.dtype.element_ty == tl.float8e5,
                                        ENABLE_TMA, LOOP_SCHEDULE)
    m_i += tl.math.log2(l_i)
    acc = acc / l_i[:, None]
    m_ptrs = M + off_hz * N_CTX + offs_m
    tl.store(m_ptrs, m_i)
    if ENABLE_TMA:
        tl._experimental_descriptor_store(
            desc_o, acc.to(Out.type.element_ty),
            [(qvk_offset // stride_om + start_m * BLOCK_M).to(tl.int32), 0])
    else:
        tl.store(O_block_ptr, acc.to(Out.type.element_ty))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
9ea4a729-2b49-4f30-a3b7-315143733125 | quant_triton.py | CompendiumLabs/ziggy | ziggy/backends/quant_triton.py | bd12fe50ca3475743f62ae26d4c184108e441e03 | 0 | @triton.jit
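# Unpacks BITS-wide codes from uint8 storage (QFACT codes per byte) and dequantizes
# them as scale * (code - zero_point) into the output dtype.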
def dequantize_kernel(X, Y, N, K, K1, scale, zero_point, BITS: tl.constexpr,
                      BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
                      BLOCK_SIZE_K1: tl.constexpr):
    dtype = Y.dtype.element_ty
    scale_ty = tl.full((), scale, dtype=dtype)
    zero_point_ty = tl.full((), zero_point, dtype=dtype)
    QFACT = 8 // BITS
    QMASK = (1 << BITS) - 1
    QMASK_INT = tl.full((), QMASK, dtype=tl.uint8)
    pid_n = tl.program_id(0)
    pid_k = tl.program_id(1)
    bk = tl.arange(0, BLOCK_SIZE_K)
    bk1, bq1 = bk // QFACT, bk % QFACT
    x_shift = BITS * bq1
    rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    rk = pid_k * BLOCK_SIZE_K + bk
    rk1 = pid_k * BLOCK_SIZE_K1 + bk1
    mask_x = rn[:, None] < N
    mask_y = rn[:, None] < N
    X1 = X + (rn[:, None] * K1 + rk1[None, :])
    Y1 = Y + (rn[:, None] * K + rk[None, :])
    x = tl.load(X1, mask=mask_x)
    xi = x >> x_shift & QMASK_INT
    xf = scale_ty * (xi.to(dtype) - zero_point_ty)
    tl.store(Y1, xf, mask=mask_y)
| {
"Data Type": [
"uint8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/CompendiumLabs/ziggy/blob/bd12fe50ca3475743f62ae26d4c184108e441e03/ziggy/backends/quant_triton.py |
767c7fac-160a-4bf1-a459-648f00cc19be | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
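# Chunkwise forward state pass for ABC attention: carries a [BK, BV] running state
# across NT time chunks, rescaling by exp(z_prev - z_chunk) along K or V depending on NORMK.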
def chunk_abc_fwd_kernel_h(k, v, z, h, h0, ht, s_k_h, s_k_t, s_k_d, s_v_h,
                           s_v_t, s_v_d, s_h_h, s_h_t, s_h_d,
                           T: tl.constexpr, K: tl.constexpr, V: tl.constexpr,
                           BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
                           NT: tl.constexpr, NORMK: tl.constexpr,
                           USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr):
    i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    b_h = tl.zeros([BK, BV], dtype=tl.float32)
    if USE_INITIAL_STATE:
        p_h = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1),
                                (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        b_h += tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
    if NORMK:
        p_z0 = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,),
                                 (i_k * BK,), (BK,), (0,))
    else:
        p_z0 = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,),
                                 (i_v * BV,), (BV,), (0,))
    b_zp = tl.load(p_z0).to(tl.float32)
    for i_t in range(NT):
        p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t),
                                (i_k * BK, i_t * BT), (BK, BT), (0, 1))
        p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
                                (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V),
                                (s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_v = tl.load(p_v, boundary_check=(0, 1))
        if NORMK:
            p_zc = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,),
                                     ((i_t * BT + BT - 1) * K + i_k * BK,), (BK,), (0,))
            b_zc = tl.load(p_zc, boundary_check=(0,))
            b_r, b_zp = tl.exp(b_zp - b_zc), b_zc
            b_h = b_h * b_r[:, None]
            b_k = tl.exp(b_k - b_zc[:, None]).to(b_k.dtype)
        else:
            p_zc = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,),
                                     ((i_t * BT + BT - 1) * V + i_v * BV,), (BV,), (0,))
            b_zc = tl.load(p_zc, boundary_check=(0,))
            b_r, b_zp = tl.exp(b_zp - b_zc), b_zc
            b_h = b_h * b_r[None, :]
            b_v = tl.exp(b_v - b_zc[None, :]).to(b_v.dtype)
        b_h += tl.dot(b_k, b_v, allow_tf32=False)
    if STORE_FINAL_STATE:
        p_h = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1),
                                (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
b8c40fe7-a4c6-44cc-b7fe-36d33f96630e | ln_linear_triton_2.py | ethansmith2000/fused-layer-norm | ln_linear_triton_2.py | 84fe243a829364acdcfd7cd70b699db04838af0f | 0 | @triton.jit
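# Fused LayerNorm forward, one program per row: stores the row mean and reciprocal
# standard deviation for the backward pass and writes the affine result back in place.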
def _layer_norm_fwd_fused(X_ptr, W_ptr, B_ptr, Mean_ptr, RSTD_ptr, stride,
                          n_cols, eps, BLOCK_SIZE: tl.constexpr):
    row_idx = tl.program_id(0)
    col_offsets = tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < n_cols
    X_ptr += row_idx * stride
    Mean_ptr += row_idx
    RSTD_ptr += row_idx
    X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0)
    W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0)
    B_row = tl.load(B_ptr + col_offsets, mask=mask, other=0)
    mean = tl.sum(X_row, axis=0) / n_cols
    demeaned = X_row - mean
    var = tl.sum(demeaned * demeaned, axis=0) / n_cols
    rstd = rsqrt(var + eps)
    tl.store(Mean_ptr, mean)
    tl.store(RSTD_ptr, rstd)
    Y_row = tl.fma(demeaned * rstd, W_row, B_row)
    tl.store(X_ptr + col_offsets, Y_row, mask=mask)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Batch-Oriented",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ethansmith2000/fused-layer-norm/blob/84fe243a829364acdcfd7cd70b699db04838af0f/ln_linear_triton_2.py |
fc3fd787-4ea0-4a55-9c09-59fdb3df32dd | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=MATMUL_CONFIGS_NON_PERSISTENT, key=['M', 'N', 'K'],
prune_configs_by={'early_config_prune': prune_configs, 'perf_model':
None, 'top_k': None}, use_cuda_graph=True)
@triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
args['SPLIT_K']) == 0})
@triton.jit
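# Swizzled GEMM over [M, K] @ [N, K] tiles with split-K: row-wise fp8 scales are
# applied to the accumulator, and partial results are combined with atomics when SPLIT_K > 1.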
def _kernel_matmul_fp8_row_non_persistent(A, B, C, M, N, K, m_key, n_key, k_key,
        A_scale, B_scale, stride_am, stride_ak, stride_bn, stride_bk,
        stride_cm, stride_cn, dot_out_dtype: tl.constexpr, allow_tf32: tl.constexpr,
        fp8_fast_accum: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
        BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr,
        EVEN_K: tl.constexpr, AB_DTYPE: tl.constexpr) -> None:
    """Matmul kernel of [M, K] @ [N, K] with row-wise scales;
    performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles.
    Args:
        A (TensorWrapper): [M, K] input tensor.
        B (TensorWrapper): [N, K] input tensor.
        C (TensorWrapper): [M, N] output tensor.
        M (int): M dimension of input tensor.
        N (int): N dimension of input tensor.
        K (int): K dimension of input tensor.
        m_key (int): Autotuning key for M dimension of input tensor.
        n_key (int): Autotuning key for N dimension of input tensor.
        k_key (int): Autotuning key for K dimension of input tensor.
        A_scale (TensorWrapper): [M] reciprocal scale tensor per row. A * A_scale = original A
        B_scale (TensorWrapper): [N] reciprocal scale tensor per row. B * B_scale = original B
        stride_am (int): Stride of M dimension of A.
        stride_ak (int): Stride of K dimension of A.
        stride_bn (int): Stride of N dimension of B.
        stride_bk (int): Stride of K dimension of B.
        stride_cm (int): Stride of M dimension of C.
        stride_cn (int): Stride of N dimension of C.
        dot_out_dtype (torch.dtype): Output type of tensor core.
        allow_tf32 (bool): Whether to use TF32 for tensor core.
        fp8_fast_accum (bool): Whether to use fast accumulation for tensor core.
        BLOCK_M (int): Block size for M dimension.
        BLOCK_N (int): Block size for N dimension.
        BLOCK_K (int): Block size for K dimension.
        GROUP_M (int): Number of groups for M dimension swizzle.
        SPLIT_K (int): Number of SM's to launch per row.
        EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K.
        AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
    """
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    grid_m = tl.cdiv(M, BLOCK_M)
    grid_n = tl.cdiv(N, BLOCK_N)
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + pid % group_size
    pid_n = pid % width // group_size
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
    for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            k_remaining = K - k * (BLOCK_K * SPLIT_K)
            _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty)
            a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0)
            b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0)
        if AB_DTYPE:
            a = a.to(C.dtype.element_ty)
            b = b.to(C.dtype.element_ty)
        if fp8_fast_accum:
            acc = tl.dot(a, b, acc, out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
        else:
            acc += tl.dot(a, b, out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    a_scale = tl.load(A_scale + rm, mask=rm < M)
    b_scale = tl.load(B_scale + rn, mask=rn < N)
    scale = a_scale[:, None] * b_scale[None, :]
    acc *= scale
    acc = acc.to(C.dtype.element_ty)
    C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
1831f041-03a2-4e24-a2de-415a1666eb42 | attention.py | e-b-daniel/flash_attention_replication | attention.py | 86ac6ecffcfb729201606840dc72da116751a43f | 0 | @triton.jit
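# FlashAttention forward replication: causal online softmax with the running max/sum
# statistics kept in fp16; L and M receive the per-row statistics for the backward pass.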
def _fwd_kernel(Q, K, V, sm_scale, L, M, Out, stride_head, stride_seq,
                hidden_dim, seq_len, Br: tl.constexpr, Bt: tl.constexpr,
                hidden: tl.constexpr):
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    offs_br = start_m * Br + tl.arange(0, Br)
    offs_bt = tl.arange(0, Bt)
    offs_d = tl.arange(0, hidden)
    off_q = off_hz * stride_head + offs_br[:, None] * stride_seq + offs_d[None, :] * hidden_dim
    off_kv = off_hz * stride_head + offs_bt[:, None] * stride_seq + offs_d[None, :] * hidden_dim
    q_ptrs = Q + off_q
    k_ptrs = K + off_kv
    v_ptrs = V + off_kv
    m_i = tl.zeros([Br], dtype=tl.float16) - float('inf')
    l_i = tl.zeros([Br], dtype=tl.float16)
    acc = tl.zeros([Br, hidden], dtype=tl.float16)
    q = tl.load(q_ptrs)
    for start_n in tl.range(0, (start_m + 1) * Br, Bt):
        k = tl.load(k_ptrs + start_n * stride_seq)
        v = tl.load(v_ptrs + start_n * stride_seq)
        qk = tl.dot(q, tl.trans(k)) * sm_scale
        qk += tl.where(offs_br[:, None] >= start_n + offs_bt[None, :], 0, float('-inf'))
        m_ij = tl.max(qk, 1)
        p = tl.exp(qk - m_ij[:, None])
        l_ij = tl.sum(p, 1)
        m_i_new = tl.maximum(m_i, m_ij)
        alpha = tl.exp(m_i - m_i_new)
        beta = tl.exp(m_ij - m_i_new)
        l_i_new = alpha * l_i + beta * l_ij
        p = p * (beta / l_i_new)[:, None]
        p = p.to(v.dtype)
        acc_scale_factor = l_i / l_i_new * alpha
        acc = acc * acc_scale_factor[:, None] + tl.dot(p, v)
        acc = acc.to(v.dtype)
        l_i = l_i_new.to(l_i.dtype)
        m_i = m_i_new.to(m_i.dtype)
    l_ptrs = L + off_hz * seq_len + offs_br
    m_ptrs = M + off_hz * seq_len + offs_br
    out_ptrs = Out + off_q
    tl.store(l_ptrs, l_i)
    tl.store(m_ptrs, m_i)
    tl.store(out_ptrs, acc)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD"
] | https://github.com/e-b-daniel/flash_attention_replication/blob/86ac6ecffcfb729201606840dc72da116751a43f/attention.py |
51de4fd6-86e7-4186-a066-d17def98e550 | mhmoe.py | dtadpole/triton-playground | mhmoe.py | 2d317976722d63080133b1bf88b1f0cdec98f831 | 0 | @triton.jit
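# Accumulates dW1 and dW2 for one (pid_h, pid_e) slice by looping over batch tiles,
# recomputing Z = X @ W1 and the activation on the fly instead of reloading H.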
def _mlp_wide_kernel_bwd_dw1w2(dw1, dw2, pid_h, pid_e, x_ptr, w1_ptr, w2_ptr,
        o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D: tl.constexpr, E,
        stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d,
        stride_ob, stride_od, stride_dxb, stride_dxd, stride_dw1d, stride_dw1e,
        stride_dw2e, stride_dw2d, stride_dob, stride_dod,
        BLOCK_SIZE_B: tl.constexpr, BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr):
    """Kernel for computing the mlp_bwd_dw1w2
    Z = X @ W1, H = f(Z), O = H @ W2
    - X has shape (B, D)
    - W1 has shape (D, E)
    - W2 has shape (E, D)
    - O has shape (B, D)
    - dX has shape (B, D)
    - dW1 has shape (D, E)
    - dW2 has shape (E, D)
    - dO has shape (B, D)
    """
    TARGET_TYPE = x_ptr.type.element_ty
    offs_b = tl.arange(0, BLOCK_SIZE_B)
    offs_d = tl.arange(0, D)
    offs_e = tl.arange(0, BLOCK_SIZE_E)
    x_ptrs = x_ptr + ((pid_h * B + offs_b[:, None]) * stride_xb +
                      offs_d[None, :] * stride_xd)
    do_ptrs = do_ptr + ((pid_h * B + offs_b[:, None]) * stride_dob +
                        offs_d[None, :] * stride_dod)
    do_mask = (offs_b[:, None] < B) & (offs_d[None, :] < D)
    w1_ptrs = w1_ptr + ((pid_h * D + offs_d[:, None]) * stride_w1d +
                        (pid_e * BLOCK_SIZE_E + offs_e[None, :]) * stride_w1e)
    w1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - pid_e * BLOCK_SIZE_E)
    w2_ptrs = w2_ptr + ((pid_h * E + pid_e * BLOCK_SIZE_E + offs_e[:, None]) * stride_w2e +
                        offs_d[None, :] * stride_w2d)
    w2_mask = (offs_e[:, None] < E - pid_e * BLOCK_SIZE_E) & (offs_d[None, :] < D)
    w1 = tl.load(w1_ptrs, mask=w1_mask, other=0.0)
    w2 = tl.load(w2_ptrs, mask=w2_mask, other=0.0)
    do = tl.load(do_ptrs, mask=do_mask, other=0.0)
    for b in range(0, tl.cdiv(B, BLOCK_SIZE_B)):
        x_mask = (offs_b[:, None] < B - b * BLOCK_SIZE_B) & (offs_d[None, :] < D)
        do_mask = (offs_b[:, None] < B - b * BLOCK_SIZE_B) & (offs_d[None, :] < D)
        x = tl.load(x_ptrs, mask=x_mask, other=0.0)
        do = tl.load(do_ptrs, mask=do_mask, other=0.0)
        z = tl.dot(x, w1, out_dtype=tl.float32)
        if ACTIVATION == 'leaky_relu':
            h = leaky_relu(z).to(TARGET_TYPE)
        elif ACTIVATION == 'silu':
            h = silu(z).to(TARGET_TYPE)
        elif ACTIVATION == 'sigmoid':
            h = tl.sigmoid(z).to(TARGET_TYPE)
        else:
            h = z.to(TARGET_TYPE)
        dh = tl.dot(do, tl.trans(w2), out_dtype=tl.float32)
        dw2 += tl.dot(tl.trans(h), do, out_dtype=tl.float32)
        if ACTIVATION == 'leaky_relu':
            dz = (dh * d_leacky_relu(z)).to(TARGET_TYPE)
        elif ACTIVATION == 'silu':
            dz = (dh * d_silu(z, h)).to(TARGET_TYPE)
        elif ACTIVATION == 'sigmoid':
            dz = (dh * d_sigmoid(h)).to(TARGET_TYPE)
        else:
            dz = dh.to(TARGET_TYPE)
        dw1 += tl.dot(tl.trans(x), dz, out_dtype=tl.float32)
        x_ptrs += BLOCK_SIZE_B * stride_xb
        do_ptrs += BLOCK_SIZE_B * stride_dob
    return dw1, dw2
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py |
92f2a427-dd63-44de-8442-1178d2139794 | gemm.py | TiledTensor/TiledBench | benchs/python/gemm/triton/gemm.py | 1191aecde1b29e0b7bb9ef1a06d0e156c1fce136 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128,
'BLOCK_K': 64}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M':
64, 'BLOCK_N': 256, 'BLOCK_K': 32}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32}, num_stages=3,
num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 64,
'BLOCK_N': 128, 'BLOCK_K': 32}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32}, num_stages=3,
num_warps=8), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 32,
'BLOCK_N': 64, 'BLOCK_K': 32}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128}, num_stages=3,
num_warps=8), triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K':
128}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 256,
'BLOCK_N': 64, 'BLOCK_K': 128}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128}, num_stages=3,
num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K':
128}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 128,
'BLOCK_N': 64, 'BLOCK_K': 64}, num_stages=3, num_warps=8), triton.
Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64}, num_stages=3,
num_warps=8), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K':
64}, num_stages=3, num_warps=8), triton.Config({'BLOCK_M': 128,
'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=3, num_warps=8)], key=['M',
'N', 'K'])
@triton.jit
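# Plain tiled GEMM: each program computes one [BLOCK_M, BLOCK_N] tile of C, marching
# over K in BLOCK_K steps with masked loads on the ragged tail.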
def _gemm_kernel(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak,
                 stride_bk, stride_bn, stride_cm, stride_cn,
                 BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr):
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    offset_am = (pid_m * BLOCK_M + tl.arange(0, BLOCK_M)) % M
    offset_bn = (pid_n * BLOCK_N + tl.arange(0, BLOCK_N)) % N
    offset_k = tl.arange(0, BLOCK_K)
    a_ptrs = a_ptr + (offset_am[:, None] * stride_am + offset_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offset_k[:, None] * stride_bk + offset_bn[None, :] * stride_bn)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_K)):
        a = tl.load(a_ptrs, mask=offset_k[None, :] < K - k * BLOCK_K)
        b = tl.load(b_ptrs, mask=offset_k[:, None] < K - k * BLOCK_K)
        acc = tl.dot(a, b, acc)
        a_ptrs += BLOCK_K * stride_ak
        b_ptrs += BLOCK_K * stride_bk
    offset_cm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offset_cn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    c_ptrs = c_ptr + (offset_cm[:, None] * stride_cm + offset_cn[None, :] * stride_cn)
    c_mask = (offset_cm[:, None] < M) & (offset_cn[None, :] < N)
    tl.store(c_ptrs, acc, mask=c_mask)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Transposed Access",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/TiledTensor/TiledBench/blob/1191aecde1b29e0b7bb9ef1a06d0e156c1fce136/benchs/python/gemm/triton/gemm.py |
abd73d7c-d83a-4608-8687-11f67fd11608 | gemm_streamk_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.jit
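# Maps a linear stream-K tile id to (pid_m, pid_n), grouping GROUP_SIZE_M rows of
# tiles together to improve L2 locality.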
def swizzle_tile(tile_id, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
                 BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                 BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    grid_m = tl.cdiv(M, BLOCK_SIZE_M)
    grid_n = tl.cdiv(N, BLOCK_SIZE_N)
    width = GROUP_SIZE_M * grid_n
    group_id = tile_id // width
    group_size = tl.minimum(GROUP_SIZE_M, grid_m - group_id * GROUP_SIZE_M)
    pid_m = group_id * GROUP_SIZE_M + tile_id % group_size
    pid_n = tile_id % width // group_size
    return pid_m, pid_n
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py |
b7ea7c6d-cb9a-4a55-bf9c-8f193083482e | softmax_kernels.py | BobMcDear/attorch | attorch/softmax_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
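# The log constexpr flag below switches between the softmax and log-softmax backward
# formulas; see the docstring for the full argument contract.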
def softmax_backward_kernel(output_grad_pointer, output_pointer,
        input_grad_pointer, batch_dim, feat_dim, output_grad_batch_stride,
        output_grad_feat_stride, output_batch_stride, output_feat_stride,
        input_grad_batch_stride, input_grad_feat_stride, log: tl.constexpr,
        BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_FEAT: tl.constexpr):
    """
    Calculates the input gradient of softmax.
    Args:
        output_grad_pointer: Pointer to softmax's output gradients.
            The output gradients must be of shape [batch_dim, feat_dim].
        output_pointer: Pointer to softmax's output.
            The output must be of shape [batch_dim, feat_dim].
        input_grad_pointer: Pointer to a container the input's gradients are written to.
            The container must be of shape [batch_dim, feat_dim].
        batch_dim: Batch dimension.
        feat_dim: Dimensionality of the features.
        output_grad_batch_stride: Stride necessary to jump one element along the
            output gradients' batch dimension.
        output_grad_feat_stride: Stride necessary to jump one element along the
            output gradients' feature dimension.
        output_batch_stride: Stride necessary to jump one element along the
            output's batch dimension.
        output_feat_stride: Stride necessary to jump one element along the
            output's feature dimension.
        input_grad_batch_stride: Stride necessary to jump one element along the
            input gradient container's batch dimension.
        input_grad_feat_stride: Stride necessary to jump one element along the
            input gradient container's feature dimension.
        log: Flag indicating if log of softmax was taken.
        BLOCK_SIZE_BATCH: Block size across the batch dimension.
        BLOCK_SIZE_FEAT: Block size across the feature dimension.
    """
    batch_pid = tl.program_id(axis=0)
    batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH)
    feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
    batch_mask = batch_offset < batch_dim
    feat_mask = feat_offset < feat_dim
    output_grad_pointer += (output_grad_batch_stride * batch_offset[:, None] +
                            output_grad_feat_stride * feat_offset[None, :])
    output_pointer += (output_batch_stride * batch_offset[:, None] +
                       output_feat_stride * feat_offset[None, :])
    input_grad_pointer += (input_grad_batch_stride * batch_offset[:, None] +
                           input_grad_feat_stride * feat_offset[None, :])
    output_grad = tl.load(output_grad_pointer,
                          mask=batch_mask[:, None] & feat_mask[None, :]).to(tl.float32)
    output = tl.load(output_pointer,
                     mask=batch_mask[:, None] & feat_mask[None, :]).to(tl.float32)
    if log:
        input_grad = output_grad - tl.exp(output) * tl.sum(output_grad, axis=1)[:, None]
    else:
        input_grad = output * (output_grad - tl.sum(output_grad * output, axis=1)[:, None])
    tl.store(input_grad_pointer, input_grad,
             mask=batch_mask[:, None] & feat_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/softmax_kernels.py |
b2ad8a84-f1bc-4160-86c6-16c93aa73c9d | dynamic_quant.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/dynamic_quant.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
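# Combine function returning max(|val1|, |val2|); presumably used as the reduction
# operator when computing dynamic-quantization scales.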
def _abs_max(val1, val2):
    val1_abs = tl.abs(val1)
    val2_abs = tl.abs(val2)
    if val1_abs >= val2_abs:
        return val1_abs
    else:
        return val2_abs
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/dynamic_quant.py |
7cc562e7-5cf7-4828-8797-4dacdc2c7611 | sgmv_expand.py | IBM/vllm | vllm/lora/ops/sgmv_expand.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
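# SGMV expand (grouped GEMM) for LoRA: one grid axis per sequence; programs exit
# early when their tile lies past the sequence length or no LoRA is assigned (index -1).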
def _sgmv_expand_kernel(input_ptr, lora_ptr, out_ptr, N, K, b_seq_start_loc,
        seq_lens, lora_indices, xm_stride, xk_stride, l0_stride, lora_k_stride,
        lora_n_stride, cm_stride, cn_stride, BLOCK_M: tl.constexpr,
        BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, EVEN_K: tl.constexpr,
        ADD_INPUTS: tl.constexpr, CAST_TYPE: tl.constexpr):
    """
    The sgmv's expand triton kernel is based on GroupGEMM.
    """
    pid = tl.program_id(axis=0)
    cur_batch = tl.program_id(axis=1)
    cta_n_num = tl.cdiv(N, BLOCK_N)
    pid_m = pid // cta_n_num
    pid_n = pid % cta_n_num
    M = tl.load(seq_lens + cur_batch)
    if pid_m * BLOCK_M > M:
        return
    lora_index = tl.load(lora_indices + cur_batch)
    if lora_index == -1:
        return
    cur_seq_start = tl.load(b_seq_start_loc + cur_batch)
    offset_m = tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
    offset_n = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
    offset_k = tl.arange(0, BLOCK_K)
    ram = tl.max_contiguous(tl.multiple_of(offset_m % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
    a_ptr = (input_ptr + cur_seq_start * xm_stride + ram[:, None] * xm_stride +
             offset_k[None, :] * xk_stride)
    b_ptr = (lora_ptr + l0_stride * lora_index + offset_k[:, None] * lora_n_stride +
             rbn[None, :] * lora_k_stride)
    accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for k in range(tl.cdiv(K, BLOCK_K)):
        if EVEN_K:
            tiled_a = tl.load(a_ptr)
            tiled_b = tl.load(b_ptr)
        else:
            tiled_a = tl.load(a_ptr, mask=offset_k[None, :] < K - k * BLOCK_K, other=0)
            tiled_b = tl.load(b_ptr, mask=offset_k[:, None] < K - k * BLOCK_K, other=0)
        if CAST_TYPE:
            tiled_a = tiled_a.to(lora_ptr.dtype.element_ty)
        accumulator += tl.dot(tiled_a, tiled_b)
        a_ptr += BLOCK_K * xk_stride
        b_ptr += BLOCK_K * lora_n_stride
    tiled_c = accumulator.to(lora_ptr.dtype.element_ty)
    offset_cm = cur_seq_start + tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
    offset_cn = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
    c_ptr = out_ptr + offset_cm[:, None] * cm_stride + offset_cn[None, :] * cn_stride
    M = tl.load(seq_lens + cur_batch)
    c_mask = (offset_cm[:, None] < cur_seq_start + M) & (offset_cn[None, :] < N)
    if ADD_INPUTS:
        tiled_out = tl.load(c_ptr, mask=c_mask, other=None)
        tiled_c += tiled_out
    tl.store(c_ptr, tiled_c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/sgmv_expand.py |
ed539db4-d433-434f-8d65-9170cc127c3b | triton_attention.py | pytorch-labs/tritonbench | tritonbench/operators/template_attention/triton_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64,
'BLOCK_DMODEL': 64}, num_stages=3, num_warps=4)], key=['num_queries'])
@triton.jit
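# Inductor-generated attention template using exp2-based online softmax; strides and
# problem sizes (Z=16, H=16, N_CTX=4096, head dim 64) are hardcoded, and the tmp*
# block encodes the score-mod mask.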
def triton_tem_fused_with_exp2(arg_Q, arg_K, arg_V, out_ptr0,
        num_queries: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
        BLOCK_DMODEL: tl.constexpr):
    SCORE_MOD_IS_LINEAR: tl.constexpr = False
    ROWS_GUARANTEED_SAFE: tl.constexpr = False
    Q = arg_Q
    K = arg_K
    V = arg_V
    stride_qz = 4194304
    stride_qh = 262144
    stride_qm = 64
    stride_qk = 1
    stride_kz = 4194304
    stride_kh = 262144
    stride_kn = 64
    stride_kk = 1
    stride_vz = 4194304
    stride_vh = 262144
    stride_vk = 64
    stride_vn = 1
    Z = 16
    H = 16
    N_CTX = 4096
    qk_scale = 1.0
    MATMUL_PRECISION = Q.dtype.element_ty
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    qkv_offset = off_hz * stride_qh
    Q_block_ptr = tl.make_block_ptr(base=Q + qkv_offset, shape=(N_CTX, BLOCK_DMODEL),
                                    strides=(stride_qm, stride_qk),
                                    offsets=(start_m * BLOCK_M, 0),
                                    block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
    K_block_ptr = tl.make_block_ptr(base=K + qkv_offset, shape=(BLOCK_DMODEL, N_CTX),
                                    strides=(stride_kk, stride_kn), offsets=(0, 0),
                                    block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
    V_block_ptr = tl.make_block_ptr(base=V + qkv_offset, shape=(N_CTX, BLOCK_DMODEL),
                                    strides=(stride_vk, stride_vn), offsets=(0, 0),
                                    block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
    acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
    q = tl.load(Q_block_ptr)
    if SCORE_MOD_IS_LINEAR:
        qk_scale *= 1.44269504
    q = (q * qk_scale).to(MATMUL_PRECISION)
    lo = 0
    hi = N_CTX
    for start_n in range(lo, hi, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        k = tl.load(K_block_ptr)
        v = tl.load(V_block_ptr)
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk)
        tmp0 = tl.full([1], 1024, tl.int64)
        tmp1 = offs_m[:, None] <= tmp0
        tmp2 = start_n + offs_n[None, :] <= tmp0
        tmp3 = tmp1 & tmp2
        tmp4 = offs_m[:, None] >= start_n + offs_n[None, :]
        tmp5 = tmp3 | tmp4
        tmp6 = float('-inf')
        tmp7 = tmp6.to(tl.float32)
        tmp8 = tl.where(tmp5, qk, tmp7)
        qk = tmp8
        if not SCORE_MOD_IS_LINEAR:
            qk *= 1.44269504
        row_max = tl.max(qk, 1)
        m_i_new = tl.maximum(m_i, row_max)
        masked_out_rows = m_i_new == float('-inf')
        alpha = tl.math.exp2(m_i - m_i_new)
        p = tl.math.exp2(qk - m_i_new[:, None])
        if not ROWS_GUARANTEED_SAFE:
            alpha = tl.where(masked_out_rows, 0, alpha)
            p = tl.where(masked_out_rows[:, None], 0, p)
        acc_scale = l_i * 0 + alpha
        acc *= acc_scale[:, None]
        acc = tl.dot(p.to(MATMUL_PRECISION), v.to(MATMUL_PRECISION), acc)
        l_i = l_i * alpha + tl.sum(p, 1)
        m_i = m_i_new
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
    acc = acc / l_i[:, None]
    idx_z = tl.program_id(1) // H
    idx_h = tl.program_id(1) % H
    idx_m = offs_m[:, None]
    idx_d = tl.arange(0, BLOCK_DMODEL)[None, :]
    mask = (idx_m != -1) & (idx_d != -1)
    xindex = idx_d + 64 * idx_m + 262144 * idx_h + 4194304 * idx_z
    tl.store(out_ptr0 + xindex, acc, None)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings",
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/template_attention/triton_attention.py |
065a20ba-b651-4f54-89ce-6c8b91a0e483 | y_6.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_6.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
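# Evaluates all 13 components (Y00..Y12) of the sixth-order real spherical harmonics
# for each (x, y, z) coordinate; the CONST* values are precomputed normalization factors.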
def sixth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
                    block_size: tl.constexpr, coord_numel: tl.constexpr,
                    output_numel: tl.constexpr, col_offset: tl.constexpr,
                    output_stride: tl.constexpr):
    coord_stride = 3
    block_id = tl.program_id(0)
    coord_striding = tl.arange(0, block_size) * coord_stride
    coord_row_offset = coord_striding + block_size * coord_stride * block_id
    x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel)
    y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel)
    z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel)
    CONST002 = 3.26558761940328
    CONST003 = 3.26558761940328
    CONST004 = 6.53117523880657
    CONST006 = 8.38944649544891
    CONST007 = 9.79676285820985
    CONST008 = 10.3266947761614
    CONST009 = 3.60555127546399
    CONST010 = -1.78863600265677
    CONST011 = 14.5309475774982
    CONST012 = 8.94318001328386
    CONST013 = 16.5227116418583
    CONST014 = 16.5227116418583
    CONST015 = 17.8863600265677
    CONST017 = 20.6533895523229
    CONST018 = 20.2812259244849
    CONST019 = -107.318160159406
    CONST020 = 17.8863600265677
    CONST022 = 29.3902885746295
    CONST024 = 40.5624518489699
    CONST025 = 41.9472324772445
    CONST026 = -1.63279380970164
    CONST027 = -83.8944649544891
    CONST028 = -78.3741028656788
    CONST030 = -71.5454401062709
    CONST032 = -52.2494019104525
    CONST033 = -52.2494019104525
    CONST035 = -48.4364919249939
    CONST036 = -41.3067791046458
    CONST037 = -36.3273689437454
    CONST038 = -29.3902885746295
    CONST039 = -27.0416345659799
    CONST040 = -26.1247009552263
    CONST041 = -26.1247009552263
    CONST042 = -19.5935257164197
    CONST043 = -2.4218245962497
    CONST044 = -9.79676285820985
    CONST045 = -7.15454401062709
    CONST046 = -3.38020432074749
    CONST047 = -1.1267347735825
    VAR07 = x * x * x
    VAR08 = x * x
    VAR04 = VAR07 * VAR07
    VAR05 = VAR07 * VAR08
    VAR06 = VAR08 * VAR08
    VAR16 = y * y * y
    VAR17 = y * y
    VAR13 = VAR16 * VAR16
    VAR14 = VAR16 * VAR17
    VAR15 = VAR17 * VAR17
    VAR25 = z * z * z
    VAR26 = z * z
    VAR22 = VAR25 * VAR25
    VAR23 = VAR25 * VAR26
    VAR24 = VAR26 * VAR26
    Y00 = CONST011 * VAR05 * z + CONST011 * VAR23 * x + CONST035 * VAR07 * VAR25
    Y01 = y * (CONST006 * VAR05 + CONST025 * VAR24 * x + CONST027 * VAR07 * VAR26)
    Y02 = (-CONST045 * VAR05 * z + CONST045 * VAR23 * x +
           VAR17 * (CONST030 * VAR07 * z - CONST030 * VAR25 * x))
    Y03 = (VAR16 * (-CONST028 * VAR26 * x + CONST040 * VAR07) +
           y * (CONST007 * VAR05 + CONST038 * VAR24 * x + CONST042 * VAR07 * VAR26))
    Y04 = (CONST003 * VAR05 * z + VAR07 * (CONST004 * VAR25 + CONST033 * VAR17 * z) +
           x * (CONST002 * VAR23 - CONST032 * VAR15 * z + CONST032 * VAR17 * VAR25))
    Y05 = (CONST008 * VAR05 * y + VAR07 * (CONST017 * VAR26 * y + CONST036 * VAR16) +
           x * (CONST008 * VAR24 * y + CONST013 * VAR14 + CONST036 * VAR16 * VAR26))
    Y06 = (CONST009 * VAR13 + CONST018 * VAR17 * VAR24 + CONST039 * VAR15 * VAR26 +
           CONST047 * VAR04 + CONST047 * VAR22 +
           VAR06 * (CONST018 * VAR17 + CONST046 * VAR26) +
           VAR08 * (CONST024 * VAR17 * VAR26 + CONST039 * VAR15 + CONST046 * VAR24))
    Y07 = (CONST008 * VAR23 * y + VAR25 * (CONST017 * VAR08 * y + CONST036 * VAR16) +
           z * (CONST008 * VAR06 * y + CONST014 * VAR14 + CONST036 * VAR08 * VAR16))
    Y08 = (CONST026 * VAR04 - CONST026 * VAR22 + CONST040 * VAR17 * VAR24 -
           CONST041 * VAR15 * VAR26 + VAR06 * (CONST026 * VAR26 - CONST041 * VAR17) +
           VAR08 * (-CONST026 * VAR24 + CONST041 * VAR15))
    Y09 = (VAR16 * (CONST028 * VAR08 * z - CONST041 * VAR25) +
           y * (CONST022 * VAR06 * z - CONST042 * VAR08 * VAR25 + CONST044 * VAR23))
    Y10 = (CONST010 * VAR04 + CONST010 * VAR22 + CONST020 * VAR17 * VAR24 +
           VAR06 * (CONST012 * VAR26 + CONST015 * VAR17) +
           VAR08 * (CONST012 * VAR24 + CONST019 * VAR17 * VAR26))
    Y11 = y * (CONST006 * VAR23 + CONST025 * VAR06 * z + CONST027 * VAR08 * VAR25)
    Y12 = (-CONST037 * VAR06 * VAR26 + CONST037 * VAR08 * VAR24 +
           CONST043 * VAR04 - CONST043 * VAR22)
    output_striding = tl.arange(0, block_size) * output_stride
    output_row_offset = output_striding + block_size * output_stride * block_id + col_offset
    tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset < output_numel)
    tl.store(output_ptr + output_row_offset + 1, Y01, mask=output_row_offset + 1 < output_numel)
    tl.store(output_ptr + output_row_offset + 2, Y02, mask=output_row_offset + 2 < output_numel)
    tl.store(output_ptr + output_row_offset + 3, Y03, mask=output_row_offset + 3 < output_numel)
    tl.store(output_ptr + output_row_offset + 4, Y04, mask=output_row_offset + 4 < output_numel)
    tl.store(output_ptr + output_row_offset + 5, Y05, mask=output_row_offset + 5 < output_numel)
    tl.store(output_ptr + output_row_offset + 6, Y06, mask=output_row_offset + 6 < output_numel)
    tl.store(output_ptr + output_row_offset + 7, Y07, mask=output_row_offset + 7 < output_numel)
    tl.store(output_ptr + output_row_offset + 8, Y08, mask=output_row_offset + 8 < output_numel)
    tl.store(output_ptr + output_row_offset + 9, Y09, mask=output_row_offset + 9 < output_numel)
    tl.store(output_ptr + output_row_offset + 10, Y10, mask=output_row_offset + 10 < output_numel)
    tl.store(output_ptr + output_row_offset + 11, Y11, mask=output_row_offset + 11 < output_numel)
    tl.store(output_ptr + output_row_offset + 12, Y12, mask=output_row_offset + 12 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_6.py |
c57cca70-6e96-47ee-9f5c-ee9125d45b80 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
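# Thin non-autotuned wrapper that forwards every argument unchanged to
# rz_linear_backward_input_grad_core.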
def rz_linear_backward_input_grad_kernel_notune(a_ptr, b_ptr, c_ptr,
        init_factor, M, N, K, H, stride_am, stride_an, stride_cm, stride_ck,
        R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
        allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
        BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr):
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
        init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=stride_am,
        stride_an=stride_an, stride_cm=stride_cm, stride_ck=stride_ck,
        R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
        allow_tf32=allow_tf32, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
61065322-dd42-4a37-af64-ca295705d2b2 | rms_norm_kernels.py | BobMcDear/attorch | attorch/rms_norm_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
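# One program normalizes a BLOCK_SIZE_BATCH x feat_dim slab, optionally saving the
# inverse RMS and scaling by the weight vector; see the docstring for details.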
def rms_norm_forward_kernel(input_pointer, weight_pointer, inv_rms_pointer,
        output_pointer, batch_dim, feat_dim, input_batch_stride,
        input_feat_stride, output_batch_stride, output_feat_stride, eps,
        scale_by_weight: tl.constexpr, save_stats: tl.constexpr,
        BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_FEAT: tl.constexpr):
    """
    Root-mean-square-normalizes the input.
    Args:
        input_pointer: Pointer to the input to root-mean-square-normalize.
            The input must be of shape [batch_dim, feat_dim].
        weight_pointer: Pointer to optional weights for linear transform.
            The weights, if provided, must be of shape [feat_dim].
        inv_rms_pointer: Pointer to an optional container the input's inverse
            root mean square is written to if save_stats is True.
            The container, if provided, must be of shape [batch_dim].
        output_pointer: Pointer to a container the result is written to.
            The container must be of shape [batch_dim, feat_dim].
        batch_dim: Batch dimension.
        feat_dim: Dimensionality of the features.
        input_batch_stride: Stride necessary to jump one element along the
            input's batch dimension.
        input_feat_stride: Stride necessary to jump one element along the
            input's feature dimension.
        output_batch_stride: Stride necessary to jump one element along the
            output container's batch dimension.
        output_feat_stride: Stride necessary to jump one element along the
            output container's feature dimension.
        eps: Epsilon added in the square root in the denominator
            to avoid division by zero.
        scale_by_weight: Flag for scaling the normalized output by weights.
        save_stats: Flag for saving the root mean square.
        BLOCK_SIZE_BATCH: Block size across the batch dimension.
        BLOCK_SIZE_FEAT: Block size across the feature dimension.
    """
    batch_pid = tl.program_id(axis=0)
    batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH)
    feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
    batch_mask = batch_offset < batch_dim
    feat_mask = feat_offset < feat_dim
    input_pointer += (input_batch_stride * batch_offset[:, None] +
                      input_feat_stride * feat_offset[None, :])
    output_pointer += (output_batch_stride * batch_offset[:, None] +
                       output_feat_stride * feat_offset[None, :])
    input = tl.load(input_pointer,
                    mask=batch_mask[:, None] & feat_mask[None, :]).to(tl.float32)
    inv_rms = tl.rsqrt(tl.sum(input * input, axis=1) / feat_dim + eps)
    output = input * inv_rms[:, None]
    if save_stats:
        tl.store(inv_rms_pointer + batch_offset, inv_rms, mask=batch_mask)
    if scale_by_weight:
        weight = tl.load(weight_pointer + feat_offset, mask=feat_mask)
        output *= weight
    tl.store(output_pointer, output, mask=batch_mask[:, None] & feat_mask[None, :])
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/rms_norm_kernels.py |
c9a38822-1126-4d55-bac7-f5990f7d18ec | mlp.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/mlp.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
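# Fused activation backward for MLP blocks: handles gelu/silu/relu/squared_relu,
# gated (GLU-style) inputs, and optional recomputation of the forward output.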
def triton_mlp_activation_backward_kernel(grad_output_ptr, grad_input_ptr,
        input_ptr, output_ptr, gated: tl.constexpr, activation_type: tl.constexpr,
        recompute: tl.constexpr, n_cols: tl.constexpr, block_size: tl.constexpr):
    row_idx = tl.program_id(0).to(tl.int64)
    columns = tl.program_id(1) * block_size + tl.arange(0, block_size)
    output_offsets = n_cols * row_idx + columns
    input_offsets = 2 * n_cols * row_idx + columns if gated else output_offsets
    input_ptr = input_ptr + input_offsets
    grad_input_ptr = grad_input_ptr + input_offsets
    mask = columns < n_cols
    input_ = tl.load(input_ptr, mask=mask).to(tl.float32)
    output_grad = tl.load(grad_output_ptr + output_offsets, mask=mask).to(tl.float32)
    if activation_type == _TritonActivationType.gelu:
        tanh_input = 0.79788456 * input_ * (1 + 0.044715 * input_ * input_)
        tanh = 1 - 2 / (1 + tl.exp(2 * tanh_input))
        grad = 0.5 * input_ * ((1 - tanh * tanh) * (0.79788456 +
            0.1070322243 * input_ * input_)) + 0.5 * (1 + tanh)
        if gated or recompute:
            out = input_ * 0.5 * (1.0 + tanh)
    elif activation_type == _TritonActivationType.silu:
        exp = tl.exp(-input_)
        sigma = 1 / (1 + exp)
        grad = sigma * sigma + (1 + input_) / (2 + exp + 1 / exp)
        if gated or recompute:
            out = input_ * sigma
    elif activation_type == _TritonActivationType.relu:
        grad = tl.where(input_ > 0, 1, 0)
        if gated or recompute:
            out = tl.where(input_ > 0, input_, 0)
    elif activation_type == _TritonActivationType.squared_relu:
        relu_out = tl.where(input_ > 0, input_, 0)
        grad = 2 * relu_out
        if gated or recompute:
            out = relu_out * relu_out
    else:
        raise NotImplementedError()
    if gated:
        other = tl.load(input_ptr + n_cols, mask=mask)
        tl.store(grad_input_ptr, grad * other * output_grad, mask=mask)
        tl.store(grad_input_ptr + n_cols, out * output_grad, mask=mask)
        out = out * other
    else:
        tl.store(grad_input_ptr, grad * output_grad, mask=mask)
    if recompute:
        tl.store(output_ptr + output_offsets, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound",
"Low Latency"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/mlp.py |
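For reference, a scalar Python mirror of the tanh-GELU branch above; the constants 0.79788456 (= sqrt(2/pi)) and 0.1070322243 (= 3 * sqrt(2/pi) * 0.044715) are copied from the kernel, and the function name is illustrative. The kernel's 1 - 2 / (1 + exp(2z)) is just tanh(z) written with a single exponential.

import math

def gelu_tanh_and_grad(x):
    # Tanh approximation of GELU and its derivative, as hard-coded above.
    t = math.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    out = 0.5 * x * (1 + t)
    grad = 0.5 * x * (1 - t * t) * (0.79788456 + 0.1070322243 * x * x) + 0.5 * (1 + t)
    return out, grad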
78c3a947-9a20-4a2d-b1c0-8cdcf8617d2f | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.autotune(configs=[triton.Config({}, num_stages=stages, num_warps=
warps) for stages in [0, 1, 3, 4] for warps in [4, 8, 16]], key=[
'QUERY_GROUP_SIZE', 'HEAD_SIZE', 'KV_BLOCK_SIZE'])
@triton.jit
def _paged_attn_w_mma_kernel(m_i_ptr, l_i_ptr, out_ptr, q_ptr, k_cache_ptr,
v_cache_ptr, context_lens_ptr, block_tables_ptr, attn_scale, stride_bt0,
stride_bt1, stride_q0, stride_q1, stride_q2, stride_kv0, stride_kv1,
stride_kv2, stride_kv3, stride_o0, stride_o1, stride_o2, stride_o3,
stride_o4, HEAD_SIZE: tl.constexpr, QUERY_GROUP_SIZE: tl.constexpr,
PADDED_QUERY_GROUP_SIZE: tl.constexpr, NUM_KV_HEADS: tl.constexpr,
KV_BLOCK_SIZE: tl.constexpr, PARTITION_SIZE: tl.constexpr):
seq_idx = tl.program_id(0)
kv_head_idx = tl.program_id(1)
part_idx = tl.program_id(2)
max_num_partitions = tl.num_programs(2)
log2e: tl.constexpr = 1.4426950408889634
USE_PARTITIONING = PARTITION_SIZE > 0
context_len = tl.load(context_lens_ptr + seq_idx)
if USE_PARTITIONING:
context_start_idx = part_idx * PARTITION_SIZE
if context_start_idx >= context_len:
return
context_end_idx = tl.minimum(context_start_idx + PARTITION_SIZE,
context_len)
num_blocks = tl.cdiv(context_end_idx - context_start_idx, KV_BLOCK_SIZE
)
else:
num_blocks = tl.cdiv(context_len, KV_BLOCK_SIZE)
block_offset = tl.arange(0, KV_BLOCK_SIZE)
head_offset = tl.arange(0, HEAD_SIZE)
padding_group_offset = tl.arange(0, PADDED_QUERY_GROUP_SIZE)
kv_offset = kv_head_idx * stride_kv1 + block_offset[:, None
] * stride_kv2 + head_offset[None, :] * stride_kv3
q_offset = seq_idx * stride_q0 + (kv_head_idx * QUERY_GROUP_SIZE +
padding_group_offset[:, None]) * stride_q1 + head_offset[None, :
] * stride_q2
group_mask = padding_group_offset[:, None] < QUERY_GROUP_SIZE
q = tl.load(q_ptr + q_offset, mask=group_mask, other=0.0)
q = (q * attn_scale).to(q_ptr.dtype.element_ty)
m_i = tl.zeros([PADDED_QUERY_GROUP_SIZE], dtype=tl.float32) - float('inf')
l_i = tl.zeros([PADDED_QUERY_GROUP_SIZE], dtype=tl.float32)
acc = tl.zeros([PADDED_QUERY_GROUP_SIZE, HEAD_SIZE], dtype=tl.float32)
num_prev_blocks = part_idx * (PARTITION_SIZE // KV_BLOCK_SIZE)
for i in range(num_blocks):
block_idx = num_prev_blocks + i
block_number = tl.load(block_tables_ptr + seq_idx * stride_bt0 +
block_idx * stride_bt1)
kv_block_offset = block_number * stride_kv0 + kv_offset
mask_offset = block_idx * KV_BLOCK_SIZE + block_offset
kv_mask = mask_offset[:, None] < context_len
k = tl.load(k_cache_ptr + kv_block_offset, mask=kv_mask, other=0.0)
if PADDED_QUERY_GROUP_SIZE == 1:
qk = tl.sum(q[:, None, :] * k[None, :, :], axis=2)
else:
qk = tl.dot(q, k.T, out_dtype=tl.float32)
qk = tl.where(mask_offset < context_len, qk, float('-inf'))
m_i_new = tl.maximum(m_i, tl.max(qk, axis=1))
p = tl.math.exp2((qk - m_i_new[:, None]) * log2e)
alpha = tl.math.exp2((m_i - m_i_new) * log2e)
acc *= alpha[:, None]
v = tl.load(v_cache_ptr + kv_block_offset, mask=kv_mask, other=0.0)
if PADDED_QUERY_GROUP_SIZE == 1:
acc += tl.sum(p.T[:, :, None] * v[:, None, :], axis=0)
else:
p = p.to(v.dtype)
acc += tl.dot(p, v, out_dtype=tl.float32)
l_i = l_i * alpha + tl.sum(p, axis=1)
m_i = m_i_new
acc = acc / l_i[:, None]
if USE_PARTITIONING:
part_offset = ((seq_idx * NUM_KV_HEADS + kv_head_idx) *
max_num_partitions * QUERY_GROUP_SIZE + part_idx *
QUERY_GROUP_SIZE + padding_group_offset)
mask = padding_group_offset < QUERY_GROUP_SIZE
tl.store(m_i_ptr + part_offset, m_i, mask=mask)
tl.store(l_i_ptr + part_offset, l_i, mask=mask)
out_offset = seq_idx * stride_o0
if USE_PARTITIONING:
out_offset += kv_head_idx * stride_o1
else:
out_offset += kv_head_idx * QUERY_GROUP_SIZE * stride_o1
out_offset += part_idx * stride_o2 + padding_group_offset[:, None
] * stride_o3 + head_offset[None, :] * stride_o4
group_mask = padding_group_offset[:, None] < QUERY_GROUP_SIZE
tl.store(out_ptr + out_offset, acc, mask=group_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
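The inner loop above is the standard streaming-softmax (flash-attention) update; the kernel computes it with exp2 and log2e, which is the same quantity via the faster base-2 exponential. A NumPy sketch of one block step for a single query row, names illustrative:

import numpy as np

def online_softmax_step(m_i, l_i, acc, qk, v):
    # Rescale the running state by exp(m_i - m_new), then fold in the block.
    m_new = max(m_i, qk.max())
    p = np.exp(qk - m_new)
    alpha = np.exp(m_i - m_new)
    acc = acc * alpha + p @ v
    l_i = l_i * alpha + p.sum()
    return m_new, l_i, acc  # the kernel emits acc / l_i at the end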
97731521-c61c-4321-aa31-93651e9adc55 | _matmul.py | IBM/qattn | qattn/nn/functional/_matmul.py | 07ceda0aceb9afd299d622325944c0c0471827fe | 0 | @triton.autotune(configs=int8_configs(), key=['M', 'N', 'K'],
prune_configs_by={'early_config_prune': early_config_prune,
'perf_model': _estimate_matmul_time, 'top_k': 10})
@triton.heuristics({'EVEN_K': lambda args: args['K'] % args['BLOCK_K'] == 0})
@triton.jit
def _kernel(A, B, C, bias, M, N, K, stride_am, stride_ak, stride_bk,
stride_bn, stride_cm, stride_cn, a_scale_ptr, b_scale_ptr,
out_scale_ptr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K:
tl.constexpr, GROUP_M: tl.constexpr, EVEN_K: tl.constexpr, BIAS_ADD: tl
.constexpr, A_PER_CHANNEL: tl.constexpr, B_PER_CHANNEL: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)
for k in range(0, tl.cdiv(K, BLOCK_K)):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
k_remaining = K - k * BLOCK_K
_0 = tl.zeros((1, 1), dtype=tl.int8)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0)
b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0)
acc += tl.dot(a, b, allow_tf32=True, out_dtype=tl.int32)
A += BLOCK_K * stride_ak
B += BLOCK_K * stride_bk
if A_PER_CHANNEL:
_0 = tl.zeros((1,), dtype=a_scale_ptr.dtype.element_ty)
mask = ram < M
a_scale = tl.load(a_scale_ptr + ram, mask=mask, other=_0)
else:
a_scale = tl.load(a_scale_ptr)
if B_PER_CHANNEL:
_0 = tl.zeros((1,), dtype=b_scale_ptr.dtype.element_ty)
mask = rbn < N
b_scale = tl.load(b_scale_ptr + rbn, mask=mask, other=_0)
else:
b_scale = tl.load(b_scale_ptr)
if BIAS_ADD:
bias = tl.load(bias + rn)
if A_PER_CHANNEL and B_PER_CHANNEL:
bias = tl.math.llrint(bias / (a_scale[:, None] * b_scale[None, :])
).to(tl.int32)
acc = acc + bias
else:
bias = tl.math.llrint(bias / (a_scale * b_scale)).to(tl.int32)
acc = acc + bias[None, :]
if A_PER_CHANNEL and B_PER_CHANNEL:
mask = ram < M
_0 = tl.zeros((1,), dtype=out_scale_ptr.dtype.element_ty)
out_scale = tl.load(out_scale_ptr + ram, mask=mask, other=_0)
acc = tl.math.llrint(acc.to(tl.float32) * a_scale[:, None] *
b_scale[None, :] * out_scale[:, None]).to(tl.int8)
else:
out_scale = tl.load(out_scale_ptr)
acc = tl.math.llrint(acc.to(tl.float32) * (a_scale * b_scale *
out_scale)).to(tl.int8)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.store(C, acc, mask=mask)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_matmul.py |
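A scalar sketch of the requantization epilogue above, assuming per-tensor scales; note the kernel itself relies on out_scale keeping values inside int8 range, since the final .to(tl.int8) cast is not guaranteed to saturate.

def requantize_reference(acc_i32, a_scale, b_scale, out_scale, bias=None):
    # int32 accumulator -> int8 output; bias is folded in the integer domain
    # by dividing out the input scales first, mirroring tl.math.llrint above.
    if bias is not None:
        acc_i32 = acc_i32 + round(bias / (a_scale * b_scale))
    return round(acc_i32 * (a_scale * b_scale * out_scale))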
b7fb0c98-69f2-48d3-a107-c7746602da7f | triton_fused_local_attn2.py | LouChao98/vqtree | ops/triton_fused_local_attn2.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q, K, V, Out, L, softmax_scale, stride_qb, stride_qh,
stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh,
stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, WINDOW_SIZE: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, WRITE_LSE: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + (off_b * stride_kb + off_h *
stride_kh), shape=(BLOCK_HEADDIM, seqlen_k), strides=(1, stride_kn),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q = tl.load(Q_block_ptr)
else:
q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 1)
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, seqlen_k,
WINDOW_SIZE, BLOCK_M, BLOCK_N, EVEN_M & EVEN_N, 3)
if WRITE_LSE:
l_ptrs = L + off_hb * seqlen_q + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
acc = acc / l_i[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn2.py |
fa4480e1-677b-4342-962a-c8f709e3fe8b | triton_call_test.py | jax-ml/jax-triton | tests/triton_call_test.py | 859cc392bec876d132bd0790ea6c00b6c246dd2b | 0 | @triton.jit
def add_scalar_kernel(x_ptr, y, output_ptr):
tl.store(output_ptr, tl.load(x_ptr) + y)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/tests/triton_call_test.py |
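A minimal launch sketch for the scalar-add kernel above, driven from PyTorch rather than jax-triton; assumes a CUDA device and the kernel in scope.

import torch

x = torch.tensor([1.5], device='cuda')
out = torch.empty_like(x)
add_scalar_kernel[(1,)](x, 2.5, out)  # one program; single load, add, store
# out is now tensor([4.0], device='cuda:0')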
63cb7c43-5317-4ccc-88d2-92d74f1aa420 | masked_load_store.py | ROCm/aotriton | tritonsrc/masked_load_store.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def mstore2d(registers, REG_ROWS: tl.constexpr, REG_COLS: tl.constexpr,
o_base, o_start_row, o_start_col, o_rows, o_cols, stride_row, stride_col):
off_rows = tl.arange(0, REG_ROWS) + o_start_row
off_cols = tl.arange(0, REG_COLS) + o_start_col
o_ptrs = o_base + off_rows[:, None] * stride_row + off_cols[None, :
] * stride_col
o_ptrs_mask = tl.full([REG_ROWS, REG_COLS], 1, dtype=tl.int1)
row_overflow = o_start_row + REG_ROWS - o_rows
if row_overflow > 0:
o_ptrs_mask = o_ptrs_mask & (off_rows[:, None] < o_rows)
col_overflow = o_start_col + REG_COLS - o_cols
if col_overflow > 0:
o_ptrs_mask = o_ptrs_mask & (off_cols[None, :] < o_cols)
tl.store(o_ptrs, registers, mask=o_ptrs_mask)
return o_ptrs, o_ptrs_mask
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/masked_load_store.py |
5af2a241-3541-414f-90b4-b233dbb92a70 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps) for BK in [32, 64] for BV in [64, 128] for num_warps in [2,
4, 8]], key=['BT'])
@triton.jit
def chunk_rwkv6_bwd_kernel_inter(q, k, v, h, gi, ge, u, do, dh, dA, dq, dk,
dq2, dk2, dg, du, offsets, indices, scale, T: tl.constexpr, H: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_k = i_k * BK + tl.arange(0, BK)
m_k = o_k < K
if HEAD_FIRST:
p_gk = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_gi = tl.make_block_ptr(gi + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(gi + i_bh * T * K + (min(T,
i_t * BT + BT) - 1) * K + o_k, BK), BK)
else:
p_gk = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_gi = tl.make_block_ptr(gi + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(gi + (bos + min(T, i_t * BT +
BT) - 1) * H * K + i_h * K + o_k, BK), BK)
b_gn = tl.load(p_gn, mask=m_k, other=0)
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_dgk = tl.zeros([BK], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * NT * K * V + i_t * K * V, (V,
K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bh * NT * K * V + i_t * K * V,
(V, K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dgk += tl.sum(b_h * b_dh, axis=0)
b_dq += tl.dot(b_do, b_h.to(b_do.dtype))
b_dk += tl.dot(b_v, b_dh.to(b_v.dtype))
b_dgk *= tl.exp(b_gn)
b_dq *= scale
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_gi = tl.load(p_gi, boundary_check=(0, 1))
b_dq = b_dq * tl.exp(b_gk)
b_dk = b_dk * tl.exp(b_gn[None, :] - b_gi)
o_i = tl.arange(0, BT)
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dA_dig = dA + (i_bh * T + i_t * BT + o_i) * BT + o_i
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dA_dig = dA + ((bos + i_t * BT + o_i) * H + i_h) * BT + o_i
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dgk += tl.sum(b_dk * b_k, axis=0)
b_dq += tl.load(p_dq, boundary_check=(0, 1))
b_dk += tl.load(p_dk, boundary_check=(0, 1))
b_dg = b_q * b_dq - b_k * b_dk
b_dg = b_dg - tl.cumsum(b_dg, axis=0) + tl.sum(b_dg, axis=0)[None, :
] + b_dgk[None, :] - b_q * b_dq
b_dA_dig = tl.load(p_dA_dig, mask=i_t * BT + o_i < T, other=0)
p_u = tl.make_block_ptr(u + i_h * K, (K,), (1,), (i_k * BK,), (BK,), (0,))
b_u = tl.load(p_u, boundary_check=(0,))
b_dq += b_dA_dig[:, None] * b_u[None, :] * b_k
b_dk += b_dA_dig[:, None] * b_u[None, :] * b_q
b_du = tl.sum(b_dA_dig[:, None] * b_q * b_k, axis=0)
p_du = tl.make_block_ptr(du + (i_tg * H + i_h) * K, (K,), (1,), (i_k *
BK,), (BK,), (0,))
tl.store(p_du, b_du, boundary_check=(0,))
if HEAD_FIRST:
p_dq = tl.make_block_ptr(dq2 + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk2 + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_dq = tl.make_block_ptr(dq2 + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk2 + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py |
5850800a-a6b5-4982-b610-acbd6bb5f405 | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _inner_paged_attn_unroll_8_kernel(q, k_cache, v_cache, stride_km,
block_base_ptrs, base_offs_kv, alibi_slope, block_offs, seq_len, qkv,
qk_max, exp_sum, BLOCK_SIZE: tl.constexpr, LO: tl.constexpr, HI: tl.
constexpr):
for block_idx in range(LO, HI, 8):
offs_kv_0 = tl.load(block_base_ptrs + block_idx + 0
) * stride_km + base_offs_kv
offs_kv_1 = tl.load(block_base_ptrs + block_idx + 1
) * stride_km + base_offs_kv
offs_kv_2 = tl.load(block_base_ptrs + block_idx + 2
) * stride_km + base_offs_kv
offs_kv_3 = tl.load(block_base_ptrs + block_idx + 3
) * stride_km + base_offs_kv
offs_kv_4 = tl.load(block_base_ptrs + block_idx + 4
) * stride_km + base_offs_kv
offs_kv_5 = tl.load(block_base_ptrs + block_idx + 5
) * stride_km + base_offs_kv
offs_kv_6 = tl.load(block_base_ptrs + block_idx + 6
) * stride_km + base_offs_kv
offs_kv_7 = tl.load(block_base_ptrs + block_idx + 7
) * stride_km + base_offs_kv
k_0 = tl.load(k_cache + offs_kv_0)
k_1 = tl.load(k_cache + offs_kv_1)
k_2 = tl.load(k_cache + offs_kv_2)
k_3 = tl.load(k_cache + offs_kv_3)
k_4 = tl.load(k_cache + offs_kv_4)
k_5 = tl.load(k_cache + offs_kv_5)
k_6 = tl.load(k_cache + offs_kv_6)
k_7 = tl.load(k_cache + offs_kv_7)
v_0 = tl.load(v_cache + offs_kv_0)
v_1 = tl.load(v_cache + offs_kv_1)
v_2 = tl.load(v_cache + offs_kv_2)
v_3 = tl.load(v_cache + offs_kv_3)
v_4 = tl.load(v_cache + offs_kv_4)
v_5 = tl.load(v_cache + offs_kv_5)
v_6 = tl.load(v_cache + offs_kv_6)
v_7 = tl.load(v_cache + offs_kv_7)
_qk_0 = tl.sum((q[None, :] * k_0).to(tl.float32), axis=1)
_qk_1 = tl.sum((q[None, :] * k_1).to(tl.float32), axis=1)
_qk_2 = tl.sum((q[None, :] * k_2).to(tl.float32), axis=1)
_qk_3 = tl.sum((q[None, :] * k_3).to(tl.float32), axis=1)
_qk_4 = tl.sum((q[None, :] * k_4).to(tl.float32), axis=1)
_qk_5 = tl.sum((q[None, :] * k_5).to(tl.float32), axis=1)
_qk_6 = tl.sum((q[None, :] * k_6).to(tl.float32), axis=1)
_qk_7 = tl.sum((q[None, :] * k_7).to(tl.float32), axis=1)
if alibi_slope is not None:
_qk_0 += alibi_slope * ((block_idx + 0) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_1 += alibi_slope * ((block_idx + 1) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_2 += alibi_slope * ((block_idx + 2) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_3 += alibi_slope * ((block_idx + 3) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_4 += alibi_slope * ((block_idx + 4) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_5 += alibi_slope * ((block_idx + 5) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_6 += alibi_slope * ((block_idx + 6) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_7 += alibi_slope * ((block_idx + 7) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_max = tl.maximum(tl.max(_qk_0, axis=0), qk_max)
_qk_max = tl.maximum(tl.max(_qk_1, axis=0), _qk_max)
_qk_max = tl.maximum(tl.max(_qk_2, axis=0), _qk_max)
_qk_max = tl.maximum(tl.max(_qk_3, axis=0), _qk_max)
        _qk_max = tl.maximum(tl.max(_qk_4, axis=0), _qk_max)
_qk_max = tl.maximum(tl.max(_qk_5, axis=0), _qk_max)
_qk_max = tl.maximum(tl.max(_qk_6, axis=0), _qk_max)
_qk_max = tl.maximum(tl.max(_qk_7, axis=0), _qk_max)
exp_tmp = tl.exp(_qk_0 - _qk_max) + tl.exp(_qk_1 - _qk_max) + tl.exp(
_qk_2 - _qk_max) + tl.exp(_qk_3 - _qk_max) + tl.exp(_qk_4 - _qk_max
) + tl.exp(_qk_5 - _qk_max) + tl.exp(_qk_6 - _qk_max) + tl.exp(
_qk_7 - _qk_max)
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(exp_tmp, axis=0)
qkv_sum_tmp = tl.exp(_qk_0[:, None] - _qk_max).to(v_cache.dtype.
element_ty) * v_0 + tl.exp(_qk_1[:, None] - _qk_max).to(v_cache
.dtype.element_ty) * v_1 + tl.exp(_qk_2[:, None] - _qk_max).to(
v_cache.dtype.element_ty) * v_2 + tl.exp(_qk_3[:, None] - _qk_max
).to(v_cache.dtype.element_ty) * v_3 + tl.exp(_qk_4[:, None] -
_qk_max).to(v_cache.dtype.element_ty) * v_4 + tl.exp(_qk_5[:,
None] - _qk_max).to(v_cache.dtype.element_ty) * v_5 + tl.exp(
_qk_6[:, None] - _qk_max).to(v_cache.dtype.element_ty
) * v_6 + tl.exp(_qk_7[:, None] - _qk_max).to(v_cache.dtype.
element_ty) * v_7
qkv = (qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + qkv_sum_tmp
) / _exp_sum
qk_max = _qk_max
exp_sum = _exp_sum
return qkv, qk_max, exp_sum
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
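The merge at the tail of each unrolled iteration is the pairwise combination rule for two partial softmaxes; a scalar sketch (names illustrative):

import math

def merge_softmax_states(m_old, l_old, m_block, l_block):
    # Combine (max, sum-of-exponentials) pairs from two disjoint key ranges.
    m_new = max(m_old, m_block)
    l_new = l_old * math.exp(m_old - m_new) + l_block * math.exp(m_block - m_new)
    return m_new, l_new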
b88cea39-85a6-4298-a841-7a49f83348ed | dequant.py | AutoGPTQ/AutoGPTQ | auto_gptq/nn_modules/triton_utils/dequant.py | 6689349625de973b9ee3016c28c11f32acf7f02c | 0 | @triton.autotune(DEFAULT_DEQUANT_CONFIGS, key=['numels'])
@triton.jit
def dequant_kernel_248(g_idx_ptr, scales_ptr, qweight_ptr, qzeros_ptr,
out_ptr, numels, maxq: tl.constexpr, bits: tl.constexpr, outfeatures:
tl.constexpr, num_groups: tl.constexpr, X_BLOCK: tl.constexpr):
xoffset = tl.program_id(0) * X_BLOCK
x_index = xoffset + tl.arange(0, X_BLOCK)
xmask = x_index < numels
row_idx = x_index // outfeatures
col_idx = x_index % outfeatures
elements_per_feature: tl.constexpr = 32 // bits
g_idx = tl.load(g_idx_ptr + row_idx, None, eviction_policy='evict_last')
qweights = tl.load(qweight_ptr + (col_idx + outfeatures * (row_idx //
elements_per_feature)), None)
wf_weights = row_idx % elements_per_feature * bits
wf_zeros = col_idx % elements_per_feature * bits
tmp1 = g_idx + num_groups
tmp2 = g_idx < 0
tl.device_assert(g_idx >= 0, 'index out of bounds: 0 <= tmp0 < 0')
groups = tl.where(tmp2, tmp1, g_idx)
scales = tl.load(scales_ptr + (col_idx + outfeatures * groups), None).to(tl
.float32)
weights = qweights >> wf_weights
weights = weights & maxq
qzero_ncols: tl.constexpr = outfeatures // elements_per_feature
qzeros = tl.load(qzeros_ptr + (qzero_ncols * groups + col_idx //
elements_per_feature), None, eviction_policy='evict_last')
zeros = qzeros >> wf_zeros
zeros = zeros & maxq
zeros = zeros + 1
weights = weights - zeros
weights = weights.to(tl.float32)
weights = scales * weights
tl.store(out_ptr + x_index, weights, mask=xmask)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/AutoGPTQ/AutoGPTQ/blob/6689349625de973b9ee3016c28c11f32acf7f02c/auto_gptq/nn_modules/triton_utils/dequant.py |
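How one packed weight is recovered, as a host-side sketch mirroring the shifts and masks above (names illustrative):

def unpack_248_reference(qweight_word, qzero_word, row_idx, col_idx, bits, scale):
    maxq = (1 << bits) - 1
    per_word = 32 // bits  # elements_per_feature in the kernel
    w = (qweight_word >> (row_idx % per_word) * bits) & maxq
    z = ((qzero_word >> (col_idx % per_word) * bits) & maxq) + 1
    return scale * (w - z)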
49d86003-ce5d-4605-a9f2-3f4ce9c8722f | fused_attention.py | bortkomencw/jax-triton | examples/fused_attention.py | abfc627619f36f289d72d61bb16e1c9a222d0609 | 0 | @triton.jit
def fused_attention_kernel(q_ptr, k_ptr, v_ptr, tmp_ptr, l_ptr, m_ptr,
out_ptr, stride_qz: tl.constexpr, stride_qh: tl.constexpr, stride_qm:
tl.constexpr, stride_qk: tl.constexpr, stride_kz: tl.constexpr,
stride_kh: tl.constexpr, stride_kk: tl.constexpr, stride_kn: tl.
constexpr, stride_vz: tl.constexpr, stride_vh: tl.constexpr, stride_vk:
tl.constexpr, stride_vn: tl.constexpr, stride_oz: tl.constexpr,
stride_oh: tl.constexpr, stride_om: tl.constexpr, stride_on: tl.
constexpr, z: tl.constexpr, h: tl.constexpr, n_ctx: tl.constexpr,
block_m: tl.constexpr, block_dmodel: tl.constexpr, block_n: tl.constexpr):
"""Flash attention kernel."""
start_qm = tl.program_id(0)
off_hz = tl.program_id(1)
offs_m = start_qm * block_m + tl.arange(0, block_m)
offs_n = tl.arange(0, block_n)
offs_d = tl.arange(0, block_dmodel)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :
] * stride_qk
off_k = off_hz * stride_qh + offs_n[None, :] * stride_kn + offs_d[:, None
] * stride_kk
off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :
] * stride_qk
q_ptrs = q_ptr + off_q
k_ptrs = k_ptr + off_k
v_ptrs = v_ptr + off_v
t_ptrs = tmp_ptr + off_hz * n_ctx + offs_m
acc = tl.zeros([block_m, block_dmodel], dtype=tl.float32)
m_i = tl.zeros([block_m], dtype=tl.float32) - float('inf')
l_i = tl.zeros([block_m], dtype=tl.float32)
q = tl.load(q_ptrs)
for start_n in range(0, start_qm + 1):
k = tl.load(k_ptrs)
qk = tl.dot(q, k)
qk += tl.where(offs_m[:, None] >= start_n * block_n + offs_n[None,
:], 0, float('-inf'))
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
p_scale = beta / l_i_new
p = p * p_scale[:, None]
p = p.to(tl.float16)
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs)
acc = acc * acc_scale[:, None]
v = tl.load(v_ptrs)
acc += tl.dot(p, v)
k_ptrs += block_n * stride_kn
v_ptrs += block_n * stride_vk
l_i = l_i_new
m_i = m_i_new
start_qm = tl.program_id(0)
offs_m = start_qm * block_m + tl.arange(0, block_m)
l_ptrs = l_ptr + off_hz * n_ctx + offs_m
m_ptrs = m_ptr + off_hz * n_ctx + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
offs_n = tl.arange(0, block_dmodel)
off_out = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :
] * stride_on
out_ptrs = out_ptr + off_out
tl.store(out_ptrs, acc)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/bortkomencw/jax-triton/blob/abfc627619f36f289d72d61bb16e1c9a222d0609/examples/fused_attention.py |
f2dadc40-d715-428a-a34d-6968f5971b95 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_delta_rule_bwd_kernel_dqkw(q, k, v, h, do, dh, dq, dk, dv, dw,
offsets, indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr, NT: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK,
i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H * K),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_dw = tl.zeros([BT, BK], dtype=tl.float32)
b_ds = tl.zeros([BT, BT], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * NT * K * V + i_t * K * V, (V,
K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bh * NT * K * V + i_t * K * V,
(V, K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_ds += tl.dot(b_do, tl.trans(b_v), allow_tf32=False)
b_dq += tl.dot(b_do, b_h, allow_tf32=False)
b_dk += tl.dot(b_v, b_dh, allow_tf32=False)
b_dv = tl.load(p_dv, boundary_check=(0, 1))
b_dw += tl.dot(b_dv.to(b_v.dtype), b_h.to(b_v.dtype), allow_tf32=False)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_ds = tl.where(o_i[:, None] >= o_i[None, :], b_ds, 0).to(b_q.dtype)
b_dq += tl.dot(b_ds, b_k, allow_tf32=False)
b_dq *= scale
b_dk += tl.trans(tl.dot(b_q, b_ds, allow_tf32=False))
if HEAD_FIRST:
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dw = tl.make_block_ptr(dw + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dw, -b_dw.to(p_dw.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py |
1541bbe9-40db-47b2-be52-64a94c59d7fe | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4)], key=['BK', 'BV'])
@triton.jit
def fused_recurrent_rwkv6_bwd_kernel_dq(k, v, w, u, do, dq, dq1, h0,
offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
REVERSE: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, USE_OFFSETS: tl
.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.
int64), tl.program_id(2).to(tl.int64)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
o_k = i_k * BK + tl.arange(0, BK)
o_v = i_v * BV + tl.arange(0, BV)
if HEAD_FIRST:
p_k = k + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + o_k
p_v = v + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + o_v
p_w = w + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + o_k
p_do = do + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + o_v
p_dq = dq + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if REVERSE else
0) + o_k
p_dq1 = dq1 + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if
REVERSE else 0) + o_k
else:
p_k = k + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + o_k
p_v = v + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + o_v
p_w = w + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + o_k
p_do = do + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + o_v
p_dq = dq + (i_v * all + bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + o_k
p_dq1 = dq1 + (i_v * all + bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + o_k
p_u = u + i_h * K + o_k
mask_k = o_k < K
mask_v = o_v < V
mask_h = mask_k[:, None] & mask_v[None, :]
b_u = tl.load(p_u, mask=mask_k, other=0).to(tl.float32)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + o_k[:, None] * V + o_v[None, :]
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_w = tl.load(p_w, mask=mask_k, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
b_kv = b_k[:, None] * b_v[None, :]
b_hq = b_h * b_do[None, :]
b_dq = tl.sum(b_hq + b_kv * b_u[:, None] * b_do[None, :], 1) * scale
b_dq1 = tl.sum(b_hq, 1)
b_h = b_h * tl.exp(b_w)[:, None]
b_h += b_kv
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), mask=mask_k)
tl.store(p_dq1, b_dq1.to(p_dq1.dtype.element_ty), mask=mask_k)
p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_w += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_do += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_dq += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_dq1 += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/fused_recurrent.py |
081c52c9-4bb1-427c-bd83-d90875518644 | rwkv_log.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_log.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def wkv_triton_log_space_forward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b,
state_s_abe, state_s_c, wkv_ptr, wkv_s_b, wkv_s_t, wkv_s_c,
state_out_ptr, state_out_s_b, state_out_s_abe, state_out_s_t,
state_out_s_c, chans, tsz, eps: tl.constexpr, log_eps: tl.constexpr,
normalize: tl.constexpr, BLOCK_SIZE_C: tl.constexpr):
b_idx = tl.program_id(0)
c_idx = tl.program_id(1)
cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
cmask = cs < chans
k_ptr = k_ptr + b_idx * k_s_b
v_ptr = v_ptr + b_idx * v_s_b
ln_alpha_p_ptr = state_ptr + b_idx * state_s_b
ln_alpha_m_ptr = state_ptr + b_idx * state_s_b + state_s_abe
ln_beta_ptr = state_ptr + b_idx * state_s_b + 2 * state_s_abe
wkv_ptr = wkv_ptr + b_idx * wkv_s_b
ln_alpha_p_out_ptr = state_out_ptr + b_idx * state_out_s_b
ln_alpha_m_out_ptr = (state_out_ptr + b_idx * state_out_s_b +
state_out_s_abe)
ln_beta_out_ptr = (state_out_ptr + b_idx * state_out_s_b + 2 *
state_out_s_abe)
ln_alpha_p = tl.load(ln_alpha_p_ptr + cs * state_s_c, mask=cmask).to(tl
.float32)
ln_alpha_m = tl.load(ln_alpha_m_ptr + cs * state_s_c, mask=cmask).to(tl
.float32)
ln_beta = tl.load(ln_beta_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
w = tl.load(w_ptr + cs * w_s_c, mask=cmask).to(tl.float32)
u = tl.load(u_ptr + cs * u_s_c, mask=cmask).to(tl.float32)
for t in range(tsz):
kt = tl.load(k_ptr + t * k_s_t + cs * k_s_c, mask=cmask).to(tl.float32)
vt = tl.load(v_ptr + t * v_s_t + cs * v_s_c, mask=cmask).to(tl.float32)
vt_p = tl.maximum(vt, 0) + eps
vt_m = tl.maximum(-vt, 0) + eps
ln_v_p = tl.log(vt_p)
ln_v_m = tl.log(vt_m)
if normalize:
ln_alpha_pm = tl.minimum(ln_alpha_p, ln_alpha_m) - eps
ln_alpha_p = logsubexp(ln_alpha_p, ln_alpha_pm, log_eps)
ln_alpha_m = logsubexp(ln_alpha_m, ln_alpha_pm, log_eps)
ln_wkv_p = logaddexp(u + kt + ln_v_p, ln_alpha_p) - logaddexp(u +
kt, ln_beta)
ln_wkv_m = logaddexp(u + kt + ln_v_m, ln_alpha_m) - logaddexp(u +
kt, ln_beta)
wkv = tl.exp(ln_wkv_p) - tl.exp(ln_wkv_m)
tl.store(wkv_ptr + t * wkv_s_t + cs * wkv_s_c, wkv, mask=cmask)
ln_alpha_p = logaddexp(w + ln_alpha_p, kt + ln_v_p)
ln_alpha_m = logaddexp(w + ln_alpha_m, kt + ln_v_m)
ln_beta = logaddexp(w + ln_beta, kt)
tl.store(ln_alpha_p_out_ptr + t * state_out_s_t + cs *
state_out_s_c, ln_alpha_p, mask=cmask)
tl.store(ln_alpha_m_out_ptr + t * state_out_s_t + cs *
state_out_s_c, ln_alpha_m, mask=cmask)
tl.store(ln_beta_out_ptr + t * state_out_s_t + cs * state_out_s_c,
ln_beta, mask=cmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py |
ad805737-e12a-4994-9da0-2020700b1dad | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=[Config({'BLOCK_SIZE': 512}), Config({'BLOCK_SIZE':
1024}), Config({'BLOCK_SIZE': 2048}), Config({'BLOCK_SIZE': 4096}),
Config({'BLOCK_SIZE': 8192})], key=['N'])
@triton.jit
def _kernel_scale_fp8_row(A, x_scale, w_scale, scaled_out, M, N, stride_am,
stride_an, stride_om, stride_on, BLOCK_SIZE: tl.constexpr) ->None:
"""
Scale each row of A by x_scale and each column of A by w_scale.
Args:
A (Tensor): [m, n] Input tensor to scale.
x_scale (Tensor): [m] Row-wise scale tensor.
w_scale (Tensor): [n] Col-wise scale tensor.
scaled_out (Tensor): [m, n] Output tensor.
M (int): Number of rows.
N (int): Number of columns.
stride_am (int): Stride of m dimension of A.
stride_an (int): Stride of n dimension of A.
stride_om (int): Stride of m dimension of output.
stride_on (int): Stride of n dimension of output.
BLOCK_SIZE (int): Block size for data loads.
"""
pid = tl.program_id(0)
n_offset = tl.arange(0, BLOCK_SIZE)
row_scale = tl.load(x_scale + pid)
for _k in range(0, tl.cdiv(N, BLOCK_SIZE)):
        a = tl.load(A + pid * stride_am + n_offset * stride_an, mask=
            n_offset < N)
        col_scale = tl.load(w_scale + n_offset, mask=n_offset < N)
scaled_a = a * row_scale * col_scale
tl.store(scaled_out + pid * stride_om + n_offset * stride_on,
scaled_a, mask=n_offset < N)
n_offset += BLOCK_SIZE
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
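The whole kernel reduces to an outer-product scaling; a one-line PyTorch reference of the output it produces:

import torch

def scale_fp8_row_reference(a, x_scale, w_scale):
    # out[m, n] = A[m, n] * x_scale[m] * w_scale[n]
    return a * x_scale[:, None] * w_scale[None, :]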
fed5f596-06e2-4fbb-aadc-c5659115de85 | _quantize.py | IBM/qattn | qattn/nn/functional/_quantize.py | 07ceda0aceb9afd299d622325944c0c0471827fe | 0 | @triton.jit
def quantize(x, scale, qmin, qmax) ->tl.tensor:
"""Quantize the tensor given quantization scale and data type.
Args:
x (tl.tensor): floating-point tensor
scale (tl.tensor): quantization scale factor.
qmin (Number): quantization minimum range.
qmax (Number): quantization maximum range
Returns:
tl.tensor: rounded and clamped tensor.
    Note: the result is still in floating point, as a dtype cannot be passed to this function.
Example:
out = quantize(out, scale, -128, 127).to(tl.int8)
"""
return clamp(tl.math.round(x / scale), qmin, qmax)
| {
"Data Type": [
"int8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_quantize.py |
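A host-side NumPy equivalent of the helper above, useful for checking kernels that call it (up to half-way tie handling, since np.rint rounds ties to even):

import numpy as np

def quantize_reference(x, scale, qmin=-128, qmax=127):
    # Round-to-nearest then clamp, matching tl.math.round + clamp.
    return np.clip(np.rint(x / scale), qmin, qmax)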
1e3f0da1-6b39-42d3-9727-50923dfa01bf | masked_load_store.py | ROCm/aotriton | tritonsrc/masked_load_store.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def mload1d(REGS: tl.constexpr, i_base, i_start, i_nums):
offs = tl.arange(0, REGS) + i_start
i_ptrs = i_base + offs
overflow = i_start + REGS - i_nums
i_ptrs_mask = tl.full([REGS], 1, dtype=tl.int1)
i_ptrs_mask = i_ptrs_mask & (offs < i_nums)
return tl.load(i_ptrs, mask=i_ptrs_mask, other=0.0)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/masked_load_store.py |
c4a78501-d4c6-4271-9b23-2445016fa667 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def silu(x):
return x * tl.sigmoid(x)
| {
"Data Type": [],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
7098cab5-9d5a-47f3-ba34-1521893d3e8b | outer_softmax_online.py | iclementine/optimize_softmax | outer_softmax_online.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def prev_multiple_of(a, b):
return tl.cdiv(a, b) * b - b
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/outer_softmax_online.py |
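Worked values for the helper above; note that exact multiples step down a full block. A host mirror for illustration:

def prev_multiple_of(a, b):
    return -(-a // b) * b - b  # ceil-divide, then back off one block

assert prev_multiple_of(10, 4) == 8
assert prev_multiple_of(8, 4) == 4   # exact multiple -> previous block start
assert prev_multiple_of(3, 4) == 0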
980683e0-409c-46c6-a72e-852bc66ab9ba | kl.py | ardywibowo/triton-mode | kernels/kl.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_kl_backward(target_ptr, target_stride, grad_output_ptr,
grad_output_stride, num_classes, BLOCK_SIZE: tl.constexpr, log_target:
tl.constexpr=False):
row_id = tl.program_id(0).to(tl.int64)
target_ptr += row_id * target_stride
grad_output_ptr += row_id * grad_output_stride
base_offsets = tl.arange(0, BLOCK_SIZE)
mask = base_offsets < num_classes
for i in range(0, num_classes, BLOCK_SIZE):
offsets = i + base_offsets
mask = offsets < num_classes
target_val = tl.load(target_ptr + offsets, mask=mask, other=0.0)
if not log_target:
grad = target_val * -1
else:
grad = -tl.exp(target_val)
tl.store(grad_output_ptr + offsets, grad, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/kl.py |
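The stored gradient follows from differentiating pointwise KL, kl = target * (log(target) - log_input), with respect to log_input; a NumPy mirror:

import numpy as np

def kl_grad_wrt_log_input(target, log_target=False):
    # d(kl)/d(log_input) = -target; with log-space targets, -exp(target).
    return -np.exp(target) if log_target else -target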
16cfa389-a9f5-4b12-9d71-271ca2751f42 | prefix_prefill.py | IBM/vllm | vllm/attention/ops/prefix_prefill.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _fwd_kernel_alibi(Q, K, V, K_cache, V_cache, B_Loc, sm_scale, k_scale,
v_scale, B_Start_Loc, B_Seqlen, B_Ctxlen, Alibi_slopes, block_size, x,
Out, stride_b_loc_b, stride_b_loc_s, stride_qbs, stride_qh, stride_qd,
stride_kbs, stride_kh, stride_kd, stride_vbs, stride_vh, stride_vd,
stride_obs, stride_oh, stride_od, stride_k_cache_bs, stride_k_cache_h,
stride_k_cache_d, stride_k_cache_bl, stride_k_cache_x,
stride_v_cache_bs, stride_v_cache_h, stride_v_cache_d,
stride_v_cache_bl, num_queries_per_kv: int, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_DMODEL_PADDED: tl.constexpr, BLOCK_N:
tl.constexpr):
cur_batch = tl.program_id(0)
cur_head = tl.program_id(1)
start_m = tl.program_id(2)
cur_kv_head = cur_head // num_queries_per_kv
cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
block_start_loc = BLOCK_M * start_m
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL_PADDED)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_q = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_qbs + cur_head * stride_qh + offs_d[None, :] * stride_qd
dim_mask = tl.where(tl.arange(0, BLOCK_DMODEL_PADDED) < BLOCK_DMODEL, 1, 0
).to(tl.int1)
q = tl.load(Q + off_q, mask=dim_mask[None, :] & (offs_m[:, None] <
cur_batch_seq_len - cur_batch_ctx_len), other=0.0)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL_PADDED], dtype=tl.float32)
alibi_slope = tl.load(Alibi_slopes + cur_head)
alibi_start_q = tl.arange(0, BLOCK_M) + block_start_loc + cur_batch_ctx_len
alibi_start_k = 0
for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
bn = tl.load(B_Loc + cur_batch * stride_b_loc_b + (start_n + offs_n
) // block_size * stride_b_loc_s, mask=start_n + offs_n <
cur_batch_ctx_len, other=0)
off_k = bn[None, :
] * stride_k_cache_bs + cur_kv_head * stride_k_cache_h + offs_d[
:, None] // x * stride_k_cache_d + (start_n + offs_n[None, :]
) % block_size * stride_k_cache_bl + offs_d[:, None
] % x * stride_k_cache_x
off_v = bn[:, None
] * stride_v_cache_bs + cur_kv_head * stride_v_cache_h + offs_d[
None, :] * stride_v_cache_d + (start_n + offs_n[:, None]
) % block_size * stride_v_cache_bl
k_load = tl.load(K_cache + off_k, mask=dim_mask[:, None] & (start_n +
offs_n[None, :] < cur_batch_ctx_len), other=0.0)
if k_load.dtype.is_fp8():
k = (k_load.to(tl.float32) * k_scale).to(q.dtype)
else:
k = k_load
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk = tl.where(start_n + offs_n[None, :] < cur_batch_ctx_len, qk,
float('-inf'))
qk *= sm_scale
alibi = (tl.arange(0, BLOCK_N)[None, :] + alibi_start_k -
alibi_start_q[:, None]) * alibi_slope
alibi = tl.where((alibi <= 0) & (alibi_start_q[:, None] <
cur_batch_seq_len), alibi, float('-inf'))
qk += alibi
alibi_start_k += BLOCK_N
m_ij = tl.max(qk, 1)
m_i_new = tl.maximum(m_i, m_ij)
p = tl.math.exp(qk - m_i_new[:, None])
l_ij = tl.sum(p, 1)
alpha = tl.math.exp(m_i - m_i_new)
l_i_new = alpha * l_i + l_ij
acc_scale = alpha
acc = acc * acc_scale[:, None]
v_load = tl.load(V_cache + off_v, mask=dim_mask[None, :] & (start_n +
offs_n[:, None] < cur_batch_ctx_len), other=0.0)
if v_load.dtype.is_fp8():
v = (v_load.to(tl.float32) * v_scale).to(q.dtype)
else:
v = v_load
p = p.to(v.dtype)
acc += tl.dot(p, v, allow_tf32=False)
l_i = l_i_new
m_i = m_i_new
off_k = offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh + offs_d[
:, None] * stride_kd
off_v = offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh + offs_d[
None, :] * stride_vd
k_ptrs = K + off_k
v_ptrs = V + off_v
block_mask = tl.where(block_start_loc < cur_batch_seq_len -
cur_batch_ctx_len, 1, 0)
alibi_slope = tl.load(Alibi_slopes + cur_head)
alibi_start_q = tl.arange(0, BLOCK_M) + block_start_loc + cur_batch_ctx_len
alibi_start_k = cur_batch_ctx_len
for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(k_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_kbs, mask=dim_mask[:, None] & (start_n + offs_n[None, :] <
cur_batch_seq_len - cur_batch_ctx_len), other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, allow_tf32=False)
qk *= sm_scale
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
alibi = (tl.arange(0, BLOCK_N)[None, :] + alibi_start_k -
alibi_start_q[:, None]) * alibi_slope
alibi = tl.where((alibi <= 0) & (alibi_start_q[:, None] <
cur_batch_seq_len), alibi, float('-inf'))
qk += alibi
alibi_start_k += BLOCK_N
m_ij = tl.max(qk, 1)
m_i_new = tl.maximum(m_i, m_ij)
p = tl.math.exp(qk - m_i_new[:, None])
l_ij = tl.sum(p, 1)
alpha = tl.math.exp(m_i - m_i_new)
l_i_new = alpha * l_i + l_ij
acc_scale = alpha
acc = acc * acc_scale[:, None]
v = tl.load(v_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_vbs, mask=dim_mask[None, :] & (start_n + offs_n[:, None] <
cur_batch_seq_len - cur_batch_ctx_len), other=0.0)
p = p.to(v.dtype)
acc += tl.dot(p, v, allow_tf32=False)
l_i = l_i_new
m_i = m_i_new
acc = acc / l_i[:, None]
off_o = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_obs + cur_head * stride_oh + offs_d[None, :] * stride_od
out_ptrs = Out + off_o
tl.store(out_ptrs, acc, mask=dim_mask[None, :] & (offs_m[:, None] <
cur_batch_seq_len - cur_batch_ctx_len))
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/prefix_prefill.py |
87c6037c-67a3-485d-86e0-1ffabb737c08 | test_autodiff.py | srush/triton-autodiff | tests/test_autodiff.py | f9d1a04d048e3252bfd222646db7175ad60a3c7c | 0 | @triton.jit
def dcomp2dx(x, b_return):
_return2 = tl.expand_dims(x, 1)
bx = zeroslike(x)
b_return2 = zeroslike(_return2)
_b_return2 = triton_unbroadcast(b_return * x, _return2.shape)
return bx
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py |
1ddbd0b4-73df-450b-8257-024ca9bc7937 | lion.py | Kitsunetic/kitsu | kitsu/nn/optim/lion.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE': 128}, num_warps=4),
triton.Config({'BLOCK_SIZE': 1024}, num_warps=8)], key=['n_elements'],
restore_value=['p_ptr', 'exp_avg_ptr'])
@triton.jit
def update_fn_kernel(p_ptr, grad_ptr, exp_avg_ptr, lr, wd, beta1, beta2,
n_elements, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
p = tl.load(offset_p_ptr, mask=mask)
grad = tl.load(offset_grad_ptr, mask=mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask=mask)
p = p * (1 - lr * wd)
diff = exp_avg - grad
update = diff * beta1 + grad
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
exp_avg = diff * beta2 + grad
tl.store(offset_p_ptr, p, mask=mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/optim/lion.py |
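A scalar mirror of the Lion update above (decoupled weight decay, signed interpolated momentum, EMA refresh); names illustrative:

def lion_step_reference(p, grad, exp_avg, lr, wd, beta1, beta2):
    p = p * (1 - lr * wd)
    update = beta1 * exp_avg + (1 - beta1) * grad  # == (exp_avg - grad) * beta1 + grad
    if update != 0:
        p -= lr * (1 if update > 0 else -1)
    exp_avg = beta2 * exp_avg + (1 - beta2) * grad
    return p, exp_avg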
06298522-b30b-4da3-8f8f-fcaa25129441 | softmax_online_v2_evict.py | iclementine/optimize_softmax | softmax_online_v2_evict.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr
):
pid_m = tl.program_id(0)
m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.
element_ty)
z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty)
for start_n in range(0, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
final_m = tl.max(m, 0)
z = tl.sum(tl.exp(m - final_m) * z)
m = final_m
for start_n in range(0, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf'),
eviction_policy='evict_first').to(output_ptr.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound",
"Low Latency"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_evict.py |
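A sketch of launching this kernel and checking it against torch.softmax (one program per row; TILE_N must be a power of two for tl.arange); assumes a CUDA device and the kernel in scope:

import torch

x = torch.randn(64, 1000, device='cuda', dtype=torch.float32)
y = torch.empty_like(x)
softmax_kernel_online_v2[(x.shape[0],)](y, x, x.shape[0], x.shape[1], TILE_N=128)
torch.testing.assert_close(y, torch.softmax(x, dim=-1))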
c7a39ace-d71e-493b-bba3-fd7c6be6a34f | matrix_multiplication.py | gmgu/study-triton | 4_2d_grid_and_matmul/matrix_multiplication.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def triton_mm(x_ptr, y_ptr, out_ptr, n: tl.constexpr, m: tl.constexpr, p:
tl.constexpr, BLOCK_SIZE: tl.constexpr):
pid0 = tl.program_id(axis=0)
pid1 = tl.program_id(axis=1)
x_row = pid0 * BLOCK_SIZE * m + tl.arange(0, m)
x_col = tl.arange(0, BLOCK_SIZE) * m
x_offset = x_row[None, :] + x_col[:, None]
x_mask = tl.core.full((1, m), True, dtype=tl.int1) and (pid0 *
BLOCK_SIZE + tl.arange(0, BLOCK_SIZE))[:, None] < n
y_row = pid1 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
y_col = tl.arange(0, m) * p
y_offset = y_row[None, :] + y_col[:, None]
y_mask = (pid1 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE))[None, :
] < p and tl.core.full((m, 1), True, dtype=tl.int1)
x = tl.load(x_ptr + x_offset, mask=x_mask, other=0.0)
y = tl.load(y_ptr + y_offset, mask=y_mask, other=0.0)
out = tl.dot(x, y, allow_tf32=False)
out_row = pid0 * BLOCK_SIZE * p + pid1 * BLOCK_SIZE + tl.arange(0,
BLOCK_SIZE)
out_col = tl.arange(0, BLOCK_SIZE) * p
out_offset = out_row[None, :] + out_col[:, None]
out_mask = (pid1 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE))[None, :
] < p and (pid0 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE))[:, None] < n
tl.store(out_ptr + out_offset, out, mask=out_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/4_2d_grid_and_matmul/matrix_multiplication.py |
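A hedged wrapper for triton_mm (wrapper name and block size are assumptions). Because the kernel loads the whole shared dimension at once via tl.arange(0, m), m must be a compile-time power of two small enough to fit on chip, and BLOCK_SIZE feeds tl.dot, so it should be at least 16:

import torch
import triton

def matmul_small_k(x: torch.Tensor, y: torch.Tensor, block: int = 16) -> torch.Tensor:
    n, m = x.shape
    m2, p = y.shape
    assert m == m2
    out = torch.empty((n, p), device=x.device, dtype=x.dtype)
    # 2D grid over output tiles; the shared dimension m is not tiled.
    grid = (triton.cdiv(n, block), triton.cdiv(p, block))
    triton_mm[grid](x, y, out, n, m, p, BLOCK_SIZE=block)
    return out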
5a578960-7583-4a26-9bb1-7f85522153a1 | quantize.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/quantize.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _floor_log2(x):
"""Helper function to efficiently compute floor(log2(x))
Args:
x (Tensor): FP32 Input tensor to operate on.
Returns:
Tensor: Floor of log2(x).
"""
FP32_EXP_MASK: tl.constexpr = 2139095040
FP32_EXP_OFFSET: tl.constexpr = 23
FP32_EXP_BIAS: tl.constexpr = 127
x = x.to(tl.int32, bitcast=True) & FP32_EXP_MASK
x = x >> FP32_EXP_OFFSET
return (x - FP32_EXP_BIAS).to(tl.float32)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/quantize.py |
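The constants encode the FP32 bit layout: 2139095040 is 0x7F800000 (the exponent mask), and for a positive normal float the unbiased exponent equals floor(log2(x)). A hedged host-side check of that identity (pure Python, no Triton; the helper name is mine):

import math
import struct

def floor_log2_bits(x: float) -> int:
    # Reinterpret the float's bits, keep the exponent field, remove the bias.
    bits = struct.unpack('<I', struct.pack('<f', x))[0]
    return ((bits & 0x7F800000) >> 23) - 127

for v in (0.75, 1.0, 5.5, 1024.0):
    assert floor_log2_bits(v) == math.floor(math.log2(v))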
88abece4-2613-439d-ab06-1e7c8c46fe8d | y_10.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_10.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def tenth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size:
tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr,
col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST001 = 1.75869118663323
CONST002 = -1021.9231747532
CONST004 = 4.58257569495584
CONST005 = 6.632439808434
CONST006 = 4.82870805793735
CONST007 = 4.9743298563255
CONST008 = 1545.18657853995
CONST009 = 10.5521471197994
CONST010 = 12.1657520803952
CONST011 = 13.264879616868
CONST013 = 15.7883647328499
CONST014 = 15.7302121789667
CONST015 = 16.4144510752435
CONST016 = 12.8765548211663
CONST017 = 19.3148322317494
CONST018 = 16.7271353825295
CONST019 = 22.862985426232
CONST020 = 535.268332240943
CONST021 = 23.213539329519
CONST022 = 24.6216766128653
CONST023 = 27.2034486491732
CONST024 = 541.428124558099
CONST025 = -994.666978169547
CONST026 = 33.9852909359329
CONST027 = 33.9852909359329
CONST028 = 35.5238206489124
CONST029 = -984.86706451461
CONST030 = -4.82870805793735
CONST031 = 1070.53666448189
CONST032 = -463.555973561985
CONST034 = 53.2857309733686
CONST035 = 53.2857309733686
CONST036 = 56.3871618715269
CONST037 = 56.3871618715269
CONST039 = -1989.33395633909
CONST041 = -450.224943778107
CONST042 = 66.9085415301178
CONST043 = 69.640617988557
CONST044 = 69.640617988557
CONST045 = -437.967074894228
CONST046 = 77.2593289269976
CONST047 = 78.6510608948335
CONST049 = -1969.73412902922
CONST050 = 77.3468749368712
CONST051 = 1624.2843736743
CONST054 = 94.7301883970997
CONST056 = 100.362812295177
CONST057 = -412.04975427732
CONST058 = 101.517773354644
CONST059 = -5.63871618715269
CONST060 = -406.071093418574
CONST061 = 109.491768723557
CONST062 = -393.946825805844
CONST063 = -902.194589944431
CONST065 = -386.296644634988
CONST066 = -386.296644634988
CONST070 = 4.9743298563255
CONST071 = 150.074981259369
CONST074 = 685.526905959165
CONST075 = -337.668707833581
CONST076 = -337.668707833581
CONST077 = 176.178376404427
CONST078 = 176.592751833137
CONST079 = 185.708314636152
CONST080 = -326.441383790078
CONST081 = -1.60956935264578
CONST082 = -1.97354559160624
CONST083 = 196.973412902922
CONST085 = -824.099508554641
CONST087 = -1.97354559160624
CONST088 = -305.867618423396
CONST089 = -305.867618423396
CONST090 = 721.755671955545
CONST091 = -305.867618423396
CONST092 = -300.731529981477
CONST093 = -300.731529981477
CONST094 = -1.75869118663323
CONST095 = -290.050781013267
CONST097 = 225.548647486108
CONST098 = 225.548647486108
CONST099 = -284.190565191299
CONST101 = -278.562471954228
CONST102 = -278.562471954228
CONST103 = -787.893651611688
CONST104 = -787.893651611688
CONST105 = 772.593289269975
CONST106 = 787.893651611688
CONST107 = 787.893651611688
CONST108 = 278.562471954228
CONST109 = -742.833258544608
CONST110 = -1.6581099521085
CONST112 = -1761.78376404427
CONST113 = -223.028471767059
CONST114 = -734.07656835178
CONST116 = -220.222970505534
CONST117 = 1321.3378230332
CONST118 = 1321.3378230332
CONST119 = -203.035546709287
CONST120 = -1.6581099521085
CONST121 = -196.973412902922
CONST122 = -196.973412902922
CONST123 = -696.40617988557
CONST125 = 338.322971229162
CONST126 = -1181.84047741753
CONST127 = -669.085415301178
CONST128 = -669.085415301178
CONST129 = -154.518657853995
CONST130 = -154.518657853995
CONST131 = 360.877835977772
CONST132 = -150.074981259369
CONST133 = -2707.14062279049
CONST134 = -146.815313670356
CONST135 = 880.891882022136
CONST136 = 1392.81235977114
CONST137 = 1392.81235977114
CONST138 = -131.315608601948
CONST139 = -131.315608601948
CONST141 = -125.841697431734
CONST142 = -125.841697431734
CONST143 = -122.415518921279
CONST145 = 406.071093418574
CONST146 = -103.107953136506
CONST147 = -103.107953136506
CONST148 = -101.517773354644
CONST149 = -98.486706451461
CONST150 = 412.04975427732
CONST151 = -94.7301883970997
CONST152 = -1114.24988781691
CONST153 = -88.2963759165686
CONST154 = -1624.2843736743
CONST155 = -82.8889148474622
CONST156 = -82.8889148474622
CONST158 = -590.920238708766
CONST159 = -77.3468749368713
CONST160 = -77.2593289269975
CONST161 = 2486.66744542387
CONST162 = -2626.31217203896
CONST165 = -571.272421632637
CONST166 = -56.2781179722634
CONST167 = -49.2433532257305
CONST168 = -49.2433532257305
CONST169 = 984.86706451461
CONST170 = -541.428124558099
CONST171 = -24.6216766128653
CONST172 = -22.862985426232
CONST173 = -16.4144510752435
CONST174 = -15.7883647328499
CONST175 = -14.0695294930659
CONST176 = -13.264879616868
CONST177 = -11.2774323743054
CONST178 = -14.5025390506634
CONST179 = -6.632439808434
CONST180 = -5.63871618715269
CONST181 = 1532.8847621298
CONST182 = -3.21913870529156
CONST183 = -2.72034486491732
CONST184 = -1.12774323743054
VAR05 = x * x * x * x * x
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR00 = VAR05 * VAR05
VAR01 = VAR05 * VAR06
VAR02 = VAR06 * VAR06
VAR03 = VAR06 * VAR07
VAR04 = VAR07 * VAR07
VAR14 = y * y * y * y * y
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR09 = VAR14 * VAR14
VAR10 = VAR14 * VAR15
VAR11 = VAR15 * VAR15
VAR12 = VAR15 * VAR16
VAR13 = VAR16 * VAR16
VAR23 = z * z * z * z * z
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
VAR18 = VAR23 * VAR23
VAR19 = VAR23 * VAR24
VAR20 = VAR24 * VAR24
VAR21 = VAR24 * VAR25
VAR22 = VAR25 * VAR25
Y00 = (CONST023 * VAR01 * z + CONST023 * VAR19 * x + CONST074 * VAR05 *
VAR23 + CONST080 * VAR03 * VAR25 + CONST080 * VAR07 * VAR21)
Y01 = y * (CONST002 * VAR07 * VAR22 + CONST010 * VAR01 + CONST045 *
VAR03 * VAR26 + CONST061 * VAR20 * x + CONST181 * VAR05 * VAR24)
Y02 = (CONST013 * VAR01 * z + CONST054 * VAR07 * VAR21 + CONST151 *
VAR03 * VAR25 + CONST174 * VAR19 * x + VAR17 * (-CONST039 * VAR05 *
VAR25 + CONST039 * VAR07 * VAR23 + CONST099 * VAR03 * z - CONST099 *
VAR21 * x))
Y03 = VAR16 * (CONST024 * VAR22 * x + CONST051 * VAR05 * VAR26 +
CONST133 * VAR07 * VAR24 + CONST159 * VAR03) + y * (CONST095 *
VAR03 * VAR26 - CONST119 * VAR05 * VAR24 + CONST145 * VAR07 * VAR22 +
CONST148 * VAR20 * x - CONST178 * VAR01)
Y04 = CONST009 * VAR01 * z + VAR03 * (CONST076 * VAR17 * z + CONST175 *
VAR25) + VAR05 * (CONST106 * VAR15 * z + CONST107 * VAR17 * VAR25 +
CONST167 * VAR23) + VAR07 * (CONST106 * VAR17 * VAR23 + CONST162 *
VAR15 * VAR25 + CONST175 * VAR21) + x * (CONST009 * VAR19 +
CONST075 * VAR17 * VAR21 + CONST106 * VAR15 * VAR23)
Y05 = VAR14 * (CONST077 * VAR05 + CONST112 * VAR07 * VAR26 + CONST135 *
VAR24 * x) + VAR16 * (-CONST114 * VAR07 * VAR24 + CONST114 * VAR22 *
x + CONST117 * VAR05 * VAR26 + CONST134 * VAR03) + y * (CONST014 *
VAR01 + CONST047 * VAR20 * x + CONST116 * VAR05 * VAR24 + CONST141 *
VAR03 * VAR26)
Y06 = CONST005 * VAR01 * z + VAR03 * (CONST011 * VAR25 + CONST102 *
VAR17 * z) + VAR05 * (CONST101 * VAR17 * VAR25 - CONST152 * VAR15 * z
) + VAR07 * (CONST108 * VAR17 * VAR23 + CONST109 * VAR13 * z +
CONST176 * VAR21) + x * (CONST108 * VAR17 * VAR21 - CONST109 *
VAR13 * VAR25 + CONST152 * VAR15 * VAR23 + CONST179 * VAR19)
Y07 = VAR12 * (-CONST041 * VAR26 * x + CONST132 * VAR07) + VAR14 * (-
CONST062 * VAR05 + CONST103 * VAR07 * VAR26 + CONST126 * VAR24 * x
) + VAR16 * (CONST083 * VAR05 * VAR26 + CONST121 * VAR03 - CONST158 *
VAR22 * x + CONST169 * VAR07 * VAR24) + y * (CONST015 * VAR01 +
CONST138 * VAR07 * VAR22 + CONST149 * VAR05 * VAR24 + CONST168 *
VAR20 * x)
Y08 = -CONST182 * VAR01 * z + VAR03 * (CONST016 * VAR25 + CONST129 *
VAR17 * z) + VAR05 * (CONST017 * VAR23 + CONST032 * VAR17 * VAR25 +
CONST105 * VAR15 * z) + VAR07 * (CONST008 * VAR15 * VAR25 +
CONST016 * VAR21 + CONST032 * VAR17 * VAR23 + CONST085 * VAR13 * z
) + x * (CONST078 * VAR11 * z + CONST085 * VAR13 * VAR25 + CONST105 *
VAR15 * VAR23 + CONST129 * VAR17 * VAR21 - CONST182 * VAR19)
Y09 = CONST018 * VAR01 * y + VAR03 * (CONST042 * VAR26 * y + CONST113 *
VAR16) + VAR05 * (CONST020 * VAR14 + CONST056 * VAR24 * y +
CONST128 * VAR16 * VAR26) + VAR07 * (CONST031 * VAR14 * VAR26 +
CONST042 * VAR22 * y + CONST088 * VAR12 + CONST127 * VAR16 * VAR24
) + x * (CONST018 * VAR20 * y + CONST020 * VAR14 * VAR24 + CONST026 *
VAR10 + CONST088 * VAR12 * VAR26 + CONST113 * VAR16 * VAR22)
Y10 = (CONST004 * VAR09 + CONST037 * VAR17 * VAR20 + CONST093 * VAR15 *
VAR22 + CONST131 * VAR13 * VAR24 + CONST147 * VAR11 * VAR26 +
CONST184 * VAR00 + CONST184 * VAR18 + VAR02 * (CONST036 * VAR17 +
CONST059 * VAR26) + VAR04 * (CONST092 * VAR15 + CONST098 * VAR17 *
VAR26 + CONST177 * VAR24) + VAR06 * (CONST063 * VAR15 * VAR26 +
CONST125 * VAR17 * VAR24 + CONST131 * VAR13 + CONST177 * VAR22) +
VAR08 * (CONST063 * VAR15 * VAR24 + CONST090 * VAR13 * VAR26 +
CONST097 * VAR17 * VAR22 + CONST146 * VAR11 + CONST180 * VAR20))
Y11 = CONST018 * VAR19 * y + VAR21 * (CONST042 * VAR08 * y + CONST113 *
VAR16) + VAR23 * (CONST020 * VAR14 + CONST056 * VAR06 * y +
CONST128 * VAR08 * VAR16) + VAR25 * (CONST031 * VAR08 * VAR14 +
CONST042 * VAR04 * y + CONST091 * VAR12 + CONST127 * VAR06 * VAR16
) + z * (CONST018 * VAR02 * y + CONST020 * VAR06 * VAR14 + CONST027 *
VAR10 + CONST089 * VAR08 * VAR12 + CONST113 * VAR04 * VAR16)
Y12 = (CONST057 * VAR13 * VAR24 - CONST066 * VAR15 * VAR22 + CONST081 *
VAR00 - CONST081 * VAR18 - CONST153 * VAR11 * VAR26 + CONST160 *
VAR17 * VAR20 + VAR02 * (CONST030 * VAR26 + CONST046 * VAR17) +
VAR04 * (CONST066 * VAR15 - CONST129 * VAR17 * VAR26 + CONST182 *
VAR24) + VAR06 * (CONST065 * VAR15 * VAR26 + CONST150 * VAR13 -
CONST182 * VAR22) + VAR08 * (CONST006 * VAR20 - CONST066 * VAR15 *
VAR24 + CONST130 * VAR17 * VAR22 + CONST153 * VAR11))
Y13 = VAR12 * (CONST041 * VAR08 * z + CONST071 * VAR25) + VAR14 * (
CONST062 * VAR23 + CONST107 * VAR08 * VAR25 - CONST126 * VAR06 * z
) + VAR16 * (CONST029 * VAR06 * VAR25 - CONST121 * VAR21 + CONST122 *
VAR08 * VAR23 + CONST158 * VAR04 * z) + y * (-CONST138 * VAR04 *
VAR25 - CONST149 * VAR06 * VAR23 - CONST168 * VAR02 * z + CONST173 *
VAR19)
Y14 = (CONST044 * VAR17 * VAR20 + CONST079 * VAR13 * VAR24 + CONST101 *
VAR15 * VAR22 + CONST110 * VAR00 + CONST120 * VAR18 + VAR02 * (
CONST043 * VAR17 + CONST070 * VAR26) + VAR04 * (CONST021 * VAR24 +
CONST101 * VAR15 + CONST101 * VAR17 * VAR26) + VAR06 * (CONST021 *
VAR22 + CONST079 * VAR13 + CONST123 * VAR17 * VAR24 + CONST137 *
VAR15 * VAR26) + VAR08 * (CONST007 * VAR20 + CONST101 * VAR17 *
VAR22 + CONST136 * VAR15 * VAR24 + CONST152 * VAR13 * VAR26))
Y15 = VAR14 * (CONST077 * VAR23 + CONST112 * VAR08 * VAR25 + CONST135 *
VAR06 * z) + VAR16 * (CONST114 * VAR04 * z - CONST114 * VAR06 *
VAR25 + CONST118 * VAR08 * VAR23 + CONST134 * VAR21) + y * (
CONST014 * VAR19 + CONST047 * VAR02 * z + CONST116 * VAR06 * VAR23 +
CONST142 * VAR08 * VAR21)
Y16 = (CONST001 * VAR18 + CONST094 * VAR00 - CONST139 * VAR15 * VAR22 +
CONST166 * VAR17 * VAR20 + VAR02 * (CONST019 * VAR26 - CONST166 *
VAR17) + VAR04 * (CONST022 * VAR24 + CONST104 * VAR17 * VAR26 +
CONST139 * VAR15) + VAR06 * (-CONST049 * VAR15 * VAR26 + CONST171 *
VAR22) + VAR08 * (CONST049 * VAR15 * VAR24 + CONST106 * VAR17 *
VAR22 + CONST172 * VAR20))
Y17 = VAR16 * (CONST050 * VAR21 - CONST133 * VAR06 * VAR25 + CONST154 *
VAR08 * VAR23 + CONST170 * VAR04 * z) + y * (CONST058 * VAR02 * z +
CONST060 * VAR04 * VAR25 - CONST095 * VAR08 * VAR21 + CONST119 *
VAR06 * VAR23 + CONST178 * VAR19)
Y18 = (CONST034 * VAR02 * VAR26 + CONST035 * VAR08 * VAR20 + CONST082 *
VAR00 + CONST087 * VAR18 + CONST155 * VAR04 * VAR24 + CONST156 *
VAR06 * VAR22 + VAR17 * (CONST025 * VAR04 * VAR26 + CONST025 *
VAR08 * VAR22 + CONST028 * VAR02 + CONST028 * VAR20 + CONST161 *
VAR06 * VAR24))
Y19 = y * (CONST002 * VAR04 * VAR25 + CONST010 * VAR19 + CONST045 *
VAR08 * VAR21 + CONST061 * VAR02 * z + CONST181 * VAR06 * VAR23)
Y20 = (-CONST143 * VAR02 * VAR26 + CONST143 * VAR08 * VAR20 + CONST165 *
VAR04 * VAR24 - CONST165 * VAR06 * VAR22 + CONST183 * VAR00 -
CONST183 * VAR18)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
tl.store(output_ptr + output_row_offset + 9, Y09, mask=
output_row_offset + 9 < output_numel)
tl.store(output_ptr + output_row_offset + 10, Y10, mask=
output_row_offset + 10 < output_numel)
tl.store(output_ptr + output_row_offset + 11, Y11, mask=
output_row_offset + 11 < output_numel)
tl.store(output_ptr + output_row_offset + 12, Y12, mask=
output_row_offset + 12 < output_numel)
tl.store(output_ptr + output_row_offset + 13, Y13, mask=
output_row_offset + 13 < output_numel)
tl.store(output_ptr + output_row_offset + 14, Y14, mask=
output_row_offset + 14 < output_numel)
tl.store(output_ptr + output_row_offset + 15, Y15, mask=
output_row_offset + 15 < output_numel)
tl.store(output_ptr + output_row_offset + 16, Y16, mask=
output_row_offset + 16 < output_numel)
tl.store(output_ptr + output_row_offset + 17, Y17, mask=
output_row_offset + 17 < output_numel)
tl.store(output_ptr + output_row_offset + 18, Y18, mask=
output_row_offset + 18 < output_numel)
tl.store(output_ptr + output_row_offset + 19, Y19, mask=
output_row_offset + 19 < output_numel)
tl.store(output_ptr + output_row_offset + 20, Y20, mask=
output_row_offset + 20 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_10.py |
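A hedged launch sketch (the wrapper name, contiguous (N, 3) coordinate layout, and dense output are assumptions): the kernel writes the 2l+1 = 21 degree-10 harmonics per point starting at col_offset, so output_stride=21 with col_offset=0 fills an (N, 21) tensor; block_size must be a power of two.

import torch
import triton

def sph_harm_l10(coord: torch.Tensor, block_size: int = 64) -> torch.Tensor:
    N = coord.shape[0]
    out = torch.empty((N, 21), device=coord.device, dtype=coord.dtype)
    grid = (triton.cdiv(N, block_size),)
    tenth_order_fwd[grid](coord, out, block_size, coord.numel(), out.numel(),
                          col_offset=0, output_stride=21)
    return out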
4a890ec8-5225-43c0-953c-6992a9aa4780 | fused_bitlinear.py | sustcsonglin/flash-linear-attention | fla/modules/fused_bitlinear.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'RECOMPUTE_OUTPUT': lambda args: args['Y'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_DRESIDUAL', 'STORE_DRESIDUAL',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_bwd_kernel(X, W, B, Y, DY, DX, DW, DB, DRESIDUAL,
DRESIDUAL_IN, Mean, Rstd, stride_x_row, stride_y_row, stride_dy_row,
stride_dx_row, stride_dres_row, stride_dres_in_row, M, N, eps,
rows_per_program, IS_RMS_NORM: tl.constexpr, BLOCK_N: tl.constexpr,
HAS_DRESIDUAL: tl.constexpr, STORE_DRESIDUAL: tl.constexpr, HAS_WEIGHT:
tl.constexpr, HAS_BIAS: tl.constexpr, RECOMPUTE_OUTPUT: tl.constexpr):
row_block_id = tl.program_id(0)
row_start = row_block_id * rows_per_program
cols = tl.arange(0, BLOCK_N)
mask = cols < N
X += row_start * stride_x_row
if HAS_DRESIDUAL:
DRESIDUAL += row_start * stride_dres_row
if STORE_DRESIDUAL:
DRESIDUAL_IN += row_start * stride_dres_in_row
DY += row_start * stride_dy_row
DX += row_start * stride_dx_row
if RECOMPUTE_OUTPUT:
Y += row_start * stride_y_row
if HAS_WEIGHT:
w = tl.load(W + cols, mask=mask).to(tl.float32)
dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
if RECOMPUTE_OUTPUT and HAS_BIAS:
b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
if HAS_BIAS:
db = tl.zeros((BLOCK_N,), dtype=tl.float32)
row_end = min((row_block_id + 1) * rows_per_program, M)
for row in range(row_start, row_end):
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
if not IS_RMS_NORM:
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
xhat = tl.where(mask, xhat, 0.0)
if RECOMPUTE_OUTPUT:
y = xhat * w if HAS_WEIGHT else xhat
if HAS_BIAS:
y = y + b
scale = 127.0 / tl.maximum(tl.max(tl.abs(y), 0), 1e-05)
y = tl.math.round(y * scale)
y = tl.maximum(tl.minimum(y, 127), -128) / scale
tl.store(Y + cols, y, mask=mask)
wdy = dy
if HAS_WEIGHT:
wdy = dy * w
dw += dy * xhat
if HAS_BIAS:
db += dy
if not IS_RMS_NORM:
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
else:
c1 = tl.sum(xhat * wdy, axis=0) / N
dx = (wdy - xhat * c1) * rstd
if HAS_DRESIDUAL:
dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
dx += dres
if STORE_DRESIDUAL:
tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
tl.store(DX + cols, dx, mask=mask)
X += stride_x_row
if HAS_DRESIDUAL:
DRESIDUAL += stride_dres_row
if STORE_DRESIDUAL:
DRESIDUAL_IN += stride_dres_in_row
if RECOMPUTE_OUTPUT:
Y += stride_y_row
DY += stride_dy_row
DX += stride_dx_row
if HAS_WEIGHT:
tl.store(DW + row_block_id * N + cols, dw, mask=mask)
if HAS_BIAS:
tl.store(DB + row_block_id * N + cols, db, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_bitlinear.py |
292f0e36-f8a0-4131-a7e6-00d1bf2210e9 | lightseq_async_attn_varlen.py | EvolvingLMMs-Lab/LongVA | easy_context/dist_flash_attn/lightseq_async_attn_varlen.py | 76b7c33946936361eeb5a18b2c9fcc5fe63e9434 | 0 | @triton.jit
def _rescale_kernel(peer_m, m, peer_l, l, peer_o, o, L, stride_oz,
stride_oh, stride_om, stride_on, Z, H, N_CTX, seqlen_q_rounded,
seqlen_peer_q_rounded, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.
constexpr, BLOCK_N: tl.constexpr, LAST_STEP: tl.constexpr):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
o_offset = off_hz * stride_oh
peer_o_block_ptr = tl.make_block_ptr(base=peer_o + o_offset, shape=(
N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(
start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(
1, 0))
o_block_ptr = tl.make_block_ptr(base=o + o_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
peer_m_ptrs = peer_m + off_hz * seqlen_peer_q_rounded + offs_m
m_ptrs = m + off_hz * seqlen_q_rounded + offs_m
peer_l_ptrs = peer_l + off_hz * seqlen_peer_q_rounded + offs_m
l_ptrs = l + off_hz * seqlen_q_rounded + offs_m
peer_m_i = tl.load(peer_m_ptrs)
peer_m_i = peer_m_i.to(tl.float32)
m_i = tl.load(m_ptrs)
m_i = m_i.to(tl.float32)
peer_l_i = tl.load(peer_l_ptrs)
peer_l_i = peer_l_i.to(tl.float32)
l_i = tl.load(l_ptrs)
l_i = l_i.to(tl.float32)
peer_acc = tl.load(peer_o_block_ptr)
peer_acc = peer_acc.to(tl.float32)
acc = tl.load(o_block_ptr)
acc = acc.to(tl.float32)
lo = 0
hi = N_CTX
m_i_sync = tl.maximum(m_i, peer_m_i)
alpha = tl.math.exp2(m_i - m_i_sync)
peer_alpha = tl.math.exp2(peer_m_i - m_i_sync)
acc_scale = l_i * 0 + alpha
peer_acc_scale = peer_l_i * 0 + peer_alpha
acc *= acc_scale[:, None]
peer_acc *= peer_acc_scale[:, None]
acc += peer_acc
l_i = l_i * acc_scale + peer_l_i * peer_acc_scale
tl.store(m_ptrs, m_i_sync)
tl.store(l_ptrs, l_i)
if LAST_STEP:
acc = acc / l_i[:, None]
L_ptrs = L + off_hz * N_CTX + offs_m
tl.store(L_ptrs, m_i_sync / 1.44269504 + tl.math.log(l_i))
tl.store(o_block_ptr, acc.to(tl.bfloat16), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"bf16"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/EvolvingLMMs-Lab/LongVA/blob/76b7c33946936361eeb5a18b2c9fcc5fe63e9434/easy_context/dist_flash_attn/lightseq_async_attn_varlen.py |
36831d21-1e3f-457a-b793-a0041c4d3e90 | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def backward_scan(gates, tokens, outputs, SEQUENCE_LENGTH: tl.constexpr):
sequence_id = tl.num_programs(axis=1) * tl.program_id(axis=0
) + tl.program_id(axis=1)
forward_strides = tl.arange(0, SEQUENCE_LENGTH
) + sequence_id * SEQUENCE_LENGTH
reverse_strides = tl.num_programs(axis=0) * tl.num_programs(axis=1
) * SEQUENCE_LENGTH - 1 - forward_strides
tokens_ = tl.load(tokens + reverse_strides)
gates_ = tl.load(gates + reverse_strides)
tuples = pack64(tokens_, gates_)
output_tuples_ = tl.associative_scan(tuples, axis=0, combine_fn=
first_order_op)
output_tokens_, output_gates_ = unpack64(output_tuples_)
tl.store(outputs + reverse_strides, output_tokens_)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
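For orientation, a hedged PyTorch reference of the semantics this scan appears to implement (pack64 and first_order_op are defined elsewhere in the source file; the first-order recurrence below is my reading of them): reversing the flat index turns a forward associative scan into a back-to-front recurrence h[t] = tokens[t] + gates[t] * h[t+1].

import torch

def backward_scan_ref(gates: torch.Tensor, tokens: torch.Tensor) -> torch.Tensor:
    # Sequential reference over the last (time) dimension, scanned from the end.
    out = torch.empty_like(tokens)
    h = torch.zeros_like(tokens[..., 0])
    for t in range(tokens.shape[-1] - 1, -1, -1):
        h = tokens[..., t] + gates[..., t] * h
        out[..., t] = h
    return out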
d0a50eec-53be-4662-90a6-08833c06ea42 | modulation.py | ai-compiler-study/triton-kernels | triton_kernels/ops/modulation.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def triton_modulation_gate_proj(img_ptr, mod_ptr, proj_ptr, output_ptr,
batch_size, head_size, modulation_size, XBLOCK: tl.constexpr):
pid = tl.program_id(0)
xoffset = pid * XBLOCK + tl.arange(0, XBLOCK)[:]
batch_idx = xoffset // batch_size
head_dim_idx = xoffset % head_size
modulation_offset = head_dim_idx + modulation_size * batch_idx
img = tl.load(img_ptr + xoffset, None).to(tl.float32)
mod_gate = tl.load(mod_ptr + (modulation_offset + head_size * 2), None,
eviction_policy='evict_last').to(tl.float32)
proj = tl.load(proj_ptr + xoffset, None).to(tl.float32)
output = img + mod_gate * proj
tl.store(output_ptr + xoffset, output, None)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/modulation.py |
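A hedged launch sketch (the wrapper name and flattened layouts are assumptions): img and proj are flat (batch * head_size,) buffers, and mod packs a modulation vector of modulation_size elements per sample, with the gate chunk starting at offset 2 * head_size as the kernel's indexing shows.

import torch
import triton

def modulation_gate_proj(img, mod, proj, batch_size, head_size,
                         modulation_size, xblock: int = 1024):
    out = torch.empty_like(img)
    grid = (triton.cdiv(img.numel(), xblock),)
    triton_modulation_gate_proj[grid](img, mod, proj, out, batch_size,
                                      head_size, modulation_size, XBLOCK=xblock)
    return out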
377ec787-3a8e-4549-bb1d-3e1b59b3efe5 | test_func.py | makslevental/triton-pp | tests/test_func.py | e2b3e2a35d96007fa1ae129432cf8e99f44588a1 | 0 | @triton.jit
def kernel_0123(arg0: (+T.float32), arg1: (+T.float32), arg2: (+T.float32),
arg3: T.int32):
v0 = tl.get_program_id(axis='x')
c32 = arith.constant(64, T.int32)
v1 = arith.muli(v0, c32)
v2 = arange(0, 64)
v3 = splat(v1, (64,))
v4 = arith.addi(v3, v2)
v5 = splat(arg3, (64,))
v6 = arith.cmpi('slt', v4, v5)
v7 = splat(arg0, (64,))
v8 = addptr(v7, v4)
v9 = load(v8, v6, cache='none', evict='normal', is_volatile=False)
v10 = splat(arg1, (64,))
v11 = addptr(v10, v4)
v12 = load(v11, v6, cache='none', evict='normal', is_volatile=False)
v13 = arith.addf(v9, v12)
v14 = splat(arg2, (64,))
v15 = addptr(v14, v4)
store(v15, v13, v6)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/makslevental/triton-pp/blob/e2b3e2a35d96007fa1ae129432cf8e99f44588a1/tests/test_func.py |
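The builder calls above assemble Triton IR directly rather than using the DSL; for orientation, a hedged plain-Triton equivalent of what they construct, namely a masked 64-element vector add (out = a + b):

import triton
import triton.language as tl

@triton.jit
def vector_add_64(a_ptr, b_ptr, out_ptr, n):
    pid = tl.program_id(axis=0)
    offs = pid * 64 + tl.arange(0, 64)   # c32 = 64, arith.muli, arith.addi
    mask = offs < n                      # arith.cmpi 'slt'
    a = tl.load(a_ptr + offs, mask=mask)
    b = tl.load(b_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, a + b, mask=mask)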
41df42df-c59d-449e-99bd-b4e436d8152f | fused_bitlinear.py | sustcsonglin/flash-linear-attention | fla/modules/fused_bitlinear.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_RESIDUAL', 'STORE_RESIDUAL_OUT',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_fwd_kernel_quant(X, Y, W, B, RESIDUAL, RESIDUAL_OUT, Mean,
Rstd, stride_x_row, stride_y_row, stride_res_row, stride_res_out_row, N,
eps, IS_RMS_NORM: tl.constexpr, BLOCK_N: tl.constexpr, HAS_RESIDUAL: tl
.constexpr, STORE_RESIDUAL_OUT: tl.constexpr, HAS_WEIGHT: tl.constexpr,
HAS_BIAS: tl.constexpr):
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_y_row
if HAS_RESIDUAL:
RESIDUAL += row * stride_res_row
if STORE_RESIDUAL_OUT:
RESIDUAL_OUT += row * stride_res_out_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
if HAS_RESIDUAL:
residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl
.float32)
x += residual
if STORE_RESIDUAL_OUT:
tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
if not IS_RMS_NORM:
mean = tl.sum(x, axis=0) / N
tl.store(Mean + row, mean)
xbar = tl.where(cols < N, x - mean, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
else:
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
tl.store(Rstd + row, rstd)
mask = cols < N
if HAS_WEIGHT:
w = tl.load(W + cols, mask=mask).to(tl.float32)
if HAS_BIAS:
b = tl.load(B + cols, mask=mask).to(tl.float32)
x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
y = x_hat * w if HAS_WEIGHT else x_hat
if HAS_BIAS:
y = y + b
scale = 127.0 / tl.maximum(tl.max(tl.abs(y), 0), 1e-05)
y = tl.math.round(y * scale)
y = tl.maximum(tl.minimum(y, 127), -128) / scale
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32",
"bf16",
"int8"
],
"Functionality": [
"Normalization",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access",
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Batch-Oriented"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_bitlinear.py |
47e6ffc6-ded8-4623-a79b-56645cfcad62 | GEMM.py | Forkxz/TritonDeepLearningKernel | kernel/GEMM.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.autotune(configs=get_cuda_autotune_config(), key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak,
stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr, ALLOWTF32: tl.constexpr, PRECISIONMATCH: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
a_tile = tl.make_block_ptr(a_ptr, shape=(M, K), strides=(stride_am,
stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(
BLOCK_SIZE_M, BLOCK_SIZE_K), order=(0, 1))
b_tile = tl.make_block_ptr(b_ptr, shape=(K, N), strides=(stride_bk,
stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N), block_shape=(
BLOCK_SIZE_K, BLOCK_SIZE_N), order=(0, 1))
ASM: tl.constexpr = 'cvt.rna.tf32.f32 $0, $1;'
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_tile, boundary_check=(1,), padding_option='zero')
b = tl.load(b_tile, boundary_check=(0,), padding_option='zero')
if ALLOWTF32 and PRECISIONMATCH:
a = tl.inline_asm_elementwise(ASM, '=r, r', [a], dtype=tl.
float32, is_pure=True, pack=1)
b = tl.inline_asm_elementwise(ASM, '=r, r', [b], dtype=tl.
float32, is_pure=True, pack=1)
accumulator = tl.dot(a, b, accumulator, input_precision='tf32' if
ALLOWTF32 else 'ieee')
a_tile = tl.advance(a_tile, (0, BLOCK_SIZE_K))
b_tile = tl.advance(b_tile, (BLOCK_SIZE_K, 0))
c_tile = tl.make_block_ptr(c_ptr, shape=(M, N), strides=(stride_cm,
stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
c = accumulator.to(c_tile.dtype.element_ty)
tl.store(c_tile, c, boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/GEMM.py |
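The inline 'cvt.rna.tf32.f32' explicitly rounds A and B tiles to TF32 before tl.dot, presumably so results match other TF32 paths that round inputs first (hence PRECISIONMATCH). A hedged launch sketch, assuming get_cuda_autotune_config supplies the block sizes and GROUP_SIZE_M; the wrapper name is mine:

import torch
import triton

def matmul(a: torch.Tensor, b: torch.Tensor, allow_tf32: bool = True,
           precision_match: bool = True) -> torch.Tensor:
    M, K = a.shape
    K2, N = b.shape
    assert K == K2
    c = torch.empty((M, N), device=a.device, dtype=a.dtype)
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE_M']) *
                         triton.cdiv(N, meta['BLOCK_SIZE_N']),)
    matmul_kernel[grid](a, b, c, M, N, K,
                        a.stride(0), a.stride(1), b.stride(0), b.stride(1),
                        c.stride(0), c.stride(1),
                        ALLOWTF32=allow_tf32, PRECISIONMATCH=precision_match)
    return c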
94697f0e-3719-4056-9cc6-daae59cf6664 | geglu.py | Kitsunetic/kitsu | kitsu/nn/geglu.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def gelu_backward(x):
x2 = x * x
tanh_ = tanh(_kAlpha * x * (1 + 0.044715 * x2))
dx = 0.5 * (x * (1 - tanh_ * tanh_) * (0.1070322244089 * x2 +
0.797884560802865) + tanh_ + 1)
return dx
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/geglu.py |
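This is the derivative of the tanh-approximate GELU, 0.5 * x * (1 + tanh(k * x * (1 + 0.044715 * x^2))) with _kAlpha = k = sqrt(2/pi) ~ 0.797884560802865; the constant 0.1070322244089 is 3 * 0.044715 * k. A hedged autograd check of the closed form (pure PyTorch, assuming _kAlpha has that value):

import math
import torch

x = torch.randn(1024, dtype=torch.float64, requires_grad=True)
k = math.sqrt(2.0 / math.pi)
t = torch.tanh(k * x * (1.0 + 0.044715 * x * x))
(0.5 * x * (1.0 + t)).sum().backward()   # autograd gradient of tanh-GELU
dx = 0.5 * (x * (1 - t * t) * (0.1070322244089 * x * x + 0.797884560802865) + t + 1)
assert torch.allclose(x.grad, dx.detach(), atol=1e-9)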
86772575-6e51-43f3-939c-f76536b27ef0 | linear_kernels.py | BobMcDear/attorch | attorch/linear_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=[linear_forward_config(32, 32, 32, n_warps=2,
n_stages=2), linear_forward_config(64, 32, 32, n_warps=2, n_stages=5),
linear_forward_config(64, 32, 128, n_warps=4, n_stages=4),
linear_forward_config(64, 32, 256, n_warps=4, n_stages=4),
linear_forward_config(128, 32, 32, n_warps=4, n_stages=4),
linear_forward_config(128, 32, 64, n_warps=4, n_stages=4),
linear_forward_config(128, 32, 128, n_warps=4, n_stages=4),
linear_forward_config(128, 64, 256, n_warps=8, n_stages=3)], key=[
'batch_dim', 'in_feat_dim', 'out_feat_dim', 'fp16'])
@triton.heuristics({'tf32': lambda _: allow_tf32()})
@triton.jit
def linear_forward_kernel(input_pointer, weight_pointer, bias_pointer,
pre_act_pointer, output_pointer, batch_dim, in_feat_dim, out_feat_dim,
input_batch_stride, input_in_feat_stride, weight_in_feat_stride,
weight_out_feat_stride, pre_act_batch_stride, pre_act_out_feat_stride,
output_batch_stride, output_out_feat_stride, param, add_bias: tl.
constexpr, act_func: tl.constexpr, save_pre_act: tl.constexpr, fp16: tl
.constexpr, tf32: tl.constexpr, BLOCK_SIZE_BATCH: tl.constexpr,
BLOCK_SIZE_IN_FEAT: tl.constexpr, BLOCK_SIZE_OUT_FEAT: tl.constexpr,
GROUP_SIZE_BATCH: tl.constexpr):
"""
Linearly transforms the input using weights, optionally adding bias
and fusing an activation function.
Args:
input_pointer: Pointer to the input to transform.
The input must be of shape [batch_dim, in_feat_dim].
        weight_pointer: Pointer to the weights the input is transformed by.
The weights must be of shape [in_feat_dim, out_feat_dim].
bias_pointer: Pointer to an optional additive bias vector.
The bias vector, if provided, must be of shape [out_feat_dim].
pre_act_pointer: Pointer to an optional container the pre-activation input
is written to if act_func is not None and save_pre_act is True.
The container, if provided, must be of shape [batch_dim, out_feat_dim].
output_pointer: Pointer to a container the result is written to.
The container must be of shape [batch_dim, out_feat_dim].
batch_dim: Batch dimension of the input and output.
in_feat_dim: Dimensionality of the input features.
out_feat_dim: Dimensionality of the output features.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_in_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
weight_in_feat_stride: Stride necessary to jump one element along the
weights' input feature dimension.
weight_out_feat_stride: Stride necessary to jump one element along the
weights' output feature dimension.
pre_act_batch_stride: Stride necessary to jump one element along the
pre-activation input container's batch dimension.
pre_act_out_feat_stride: Stride necessary to jump one element along the
pre-activation input container's feature dimension.
output_batch_stride: Stride necessary to jump one element along the
output container's batch dimension.
output_out_feat_stride: Stride necessary to jump one element along the
output container's feature dimension.
param: Parameter in the case of parameterized activation functions.
add_bias: Flag for adding a bias vector.
act_func: Name of activation function to apply, with None for identity.
Options are 'sigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardswish', 'selu', 'mish', and 'leaky_relu'.
save_pre_act: Flag for saving the pre-activation input.
fp16: Flag for loading the input, weights, and bias in FP16.
tf32: Flag for performing matrix products in TF32.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_IN_FEAT: Block size across the input feature dimension.
BLOCK_SIZE_OUT_FEAT: Block size across the output feature dimension.
GROUP_SIZE_BATCH: Group size across the batch dimension.
"""
pid = tl.program_id(axis=0)
n_batch_pids = tl.cdiv(batch_dim, BLOCK_SIZE_BATCH)
n_out_feat_pids = tl.cdiv(out_feat_dim, BLOCK_SIZE_OUT_FEAT)
pids_per_group = GROUP_SIZE_BATCH * n_out_feat_pids
group_id = pid // pids_per_group
first_batch_pid = group_id * GROUP_SIZE_BATCH
GROUP_SIZE_BATCH = min(n_batch_pids - first_batch_pid, GROUP_SIZE_BATCH)
batch_pid = first_batch_pid + pid % GROUP_SIZE_BATCH
out_feat_pid = pid % pids_per_group // GROUP_SIZE_BATCH
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
out_feat_offset = out_feat_pid * BLOCK_SIZE_OUT_FEAT + tl.arange(0,
BLOCK_SIZE_OUT_FEAT)
batch_mask = batch_offset < batch_dim
out_feat_mask = out_feat_offset < out_feat_dim
input_pointer += input_batch_stride * batch_offset[:, None]
weight_pointer += weight_out_feat_stride * out_feat_offset[None, :]
accum = tl.zeros((BLOCK_SIZE_BATCH, BLOCK_SIZE_OUT_FEAT), dtype=tl.float32)
for block_ind in range(0, tl.cdiv(in_feat_dim, BLOCK_SIZE_IN_FEAT)):
in_feat_offset = block_ind * BLOCK_SIZE_IN_FEAT + tl.arange(0,
BLOCK_SIZE_IN_FEAT)
in_feat_mask = in_feat_offset < in_feat_dim
curr_input_pointer = (input_pointer + input_in_feat_stride *
in_feat_offset[None, :])
curr_weight_pointer = (weight_pointer + weight_in_feat_stride *
in_feat_offset[:, None])
input_block = tl.load(curr_input_pointer, mask=batch_mask[:, None] &
in_feat_mask[None, :])
weight_block = tl.load(curr_weight_pointer, mask=out_feat_mask[None,
:] & in_feat_mask[:, None])
if fp16:
input_block = input_block.to(tl.float16)
weight_block = weight_block.to(tl.float16)
accum += tl.dot(input_block, weight_block, allow_tf32=tf32)
if add_bias:
bias = tl.load(bias_pointer + out_feat_offset, mask=out_feat_mask)
if fp16:
bias = bias.to(tl.float16)
accum += bias[None, :]
if act_func is not None:
if save_pre_act:
pre_act_pointer += pre_act_batch_stride * batch_offset[:, None
] + pre_act_out_feat_stride * out_feat_offset[None, :]
tl.store(pre_act_pointer, accum, mask=batch_mask[:, None] &
out_feat_mask[None, :])
accum = apply_act_func(accum, None, None, None, param, act_func, False)
output_pointer += output_batch_stride * batch_offset[:, None
] + output_out_feat_stride * out_feat_offset[None, :]
tl.store(output_pointer, accum, mask=batch_mask[:, None] &
out_feat_mask[None, :])
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Activation Functions"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/linear_kernels.py |
4450f891-1917-44af-86d7-af541511fc5f | real_rnn_tie_input_gate.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def bwd_sequential_scan_fused(grad_output, v, f, h, B, L, C, BLOCK_M: tl.
constexpr):
offset_b = tl.program_id(0)
if offset_b >= B:
return
offset_n = tl.program_id(1)
ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + (L - 1
) * C + offset_n * BLOCK_M
grad_h = tl.zeros([BLOCK_M], dtype=tl.float32)
for time_step in range(L - 1, -1, -1):
grad = tl.load(grad_output + ptr).to(tl.float32)
grad_h += grad
decay = tl.load(f + ptr).to(tl.float32)
decay = tl.sigmoid(decay)
input = tl.load(v + ptr).to(tl.float32)
grad_v = (1 - decay) * grad_h
tl.store(v + ptr, grad_v.to(v.dtype.element_ty))
hidden_state = tl.load(h + ptr - C, mask=ptr >= offset_b * L * C +
C, other=0.0).to(tl.float32)
grad_f = grad_h * (hidden_state - input) * decay * (1 - decay)
tl.store(f + ptr, grad_f.to(f.dtype.element_ty))
grad_h *= decay
ptr -= C
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py |
e799356f-0cb1-45e9-99e3-609fd4815e15 | layernorm.py | dame-cell/Triformer | triformer/layernorm.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.jit
def layernorm_forward(Y, Y_row_stride, X, X_row_stride, W, b, r, mu, n_cols,
eps, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
Y += row_idx * Y_row_stride
X += row_idx * X_row_stride
r += row_idx
mu += row_idx
X_row = tl.load(X + col_offsets, mask=mask, other=0).to(tl.float32)
W_row = tl.load(W + col_offsets, mask=mask, other=0).to(tl.float32)
b_row = tl.load(b + col_offsets, mask=mask, other=0).to(tl.float32)
mean_X = tl.sum(X_row, axis=0) / n_cols
XX = X_row - mean_X
row_var = tl.sum(XX * XX, axis=0) / n_cols
inv_var = tl.math.rsqrt(row_var + eps)
tl.store(r, inv_var)
tl.store(mu, mean_X)
output = XX * inv_var * W_row + b_row
tl.store(Y + col_offsets, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access",
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/layernorm.py |
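A hedged launch sketch (the wrapper name and fp32 statistics buffers are assumptions): one program per row, r and mu receive the per-row inverse standard deviation and mean for the backward pass, and BLOCK_SIZE is the next power of two covering the row.

import torch
import triton

def layernorm_fwd(x, w, b, eps: float = 1e-5):
    n_rows, n_cols = x.shape
    y = torch.empty_like(x)
    r = torch.empty(n_rows, device=x.device, dtype=torch.float32)
    mu = torch.empty(n_rows, device=x.device, dtype=torch.float32)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    layernorm_forward[(n_rows,)](y, y.stride(0), x, x.stride(0), w, b, r, mu,
                                 n_cols, eps, BLOCK_SIZE=BLOCK_SIZE)
    return y, r, mu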
8c552f13-b676-4f8c-a790-9b00891ef3c0 | logits_processor.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/logits_processor.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _triton_logits_processor_kernel(scores, penalty, input_ids_ptr,
input_ids_length, num_tokens: tl.constexpr, vocab_size: tl.constexpr,
max_ids_length: tl.constexpr, power_2_of_vocab_size: tl.constexpr,
power_2_of_max_ids_length: tl.constexpr, penalty_ty: tl.constexpr):
token_id = tl.program_id(0)
penalty_val = tl.load(penalty + token_id)
if tl.abs(penalty_val - 1.0) > 1e-09:
input_ids_address = tl.load(input_ids_ptr + token_id).to(tl.
pointer_type(tl.int64))
current_input_ids_length = tl.load(input_ids_length + token_id)
ids_offs = tl.arange(0, power_2_of_max_ids_length)
ids = tl.load(input_ids_address + ids_offs, mask=ids_offs <
current_input_ids_length, other=vocab_size)
ori_scores = tl.load(scores + token_id * vocab_size + ids[None, :],
mask=ids[None, :] < vocab_size, other=0.0)
tl.debug_barrier()
if penalty_ty == 'REPETITION':
new_scores = tl.where(ori_scores <= 0, ori_scores * penalty_val,
ori_scores / penalty_val)
elif penalty_ty == 'PRESENCE':
new_scores = ori_scores - penalty_val
tl.store(scores + token_id * vocab_size + ids[None, :], new_scores,
mask=ids[None, :] < vocab_size)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/logits_processor.py |
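A hedged PyTorch reference of the 'REPETITION' branch (the function name is mine, and the kernel's per-token pointer table is replaced by a dense ids tensor for clarity): scores at previously seen token ids are divided by the penalty when positive and multiplied by it otherwise.

import torch

def repetition_penalty_ref(scores: torch.Tensor, ids: torch.Tensor,
                           penalty: torch.Tensor) -> torch.Tensor:
    # scores: (num_tokens, vocab), ids: (num_tokens, L) int64, penalty: (num_tokens, 1)
    picked = scores.gather(-1, ids)
    picked = torch.where(picked <= 0, picked * penalty, picked / penalty)
    return scores.scatter(-1, ids, picked)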
fda58767-fffe-4d57-a19a-4e44a1f929aa | triton_call_test.py | jax-ml/jax-triton | tests/triton_call_test.py | 859cc392bec876d132bd0790ea6c00b6c246dd2b | 0 | @triton.jit
def silly_add_kernel(x_ptr, y_ptr, output_ptr):
pid = tl.program_id(axis=0)
tl.store(output_ptr + pid, tl.load(x_ptr + pid) + tl.load(y_ptr + pid))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/tests/triton_call_test.py |
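The kernel launches one program per element, so it only makes sense as a test fixture. A hedged host-side call (the wrapper name is mine):

import torch

def silly_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    silly_add_kernel[(x.numel(),)](x, y, out)
    return out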
2e6f1a97-55af-4d82-a3eb-14a20678e5e9 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def fused_recurrent_gated_delta_rule_fwd_kernel(q, k, v, g, beta, o, h0, ht,
offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr,
IS_BETA_HEADWISE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
if HEAD_FIRST:
p_q = q + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_k = k + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + i_nh * T * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + i_nh * T
p_g = g + i_nh * T
p_o = o + (i_k * B * H + i_nh) * T * V + i_v * BV + tl.arange(0, BV)
else:
p_q = q + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_k = k + (bos * H + i_h) * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
if IS_BETA_HEADWISE:
p_beta = beta + (bos * H + i_h) * V + i_v * BV + tl.arange(0, BV)
else:
p_beta = beta + bos * H + i_h
p_g = g + bos * H + i_h
p_o = o + ((i_k * all + bos) * H + i_h) * V + i_v * BV + tl.arange(
0, BV)
mask_k = i_k * BK + tl.arange(0, BK) < K
mask_v = i_v * BV + tl.arange(0, BV) < V
mask_h = mask_k[None, :] & mask_v[:, None]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_g = tl.load(p_g).to(tl.float32)
b_h *= tl.exp(b_g)
b_v_minus = tl.sum(b_h * b_k[None, :], axis=1)
b_v -= b_v_minus
if IS_BETA_HEADWISE:
b_beta = tl.load(p_beta, mask=mask_v, other=0).to(tl.float32)
else:
b_beta = tl.load(p_beta).to(tl.float32)
b_v *= b_beta
b_h += b_k[None, :] * b_v[:, None]
b_o = b_h * b_q[None, :]
b_o = tl.sum(b_o, axis=1)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_v)
p_q += K if HEAD_FIRST else H * K
p_k += K if HEAD_FIRST else H * K
p_o += V if HEAD_FIRST else H * V
p_v += V if HEAD_FIRST else H * V
p_g += 1 if HEAD_FIRST else H
p_beta += (1 if HEAD_FIRST else H) * (V if IS_BETA_HEADWISE else 1)
if STORE_FINAL_STATE:
p_ht = ht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/fused_recurrent.py |
a5ecc712-03f7-4e99-82f2-544ad1ac17f3 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def stabilization_scan_op(x1, y1, x2, y2):
z1 = x2 + x1
z2 = tl.maximum(x2 + y1, y2)
return z1, z2
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
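tl.associative_scan requires its combine function to be associative. Reading each (x, y) pair as a prefix sum and a shifted running max, the op is combine((x1, y1), (x2, y2)) = (x1 + x2, max(x2 + y1, y2)); a hedged brute-force associativity check in plain Python (the carried semantics are my reading of the surrounding file):

import random

def combine(p, q):
    return p[0] + q[0], max(q[0] + p[1], q[1])

for _ in range(100):
    a, b, c = [(random.random(), random.random()) for _ in range(3)]
    lhs = combine(combine(a, b), c)
    rhs = combine(a, combine(b, c))
    assert all(abs(u - v) < 1e-9 for u, v in zip(lhs, rhs))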
d786f710-c461-4bf9-9c41-578935182b95 | rotary.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/rotary.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_rotary_kernel(input_ptr, frequencies_ptr, stride_0, stride_1,
stride_2, rotary_dim: tl.constexpr, num_heads: tl.constexpr,
rotary_block_size: tl.constexpr, head_block_size: tl.constexpr,
backward: tl.constexpr):
pid_0 = tl.program_id(axis=0)
pid_1 = tl.program_id(axis=1)
pid_2 = tl.program_id(axis=2)
offsets = tl.arange(0, rotary_block_size)
head_offsets = pid_2 * head_block_size + tl.arange(0, head_block_size)[
:, None]
input_offsets = (stride_0 * pid_0 + stride_1 * pid_1 + stride_2 *
head_offsets + offsets[None, :])
input_re_ptr = input_ptr + input_offsets
input_im_ptr = input_re_ptr + rotary_dim
if (rotary_block_size % rotary_dim == 0 and num_heads % head_block_size ==
0):
input_re = tl.load(input_re_ptr).to(tl.float32)
input_im = tl.load(input_im_ptr).to(tl.float32)
else:
mask = (offsets[None, :] < rotary_dim) & (head_offsets < num_heads)
input_re = tl.load(input_re_ptr, mask=mask).to(tl.float32)
input_im = tl.load(input_im_ptr, mask=mask).to(tl.float32)
frequencies_offsets = 2 * rotary_dim * pid_1 + offsets
frequencies_re_ptr = frequencies_ptr + frequencies_offsets
frequencies_im_ptr = frequencies_re_ptr + rotary_dim
frequencies_re = tl.load(frequencies_re_ptr)
frequencies_im = tl.load(frequencies_im_ptr)
if backward:
out_re = input_re * frequencies_re + input_im * frequencies_im
out_im = input_im * frequencies_re - input_re * frequencies_im
else:
out_re = input_re * frequencies_re - input_im * frequencies_im
out_im = input_im * frequencies_re + input_re * frequencies_im
if (rotary_block_size % rotary_dim == 0 and num_heads % head_block_size ==
0):
tl.store(input_re_ptr, out_re)
tl.store(input_im_ptr, out_im)
else:
tl.store(input_re_ptr, out_re, mask=mask)
tl.store(input_im_ptr, out_im, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Transposed Access",
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/rotary.py |
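A hedged PyTorch reference of the in-place rotation (names assumed): channels [0:rotary_dim] and [rotary_dim:2*rotary_dim] act as real and imaginary halves, the frequencies buffer stores (cos, sin) pairs, and backward applies the conjugate rotation.

import torch

def rotary_ref(x_re, x_im, cos, sin, backward: bool = False):
    if backward:
        return x_re * cos + x_im * sin, x_im * cos - x_re * sin
    return x_re * cos - x_im * sin, x_im * cos + x_re * sin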
ae472976-786a-4e37-9671-dccc1c8d91d6 | triton_ops.py | huyz2023/2by4-pretrain | sparse/triton_ops.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _sparse24_triton(dense_ptr, sparse_ptr, mask_ptr, dense_row_stride,
sparse_row_stride, mask_row_stride, dense_col_stride, sparse_col_stride,
mask_col_stride, m, k, BLOCK_SIZE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr
):
if ARRAY_LAYOUT == 'row':
row_idx = tl.program_id(0)
col_idx = tl.program_id(1) * 4 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE
) * 4
mask = col_idx < k
elif ARRAY_LAYOUT == 'col':
row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE
col_idx = tl.program_id(1) * 4
mask = row_idx < m
dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
0) * dense_col_stride, mask=mask)
dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
1) * dense_col_stride, mask=mask)
dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
2) * dense_col_stride, mask=mask)
dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx +
3) * dense_col_stride, mask=mask)
dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = _sparse24(dense_40
, dense_41, dense_42, dense_43)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) *
sparse_col_stride, dense_40, mask=mask & m0)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) *
sparse_col_stride, dense_41, mask=mask & m1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) *
sparse_col_stride, dense_42, mask=mask & m2)
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) *
sparse_col_stride, dense_43, mask=mask & m3)
tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 0) *
mask_col_stride, m0, mask=mask & m0)
tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 1) *
mask_col_stride, m1, mask=mask & m1)
tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 2) *
mask_col_stride, m2, mask=mask & m2)
tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 3) *
mask_col_stride, m3, mask=mask & m3)
| {
"Data Type": [
"fp32"
],
"Functionality": [],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/triton_ops.py |
76f2a140-03c5-435a-ad18-589b322670ed | 03-matrix-multiplication.py | triton-lang/triton | python/tutorials/03-matrix-multiplication.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.autotune(configs=get_autotune_config(), key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak,
stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr, ACTIVATION: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] *
stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K,
other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=0.0)
accumulator = tl.dot(a, b, accumulator)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if ACTIVATION == 'leaky_relu':
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :
]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/03-matrix-multiplication.py |
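A host-side wrapper in the style of the tutorial this kernel comes from (a sketch, not a verbatim copy; the autotuner supplies the three block sizes and GROUP_SIZE_M, and the kernel stores fp16):

import torch
import triton

def matmul(a: torch.Tensor, b: torch.Tensor, activation: str = '') -> torch.Tensor:
    M, K = a.shape
    K2, N = b.shape
    assert K == K2
    c = torch.empty((M, N), device=a.device, dtype=torch.float16)
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) *
                         triton.cdiv(N, META['BLOCK_SIZE_N']),)
    matmul_kernel[grid](a, b, c, M, N, K,
                        a.stride(0), a.stride(1), b.stride(0), b.stride(1),
                        c.stride(0), c.stride(1), ACTIVATION=activation)
    return c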
d0523132-1573-4bfc-ad1a-db75a75ce64e | triton_chunk.py | NX-AI/xlstm-jax | xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py | 6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7 | 0 | @triton.jit
def chunk_mlstm_bwd_kernel_dC(q, f, m, m_total, norm, dh, dC, final_dC,
final_m, initial_dC, initial_m, s_qk_h, s_qk_t, s_qk_d, s_vh_h, s_vh_t,
s_vh_d, s_C_h, s_C_t, scale, H: tl.constexpr, T: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr, NT: tl.constexpr):
i_k, i_v, i_bC = tl.program_id(0), tl.program_id(1), tl.program_id(2)
p_dC = tl.make_block_ptr(final_dC + i_bC * K * V, (K, V), (s_C_t, 1), (
i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dC = tl.load(p_dC, boundary_check=(0, 1))
b_m = tl.load(final_m + i_bC)
for i_t in range(NT - 1, -1, -1):
p_q = tl.make_block_ptr(q + i_bC * s_qk_h, (K, T), (s_qk_d, s_qk_t),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bC * s_vh_h, (T, V), (s_vh_t,
s_vh_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dC = tl.make_block_ptr(dC + i_bC * s_C_h + i_t * K * V, (K, V), (
s_C_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dC, b_dC.to(p_dC.dtype.element_ty), boundary_check=(0, 1))
b_f_last = tl.load(f + i_bC * T + i_t * BT + BT - 1)
b_f = tl.load(f + i_bC * T + i_t * BT + tl.arange(0, BT))
b_m_p = tl.load(m + i_bC * (NT + 1) + i_t)
b_m_total = tl.load(m_total + i_bC * T + i_t * BT + tl.arange(0, BT))
b_norm = tl.load(norm + i_bC * T + i_t * BT + tl.arange(0, BT))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale * tl.math.exp2(b_f + b_m_p - b_m_total)[None, :]
).to(b_q.dtype)
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dh /= b_norm[:, None]
b_dC *= tl.math.exp2(b_f_last + b_m_p - b_m)
b_dC += tl.dot(b_q, b_dh.to(b_q.dtype), allow_tf32=False)
b_m = b_m_p
p_initial_dC = tl.make_block_ptr(initial_dC + i_bC * K * V, (K, V), (V,
1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_initial_dC, b_dC.to(p_initial_dC.dtype.element_ty),
boundary_check=(0, 1))
tl.store(initial_m + i_bC, b_m)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache",
"BSD"
] | https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py |
42cd92f0-6236-4849-b769-b3694bde27ff | sequential_rnn_scan.py | TushaarGVS/linear-rnn | linear_rnn/triton/sequential_rnn_scan.py | 48320589b73154484be7d09a144923a2b9e56b85 | 0 | @triton.jit
def _sequential_rnn_scan_bwd_kernel():
pass
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/sequential_rnn_scan.py |
71e1d145-99ab-4637-a976-6d25d864e20e | silu_and_mul.py | tascj/kaggle-lmsys-chatbot-arena | human_pref/inference/ops/silu_and_mul.py | 83cd93d50b9283c18711e8c63e4e1c6399c7b9ce | 0 | @triton.jit
def _silu_and_mul_kernel(input_ptr, stride_input_m, stride_input_n,
stride_output_m, stride_output_n, size_m, size_n, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr):
stride_input_m = stride_input_m.to(tl.int64)
stride_output_m = stride_output_m.to(tl.int64)
tid = tl.program_id(0)
input_m_offsets = tid * BLOCK_M + tl.arange(0, BLOCK_M)
output_m_offsets = tid * BLOCK_M + tl.arange(0, BLOCK_M)
pid = tl.program_id(1)
input_n_offsets = pid * BLOCK_N + tl.arange(0, BLOCK_N)
output_n_offsets = pid * BLOCK_N + tl.arange(0, BLOCK_N)
up_offsets = input_m_offsets[:, None] * stride_input_m + (input_n_offsets
[None, :] + size_n) * stride_input_n
gate_offsets = input_m_offsets[:, None] * stride_input_m + input_n_offsets[
None, :] * stride_input_n
res_offsets = output_m_offsets[:, None
] * stride_output_m + output_n_offsets[None, :] * stride_output_n
up = tl.load(input_ptr + up_offsets, mask=(input_n_offsets < size_n)[
None, :] * (input_m_offsets < size_m)[:, None], other=0.0)
gate = tl.load(input_ptr + gate_offsets, mask=(input_n_offsets < size_n
)[None, :] * (input_m_offsets < size_m)[:, None], other=0.0).to(tl.
float32)
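    # SiLU(gate) = gate * sigmoid(gate), computed in fp32 and cast back.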
gate = gate / (1 + tl.exp(-gate))
gate = gate.to(input_ptr.dtype.element_ty)
tl.store(input_ptr + res_offsets, up * gate, mask=(output_n_offsets <
size_n)[None, :] * (output_m_offsets < size_m)[:, None])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/tascj/kaggle-lmsys-chatbot-arena/blob/83cd93d50b9283c18711e8c63e4e1c6399c7b9ce/human_pref/inference/ops/silu_and_mul.py |
6fc9c97a-cb15-4764-a89b-ef90b31aac37 | geglu.py | Kitsunetic/kitsu | kitsu/nn/geglu.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def geglu_backward_kernel(x_ptr, dx_ptr, dy_ptr, N, C, C2, BLK_C: tl.
constexpr, BLK_N: tl.constexpr):
pid_n = tl.program_id(0)
pid_c = tl.program_id(1)
offs_n = pid_n * BLK_N + tl.arange(0, BLK_N)
offs_c = pid_c * BLK_C + tl.arange(0, BLK_C)
mask_n = offs_n < N
mask_c = offs_c < C2
mask = mask_n[:, None] & mask_c[None, :]
x_ptrs = x_ptr + offs_n[:, None] * C + offs_c[None, :]
x1 = tl.load(x_ptrs, mask=mask)
x2 = tl.load(x_ptrs + C2, mask=mask)
dy_ptrs = dy_ptr + offs_n[:, None] * C2 + offs_c[None, :]
dy = tl.load(dy_ptrs, mask=mask)
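    # Chain rule for y = x1 * GELU(x2):
    # dx1 = dy * GELU(x2), dx2 = dy * x1 * GELU'(x2).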
dx1 = dy * gelu_forward(x2)
dx2 = dy * x1
dx2 *= gelu_backward(x2)
dx_ptrs = dx_ptr + offs_n[:, None] * C + offs_c[None, :]
tl.store(dx_ptrs, dx1, mask=mask)
tl.store(dx_ptrs + C2, dx2, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/geglu.py |
faf12dcc-3a71-462c-9bd7-a076ddd12c3f | naive_associative_rnn_scan.py | TushaarGVS/linear-rnn | linear_rnn/triton/naive_associative_rnn_scan.py | 48320589b73154484be7d09a144923a2b9e56b85 | 0 | @triton.jit
def _naive_associative_rnn_scan_fwd_kernel(x_ptr, a_ptr, out_ptr,
stride_x_batch, stride_x_len, stride_x_dim, stride_a_batch,
stride_a_len, stride_a_dim, stride_out_batch, stride_out_len,
stride_out_dim, seq_len: tl.constexpr, BLOCK_SIZE: tl.constexpr):
pid_batch = tl.program_id(0)
pid_dim = tl.program_id(1)
x_ptr += pid_batch * stride_x_batch
a_ptr += pid_batch * stride_a_batch
out_ptr += pid_batch * stride_out_batch
offsets_dim = pid_dim * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
offsets_len = tl.arange(0, seq_len)
x_ptrs = x_ptr + offsets_dim[None, :] * stride_x_dim + offsets_len[:, None
] * stride_x_len
a_ptrs = a_ptr + offsets_dim[None, :] * stride_a_dim + offsets_len[:, None
] * stride_a_len
out_ptrs = out_ptr + offsets_dim[None, :] * stride_out_dim + offsets_len[
:, None] * stride_out_len
x = tl.load(x_ptrs).to(tl.float32)
a = tl.load(a_ptrs).to(tl.float32)
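    # Associative scan over time computes h_t = a_t * h_{t-1} + x_t for all t;
    # only the final hidden state (t == seq_len - 1) is written out below.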
_, all_hiddens = tl.associative_scan(input=(a, x), axis=0, combine_fn=
_associative_scan_op)
tl.store(out_ptrs, all_hiddens.to(out_ptr.dtype.element_ty), mask=(
offsets_len == seq_len - 1)[:, None])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/naive_associative_rnn_scan.py |
8b8d9b04-0656-4233-8540-349f8e875a04 | fused_recurrent.py | sustcsonglin/hope-fla | fla/ops/hope/fused_recurrent.py | 0750c9a9a360fb72236dfaaaf21496959c5ef48d | 0 | @triton.jit
def fused_recurrent_fwd_kernel(q, k, k_l2, q_reflected, k_reflected,
dk_l2_partial, T, D: tl.constexpr, BK: tl.constexpr):
i_b, i_h = tl.program_id(0), tl.program_id(1)
p_q = q + i_b * T * D + i_h * BK + tl.arange(0, BK)
p_k = k + i_b * T * D + i_h * BK + tl.arange(0, BK)
p_k_l2 = k_l2 + i_b * T * D + i_h * BK + tl.arange(0, BK)
p_q_reflected = q_reflected + i_b * T * D + i_h * BK + tl.arange(0, BK)
p_k_reflected = k_reflected + i_b * T * D + i_h * BK + tl.arange(0, BK)
p_dk_l2_partial = dk_l2_partial + i_b * T * D + i_h * BK + tl.arange(0, BK)
h = tl.zeros([BK, BK], dtype=tl.float32) + (tl.arange(0, BK)[:, None] ==
tl.arange(0, BK)[None, :])
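    # h starts as the identity; each step multiplies it by a Householder
    # reflection (I - 2 * k_l2 k_l2^T) before projecting q and k through it.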
for _ in range(0, T):
b_k_l2 = tl.load(p_k_l2).to(tl.float32)
b_q = tl.load(p_q).to(tl.float32)
b_k = tl.load(p_k).to(tl.float32)
tmp = tl.sum(h * b_k_l2[None, :], axis=1)
h -= 2 * b_k_l2[None, :] * tmp[:, None]
b_q = tl.sum(h * b_q[None, :], axis=1)
b_k = tl.sum(h * b_k[None, :], axis=1)
tl.store(p_q_reflected, b_q.to(p_q_reflected.dtype.element_ty))
tl.store(p_k_reflected, b_k.to(p_k_reflected.dtype.element_ty))
tl.store(p_dk_l2_partial, tmp.to(p_dk_l2_partial.dtype.element_ty))
p_q += D
p_k += D
p_k_l2 += D
p_q_reflected += D
p_k_reflected += D
p_dk_l2_partial += D
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/hope-fla/blob/0750c9a9a360fb72236dfaaaf21496959c5ef48d/fla/ops/hope/fused_recurrent.py |
5f0785c8-cf18-4fd2-8f09-4990e5a3ec0a | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/based/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_based_fwd_kernel(q, k, v, o, z, s_k_h, s_k_t, s_k_d, s_v_h,
s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
BV: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_h_0o = tl.zeros([BV], dtype=tl.float32)
b_h_1o = tl.zeros([BK, BV], dtype=tl.float32)
b_h_2o = tl.zeros([BK * BK, BV], dtype=tl.float32)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (0,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, 0), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (0,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (i_bh + i_k * B * H) * s_v_h, (T, V), (
s_v_t, s_v_d), (0, i_v * BV), (BT, BV), (1, 0))
p_z = z + (i_bh + i_k * B * H) * T + tl.arange(0, BT)
k_2o = tl.zeros([1, BK * BK], dtype=tl.float32)
k_1o = tl.zeros([1, BK], dtype=tl.float32)
k_0o = 0
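    # Maintain 0th/1st/2nd-order key statistics so the quadratic Taylor
    # feature map 1 + s + s^2/2 (see below) can be applied chunkwise.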
for i in range(0, tl.cdiv(T, BT)):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_k_2o = b_k[:, None, :] * b_k[None, :, :]
b_k_2o = tl.reshape(b_k_2o, [BK * BK, BT]).to(b_k.dtype)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_q = (tl.load(p_q, boundary_check=(0, 1)) * scale).to(b_k.dtype)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
b_z = tl.zeros([BT], dtype=tl.float32)
b_o += b_h_0o
b_z += k_0o
b_o += tl.dot(b_q, b_h_1o.to(b_q.dtype), allow_tf32=False)
b_z += tl.sum(b_q * k_1o, axis=1)
b_q_2o = b_q[:, :, None] * b_q[:, None, :]
b_q_2o = tl.reshape(b_q_2o, [BT, BK * BK]).to(b_k.dtype)
b_o += tl.dot(b_q_2o, b_h_2o.to(b_q_2o.dtype), allow_tf32=False) * 0.5
b_z += tl.sum(b_q_2o * k_2o, axis=1) * 0.5
k_1o += tl.sum(b_k, axis=1)[None, :]
k_2o += tl.sum(b_k_2o, axis=1)[None, :]
k_0o += BT
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = 1 + b_s + 0.5 * b_s * b_s
b_s = tl.where(m_s, b_s, 0)
b_z += tl.sum(b_s, axis=1)
b_o += tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_z, b_z.to(p_z.dtype.element_ty), mask=i * BT + tl.arange
(0, BT) < T)
b_h_2o = b_h_2o + tl.dot(b_k_2o.to(b_v.dtype), b_v, allow_tf32=False)
b_h_1o = b_h_1o + tl.dot(b_k, b_v, allow_tf32=False)
b_h_0o = b_h_0o + tl.sum(b_v, axis=0)
p_q = tl.advance(p_q, (BT, 0))
p_k = tl.advance(p_k, (0, BT))
p_v = tl.advance(p_v, (BT, 0))
p_o = tl.advance(p_o, (BT, 0))
p_z += BT
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/fused_chunk.py |
54d08794-506f-46cb-8b84-ab8be03f6801 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _jagged_flash_attention_bwd_preprocess_basic_kernel(o_ptr, o_offset_ptr,
do_ptr, delta_ptr, stride_om, stride_od, max_seq_len, D: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_D: tl.constexpr):
pid_m = tl.program_id(axis=0)
pid_batch = tl.program_id(axis=1)
begin_o = tl.load(o_offset_ptr + pid_batch)
end_o = tl.load(o_offset_ptr + pid_batch + 1)
M = end_o - begin_o
M = tl.minimum(M, max_seq_len)
offs_om = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_od = tl.arange(0, BLOCK_SIZE_D)
o_offsets = offs_om[:, None] * stride_om + offs_od[None, :
] * stride_od + begin_o * stride_om
o_ptrs = o_ptr + o_offsets
do_ptrs = do_ptr + o_offsets
o_mask = (offs_om[:, None] < M) & (offs_od[None, :] < D)
o = tl.load(o_ptrs, mask=o_mask)
do = tl.load(do_ptrs, mask=o_mask)
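    # delta_i = sum_d O[i, d] * dO[i, d], the row-wise correction term
    # reused by the attention backward pass.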
delta = tl.sum(o * do, axis=1)
tl.store(delta_ptr + begin_o + offs_om, delta, mask=offs_om < M)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
51ef560d-cee3-4e4b-9480-414a21661527 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/retention/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32,
64, 128] for num_warps in [2, 4] for num_stages in [2, 3, 4]], key=['BT'])
@triton.jit
def chunk_retention_fwd_kernel_o(q, k, v, h, o, offsets, indices, scale, H:
tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl
.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
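    # Per-head retention decay b = 1 - 2^(-5 - head_idx), handled in log2 space.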
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
o_i = tl.arange(0, BT)
d_i = tl.math.exp2((o_i + 1) * b_b)
m_s = o_i[:, None] >= o_i[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_i[:, None] - o_i[None, :]) * b_b), 0)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
b_s = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (
V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_o += tl.dot(b_q, b_h, allow_tf32=False)
b_s += tl.dot(b_q, b_k, allow_tf32=False)
b_o = b_o * d_i[:, None]
b_s = b_s * d_s
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_o = (b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) * scale
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/chunk.py |
4b7f7511-1f81-4dfd-b663-c2657b3195c5 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/retention/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'NV': lambda args: triton.cdiv(args['V'], args['BV'])})
@triton.jit
def parallel_retention_bwd_kernel(q, k, v, do, dq, dk, dv, scale, B: tl.
constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BS: tl.constexpr, BK: tl.constexpr, BV: tl
.constexpr, NV: tl.constexpr):
i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_k, i_v = i_kv // NV, i_kv % NV
i_h = i_bh % H
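    # Two passes per program: compute dq first, then (after a barrier) dk/dv.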
parallel_retention_bwd_kernel_dq(i_bh, i_t, i_k, i_v, i_h, k, v, do, dq,
scale, B=B, H=H, T=T, K=K, V=V, BT=BT, BS=BS, BK=BK, BV=BV)
tl.debug_barrier()
parallel_retention_bwd_kernel_dkv(i_bh, i_t, i_k, i_v, i_h, q, k, v, do,
dk, dv, scale, B, H, T, K, V, BT, BS, BK, BV)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/parallel.py |
686cb4de-466f-4a1f-845c-31d93017e89f | test.py | LLMServe/DistServe | evaluation/0-test-single-forward-performance/test.py | 3a5c5397a260c2a53c815688d0df1796dd54128e | 0 | @triton.jit
def f(a: tl.constexpr, b):
pass
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/LLMServe/DistServe/blob/3a5c5397a260c2a53c815688d0df1796dd54128e/evaluation/0-test-single-forward-performance/test.py |
2dcb744e-9b5f-4362-b8b5-054fa60ab3b3 | layernorm_gated.py | sustcsonglin/flash-linear-attention | fla/modules/layernorm_gated.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'HAS_BIAS': lambda args: args['B'] is not None, 'HAS_Z':
lambda args: args['Z'] is not None, 'RECOMPUTE_OUTPUT': lambda args:
args['Y'] is not None})
@triton.jit
def layer_norm_bwd_kernel(X, W, B, Z, Y, DY, DX, DW, DB, DZ, Mean, Rstd,
stride_x_row, stride_z_row, stride_y_row, stride_dy_row, stride_dx_row,
stride_dz_row, stride_dw_row, stride_db_row, M, N, eps,
rows_per_program, NORM_BEFORE_GATE: tl.constexpr, IS_RMS_NORM: tl.
constexpr, HAS_BIAS: tl.constexpr, HAS_Z: tl.constexpr,
RECOMPUTE_OUTPUT: tl.constexpr, BLOCK_N: tl.constexpr):
row_block_id = tl.program_id(0)
group = tl.program_id(1)
row_start = row_block_id * rows_per_program
cols = tl.arange(0, BLOCK_N)
mask = cols < N
X += row_start * stride_x_row + group * N
if HAS_Z:
Z += row_start * stride_z_row + group * N
DZ += row_start * stride_dz_row + group * N
DY += row_start * stride_dy_row + group * N
DX += row_start * stride_dx_row + group * N
if RECOMPUTE_OUTPUT:
Y += row_start * stride_y_row + group * N
if not IS_RMS_NORM:
Mean += group * M
Rstd += group * M
W += group * N
w = tl.load(W + cols, mask=mask).to(tl.float32)
if (RECOMPUTE_OUTPUT or HAS_Z) and HAS_BIAS:
B += group * N
b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
if HAS_BIAS:
db = tl.zeros((BLOCK_N,), dtype=tl.float32)
row_end = min((row_block_id + 1) * rows_per_program, M)
for row in range(row_start, row_end):
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
if not IS_RMS_NORM:
mean = tl.load(Mean + row)
if HAS_Z and not NORM_BEFORE_GATE:
z = tl.load(Z + cols, mask=mask, other=0.0).to(tl.float32)
x_og = x
x = x_og * z * tl.sigmoid(z)
rstd = tl.load(Rstd + row)
xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
xhat = tl.where(mask, xhat, 0.0)
if HAS_Z and NORM_BEFORE_GATE:
z = tl.load(Z + cols, mask=mask, other=0.0).to(tl.float32)
z_sigmoid = tl.sigmoid(z)
y = xhat * w + b if HAS_BIAS else xhat * w
if RECOMPUTE_OUTPUT:
tl.store(Y + cols, y * z * z_sigmoid, mask=mask)
dz = dy * y * z_sigmoid * (1 + z * (1 - z_sigmoid))
tl.store(DZ + cols, dz, mask=mask)
dy *= z * z_sigmoid
elif RECOMPUTE_OUTPUT:
y = xhat * w + b if HAS_BIAS else xhat * w
tl.store(Y + cols, y, mask=mask)
wdy = w * dy
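        # Standard (rms-)layernorm backward: dx = (w*dy - xhat*c1 [- c2]) * rstd,
        # where c1/c2 are the row means of xhat*w*dy and w*dy.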
c1 = tl.sum(xhat * wdy, axis=0) / N
if not IS_RMS_NORM:
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
else:
dx = (wdy - xhat * c1) * rstd
dw += dy * xhat
if HAS_BIAS:
db += dy
if HAS_Z and not NORM_BEFORE_GATE:
z_sigmoid = tl.sigmoid(z)
dz = dx * x_og * z_sigmoid * (1 + z * (1 - z_sigmoid))
tl.store(DZ + cols, dz, mask=mask)
dx *= z * z_sigmoid
tl.store(DX + cols, dx, mask=mask)
X += stride_x_row
if HAS_Z:
Z += stride_z_row
DZ += stride_dz_row
if RECOMPUTE_OUTPUT:
Y += stride_y_row
DY += stride_dy_row
DX += stride_dx_row
tl.store(DW + row_block_id * stride_dw_row + group * N + cols, dw, mask
=mask)
if HAS_BIAS:
tl.store(DB + row_block_id * stride_db_row + group * N + cols, db,
mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/layernorm_gated.py |
214f7ac4-a9b5-4d29-84a0-b7ba3504f01e | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps, num_stages
=num_stages) for num_warps in [2, 4] for num_stages in [2, 3, 4]], key=
['BT'])
@triton.jit
def chunk_gsa_bwd_k_kernel_dqkvg(q, k, v, h, g, A, do, dh, dq, dk, dv, dg,
dgv, dA, offsets, indices, scale, B: tl.constexpr, T: tl.constexpr, HQ:
tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl
.constexpr, BK: tl.constexpr, BV: tl.constexpr, NG: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
all = B * T
o_i = tl.arange(0, BT)
o_t = min(i_t * BT + BT, T)
m_s = o_i[:, None] >= o_i[None, :]
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bg * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_A = tl.make_block_ptr(A + (i_k * B * H + i_bh) * T * BT, (T, BT),
(BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (T, K), (HQ * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_A = tl.make_block_ptr(A + ((i_k * all + bos) * HQ + i_hq) * BT, (
T, BT), (HQ * BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
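    # Attention logits A = (q * scale) @ k^T, masked to the causal lower triangle.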
b_A = tl.dot((b_q * scale).to(b_q.dtype), tl.trans(b_k))
b_A = tl.where(m_s, b_A, 0.0)
tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
o_v = i_v * BV + tl.arange(0, BV)
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bg * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (o_t -
1) * V + o_v, BV), BV)
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_k * B * H + i_bh) * T * V, (T,
V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dgv = tl.make_block_ptr(dgv + (i_k * B * H + i_bh) * T * V, (
T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bg * NT * K * V + i_t * K * V, (V,
K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bh * NT * K * V + i_t * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + o_t - 1) * H *
V + i_h * V + o_v, BV), BV)
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + ((i_k * all + bos) * HQ + i_hq) *
V, (T, V), (HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dg = tl.make_block_ptr(dg + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dgv = tl.make_block_ptr(dgv + ((i_k * all + bos) * HQ + i_hq) *
V, (T, V), (HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * HQ + i_hq) * K * V, (K, V
), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
m_v = o_v < V
b_gn = tl.load(p_gn, mask=m_v, other=0)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0, 1))
b_gv = tl.exp(b_gn[None, :] - b_g)
b_h = tl.load(p_h, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_g) * scale).to(b_do.dtype)
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dg = tl.sum(tl.trans(b_h) * b_dh, 0) * tl.exp(b_gn)
b_dh = b_dh.to(b_k.dtype)
b_dq += tl.dot(b_do, b_h.to(b_k.dtype))
b_dk += tl.dot((b_v * b_gv).to(b_v.dtype), tl.trans(b_dh))
b_dv = tl.dot(b_k, b_dh) * b_gv
b_dg += tl.sum(b_dv * b_v, 0)
if i_k == 0:
b_dgv = tl.load(p_dg, boundary_check=(0, 1)) + b_dg[None, :]
else:
b_dgv = tl.zeros([BT, BV], dtype=tl.float32) + b_dg[None, :]
tl.store(p_dgv, b_dgv.to(p_dgv.dtype.element_ty), boundary_check=(0, 1)
)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
if HEAD_FIRST:
p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_dA = tl.make_block_ptr(dA + (bos * HQ + i_hq) * BT, (T, BT), (HQ *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
p_dq = tl.make_block_ptr(dq + (bos * HQ + i_hq) * K, (T, K), (HQ *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * HQ + i_hq) * K, (T, K), (HQ *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_dA = tl.load(p_dA, boundary_check=(0, 1))
b_dq += tl.dot(b_dA, b_k)
b_dk += tl.dot(tl.trans(b_dA).to(b_k.dtype), b_q)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
2e0354ac-cb88-4e37-87aa-b3e8199840dd | rwkv_log.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_log.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def wkv_triton_log_space_backward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b,
state_s_abe, state_s_t, state_s_c, gwkv_ptr, gwkv_s_b, gwkv_s_t,
gwkv_s_c, gstate_out_ptr, gstate_out_s_b, gstate_out_s_abe,
gstate_out_s_c, gw_ptr, gw_s_c, gu_ptr, gu_s_c, gk_ptr, gk_s_b, gk_s_t,
gk_s_c, gv_ptr, gv_s_b, gv_s_t, gv_s_c, gstate_ptr, gstate_s_b,
gstate_s_abe, gstate_s_c, tsz, chans, eps: tl.constexpr, BLOCK_SIZE_C:
tl.constexpr):
b_idx = tl.program_id(0)
c_idx = tl.program_id(1)
cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
cmask = cs < chans
k_ptr = k_ptr + b_idx * k_s_b
v_ptr = v_ptr + b_idx * v_s_b
alpha_p_ptr = state_ptr + b_idx * state_s_b
alpha_m_ptr = state_ptr + b_idx * state_s_b + state_s_abe
beta_ptr = state_ptr + b_idx * state_s_b + 2 * state_s_abe
gk_ptr = gk_ptr + b_idx * gk_s_b
gv_ptr = gv_ptr + b_idx * gv_s_b
gwkv_ptr = gwkv_ptr + b_idx * gwkv_s_b
galpha_p_out_ptr = gstate_out_ptr + b_idx * gstate_out_s_b
galpha_m_out_ptr = (gstate_out_ptr + b_idx * gstate_out_s_b +
gstate_out_s_abe)
gbeta_out_ptr = (gstate_out_ptr + b_idx * gstate_out_s_b + 2 *
gstate_out_s_abe)
gln_alpha_p = tl.load(galpha_p_out_ptr + gstate_out_s_c * cs, mask=cmask
).to(tl.float32)
gln_alpha_m = tl.load(galpha_m_out_ptr + gstate_out_s_c * cs, mask=cmask
).to(tl.float32)
gln_beta = tl.load(gbeta_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl
.float32)
w = tl.load(w_ptr + w_s_c * cs, mask=cmask).to(tl.float32)
u = tl.load(u_ptr + u_s_c * cs, mask=cmask).to(tl.float32)
gw = tl.zeros_like(w)
gu = tl.zeros_like(u)
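    # Walk time in reverse; alpha_p/alpha_m (signed numerator halves) and beta
    # (denominator) are carried in log space for numerical stability.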
for t in range(tsz):
tc = tsz - t - 1
kt = tl.load(k_ptr + tc * k_s_t + k_s_c * cs, mask=cmask).to(tl.float32
)
vt = tl.load(v_ptr + tc * v_s_t + v_s_c * cs, mask=cmask).to(tl.float32
)
vt_p = tl.maximum(vt, 0) + eps
vt_m = tl.maximum(-vt, 0) + eps
ln_v_p = tl.log(vt_p)
ln_v_m = tl.log(vt_m)
ln_alpha_p_prev = tl.load(alpha_p_ptr + tc * state_s_t + state_s_c *
cs, mask=cmask).to(tl.float32)
ln_alpha_m_prev = tl.load(alpha_m_ptr + tc * state_s_t + state_s_c *
cs, mask=cmask).to(tl.float32)
ln_beta_prev = tl.load(beta_ptr + tc * state_s_t + state_s_c * cs,
mask=cmask).to(tl.float32)
uk = u + kt
ukv_p = uk + ln_v_p
ukv_m = uk + ln_v_m
ukb = logaddexp(uk, ln_beta_prev)
wkv_p = tl.exp(logaddexp(ukv_p, ln_alpha_p_prev) - ukb)
wkv_m = tl.exp(logaddexp(ukv_m, ln_alpha_m_prev) - ukb)
gwkvt = tl.load(gwkv_ptr + tc * gwkv_s_t + gwkv_s_c * cs, mask=cmask
).to(tl.float32)
gln_wkv_p = gwkvt * wkv_p
gln_wkv_m = gwkvt * -wkv_m
e_num_p = tl.exp(ln_alpha_p_prev - ukv_p)
e_num_m = tl.exp(ln_alpha_m_prev - ukv_m)
e_den = tl.exp(ln_beta_prev - uk)
grad_wkv_den_p = gln_wkv_p / (1 + e_den)
grad_wkv_den_m = gln_wkv_m / (1 + e_den)
gkv_p = gln_wkv_p / (1 + e_num_p)
gkv_m = gln_wkv_m / (1 + e_num_m)
grad_uk = gkv_p + gkv_m - grad_wkv_den_p - grad_wkv_den_m
gu += grad_uk
gk = grad_uk
gv = tl.where(vt > 0, gkv_p / vt_p, gkv_m / -vt_m)
gln_alpha_wkv_p = gln_wkv_p / (1 + 1 / e_num_p)
gln_alpha_wkv_m = gln_wkv_m / (1 + 1 / e_num_m)
gln_beta_wkv = -gln_wkv_p / (1 + 1 / e_den) - gln_wkv_m / (1 + 1 /
e_den)
e_alpha_p = tl.exp(kt + ln_v_p - (w + ln_alpha_p_prev))
e_alpha_m = tl.exp(kt + ln_v_m - (w + ln_alpha_m_prev))
gwa_p = gln_alpha_p / (1 + e_alpha_p)
gwa_m = gln_alpha_m / (1 + e_alpha_m)
gkv_p = gln_alpha_p / (1 + 1 / e_alpha_p)
gkv_m = gln_alpha_m / (1 + 1 / e_alpha_m)
gw += gwa_p + gwa_m
gk += gkv_p + gkv_m
gv += tl.where(vt > 0, gkv_p / vt_p, -gkv_m / vt_m)
e_beta = tl.exp(kt - (w + ln_beta_prev))
gwb = gln_beta / (1 + e_beta)
gw += gwb
gk += gln_beta / (1 + 1 / e_beta)
tl.store(gk_ptr + tc * gk_s_t + gk_s_c * cs, gk, mask=cmask)
tl.store(gv_ptr + tc * gv_s_t + gv_s_c * cs, gv, mask=cmask)
gln_alpha_p = gwa_p + gln_alpha_wkv_p
gln_alpha_m = gwa_m + gln_alpha_wkv_m
gln_beta = gwb + gln_beta_wkv
gln_alpha_p_ptr = gstate_ptr + b_idx * gstate_s_b
gln_alpha_m_ptr = gstate_ptr + b_idx * gstate_s_b + gstate_s_abe
gln_beta_ptr = gstate_ptr + b_idx * gstate_s_b + 2 * gstate_s_abe
tl.store(gln_alpha_p_ptr + gstate_s_c * cs, gln_alpha_p, mask=cmask)
tl.store(gln_alpha_m_ptr + gstate_s_c * cs, gln_alpha_m, mask=cmask)
tl.store(gln_beta_ptr + gstate_s_c * cs, gln_beta, mask=cmask)
tl.atomic_add(gw_ptr + gw_s_c * cs, gw, mask=cmask)
tl.atomic_add(gu_ptr + gu_s_c * cs, gu, mask=cmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py |
04b616e3-c3c8-4ae9-b466-28632451aa62 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16]], key=['BK', 'BV'])
@triton.jit
def fused_recurrent_rwkv6_fwd_kernel(q, k, v, w, u, o, h0, ht, offsets,
scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, REVERSE:
tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.
constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.
int64), tl.program_id(2).to(tl.int64)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
o_k = i_k * BK + tl.arange(0, BK)
o_v = i_v * BV + tl.arange(0, BV)
if HEAD_FIRST:
p_q = q + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + o_k
p_k = k + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + o_k
p_v = v + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + o_v
p_w = w + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + o_k
p_o = o + (i_k * B * H + i_nh) * T * V + ((T - 1) * V if REVERSE else 0
) + o_v
else:
p_q = q + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + o_k
p_k = k + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + o_k
p_v = v + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + o_v
p_w = w + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + o_k
p_o = o + (i_k * all + bos + (T - 1 if REVERSE else 0)
) * H * V + i_h * V + o_v
p_u = u + i_h * K + o_k
mask_k = o_k < K
mask_v = o_v < V
mask_h = mask_k[:, None] & mask_v[None, :]
b_u = tl.load(p_u, mask=mask_k, other=0).to(tl.float32)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + o_k[:, None] * V + o_v[None, :]
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_w = tl.load(p_w, mask=mask_k, other=0).to(tl.float32)
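        # RWKV6 step: o_t = q_t . (h + u * k_t v_t^T); then h = exp(w_t) * h + k_t v_t^T.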
b_kv = b_k[:, None] * b_v[None, :]
b_o = tl.sum((b_h + b_kv * b_u[:, None]) * b_q[:, None], 0)
b_h = b_h * tl.exp(b_w)[:, None] + b_kv
tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_v)
p_q += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_w += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_o += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
if STORE_FINAL_STATE:
p_ht = ht + i_nh * K * V + o_k[:, None] * V + o_v[None, :]
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Batch-Oriented"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/fused_recurrent.py |
b2ccfd99-57ca-4f63-8421-06fc03c2fd71 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/simple_gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4, 8]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_simple_gla_bwd_kernel_dv(q, k, g, do, dv, dh, offsets, indices,
scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
b_g = tl.load(g + i_bh * T + i_t * BT + tl.arange(0, BT))
b_g_last = tl.load(g + i_bh * T + min(i_t * BT + BT, T) - 1)
else:
b_g = tl.load(g + (bos + i_t * BT + tl.arange(0, BT)) * H + i_h)
b_g_last = tl.load(g + (bos + min(i_t * BT + BT, T) - 1) * H + i_h)
b_dv = tl.zeros([BT, BV], dtype=tl.float32)
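    # dv from future chunks flows in through dh, rescaled by the gate decay
    # from each position to the end of the chunk.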
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype)) * tl.exp(-b_g + b_g_last)[:,
None]
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_A += tl.dot(b_k, b_q, allow_tf32=False)
mask = (tl.arange(0, BT)[:, None] <= tl.arange(0, BT)[None, :]) & (i_t *
BT + tl.arange(0, BT) < T)
b_A = b_A * tl.exp(b_g[None, :] - b_g[:, None]) * scale
b_A = tl.where(mask, b_A, 0).to(do.dtype.element_ty)
if HEAD_FIRST:
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
else:
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dv += tl.dot(b_A, b_do)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/chunk.py |
c37220b7-83c8-4237-ab3c-c6b1f83a1aa3 | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def symmetric_alibi_mask(slope, offs_m, offs_n, M, N, EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr):
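    # Symmetric ALiBi: bias each score by -slope * |relative distance|.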
alibi = -tl.abs(M - N + offs_n[None, :] - offs_m[:, None]) * slope
    if not (EVEN_M & EVEN_N):
mask = make_bounds(offs_m, offs_n, M, N, EVEN_M, EVEN_N)
mask, alibi = tl.broadcast(mask, alibi)
alibi = tl.where(mask, alibi, float('-inf'))
return alibi
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
e2a70484-75b6-4ce3-8fc4-87362e385bde | attn_torch_function.py | ROCm/aotriton | tritonsrc/attn_torch_function.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_BWD_FUSED, key=['max_seqlen_q',
'max_seqlen_k', 'head_dim'])
@triton.jit
def tuned_attn_bwd(Q, K, V, B, sm_scale, Out, DO, DK, DV, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn, stride_dkk,
stride_dvz, stride_dvh, stride_dvk, stride_dvn, stride_dqz, stride_dqh,
stride_dqm, stride_dqk, stride_dbz, stride_dbh, stride_dbm, stride_dbn,
num_head_q, num_head_k, cu_seqlens_q, cu_seqlens_k, num_seqlens,
max_seqlen_q, max_seqlen_k, head_dim, dropout_p, philox_seed_ptr,
philox_offset1, philox_offset2, BLOCK_DMODEL: tl.constexpr, CAUSAL: tl.
constexpr, ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr,
BIAS_TYPE: tl.constexpr, BLOCK_M1: tl.constexpr, BLOCK_N1: tl.constexpr,
BLOCK_M2: tl.constexpr, BLOCK_N2: tl.constexpr, BLK_SLICE_FACTOR: tl.
constexpr):
bare_attn_bwd(Q, K, V, B, sm_scale, Out, DO, DK, DV, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dkz, stride_dkh, stride_dkn,
stride_dkk, stride_dvz, stride_dvh, stride_dvk, stride_dvn,
stride_dqz, stride_dqh, stride_dqm, stride_dqk, stride_dbz,
stride_dbh, stride_dbm, stride_dbn, num_head_q, num_head_k,
cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k,
        head_dim, dropout_p, philox_seed_ptr, philox_offset1, philox_offset2,
BLOCK_DMODEL, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD, BIAS_TYPE,
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2, BLK_SLICE_FACTOR)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/attn_torch_function.py |
1df982b2-ab6b-4be2-b12a-68ff05bd304d | seqlen_utils.py | Kitsunetic/kitsu | kitsu/nn/seqlen_utils.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def code_downscale_kernel(code_ptr, out_ptr, n_steps, N, BLK: tl.constexpr):
pid = tl.program_id(0)
offs_n = BLK * pid + tl.arange(0, BLK)
mask_n = offs_n < N
code = tl.load(code_ptr + offs_n, mask=mask_n)
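    # Keep the 15-bit tag in bits 48-62; shift the low 48-bit code down by
    # 3 bits per step (one level of what looks like a 3D Morton code).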
    top16bit = code & (32767 << 48)
    low16bit = code & ((1 << 48) - 1)
low16bit >>= n_steps * 3
new_code = top16bit | low16bit
tl.store(out_ptr + offs_n, new_code, mask=mask_n)
| {
"Data Type": [
"int8",
"uint8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py |
04b2666a-1d7d-4696-a991-b70862c000e7 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def stabilize_fi(F, I, M, F_STABILIZED, I_STABILIZED, NH: tl.constexpr, S:
tl.constexpr):
bh_id = tl.program_id(0)
batch_id = bh_id // NH
head_id = bh_id % NH
s_range = tl.arange(0, S)
batch_offset_fi = batch_id * NH * S + head_id * S
f = tl.load(F + batch_offset_fi + s_range, s_range < S)
i = tl.load(I + batch_offset_fi + s_range, s_range < S)
f = tl.log(tl.sigmoid(f))
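    # The scan computes the running max m_t of the cumulative log-gates,
    # which stabilizes the exponentials taken below.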
_, m = tl.associative_scan((f, i), 0, stabilization_scan_op)
m_shifted = roll(m, 0)
i = tl.exp(i - m)
f = tl.exp(f - m + m_shifted)
tl.store(F_STABILIZED + batch_offset_fi + s_range, f, s_range < S)
tl.store(I_STABILIZED + batch_offset_fi + s_range, i, s_range < S)
tl.store(M + batch_offset_fi + s_range, m, s_range < S)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
f5393048-97b0-492a-b70c-6819fc017e7e | awq_triton.py | Charlie-XIAO/sparse-vllm | vllm/model_executor/layers/quantization/awq_triton.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.jit
def awq_dequantize_kernel(qweight_ptr, scales_ptr, zeros_ptr, group_size,
result_ptr, num_cols, num_rows, BLOCK_SIZE_X: tl.constexpr,
BLOCK_SIZE_Y: tl.constexpr):
pid_x = tl.program_id(axis=0)
pid_y = tl.program_id(axis=1)
offsets_y = pid_y * BLOCK_SIZE_Y + tl.arange(0, BLOCK_SIZE_Y)
offsets_x = pid_x * BLOCK_SIZE_X + tl.arange(0, BLOCK_SIZE_X)
offsets = num_cols * offsets_y[:, None] + offsets_x[None, :]
masks_y = offsets_y < num_rows
masks_x = offsets_x < num_cols
masks = masks_y[:, None] & masks_x[None, :]
result_offsets_y = pid_y * BLOCK_SIZE_Y + tl.arange(0, BLOCK_SIZE_Y)
result_offsets_x = pid_x * BLOCK_SIZE_X * 8 + tl.arange(0, BLOCK_SIZE_X * 8
)
result_offsets = 8 * num_cols * result_offsets_y[:, None
] + result_offsets_x[None, :]
result_masks_y = result_offsets_y < num_rows
result_masks_x = result_offsets_x < num_cols * 8
result_masks = result_masks_y[:, None] & result_masks_x[None, :]
iweights = tl.load(qweight_ptr + offsets, masks)
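    # Each int32 packs eight 4-bit weights; three interleaves replicate the
    # word 8x so the per-lane shifts below can extract every nibble in AWQ order.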
iweights = tl.interleave(iweights, iweights)
iweights = tl.interleave(iweights, iweights)
iweights = tl.interleave(iweights, iweights)
reverse_awq_order_tensor = ((tl.arange(0, 2) * 4)[None, :] + tl.arange(
0, 4)[:, None]).reshape(8)
shifts = reverse_awq_order_tensor * 4
shifts = tl.broadcast_to(shifts[None, :], (BLOCK_SIZE_Y * BLOCK_SIZE_X, 8))
shifts = tl.reshape(shifts, (BLOCK_SIZE_Y, BLOCK_SIZE_X * 8))
iweights = iweights >> shifts & 15
zero_offsets_y = pid_y * BLOCK_SIZE_Y // group_size + tl.arange(0, 1)
zero_offsets_x = pid_x * BLOCK_SIZE_X + tl.arange(0, BLOCK_SIZE_X)
zero_offsets = num_cols * zero_offsets_y[:, None] + zero_offsets_x[None, :]
zero_masks_y = zero_offsets_y < num_rows // group_size
zero_masks_x = zero_offsets_x < num_cols
zero_masks = zero_masks_y[:, None] & zero_masks_x[None, :]
zeros = tl.load(zeros_ptr + zero_offsets, zero_masks)
zeros = tl.interleave(zeros, zeros)
zeros = tl.interleave(zeros, zeros)
zeros = tl.interleave(zeros, zeros)
zeros = tl.broadcast_to(zeros, (BLOCK_SIZE_Y, BLOCK_SIZE_X * 8))
zeros = zeros >> shifts & 15
scale_offsets_y = pid_y * BLOCK_SIZE_Y // group_size + tl.arange(0, 1)
scale_offsets_x = pid_x * BLOCK_SIZE_X * 8 + tl.arange(0, BLOCK_SIZE_X * 8)
scale_offsets = num_cols * 8 * scale_offsets_y[:, None] + scale_offsets_x[
None, :]
scale_masks_y = scale_offsets_y < num_rows // group_size
scale_masks_x = scale_offsets_x < num_cols * 8
scale_masks = scale_masks_y[:, None] & scale_masks_x[None, :]
scales = tl.load(scales_ptr + scale_offsets, scale_masks)
scales = tl.broadcast_to(scales, (BLOCK_SIZE_Y, BLOCK_SIZE_X * 8))
iweights = (iweights - zeros) * scales
iweights = iweights.to(result_ptr.type.element_ty)
tl.store(result_ptr + result_offsets, iweights, result_masks)
| {
"Data Type": [
"uint8",
"fp32"
],
"Functionality": [
"Quantization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/quantization/awq_triton.py |
6a16d484-c7ae-4445-9911-2a441ad2edc4 | matrix_multiplication.py | gmgu/study-triton | 5_out_of_index/matrix_multiplication.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def triton_mm(x_ptr, y_ptr, out_ptr, n: tl.constexpr, m: tl.constexpr, p:
tl.constexpr, BLOCK_SIZE: tl.constexpr):
pid0 = tl.program_id(axis=0)
pid1 = tl.program_id(axis=1)
x_row_start = tl.arange(0, BLOCK_SIZE)[:, None] * m
x_col_start = tl.arange(0, BLOCK_SIZE)[None, :]
x_col_unit = BLOCK_SIZE
y_row_start = tl.arange(0, BLOCK_SIZE)[:, None] * p
y_col_start = tl.arange(0, BLOCK_SIZE)[None, :]
y_row_unit = BLOCK_SIZE * p
o_row_start = pid0 * BLOCK_SIZE * p + tl.arange(0, BLOCK_SIZE)[:, None] * p
o_col_start = pid1 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)[None, :]
o_offset = o_row_start + o_col_start
out = tl.zeros((BLOCK_SIZE, BLOCK_SIZE), dtype=tl.float32)
for i in range(tl.cdiv(m, BLOCK_SIZE)):
x_block = x_row_start + x_col_start
y_block = y_row_start + y_col_start
x_offset = pid0 * BLOCK_SIZE * m + x_block
        x_mask = (x_row_start < n * m) & (x_col_start < m)
        y_offset = pid1 * BLOCK_SIZE + y_block
        y_mask = (y_row_start < m * p) & (y_col_start < p)
x = tl.load(x_ptr + x_offset, mask=x_mask, other=0.0)
y = tl.load(y_ptr + y_offset, mask=y_mask, other=0.0)
out += tl.dot(x, y, allow_tf32=False)
x_col_start += x_col_unit
y_row_start += y_row_unit
    o_mask = (o_row_start < n * p) & (o_col_start < p)
tl.store(out_ptr + o_offset, out, mask=o_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops",
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/5_out_of_index/matrix_multiplication.py |
f80e0e6a-ad55-4272-aace-e1d940a24b8d | reduction.py | daemyung/practice-triton | reduction.py | 27f727726f1507c8380a1c11751d851c7c4a07ce | 0 | @triton.jit
def sum_kernel(y_ptr, x_ptr, size, block_size: tl.constexpr):
offsets = tl.arange(0, block_size)
mask = offsets < size
x = tl.load(x_ptr + offsets, mask)
y = tl.reduce(x, 0, combine_add)
tl.store(y_ptr, y)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/reduction.py |
c1001477-cd08-4169-b77e-125bba440902 | triton_example.py | lshmouse/ai-playground | experimental/triton_example/triton_example.py | 3d91bd77464e2a7fb0ce49180fc6d20e98869a73 | 0 | @triton.jit
def softmax(Y, stride_ym, stride_yn, X, stride_xm, stride_xn, M, N):
m = tl.program_id(0)
BLOCK_SIZE: tl.constexpr = 1024
n = tl.arange(0, BLOCK_SIZE)
X = X + m * stride_xm + n * stride_xn
x = tl.load(X, mask=n < N, other=-float('inf'))
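    # Subtract the row max before exponentiating for numerical stability.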
z = x - tl.max(x, axis=0)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
y = num / denom
Y = Y + m * stride_ym + n * stride_yn
tl.store(Y, y, mask=n < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/lshmouse/ai-playground/blob/3d91bd77464e2a7fb0ce49180fc6d20e98869a73/experimental/triton_example/triton_example.py |
a78ad7b2-9ebf-4407-be34-12ed6daa1918 | triton_call_test.py | jax-ml/jax-triton | tests/triton_call_test.py | 859cc392bec876d132bd0790ea6c00b6c246dd2b | 0 | @triton.jit
def copy_twice_kernel(a_ptr, x_ptr, y_ptr):
a = tl.load(a_ptr)
tl.store(x_ptr, a)
tl.store(y_ptr, a)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Low Latency"
]
} | [
"Apache"
] | https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/tests/triton_call_test.py |
22a19981-d2fc-4603-a185-6608ad153106 | sgmv_expand_slice.py | IBM/vllm | vllm/lora/ops/sgmv_expand_slice.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _sgmv_expand_slice_kernel(input_ptr, lora_ptr, out_ptr, N, K,
b_seq_start_loc, seq_lens, lora_indices, xm_stride, xk_stride,
l0_stride, lora_k_stride, lora_n_stride, cm_stride, cn_stride,
slice_offset, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl
.constexpr, EVEN_K: tl.constexpr, ADD_INPUTS: tl.constexpr, CAST_TYPE:
tl.constexpr):
"""
Similar to the 'sgmv_expand' operator, but with an added parameter
'slice_offset'. The reason for not reusing the 'sgmv_expand' operator
might be that in the future, we could implement a fusion operator to
achieve the current functionality instead of having to call it multiple
times.
"""
pid = tl.program_id(axis=0)
cur_batch = tl.program_id(axis=1)
cta_n_num = tl.cdiv(N, BLOCK_N)
pid_m = pid // cta_n_num
pid_n = pid % cta_n_num
M = tl.load(seq_lens + cur_batch)
if pid_m * BLOCK_M > M:
return
lora_index = tl.load(lora_indices + cur_batch)
if lora_index == -1:
return
cur_seq_start = tl.load(b_seq_start_loc + cur_batch)
offset_m = tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
offset_n = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
offset_k = tl.arange(0, BLOCK_K)
ram = tl.max_contiguous(tl.multiple_of(offset_m % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
    a_ptr = input_ptr + cur_seq_start * xm_stride + ram[:, None
        ] * xm_stride + offset_k[None, :] * xk_stride
b_ptr = lora_ptr + l0_stride * lora_index + offset_k[:, None
] * lora_n_stride + rbn[None, :] * lora_k_stride
accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for k in range(tl.cdiv(K, BLOCK_K)):
if EVEN_K:
tiled_a = tl.load(a_ptr)
tiled_b = tl.load(b_ptr)
else:
tiled_a = tl.load(a_ptr, mask=offset_k[None, :] < K - k *
BLOCK_K, other=0)
tiled_b = tl.load(b_ptr, mask=offset_k[:, None] < K - k *
BLOCK_K, other=0)
if CAST_TYPE:
tiled_a = tiled_a.to(lora_ptr.dtype.element_ty)
accumulator += tl.dot(tiled_a, tiled_b)
a_ptr += BLOCK_K * xk_stride
b_ptr += BLOCK_K * lora_n_stride
tiled_c = accumulator.to(lora_ptr.dtype.element_ty)
offset_cm = cur_seq_start + tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
offset_cn = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N + slice_offset
c_ptr = out_ptr + offset_cm[:, None] * cm_stride + offset_cn[None, :
] * cn_stride
M = tl.load(seq_lens + cur_batch)
c_mask = (offset_cm[:, None] < cur_seq_start + M) & (offset_cn[None, :] <
slice_offset + N)
if ADD_INPUTS:
tiled_out = tl.load(c_ptr, mask=c_mask, other=None)
tiled_c += tiled_out
tl.store(c_ptr, tiled_c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/sgmv_expand_slice.py |
404e3e4e-48df-4d01-9ca3-88d31910bc38 | masked_load_store.py | ROCm/aotriton | tritonsrc/masked_load_store.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def mload2d(REG_ROWS: tl.constexpr, REG_COLS: tl.constexpr, i_base,
i_start_row, i_start_col, i_rows, i_cols, stride_row, stride_col):
off_rows = tl.arange(0, REG_ROWS) + i_start_row
off_cols = tl.arange(0, REG_COLS) + i_start_col
i_ptrs = i_base + off_rows[:, None] * stride_row + off_cols[None, :
] * stride_col
row_overflow = i_start_row + REG_ROWS - i_rows
col_overflow = i_start_col + REG_COLS - i_cols
i_ptrs_mask = tl.full([REG_ROWS, REG_COLS], 1, dtype=tl.int1)
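    # Only build bounds masks for dimensions that actually overrun the tensor.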
if row_overflow > 0:
i_ptrs_mask = i_ptrs_mask & (off_rows[:, None] < i_rows)
if col_overflow > 0:
i_ptrs_mask = i_ptrs_mask & (off_cols[None, :] < i_cols)
return tl.load(i_ptrs, mask=i_ptrs_mask, other=0.0)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/masked_load_store.py |
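A minimal sketch of how such a helper is typically used from another jitted kernel, assuming mload2d above is in scope: the load needs no mask at the call site because the overflow checks build it internally, while the store still masks explicitly.

import triton
import triton.language as tl

@triton.jit
def copy2d_kernel(src, dst, rows, cols, stride_row, stride_col,
                  BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    # boundary handling happens inside mload2d; out-of-range lanes read 0.0
    tile = mload2d(BLOCK_M, BLOCK_N, src, pid_m * BLOCK_M, pid_n * BLOCK_N,
                   rows, cols, stride_row, stride_col)
    off_r = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    off_c = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    mask = (off_r[:, None] < rows) & (off_c[None, :] < cols)
    tl.store(dst + off_r[:, None] * stride_row + off_c[None, :] * stride_col,
             tile, mask=mask)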
00c82ef8-66e1-47a7-8dd2-64f2b33c6b0b | matmul.py | MichaelWei7/torch | _inductor/triton_ops/matmul.py | 4bfe6988308edc9544ddae94bfdcf83a4326b04a | 0 | @mm_heuristics()
@mm_autotune(get_io_bound_configs=True)
@triton.jit
def _kernel(A, B, C, M, N, K, stride_am, stride_ak, stride_bk, stride_bn,
stride_cm, stride_cn, allow_tf32: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr,
SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, ACC_TYPE: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K * SPLIT_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.0)
b = tl.load(B, mask=rk[:, None] < k, other=0.0)
acc += tl.dot(a, b, allow_tf32=allow_tf32)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD"
] | https://github.com/MichaelWei7/torch/blob/4bfe6988308edc9544ddae94bfdcf83a4326b04a/_inductor/triton_ops/matmul.py |
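The GROUP_M swizzle is the subtle part of this kernel. A plain-Python rehearsal of the same index arithmetic (illustrative only, not part of the library) shows how consecutive program ids sweep GROUP_M tile rows column by column, which improves L2 reuse of the A tiles.

def tile_of(pid, M, N, BLOCK_M, BLOCK_N, GROUP_M):
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    return (group_id * GROUP_M + pid % group_size,  # pid_m
            pid % width // group_size)              # pid_n

# With grid_m = grid_n = 4 and GROUP_M = 2, the first eight programs visit
# [(0,0), (1,0), (0,1), (1,1), (0,2), (1,2), (0,3), (1,3)].
# Note also that SPLIT_K > 1 requires C to be zero-initialized, since the
# partial tiles are combined with tl.atomic_add rather than tl.store.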
cc5bd1e2-ee94-444e-9383-a3e3b726ce7c | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/hgrn/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BD': BD}, num_warps=num_warps) for
BD in [32, 64, 128] for num_warps in [1, 2, 4, 8]], key=['D'])
@triton.jit
def fused_recurrent_hgrn_fwd_kernel(x, g, o, h0, ht, offsets,
        T: tl.constexpr, D: tl.constexpr, BD: tl.constexpr,
        USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr,
        USE_OFFSETS: tl.constexpr):
i_d, i_n = tl.program_id(0), tl.program_id(1)
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_x = x + bos * D + o_d
p_g = g + bos * D + o_d
p_o = o + bos * D + o_d
b_h = tl.zeros([BD], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_n * D + o_d
b_h += tl.load(p_h0, mask=mask, other=0).to(tl.float32)
for _ in range(0, T):
b_x = tl.load(p_x, mask=mask, other=0).to(tl.float32)
b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
b_h = tl.exp(b_g) * b_h + b_x
tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask)
p_x += D
p_g += D
p_o += D
if STORE_FINAL_STATE:
p_ht = ht + i_n * D + o_d
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/fused_recurrent.py |
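A pure-PyTorch reference of the recurrence the kernel fuses, h_t = exp(g_t) * h_{t-1} + x_t, for the fixed-length (USE_OFFSETS=False) case; the (batch, T, D) shapes and names are assumptions for illustration.

import torch

def hgrn_fwd_ref(x, g, h0=None):
    # x, g: (batch, T, D); h0: optional (batch, D) initial state
    B, T, D = x.shape
    h = (torch.zeros(B, D, dtype=torch.float32, device=x.device)
         if h0 is None else h0.float())
    o = torch.empty(B, T, D, dtype=torch.float32, device=x.device)
    for t in range(T):
        h = torch.exp(g[:, t].float()) * h + x[:, t].float()
        o[:, t] = h
    return o.to(x.dtype), h  # outputs and final state (cf. STORE_FINAL_STATE)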
abce0537-6e40-4d23-9771-998b073c6648 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _jagged_flash_attention_bwd_basic_kernel(q_ptr, k_ptr, v_ptr, o_ptr,
offset_ptr, dq_ptr, dk_ptr, dv_ptr, do_ptr, delta_ptr, lse_ptr,
stride_qm, stride_qd, stride_kn, stride_kd, stride_vn, stride_vd,
stride_om, stride_od, stride_dqm, stride_dqd, stride_dkn, stride_dkd,
stride_dvn, stride_dvd, stride_dom, stride_dod, max_seq_len, D: tl.
constexpr, use_mask: tl.constexpr, allow_tf32: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_D:
tl.constexpr):
pid_batch = tl.program_id(axis=1)
begin = tl.load(offset_ptr + pid_batch)
end = tl.load(offset_ptr + pid_batch + 1)
M = tl.minimum(end - begin, max_seq_len)
pid_n = tl.program_id(axis=0)
offs_d = tl.arange(0, BLOCK_SIZE_D)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_m = tl.arange(0, BLOCK_SIZE_M)
q_ptrs = q_ptr + begin * stride_qm + (offs_m[:, None] * stride_qm +
offs_d[None, :] * stride_qd)
k_ptrs = k_ptr + begin * stride_kn + (offs_n[:, None] * stride_kn +
offs_d[None, :] * stride_kd)
v_ptrs = v_ptr + begin * stride_vn + (offs_n[:, None] * stride_vn +
offs_d[None, :] * stride_vd)
do_ptrs = do_ptr + begin * stride_dom + (offs_m[:, None] * stride_dom +
offs_d[None, :] * stride_dod)
k = tl.load(k_ptrs, mask=(offs_d[None, :] < D) & (offs_n[:, None] < M))
v = tl.load(v_ptrs, mask=(offs_d[None, :] < D) & (offs_n[:, None] < M))
dv = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_D], dtype=tl.float32)
dk = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_D], dtype=tl.float32)
for begin_m in range(0, M, BLOCK_SIZE_M):
offs_m_temp = begin_m + offs_m
q = tl.load(q_ptrs, mask=(offs_d[None, :] < D) & (offs_m_temp[:,
None] < M))
qk = tl.dot(q, tl.trans(k), allow_tf32=allow_tf32)
mn_mask = (offs_m_temp[:, None] < M) & (offs_n[None, :] < M)
lse_i = tl.load(lse_ptr + offs_m_temp + begin, mask=offs_m_temp < M)
p = tl.exp(qk - lse_i[:, None])
p = tl.where(mn_mask, p, 0.0)
p /= max_seq_len
p_masked = p
attn_mask = None
if use_mask:
attn_mask = offs_m_temp[:, None] - offs_n[None, :]
attn_mask = tl.where(mn_mask, attn_mask, 0.0)
attn_mask = tl.where(attn_mask > 0, 0.0, 1.0)
p_masked = tl.where(attn_mask > 0, p, 0.0)
p_masked = p_masked.to(do_ptr.dtype.element_ty)
do = tl.load(do_ptrs, mask=(offs_d[None, :] < D) & (offs_m_temp[:,
None] < M))
dv += tl.dot(tl.trans(p_masked), do, allow_tf32=allow_tf32)
dp = tl.dot(do, tl.trans(v), allow_tf32=allow_tf32)
Di = tl.load(delta_ptr + offs_m_temp + begin, mask=offs_m_temp < M)
dp_masked = dp
if use_mask:
dp_masked = tl.where(attn_mask > 0, dp, 0.0)
ds = p * (dp_masked - Di[:, None] * max_seq_len)
ds = ds.to(q_ptr.dtype.element_ty)
dk += tl.dot(tl.trans(ds), q, allow_tf32=allow_tf32)
q_ptrs += BLOCK_SIZE_M * stride_qm
do_ptrs += BLOCK_SIZE_M * stride_dom
dk_ptrs = dk_ptr + begin * stride_dkn + (offs_n[:, None] * stride_dkn +
offs_d[None, :] * stride_dkd)
dv_ptrs = dv_ptr + begin * stride_dvn + (offs_n[:, None] * stride_dvn +
offs_d[None, :] * stride_dvd)
tl.store(dk_ptrs, dk, mask=(offs_d[None, :] < D) & (offs_n[:, None] < M))
tl.store(dv_ptrs, dv, mask=(offs_d[None, :] < D) & (offs_n[:, None] < M))
start_m = tl.program_id(axis=0) * BLOCK_SIZE_N
offs_m_curr = start_m + tl.arange(0, BLOCK_SIZE_N)
dq_ptrs_curr = dq_ptr + begin * stride_dqm + (offs_m_curr[:, None] *
stride_dqm + offs_d[None, :] * stride_dqd)
dq_curr = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_D], dtype=tl.float32)
q_ptrs_curr = q_ptr + begin * stride_qm + (offs_m_curr[:, None] *
stride_qm + offs_d[None, :] * stride_qd)
q_curr = tl.load(q_ptrs_curr, mask=(offs_d[None, :] < D) & (offs_m_curr
[:, None] < M))
lse_i_curr = tl.load(lse_ptr + offs_m_curr + begin, mask=offs_m_curr < M)
do_ptrs_curr = do_ptr + begin * stride_dom + (offs_m_curr[:, None] *
stride_dom + offs_d[None, :] * stride_dod)
do_curr = tl.load(do_ptrs_curr, mask=(offs_d[None, :] < D) & (
offs_m_curr[:, None] < M))
Di_curr = tl.load(delta_ptr + offs_m_curr + begin, mask=offs_m_curr < M)
block_start = 0
while block_start < M:
offs_n_curr = block_start + tl.arange(0, BLOCK_SIZE_M)
k_ptrs_curr = k_ptr + begin * stride_kn + (offs_n_curr[:, None] *
stride_kn + offs_d[None, :] * stride_kd)
v_ptrs_curr = v_ptr + begin * stride_vn + (offs_n_curr[:, None] *
stride_vn + offs_d[None, :] * stride_vd)
k_curr = tl.load(k_ptrs_curr, mask=(offs_d[None, :] < D) & (
offs_n_curr[:, None] < M))
v_curr = tl.load(v_ptrs_curr, mask=(offs_d[None, :] < D) & (
offs_n_curr[:, None] < M))
qk_curr = tl.dot(q_curr, tl.trans(k_curr), allow_tf32=allow_tf32)
mn_mask_curr = (offs_m_curr[:, None] < M) & (offs_n_curr[None, :] < M)
p_curr = tl.exp(qk_curr - lse_i_curr[:, None])
p_curr = tl.where(mn_mask_curr, p_curr, 0.0)
p_curr /= max_seq_len
dp_curr = tl.dot(do_curr, tl.trans(v_curr), allow_tf32=allow_tf32)
dp_curr_masked = dp_curr
if use_mask:
attn_mask = offs_m_curr[:, None] - offs_n_curr[None, :]
attn_mask = tl.where(mn_mask_curr, attn_mask, 0.0)
attn_mask = tl.where(attn_mask > 0, 0.0, 1.0)
dp_curr_masked = tl.where(attn_mask > 0, dp_curr, 0.0)
ds_curr = p_curr * (dp_curr_masked - Di_curr[:, None] * max_seq_len)
ds_curr = ds_curr.to(k_ptr.dtype.element_ty)
dq_curr += tl.dot(ds_curr, k_curr, allow_tf32=allow_tf32)
block_start += BLOCK_SIZE_M
tl.store(dq_ptrs_curr, dq_curr, mask=(offs_d[None, :] < D) & (
offs_m_curr[:, None] < M))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
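The jagged layout this kernel assumes is worth making explicit: all sequences are packed along one row axis and offset_ptr holds the prefix sums, so sequence b occupies rows [offsets[b], offsets[b+1]). A small illustrative sketch:

import torch

seq_lens = torch.tensor([3, 5, 2])
offsets = torch.cat([torch.zeros(1, dtype=torch.long), seq_lens.cumsum(0)])
# offsets == tensor([0, 3, 8, 10]); this is what offset_ptr points at
q = torch.randn(int(offsets[-1]), 64)   # packed queries, (total_rows, D)
q_b = q[offsets[1]:offsets[2]]          # rows of sequence 1, shape (5, 64)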
617a9bbf-b4c6-4dab-8223-f52320eabcd6 | kernels.py | ShenzheZhu/sparse_autoencoder | sparse_autoencoder/kernels.py | afef049c905fda5b0f69729127ce0d3a42399152 | 0 | @triton.jit
def triton_mse_loss_fp16_kernel(output_ptr, target_ptr, out_ptr,
stride_a_output, stride_a_target, a, b, BLOCK_SIZE_B: tl.constexpr):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(output_ptr + pid * stride_a_output + offsets_b, mask=
offsets_b < b)
target = tl.load(target_ptr + pid * stride_a_target + offsets_b, mask=
offsets_b < b)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ShenzheZhu/sparse_autoencoder/blob/afef049c905fda5b0f69729127ce0d3a42399152/sparse_autoencoder/kernels.py |
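A hedged host-side wrapper (names assumed, not part of the repo): one program per row, with BLOCK_SIZE_B a power of two covering the row width so the masked loads handle the tail lanes.

import torch
import triton

def row_mse(output, target):
    # output, target: (a, b) fp16 tensors; returns per-row MSE in fp32
    a, b = output.shape
    out = torch.empty(a, dtype=torch.float32, device=output.device)
    BLOCK_SIZE_B = triton.next_power_of_2(b)
    triton_mse_loss_fp16_kernel[(a,)](output, target, out,
                                      output.stride(0), target.stride(0),
                                      a, b, BLOCK_SIZE_B=BLOCK_SIZE_B)
    return out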
00406477-a3a9-43aa-bcb6-92042cbffae0 | 04-low-memory-dropout.py | triton-lang/triton | python/tutorials/04-low-memory-dropout.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _dropout(x_ptr, x_keep_ptr, output_ptr, n_elements, p,
             BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
x_keep = tl.load(x_keep_ptr + offsets, mask=mask)
output = tl.where(x_keep, x / (1 - p), 0.0)
tl.store(output_ptr + offsets, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/04-low-memory-dropout.py |
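A minimal usage sketch: this variant takes a precomputed keep mask (the tutorial's low-memory version instead derives the mask from a seed inside the kernel). Scaling survivors by 1/(1-p) keeps the expected activation unchanged.

import torch
import triton

def dropout(x, p):
    x = x.contiguous()
    x_keep = (torch.rand_like(x) > p).to(torch.int32)  # 1 = keep the element
    out = torch.empty_like(x)
    n = x.numel()
    grid = (triton.cdiv(n, 1024),)
    _dropout[grid](x, x_keep, out, n, p, BLOCK_SIZE=1024)
    return out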
020e145a-16d9-4584-82d7-d36d740a0ca5 | stats.py | neuro-ml/kerops | kerops/kernels/stats.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _Stats_cl3d_impl(X_ptr, Mean_ptr, Sqmean_ptr, numel_no_channels,
num_channels: tl.constexpr, block_other: tl.constexpr):
pid = tl.program_id(0)
X_ptr += pid * block_other * num_channels
channels_offset = tl.arange(0, num_channels)
other_offset = tl.arange(0, block_other)
offset = other_offset[:, None] * num_channels + channels_offset[None, :]
mask = other_offset[:, None] < numel_no_channels - pid * block_other
x = tl.load(X_ptr + offset, mask=mask, other=0.0).to(tl.float32)
mean = tl.sum(x, axis=0) / numel_no_channels
sqmean = tl.sum(x * x, axis=0) / numel_no_channels
tl.atomic_add(Mean_ptr + channels_offset, mean)
tl.atomic_add(Sqmean_ptr + channels_offset, sqmean)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/stats.py |
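A hedged launch sketch (assumed names and channels-last layout): each program reduces block_other rows and atomically adds its per-channel partial mean and square-mean, so both output buffers must be zero-initialized; num_channels feeds tl.arange and therefore must be a power of two.

import torch
import triton

def channel_stats(x, num_channels, block_other=64):
    x2d = x.reshape(-1, num_channels).contiguous()
    numel_no_channels = x2d.size(0)
    mean = torch.zeros(num_channels, dtype=torch.float32, device=x.device)
    sqmean = torch.zeros(num_channels, dtype=torch.float32, device=x.device)
    grid = (triton.cdiv(numel_no_channels, block_other),)
    _Stats_cl3d_impl[grid](x2d, mean, sqmean, numel_no_channels,
                           num_channels=num_channels, block_other=block_other)
    return mean, sqmean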
60939418-875a-427c-b473-1f90ef624523 | gemm_preop_exp_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M':
256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4,
'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({
'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32
), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512,
'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages
=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N':
128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'},
num_stages=2, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers_batched(a_ptr, b_ptr, c_ptr,
        B: tl.constexpr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
        stride_az: tl.constexpr, stride_am: tl.constexpr,
        stride_ak: tl.constexpr, stride_bz: tl.constexpr,
        stride_bk: tl.constexpr, stride_bn: tl.constexpr,
        stride_cz: tl.constexpr, stride_cm: tl.constexpr,
        stride_cn: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
        BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
        GROUP_SIZE_M: tl.constexpr):
bid = tl.program_id(axis=0)
pid = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offset_a = bid.to(tl.int64) * stride_az
offset_b = bid.to(tl.int64) * stride_bz
a_block_ptr = tl.make_block_ptr(base=a_ptr + offset_a, shape=(M, K),
strides=(stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr + offset_b, shape=(K, N),
strides=(stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
a = a.to(tl.float32)
a = tl.math.exp(a)
a = a.to(tl.bfloat16)
b = tl.load(b_block_ptr, boundary_check=(0, 1))
accumulator += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
c = accumulator.to(tl.float32)
offset_c = bid.to(tl.int64) * stride_cz
c_block_ptr = tl.make_block_ptr(base=c_ptr + offset_c, shape=(M, N),
strides=(stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M,
pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N),
order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py |
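A hedged launch sketch (names and the bf16 layout are assumptions): grid axis 0 indexes the batch and axis 1 enumerates output tiles, so the grid must be written as a function of the meta-parameters the autotuner picks.

import torch
import triton

def batched_exp_matmul(a, b):
    # a: (B, M, K), b: (B, K, N), both bf16; computes exp(a) @ b in fp32
    B, M, K = a.shape
    _, _, N = b.shape
    c = torch.empty(B, M, N, device=a.device, dtype=torch.float32)
    grid = lambda META: (B, triton.cdiv(M, META['BLOCK_SIZE_M']) *
                         triton.cdiv(N, META['BLOCK_SIZE_N']))
    matmul_kernel_with_block_pointers_batched[grid](
        a, b, c, B, M, N, K,
        a.stride(0), a.stride(1), a.stride(2),
        b.stride(0), b.stride(1), b.stride(2),
        c.stride(0), c.stride(1), c.stride(2))
    return c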
b2ab2e7c-d4ec-466b-bffb-cb8427e67f7f | l2norm.py | sustcsonglin/flash-linear-attention | fla/modules/l2norm.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16, 32]], key=['N'])
@triton.jit
def l2norm_fwd_kernel(X, Y, stride_x_row, N, eps, BLOCK_N: tl.constexpr):
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_x_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0)
rstd = 1 / tl.sqrt(var + eps)
mask = cols < N
y = x * rstd
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/l2norm.py |
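A hedged host-side wrapper (assumed names): one program per row, with BLOCK_N a power of two covering the row so the whole reduction happens in a single block; the autotuner here only varies num_warps.

import torch
import triton

def l2norm_fwd(x, eps=1e-6):
    M, N = x.shape
    y = torch.empty_like(x)
    BLOCK_N = triton.next_power_of_2(N)
    l2norm_fwd_kernel[(M,)](x, y, x.stride(0), N, eps, BLOCK_N=BLOCK_N)
    return y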