uuid | file_name | repo_name | file_path | commit_hash | starcount | input | category | licenses | github_url |
---|---|---|---|---|---|---|---|---|---|
203cae00-18cd-4f73-9919-f4c8e964f077 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_K': b}, num_warps=w) for
b, w in itertools.product([2, 4, 16, 32, 128, 256], [2, 4, 8])], key=['N'])
@triton.jit
def triton_sum_kernel_2D_result_dim_1(input_ptr, output_ptr, M: tl.
constexpr, N: tl.constexpr, K: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr):
pid = tl.program_id(axis=0)
pid_m = pid // tl.cdiv(K, BLOCK_SIZE_K)
pid_k = pid % tl.cdiv(K, BLOCK_SIZE_K)
block_start_n = 0
block_start_k = pid_k * BLOCK_SIZE_K
offsets_n = block_start_n + tl.arange(0, BLOCK_SIZE_N)
offsets_k = block_start_k + tl.arange(0, BLOCK_SIZE_K)
mask_n = offsets_n < N
mask_k = offsets_k < K
idxs_base = offsets_n[:, None] * K + offsets_k
idxs = idxs_base + pid_m * N * K
mask = mask_n[:, None] & mask_k
input = tl.load(input_ptr + idxs, mask=mask, other=0)
output = tl.sum(input, axis=0)
output_offsets = pid_m * K + offsets_k
tl.store(output_ptr + output_offsets, output, mask=mask_k)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py |
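A minimal host-side launch sketch for this reduction (an assumption, not part of the benchmarked file): the kernel sums a `(M, N, K)` tensor over dimension 1, covers the whole `N` axis in one tile, and leaves `BLOCK_SIZE_K` to the autotuner.

```python
import torch
import triton

def sum_dim1(x: torch.Tensor) -> torch.Tensor:
    # x: (M, N, K) -> out: (M, K); hypothetical wrapper for the kernel above.
    M, N, K = x.shape
    out = torch.empty((M, K), device=x.device, dtype=x.dtype)
    # One program per (m, k-block); BLOCK_SIZE_K comes from the autotuner.
    grid = lambda meta: (M * triton.cdiv(K, meta['BLOCK_SIZE_K']),)
    triton_sum_kernel_2D_result_dim_1[grid](
        x, out, M=M, N=N, K=K,
        BLOCK_SIZE_N=triton.next_power_of_2(N),  # whole N axis in one tile
    )
    return out
```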
683528ff-21dc-4e7c-be90-58e192bdd603 | gemm_a16w8.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/gemm_a16w8.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _triton_gemm_a16w8_sub_channel_kernel(A, B, C, scale_b, bias,
zero_points, M, N, K, stride_am, stride_ak, stride_bn, stride_bk,
stride_cm, stride_cn, stride_zpk, stride_zpn, stride_scalek,
stride_scalen, add_bias: tl.constexpr, add_zero_points: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rbn[:, None] * stride_bn + rk[None, :] * stride_bk)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
scale_w_offs = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
_SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
k_remaining = K - k * (BLOCK_K * SPLIT_K)
_A0 = tl.zeros((1, 1), dtype=A.dtype.element_ty)
a = tl.load(A, mask=rk[None, :] < k_remaining, other=_A0)
_B0 = tl.zeros((1, 1), dtype=B.dtype.element_ty)
b = tl.load(B, mask=rk[None, :] < k_remaining, other=_B0)
if add_zero_points:
_ZERO_POINT0 = tl.zeros([1], dtype=zero_points.dtype.element_ty)
zero_points_offs = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
zero_points_ptrs = zero_points + (k * SPLIT_K + pid_z
) * stride_zpk + zero_points_offs
zero_points_vals = tl.load(zero_points_ptrs, mask=
zero_points_offs < N, other=_ZERO_POINT0)
b = b - zero_points_vals[:, None]
scale_ptrs = (scale_b + k * SPLIT_K * stride_scalek + pid_z *
stride_scalek + scale_w_offs)
scales = tl.load(scale_ptrs, mask=scale_w_offs < N, other=_SCALE0)
b_fp = b * scales[:, None]
b_fp = tl.trans(b_fp)
acc += tl.dot(a, b_fp, out_dtype=tl.float32, allow_tf32=True)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
acc = acc.to(C.dtype.element_ty)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
if add_bias:
offs_bias = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
bias_ptrs = bias + offs_bias
_BIAS0 = tl.zeros([1], dtype=bias.dtype.element_ty)
bias_vals = tl.load(bias_ptrs, mask=offs_bias < N, other=_BIAS0)
if pid_z == 0:
acc += bias_vals[None, :]
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w8.py |
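None of the host code below appears in the original file; it is a hypothetical launch assuming `A` is `(M, K)` half precision, `B` is `(N, K)` int8, `scale_b` and `zero_points` are `(cdiv(K, BLOCK_K), N)` sub-channel tensors, and `bias` is a real `(N,)` tensor.

```python
import torch
import triton

def gemm_a16w8_sub_channel(A, B, C_dtype_like, scale_b, bias, zero_points,
                           BLOCK_M=32, BLOCK_N=64, BLOCK_K=64,
                           GROUP_M=8, SPLIT_K=1):
    M, K = A.shape
    N, _ = B.shape
    # Zero-init so SPLIT_K > 1 can accumulate with atomic adds.
    C = torch.zeros((M, N), device=A.device, dtype=A.dtype)
    grid = (triton.cdiv(M, BLOCK_M) * triton.cdiv(N, BLOCK_N), SPLIT_K)
    _triton_gemm_a16w8_sub_channel_kernel[grid](
        A, B, C, scale_b, bias, zero_points, M, N, K,
        A.stride(0), A.stride(1), B.stride(0), B.stride(1),
        C.stride(0), C.stride(1),
        zero_points.stride(0), zero_points.stride(1),
        scale_b.stride(0), scale_b.stride(1),
        add_bias=True, add_zero_points=True,
        BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_K=BLOCK_K,
        GROUP_M=GROUP_M, SPLIT_K=SPLIT_K)
    return C
```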
c5831adf-3388-428d-9adb-9b92d80bed77 | layernorm.py | sustcsonglin/flash-linear-attention | fla/modules/layernorm.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_RESIDUAL', 'STORE_RESIDUAL_OUT',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_fwd_kernel(X, Y, W, B, RESIDUAL, RESIDUAL_OUT, Mean, Rstd,
stride_x_row, stride_y_row, stride_res_row, stride_res_out_row, N, G,
eps, IS_RMS_NORM: tl.constexpr, BLOCK_N: tl.constexpr, HAS_RESIDUAL: tl
.constexpr, STORE_RESIDUAL_OUT: tl.constexpr, HAS_WEIGHT: tl.constexpr,
HAS_BIAS: tl.constexpr):
row = tl.program_id(0)
group = row % G
X += row * stride_x_row
Y += row * stride_y_row
if HAS_RESIDUAL:
RESIDUAL += row * stride_res_row
if STORE_RESIDUAL_OUT:
RESIDUAL_OUT += row * stride_res_out_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
if HAS_RESIDUAL:
residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl
.float32)
x += residual
if STORE_RESIDUAL_OUT:
tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
if not IS_RMS_NORM:
mean = tl.sum(x, axis=0) / N
tl.store(Mean + row, mean)
xbar = tl.where(cols < N, x - mean, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
else:
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
tl.store(Rstd + row, rstd)
mask = cols < N
if HAS_WEIGHT:
w = tl.load(W + group * stride_x_row + cols, mask=mask).to(tl.float32)
if HAS_BIAS:
b = tl.load(B + group * stride_x_row + cols, mask=mask).to(tl.float32)
x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
y = x_hat * w if HAS_WEIGHT else x_hat
if HAS_BIAS:
y = y + b
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/layernorm.py |
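A minimal wrapper sketch, assuming plain LayerNorm with weight and bias, no residual path, and group count `G = 1` (the repo ships a fuller wrapper; this one is illustrative only):

```python
import torch
import triton

def layer_norm_fwd(x, weight, bias, eps=1e-5):
    # One program per row; BLOCK_N must cover the full feature dimension.
    M, N = x.shape
    y = torch.empty_like(x)
    mean = torch.empty(M, device=x.device, dtype=torch.float32)
    rstd = torch.empty(M, device=x.device, dtype=torch.float32)
    layer_norm_fwd_kernel[(M,)](
        x, y, weight, bias, x, x, mean, rstd,  # residual args unused here
        x.stride(0), y.stride(0), 0, 0, N, 1, eps,
        IS_RMS_NORM=False, BLOCK_N=triton.next_power_of_2(N),
        HAS_RESIDUAL=False, STORE_RESIDUAL_OUT=False,
        HAS_WEIGHT=True, HAS_BIAS=True)
    return y, mean, rstd
```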
ec15a21e-2872-4637-b17e-81c3c60d4e50 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for
num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=['BT'])
@triton.jit
def chunk_gsa_fwd_k_kernel_inter(q, k, h, g, o, A, offsets, indices, scale,
T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NG:
tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_o = tl.zeros([BT, BV], dtype=tl.float32)
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bg * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_bg * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (T, K), (HQ *
K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (
V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_o += tl.dot(b_q, b_h)
b_A += tl.dot(b_q, b_k)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (bos * HQ + i_hq) * V, (T, V), (HQ * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + (bos * HQ + i_hq) * BT, (T, BT), (HQ *
BT, 1), (i_t * BT, 0), (BT, BT), (1, 0))
b_g = tl.load(p_g, boundary_check=(0, 1))
b_o = b_o * tl.exp(b_g)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
b_A = tl.where(m_s, b_A, 0.0)
if i_v == 0:
tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
4b05f8aa-476d-4b90-b0cb-9c78955c0028 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def reduce_mlstm_triton(F_REDUCED_IN, F_REDUCED_OUT, C, N, NH: tl.constexpr,
D: tl.constexpr, NSB: tl.constexpr, BLOCK_SIZE: tl.constexpr):
bh_id = tl.program_id(0)
x_id = tl.program_id(1)
y_id = tl.program_id(2)
batch_id = bh_id // NH
head_id = bh_id % NH
nsb_range = tl.arange(0, NSB)
nsb_range_2d = tl.arange(0, NSB)[:, None]
nsb_range_3d = tl.arange(0, NSB)[:, None, None]
block_range = tl.arange(0, BLOCK_SIZE)
block_range_2d = block_range[None, :]
x_range = block_range + x_id * BLOCK_SIZE
x_range_3d = x_range[None, :, None]
y_range = block_range + y_id * BLOCK_SIZE
y_range_3d = y_range[None, None, :]
batch_offset_f = batch_id * NH * NSB + head_id * NSB
batch_offset_n = batch_id * NH * NSB * D + head_id * NSB * D
batch_offset_c = batch_id * NH * NSB * D * D + head_id * NSB * D * D
f = tl.load(F_REDUCED_IN + batch_offset_f + nsb_range)
c_range = (batch_offset_c + nsb_range_3d * D * D + x_range_3d * D +
y_range_3d)
c_mask = (nsb_range_3d < NSB) & (x_range_3d < D) & (y_range_3d < D)
c = tl.load(C + c_range, c_mask)
f_reduced, c = tl.associative_scan((tl.broadcast_to(f[:, None, None], (
NSB, BLOCK_SIZE, BLOCK_SIZE)), c), 0, scan_op)
tl.store(C + c_range, c, c_mask)
if x_id == 0 and y_id == 0:
f_range = batch_offset_f + nsb_range_3d + block_range[None, :, None
] * 0 + block_range[None, None, :] * 0
f_mask = (nsb_range_3d < NSB) & (block_range[None, :, None] == 0) & (
block_range[None, None, :] == 0)
tl.store(F_REDUCED_OUT + f_range, f_reduced, f_mask)
if x_id == 0:
n_range = (batch_offset_n + nsb_range_2d * D + block_range_2d +
y_id * BLOCK_SIZE)
n_mask = (nsb_range_2d < NSB) & (block_range_2d + y_id * BLOCK_SIZE < D
)
n = tl.load(N + n_range, n_mask)
_, n = tl.associative_scan((tl.broadcast_to(f[:, None], (NSB,
BLOCK_SIZE)), n), 0, scan_op)
tl.store(N + n_range, n, n_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
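This kernel calls `scan_op`, which is defined elsewhere in `mlstm_scan.py`. A plausible definition for the first-order recurrence the scan composes (an assumption inferred from the scan semantics, not copied from the repo):

```python
import triton
import triton.language as tl

@triton.jit
def scan_op(f1, x1, f2, x2):
    # Associative combine for c_t = f_t * c_{t-1} + x_t: applying step
    # (f1, x1) and then (f2, x2) is equivalent to (f1*f2, x1*f2 + x2).
    return f1 * f2, x1 * f2 + x2
```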
c775be82-a8e2-4755-9d15-d9d2fdf331a0 | mlstm_scan.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_scan.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def roll_op(a1, b1_last, b1_cur, a2, b2_last, b2_cur):
return a1 + a2, tl.where(a2 == 1, b1_cur, 0) + b2_last, b2_cur
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py |
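A minimal demonstration of what `roll_op` computes when used as an associative-scan operator (an assumption about the intended use: shifting a sequence right by one position; the demo kernel and driver below are not from the repo):

```python
import torch
import triton
import triton.language as tl

@triton.jit
def _roll_demo(x_ptr, y_ptr, N: tl.constexpr):
    offs = tl.arange(0, N)
    x = tl.load(x_ptr + offs)
    ones = tl.zeros([N], dtype=tl.int32) + 1
    # Inclusive scan of (1, 0, x_i) triples yields x_{i-1} in the middle slot.
    _, rolled, _ = tl.associative_scan((ones, 0 * x, x), 0, roll_op)
    tl.store(y_ptr + offs, rolled)

x = torch.arange(1, 9, device='cuda', dtype=torch.int32)
y = torch.empty_like(x)
_roll_demo[(1,)](x, y, N=8)   # y -> [0, 1, 2, 3, 4, 5, 6, 7]
```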
723fee45-0116-4b18-9403-354cfe2b1f3a | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_2_softmax_backward_kernel(grad_output_ptr, softmax_output_ptr,
grad_input_ptr, offsets_row_ptr, offsets_col_ptr, offsets_overall_ptr,
grad_output_stride, softmax_output_stride, grad_input_stride, transpose,
max_seq_len_row: tl.constexpr, max_seq_len_col: tl.constexpr,
BLOCK_SIZE: tl.constexpr):
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
begin = tl.load(offsets_overall_ptr + pid_batch)
if transpose:
N = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
H = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
stride_n = H
stride_h = H // H
H = tl.minimum(max_seq_len_col, H)
N = tl.minimum(max_seq_len_row, N)
else:
N = tl.load(offsets_col_ptr + pid_batch + 1) - tl.load(
offsets_col_ptr + pid_batch)
H = tl.load(offsets_row_ptr + pid_batch + 1) - tl.load(
offsets_row_ptr + pid_batch)
stride_h = N
stride_n = N // N
H = tl.minimum(max_seq_len_row, H)
N = tl.minimum(max_seq_len_col, N)
if pid_head >= H:
return
if H == 0 or N == 0:
pass
start_ptr = grad_output_ptr + begin * grad_output_stride
offsets = tl.arange(0, BLOCK_SIZE)
grad_output_ptrs = (start_ptr + offsets * grad_output_stride * stride_n +
pid_head * grad_output_stride * stride_h)
softmax_output_ptrs = (softmax_output_ptr + begin *
softmax_output_stride + offsets * softmax_output_stride * stride_n +
pid_head * softmax_output_stride * stride_h)
grad_output_row = tl.load(grad_output_ptrs, mask=offsets < N, other=0.0)
softmax_output_row = tl.load(softmax_output_ptrs, mask=offsets < N,
other=0.0)
sum_value = tl.sum(grad_output_row * softmax_output_row, axis=0)
grad_input_row = (grad_output_row - sum_value) * softmax_output_row
grad_input_row_start_ptr = grad_input_ptr + begin * grad_input_stride
grad_input_ptrs = (grad_input_row_start_ptr + offsets *
grad_input_stride * stride_n + pid_head * grad_input_stride * stride_h)
tl.store(grad_input_ptrs, grad_input_row, mask=offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
59b8c65e-8a9d-4eb0-8a49-f36eda5a4381 | kl.py | ardywibowo/triton-mode | kernels/kl.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_kl_forward(y_pred_ptr, y_pred_stride, y_true_ptr, y_true_stride,
output_loss_ptr, output_loss_stride, num_classes, epsilon, BLOCK_SIZE:
tl.constexpr, log_target: tl.constexpr=False, reduction_mode: tl.
constexpr=REDUCE_BATCH_MEAN):
row_id = tl.program_id(0).to(tl.int64)
y_pred_ptr += row_id * y_pred_stride
y_true_ptr += row_id * y_true_stride
output_loss_ptr += row_id * output_loss_stride
base_offsets = tl.arange(0, BLOCK_SIZE)
loss_sum = 0.0
for i in range(0, num_classes, BLOCK_SIZE):
offsets = i + base_offsets
mask = offsets < num_classes
y_pred = tl.load(y_pred_ptr + offsets, mask=mask, other=0.0)
y_true = tl.load(y_true_ptr + offsets, mask=mask, other=0.0)
if not log_target:
loss = y_true * (tl.log(tl.maximum(y_true, epsilon)) - y_pred)
else:
loss = tl.exp(y_true) * (y_true - y_pred)
if reduction_mode == REDUCE_NONE:
tl.store(output_loss_ptr + offsets, loss, mask=mask)
else:
loss_sum += tl.sum(loss, axis=0)
if reduction_mode != REDUCE_NONE:
tl.store(output_loss_ptr, loss_sum)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/kl.py |
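The kernel references `REDUCE_BATCH_MEAN` (and, inside the body, `REDUCE_NONE`), which live elsewhere in `kernels/kl.py`. A hedged wrapper with assumed constant encodings:

```python
import torch
import triton

REDUCE_NONE = 0        # assumed values; the real constants are in kernels/kl.py
REDUCE_BATCH_MEAN = 1

def kl_forward(y_pred, y_true, log_target=False, epsilon=1e-10):
    # y_pred: (B, V) log-probabilities; y_true: (B, V) probabilities
    # (or log-probabilities when log_target=True). One program per row.
    B, V = y_pred.shape
    out = torch.zeros(B, device=y_pred.device, dtype=torch.float32)
    BLOCK_SIZE = min(triton.next_power_of_2(V), 4096)
    triton_kl_forward[(B,)](
        y_pred, y_pred.stride(0), y_true, y_true.stride(0),
        out, out.stride(0), V, epsilon, BLOCK_SIZE=BLOCK_SIZE,
        log_target=log_target, reduction_mode=REDUCE_BATCH_MEAN)
    return out.sum() / B   # finish the batch-mean reduction on the host
```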
30740efe-faab-4c6a-a9da-b62c8d379e71 | test.py | Aalanli/AMDGPUExperiments | test.py | 2a6fd9e1e81d1916e3d87db4dda930e2fa417527 | 0 | @triton.jit
def test(at, bt, ct, k):
midx = tl.arange(0, 32)
kidx = tl.arange(0, 32)
nidx = tl.arange(0, 32)
aidx = midx[:, None] * 32 + kidx[None, :]
bidx = kidx[:, None] * 32 + nidx[None, :]
cidx = midx[:, None] * 32 + nidx[None, :]
a_ptrs = at + aidx
b_ptrs = bt + bidx
c_ptrs = ct + cidx
for i in range(k):
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
x = tl.dot(a, b)
tl.atomic_add(c_ptrs, x)
a_ptrs += 32
b_ptrs += 32
c_ptrs += 32
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/Aalanli/AMDGPUExperiments/blob/2a6fd9e1e81d1916e3d87db4dda930e2fa417527/test.py |
f7119a9c-0bf5-469a-844f-759f16760205 | fused_rotary_emb.py | tascj/kaggle-lmsys-chatbot-arena | human_pref/inference/ops/fused_rotary_emb.py | 83cd93d50b9283c18711e8c63e4e1c6399c7b9ce | 0 | @wrap_jit_func(type_hint=dict(Q=Tensor, K=Tensor, PostionIds=Tensor,
InvFreq=Tensor, scaling_factor=float, OutQ=Tensor, OutK=Tensor,
stride_bq=int, stride_sq=int, stride_hq=int, stride_dq=int, stride_bk=
int, stride_sk=int, stride_hk=int, stride_dk=int, stride_bp=int,
stride_sp=int, max_seq_len=int, BLOCK=torch.int32, BLOCK_HQ=torch.int32,
BLOCK_HK=torch.int32, BLOCK_F=torch.int32))
@triton.jit
def _fused_rotary_emb_kernel(Q, K, PostionIds, InvFreq, scaling_factor,
OutQ, OutK, stride_bq, stride_sq, stride_hq: tl.constexpr, stride_dq:
tl.constexpr, stride_bk, stride_sk, stride_hk: tl.constexpr, stride_dk:
tl.constexpr, stride_bp, stride_sp, max_seq_len, BLOCK: tl.constexpr,
BLOCK_HQ: tl.constexpr, BLOCK_HK: tl.constexpr, BLOCK_F: tl.constexpr):
"""fused rotary emb kernel."""
batch_id = tl.program_id(0)
seq_block_id = tl.program_id(1)
s_off = seq_block_id * BLOCK + tl.arange(0, BLOCK)[:, None]
f_off = tl.arange(0, BLOCK_F)[None, :]
s_mask = s_off < max_seq_len
bp_off = stride_bp * batch_id
p_off = bp_off + stride_sp * s_off
sq_off = batch_id * stride_bq + s_off * stride_sq
q0_off = sq_off + f_off * stride_dq
q1_off = q0_off + BLOCK_F * stride_dq
sk_off = batch_id * stride_bk + s_off * stride_sk
k0_off = sk_off + f_off * stride_dk
k1_off = k0_off + BLOCK_F * stride_dk
inv_freq = tl.load(InvFreq + f_off).to(tl.float32)
position_ids = tl.load(PostionIds + p_off, mask=s_mask).to(tl.float32)
position_ids = position_ids / scaling_factor
pos_freq = position_ids * inv_freq
cos = tl.cos(pos_freq).to(Q.dtype.element_ty)
sin = tl.sin(pos_freq).to(Q.dtype.element_ty)
for h in range(BLOCK_HQ):
q0 = tl.load(Q + q0_off + h * stride_hq, mask=s_mask)
q1 = tl.load(Q + q1_off + h * stride_hq, mask=s_mask)
q0_out = q0 * cos - q1 * sin
tl.store(OutQ + q0_off + h * stride_hq, q0_out, mask=s_mask)
q1_out = q1 * cos + q0 * sin
tl.store(OutQ + q1_off + h * stride_hq, q1_out, mask=s_mask)
for h in range(BLOCK_HK):
k0 = tl.load(K + k0_off + h * stride_hk, mask=s_mask)
k1 = tl.load(K + k1_off + h * stride_hk, mask=s_mask)
k0_out = k0 * cos - k1 * sin
tl.store(OutK + k0_off + h * stride_hk, k0_out, mask=s_mask)
k1_out = k1 * cos + k0 * sin
tl.store(OutK + k1_off + h * stride_hk, k1_out, mask=s_mask)
| {
"Data Type": [
"fp32",
"fp16",
"bf16"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/tascj/kaggle-lmsys-chatbot-arena/blob/83cd93d50b9283c18711e8c63e4e1c6399c7b9ce/human_pref/inference/ops/fused_rotary_emb.py |
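A PyTorch reference of the per-pair rotation the kernel applies to each head (a sketch for correctness checks, not the repo's API; the layout `(batch, seq, heads, dim)` is an assumption):

```python
import torch

def rotary_ref(x, position_ids, inv_freq, scaling_factor=1.0):
    # x: (batch, seq, heads, dim); inv_freq: (dim // 2,).
    # The head dim is split into two halves, rotated as (x0, x1) ->
    # (x0*cos - x1*sin, x1*cos + x0*sin), matching the two kernel loops.
    pos = position_ids[..., None].float() / scaling_factor   # (b, s, 1)
    freqs = pos * inv_freq                                   # (b, s, dim/2)
    cos = freqs.cos()[:, :, None, :].to(x.dtype)
    sin = freqs.sin()[:, :, None, :].to(x.dtype)
    x0, x1 = x.chunk(2, dim=-1)
    return torch.cat((x0 * cos - x1 * sin, x1 * cos + x0 * sin), dim=-1)
```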
4efaee35-c75d-4f5c-8424-f258ebfd3ef4 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps) for BK in [32, 64] for BV in [64, 128] for num_warps in [2,
4, 8]], key=['BT'])
@triton.jit
def chunk_gla_fwd_kernel_o(q, v, g, h, o, A, offsets, indices, scale, T: tl
.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.
constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
m_s = tl.arange(0, BT)[:, None] >= tl.arange(0, BT)[None, :]
b_o = tl.zeros([BT, BV], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (
V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_g = tl.load(p_g, boundary_check=(0, 1))
b_qg = (b_q * tl.exp(b_g)).to(b_q.dtype)
b_h = tl.load(p_h, boundary_check=(0, 1))
if i_k >= 0:
b_o += tl.dot(b_qg, b_h.to(b_qg.dtype))
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_A = tl.load(p_A, boundary_check=(0, 1))
b_A = tl.where(m_s, b_A, 0.0).to(b_v.dtype)
b_o += tl.dot(b_A, b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"bf16"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
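A hypothetical launch for the output kernel (assuming `HEAD_FIRST` layout and no offsets; `BK`/`BV` are chosen by the autotuner, so the value-block axis of the grid is a lambda, and `USE_OFFSETS` is filled in by the heuristic):

```python
import triton

def launch_chunk_gla_fwd_o(q, v, g, h, o, A, scale, B, H, T, K, V, BT):
    # One program per (value block, time block, batch*head).
    grid = lambda meta: (triton.cdiv(V, meta['BV']), triton.cdiv(T, BT), B * H)
    chunk_gla_fwd_kernel_o[grid](
        q, v, g, h, o, A, None, None, scale,
        T=T, H=H, K=K, V=V, BT=BT, HEAD_FIRST=True)
    return o
```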
2d261111-e9c4-4247-abba-85e8407ab595 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _multi_head_jagged_flash_attention_fwd_kernel(q_ptr, k_ptr, v_ptr,
offset_ptr, o_ptr, lse_i_ptr, stride_qh, stride_qm, stride_qd,
stride_kh, stride_kn, stride_kd, stride_vh, stride_vn, stride_vd,
stride_oh, stride_om, stride_od, stride_lse_h, num_heads: tl.constexpr,
max_seq_len: tl.constexpr, D: tl.constexpr, allow_tf32: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_D: tl.constexpr):
pid_m = tl.program_id(axis=0)
pid_bh = tl.program_id(axis=1)
pid_batch = pid_bh // num_heads
pid_head = pid_bh % num_heads
begin = tl.load(offset_ptr + pid_batch)
end = tl.load(offset_ptr + pid_batch + 1)
seqlen = end - begin
seqlen = tl.minimum(seqlen, max_seq_len)
if pid_m * BLOCK_M >= seqlen:
return
offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_D)
acc = tl.zeros([BLOCK_M, BLOCK_D], dtype=tl.float32)
mi = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
li = tl.zeros([BLOCK_M], dtype=tl.float32)
for j in range(0, seqlen, BLOCK_N):
offs_n = tl.arange(0, BLOCK_N) + j
q_ptrs = q_ptr + pid_head * stride_qh + begin * stride_qm + (offs_m
[:, None] * stride_qm + offs_d[None, :] * stride_qd)
k_ptrs = k_ptr + pid_head * stride_kh + begin * stride_kn + (offs_n
[None, :] * stride_kn + offs_d[:, None] * stride_kd)
qk = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for d in range(0, D, BLOCK_D):
curr_d = d + offs_d
q = tl.load(q_ptrs, mask=(curr_d[None, :] < D) & (offs_m[:,
None] < seqlen), other=0.0)
k = tl.load(k_ptrs, mask=(curr_d[:, None] < D) & (offs_n[None,
:] < seqlen), other=0.0)
qk += tl.dot(q, k, allow_tf32=allow_tf32)
q_ptrs += BLOCK_D * stride_qd
k_ptrs += BLOCK_D * stride_kd
mi_new = tl.maximum(tl.max(qk, axis=1), mi)
mn_mask = (offs_m[:, None] < seqlen) & (offs_n[None, :] < seqlen)
p = tl.exp(qk - mi_new[:, None])
p = tl.where(mn_mask, p, 0.0)
lij_hat = tl.sum(p, axis=1)
alpha = tl.exp(mi - mi_new)
li = alpha * li + lij_hat
acc = alpha[:, None] * acc
v_ptrs = v_ptr + pid_head * stride_vh + begin * stride_vn + (offs_d
[None, :] * stride_vd + offs_n[:, None] * stride_vn)
v = tl.load(v_ptrs, mask=(offs_d[None, :] < D) & (offs_n[:, None] <
seqlen), other=0.0)
p /= max_seq_len
p = p.to(v_ptr.dtype.element_ty)
acc += tl.dot(p, v, allow_tf32=allow_tf32)
mi = mi_new
lse_i = mi + tl.math.log(li)
lse_i_offsets = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
lse_i_ptrs = lse_i_ptr + pid_head * stride_lse_h + begin + lse_i_offsets
tl.store(lse_i_ptrs, lse_i, mask=lse_i_offsets < seqlen)
acc = acc / li[:, None]
o_ptrs = o_ptr + (pid_head * stride_oh + begin * stride_om + offs_m[:,
None] * stride_om + offs_d[None, :] * stride_od)
o_mask = (offs_m[:, None] < seqlen) & (offs_d[None, :] < D)
tl.store(o_ptrs, acc, mask=o_mask)
| {
"Data Type": [
"fp32",
"bf16"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings",
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
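A hypothetical launcher for the jagged flash-attention forward (assumed layout: `q`/`k`/`v` are `(num_heads, total_len, D)` and `offsets` holds per-batch `[begin, end)` ranges into the jagged length dimension; none of this wrapper is from the original file):

```python
import torch
import triton

def jagged_mha_fwd(q, k, v, offsets, max_seq_len, BLOCK_M=64, BLOCK_N=64):
    num_heads, total_len, D = q.shape
    batch_size = offsets.numel() - 1
    o = torch.zeros_like(q)
    lse = torch.zeros(num_heads, total_len, device=q.device, dtype=torch.float32)
    # Programs past a batch's true length exit early inside the kernel.
    grid = (triton.cdiv(max_seq_len, BLOCK_M), batch_size * num_heads)
    _multi_head_jagged_flash_attention_fwd_kernel[grid](
        q, k, v, offsets, o, lse,
        q.stride(0), q.stride(1), q.stride(2),
        k.stride(0), k.stride(1), k.stride(2),
        v.stride(0), v.stride(1), v.stride(2),
        o.stride(0), o.stride(1), o.stride(2),
        lse.stride(0),
        num_heads=num_heads, max_seq_len=max_seq_len, D=D, allow_tf32=True,
        BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_D=triton.next_power_of_2(D))
    return o, lse
```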
aa5c6a98-da4b-4b90-9be0-e4a00ab26b49 | mlstm_matmul.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_matmul.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def mlstm_matmul_kernel_backward_db(dH, Q, K, V, F, I, M, B, dB, NH: tl.
constexpr, S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr):
bh_id = tl.program_id(0)
sb_id = tl.program_id(1)
batch_id = bh_id // NH
head_id = bh_id % NH
batch_offset_dh = batch_id * NH * S * D + head_id * S * D
batch_offset_f = batch_id * NH * S + head_id * S
offset_dh = tl.arange(0, SB) + sb_id * SB
offset_vk = tl.arange(0, SB) + sb_id * SB
d_range = tl.arange(0, D)
dh_range = batch_offset_dh + offset_dh[:, None] * D + d_range[None, :]
dh_mask = (offset_dh[:, None] < S) & (d_range[None, :] < D)
dh = tl.load(dH + dh_range, dh_mask)
q = tl.load(Q + dh_range, dh_mask)
m = tl.load(M + batch_offset_f + offset_dh, offset_dh < S)
f = tl.load(F + batch_offset_f + offset_dh, offset_dh < S)
f = tl.cumsum(tl.log(tl.sigmoid(f)))
scale = tl.sqrt(tl.full((1,), D, dtype=tl.float32))
dn_acc = tl.zeros((SB,), dtype=tl.float32)
for j in range(sb_id, -1, -1):
vk_range = batch_offset_dh + offset_vk[:, None] * D + d_range[None, :]
vk_mask = (offset_vk[:, None] < S) & (d_range[None, :] < D)
v = tl.load(V + vk_range, vk_mask)
f_next = tl.load(F + batch_offset_f + offset_vk, offset_vk < S)
i = tl.load(I + batch_offset_f + offset_vk, offset_vk < S)
f_next = tl.log(tl.sigmoid(f_next))
if j == sb_id:
f_next = tl.cumsum(f_next)
d = f[:, None] - f_next[None, :] + i[None, :]
mask = offset_dh[:, None] >= offset_vk[None, :]
d = tl.where(mask, d, -float('inf'))
else:
f += tl.sum(f_next)
f_next = tl.cumsum(f_next)
d = f[:, None] - f_next[None, :] + i[None, :]
d = tl.exp(d - m[:, None])
dc = matrix_mult(dh, tl.trans(v), SB)
k = tl.load(K + vk_range, vk_mask) / scale
c_tilde = matrix_mult(q, tl.trans(k), SB) * d
dn_acc += tl.sum(c_tilde * dc, 1)
offset_vk -= SB
b = tl.load(B + batch_offset_f + offset_dh, offset_dh < S)
n = tl.maximum(tl.abs(b), tl.exp(-m)) + 1e-06
dn = -dn_acc * (1 / tl.exp(tl.log(n) * 2.0))
db = sign(b) * dn * tl.where(tl.abs(b) > tl.exp(-m), 1.0, 0.0)
tl.store(dB + batch_offset_f + offset_dh, db, offset_dh < S)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py |
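The kernel relies on `matrix_mult` and `sign` helpers defined elsewhere in `mlstm_matmul.py`; plausible definitions consistent with how they are called above (assumptions, not copied from the repo):

```python
import triton
import triton.language as tl

@triton.jit
def matrix_mult(a, b, SB: tl.constexpr):
    # tl.dot needs tile dims >= 16; otherwise fall back to broadcast-and-reduce.
    return tl.dot(a, b) if SB >= 16 else tl.sum(a[:, :, None] * b[None, :, :], 1)

@triton.jit
def sign(x):
    # Elementwise sign: +1, -1, or 0.
    return (x > 0).to(tl.float32) - (x < 0).to(tl.float32)
```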
03082872-4113-4a3f-8432-9b3df03409c0 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK}, num_warps=num_warps,
num_stages=num_stages) for BK in [32, 64] for num_warps in [1, 2, 4, 8] for
num_stages in [2, 3, 4]], key=['BC'])
@triton.jit
def chunk_rwkv6_fwd_A_kernel_intra_sub_inter(q, k, gi, ge, A, offsets,
indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, BT:
tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
i_i, i_j = i_c // NC, i_c % NC
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if i_t * BT + i_i * BC >= T:
return
if i_i <= i_j:
return
b_A = tl.zeros([BC, BC], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
o_k = i_k * BK + tl.arange(0, BK)
m_k = o_k < K
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_gq = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (
i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
p_gk = tl.make_block_ptr(gi + i_bh * T * K, (K, T), (1, K), (
i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
p_gn = tl.max_contiguous(tl.multiple_of(gi + (i_bh * T + i_t *
BT + i_i * BC) * K + o_k, BK), BK)
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_gq = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
p_gk = tl.make_block_ptr(gi + (bos * H + i_h) * K, (K, T), (1,
H * K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
p_gn = tl.max_contiguous(tl.multiple_of(gi + (bos + i_t * BT +
i_i * BC) * H * K + i_h * K + o_k, BK), BK)
b_gn = tl.load(p_gn, mask=m_k, other=0)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_gq = tl.load(p_gq, boundary_check=(0, 1))
b_qg = b_q * tl.exp(b_gq - b_gn[None, :]) * scale
b_k = tl.load(p_k, boundary_check=(0, 1))
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_kg = b_k * tl.exp(b_gn[:, None] - b_gk)
b_A += tl.dot(b_qg, b_kg)
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
else:
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
tl.store(p_A, b_A.to(A.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py |
af45c3e7-e57c-45df-8d0a-061adf132ddc | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES, BLOCK_SIZES, NUM_WARPS, NUM_STAGES)],
key=['M'])
@triton.jit
def triton_jagged_sum_kernel_variable_length_loop_buffer_then_sum(
input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_ragged = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_ragged
), tl.load(input_ptr_offsets + (pid_ragged + 1))
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
buffer += tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer_sum = tl.sum(buffer, axis=0)
buffer_view = buffer_sum.reshape((BLOCK_SIZE_M,))
output_offsets = offsets_m + pid_ragged * M
output_mask = output_offsets < M * (pid_ragged + 1)
tl.store(output_ptr + output_offsets, buffer_view, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_sum/kernels.py |
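The autotune sweep above references module-level constants from the tritonbench module; plausible stand-in values and a launch sketch (both assumptions, shown only so the row reads self-contained):

```python
import torch
import triton

BLOCK_SIZES = [32, 64]   # assumed; real lists live in the tritonbench module
NUM_WARPS = [4, 8]
NUM_STAGES = [2, 3]

def jagged_sum(values, offsets, M):
    # values: (total_rows, M) flattened jagged rows; offsets: (batch + 1,).
    batch_size = offsets.numel() - 1
    out = torch.empty(batch_size, M, device=values.device, dtype=torch.float32)
    # Both block sizes come from the autotuner, so the grid is a lambda.
    grid = lambda meta: (batch_size * triton.cdiv(M, meta['BLOCK_SIZE_M']),)
    triton_jagged_sum_kernel_variable_length_loop_buffer_then_sum[grid](
        values, offsets, out, M)
    return out
```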
7505340d-3573-43b3-becd-e287423981c1 | triton_flash_attention.py | IBM/vllm | vllm/attention/ops/triton_flash_attention.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def load_fn(block_ptr, first, second, pad):
if first and second:
tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad)
elif first:
tensor = tl.load(block_ptr, boundary_check=(0,), padding_option=pad)
elif second:
tensor = tl.load(block_ptr, boundary_check=(1,), padding_option=pad)
else:
tensor = tl.load(block_ptr)
return tensor
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/triton_flash_attention.py |
feab1add-a03a-45e1-9a8b-92ffbf911c83 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/based/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def parallel_based_fwd_kernel(q, k, v, o, z, s_k_h, s_k_t, s_k_d, s_v_h,
s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BTL: tl.constexpr, BTS: tl.constexpr,
BK: tl.constexpr, BV: tl.constexpr):
i_kv, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
NV = tl.cdiv(V, BV)
i_k = i_kv // NV
i_v = i_kv % NV
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTL, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, 0), (BK, BTS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (0,
i_v * BV), (BTS, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_o = tl.zeros([BTL, BV], dtype=tl.float32)
b_z = tl.zeros([BTL], dtype=tl.float32)
for _ in range(0, i_c * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = 1 + b_s + 0.5 * b_s * b_s
b_z += tl.sum(b_s, axis=1)
b_o = b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
p_k = tl.advance(p_k, (0, BTS))
p_v = tl.advance(p_v, (BTS, 0))
tl.debug_barrier()
o_q = tl.arange(0, BTL)
o_k = tl.arange(0, BTS)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, i_c * BTL), (BK, BTS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_c *
BTL, i_v * BV), (BTS, BV), (1, 0))
for _ in range(i_c * BTL, (i_c + 1) * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
m_s = o_q[:, None] >= o_k[None, :]
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = 1 + b_s + 0.5 * b_s * b_s
b_s = tl.where(m_s, b_s, 0)
b_z += tl.sum(b_s, axis=1)
b_o += tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
p_k = tl.advance(p_k, (0, BTS))
p_v = tl.advance(p_v, (BTS, 0))
o_k += BTS
p_o = tl.make_block_ptr(o + (i_bh + B * H * i_k) * s_v_h, (T, V), (
s_v_t, s_v_d), (i_c * BTL, i_v * BV), (BTL, BV), (1, 0))
p_z = z + (i_bh + B * H * i_k) * T + i_c * BTL + tl.arange(0, BTL)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_z, b_z.to(p_z.dtype.element_ty), mask=i_c * BTL + tl.arange(
0, BTL) < T)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/parallel.py |
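The score transform `1 + b_s + 0.5 * b_s * b_s` in both loops is the second-order Taylor expansion of `exp`, the feature map used by Based linear attention. A PyTorch reference of the same map (a sketch for checking the math, not the repo's API):

```python
import torch

def based_scores_ref(q, k, scale):
    # phi(s) = 1 + s + s^2 / 2 ~= exp(s), applied to scaled q.k scores.
    s = (q * scale) @ k.transpose(-1, -2)
    return 1 + s + 0.5 * s * s
```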
84ed0301-81a1-45f6-b51b-ce832f2a056f | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/simple_gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_simple_gla_bwd_kernel_dqkg(q, k, v, h, g, do, dh, dq, dk, dg,
offsets, indices, scale, B: tl.constexpr, T: tl.constexpr, H: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
all = B * T
o_i = tl.arange(0, BT)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,
), (0,))
b_g_last = tl.load(g + i_bh * T + min(i_t * BT + BT, T) - 1)
else:
p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,),
(BT,), (0,))
b_g_last = tl.load(g + (bos + min(i_t * BT + BT, T) - 1) * H + i_h)
b_g = tl.load(p_g, boundary_check=(0,))
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_ds = tl.zeros([BT, BT], dtype=tl.float32)
b_dg = tl.zeros([BT], dtype=tl.float32)
b_dg_last = tl.zeros([1], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dg_last += tl.sum(b_h * b_dh)
b_ds += tl.dot(b_do, tl.trans(b_v))
b_dq += tl.dot(b_do, b_h.to(b_do.dtype))
b_dk += tl.dot(b_v, b_dh.to(b_v.dtype))
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + (i_k * B * H + i_bh) * T, (T,), (1,),
(i_t * BT,), (BT,), (0,))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
            1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + (i_k * all + bos) * H + i_h, (T,), (H
,), (i_t * BT,), (BT,), (0,))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dg_last *= tl.exp(b_g_last)
b_dq = b_dq * tl.exp(b_g)[:, None] * scale
b_dk = b_dk * tl.exp(-b_g + b_g_last)[:, None]
b_dg_last += tl.sum(b_dk * b_k)
b_ds = tl.where(o_i[:, None] >= o_i[None, :], b_ds * scale * tl.exp(b_g
[:, None] - b_g[None, :]), 0)
b_ds = b_ds.to(b_k.dtype)
b_dq += tl.dot(b_ds, b_k)
b_dk += tl.dot(tl.trans(b_ds), b_q)
b_dg += tl.sum(b_q * b_dq - b_k * b_dk, axis=1)
b_dg = tl.where(o_i < min(BT, T - i_t * BT) - 1, b_dg, b_dg + b_dg_last)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/chunk.py |
35265897-fe76-4437-a48d-a82641778823 | hilbert.py | Kitsunetic/space-filling-pytorch | space_filling_pytorch/functional/hilbert.py | 0de955ad1036973ee7506c5a0124c208acec722d | 0 | @triton.jit
def _encode_hilbert_kernel(xyz_ptr, code_ptr, B, N, space_size, x_offset,
y_offset, z_offset, str_xyz_B, str_xyz_N, str_xyz_C, BLK: tl.constexpr,
ASSIGN_BATCH_INDEX: tl.constexpr):
pid_b = tl.program_id(0)
pid_n = tl.program_id(1)
offs_n = pid_n * BLK + tl.arange(0, BLK)
mask_n = offs_n < N
xyz_ptrs = xyz_ptr + pid_b * str_xyz_B + offs_n * str_xyz_N
fx = tl.load(xyz_ptrs + x_offset * str_xyz_C, mask=mask_n)
fy = tl.load(xyz_ptrs + y_offset * str_xyz_C, mask=mask_n)
fz = tl.load(xyz_ptrs + z_offset * str_xyz_C, mask=mask_n)
ret = _calculate_hilbert_distance(fx, fy, fz, space_size)
if ASSIGN_BATCH_INDEX:
ret |= pid_b.to(tl.int64) << 48
tl.store(code_ptr + pid_b * N + offs_n, ret, mask=mask_n)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/hilbert.py |
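A hypothetical host wrapper (assuming `xyz` is a `(B, N, 3)` float tensor of coordinates; the batch index is packed into the top bits of the int64 code when requested):

```python
import torch
import triton

def encode_hilbert(xyz, space_size=1024, assign_batch_index=True, BLK=1024):
    B, N, _ = xyz.shape
    code = torch.empty(B, N, dtype=torch.int64, device=xyz.device)
    grid = (B, triton.cdiv(N, BLK))
    _encode_hilbert_kernel[grid](
        xyz, code, B, N, space_size, 0, 1, 2,   # x/y/z channel offsets
        xyz.stride(0), xyz.stride(1), xyz.stride(2),
        BLK=BLK, ASSIGN_BATCH_INDEX=assign_batch_index)
    return code
```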
f7e0aaee-d0e9-4cb0-8788-587e3a9bf44c | triton_flash_attention.py | IBM/vllm | vllm/attention/ops/triton_flash_attention.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m,
actual_seqlen_k, dropout_p, philox_seed, batch_philox_offset,
encoded_softmax_block_ptr, block_min, block_max, offs_n_causal,
masked_blocks, n_extra_tokens, bias_ptr, IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.
constexpr, OFFS_M: tl.constexpr, OFFS_N: tl.constexpr, PRE_LOAD_V: tl.
constexpr, MASK_STEPS: tl.constexpr, ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr, PADDED_HEAD: tl.constexpr):
for start_n in range(block_min, block_max, BLOCK_N):
k = load_fn(K_block_ptr, PADDED_HEAD, MASK_STEPS and n_extra_tokens !=
0, 'zero')
if PRE_LOAD_V:
v = load_fn(V_block_ptr, MASK_STEPS and n_extra_tokens != 0,
PADDED_HEAD, 'zero')
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if MASK_STEPS:
if start_n + BLOCK_N == block_max and n_extra_tokens != 0:
boundary_m = tl.full([BLOCK_M], actual_seqlen_k, dtype=tl.int32
)
size_n = start_n + OFFS_N[None, :]
mask = size_n < boundary_m[:, None]
qk = tl.where(mask, qk, float('-inf'))
if IS_CAUSAL:
causal_boundary = start_n + offs_n_causal
causal_mask = OFFS_M[:, None] >= causal_boundary[None, :]
qk = tl.where(causal_mask, qk, float('-inf'))
qk += tl.dot(q, k)
if bias_ptr is not None:
bias = load_fn(bias_ptr, False, MASK_STEPS and n_extra_tokens !=
0, 'zero')
qk += bias * 1.44269504089
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
if ENABLE_DROPOUT:
philox_offset = (batch_philox_offset + start_m * BLOCK_M *
actual_seqlen_k + start_n - BLOCK_N)
keep = dropout_mask(philox_seed, philox_offset, dropout_p,
BLOCK_M, BLOCK_N, actual_seqlen_k)
if RETURN_ENCODED_SOFTMAX:
tl.store(encoded_softmax_block_ptr, tl.where(keep, p, -p).
to(encoded_softmax_block_ptr.type.element_ty))
p = tl.where(keep, p, 0.0)
elif RETURN_ENCODED_SOFTMAX:
tl.store(encoded_softmax_block_ptr, p.to(
encoded_softmax_block_ptr.type.element_ty))
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
if not PRE_LOAD_V:
v = load_fn(V_block_ptr, MASK_STEPS and n_extra_tokens != 0,
PADDED_HEAD, 'zero')
l_i = l_i * alpha + l_ij
m_i = m_ij
acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr,
(0, BLOCK_N))
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/triton_flash_attention.py |
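The literal `1.44269504089` applied to the bias is log2(e): the kernel evaluates softmax with `tl.math.exp2`, which is cheaper than `exp` on GPU, so anything passing through the exponential must be pre-scaled into base 2 (the q.k scores are assumed to be pre-scaled by the caller). A one-line sanity check:

```python
import math

# exp(x) == 2**(x * log2(e)); the kernel folds the log2(e) factor into the bias.
assert math.isclose(2 ** (0.7 * math.log2(math.e)), math.exp(0.7))
```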
49a964a1-bf73-4726-98df-82a3e693bd2f | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=MATMUL_CONFIGS + [Config({'BLOCK_M': 128,
'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=
8)], key=['m_key', 'n_key', 'k_key'])
@triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
args['SPLIT_K']) == 0})
@triton.jit
def _kernel_matmul_fp8_row_no_fast_acc(A_ptr, B_ptr, C_ptr, M, N, K, m_key,
n_key, k_key, A_scale, B_scale, Bias, stride_am, stride_ak, stride_bn,
stride_bk, stride_cm, stride_cn, dot_out_dtype: tl.constexpr,
allow_tf32: tl.constexpr, fp8_fast_accum: tl.constexpr, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.
constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, USE_BIAS: tl.
constexpr, AB_DTYPE: tl.constexpr, NUM_SMS: tl.constexpr) ->None:
"""Matmul kernel of [M, K] @ [N, K] with row-wise scales
performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles.
Args:
A (TensorWrapper): [M, K] input tensor.
B (TensorWrapper): [N, K] input tensor.
C (TensorWrapper): [M, N] output tensor.
M (int): M dimension of input tensor.
N (int): N dimension of input tensor.
K (int): K dimension of input tensor.
m_key (int): Autotuning key for M dimension of input tensor.
n_key (int): Autotuning key for N dimension of input tensor.
k_key (int): Autotuning key for K dimension of input tensor.
A_scale (TensorWrapper): [M] reciprocal scale tensor per row. A * A_scale = original A
B_scale (TensorWrapper): [N] reciprocal scale tensor per row. B * B_scale = original B
Bias (TensorWrapper): [N] Optional bias tensor.
stride_am (int): Stride of M dimension of A.
stride_ak (int): Stride of K dimension of A.
stride_bn (int): Stride of N dimension of B.
stride_bk (int): Stride of K dimension of B.
stride_cm (int): Stride of M dimension of C.
stride_cn (int): Stride of N dimension of C.
dot_out_dtype (torch.dtype): Output type of tensor core.
allow_tf32 (bool): Whether to use TF32 for tensor core.
fp8_fast_accum (bool): Whether to use fast accumulation for tensor core.
BLOCK_M (int): Block size for M dimension.
BLOCK_N (int): Block size for N dimension.
BLOCK_K (int): Block size for K dimension.
GROUP_M (int): Number of groups for M dimension swizzle.
SPLIT_K (int): Number of SM's to launch per row.
EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K.
USE_BIAS(bool): Whether to use bias.
    AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
"""
start_pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M)
num_pid_n = tl.cdiv(N, BLOCK_N)
k_tiles = tl.cdiv(K, BLOCK_K)
num_tiles = num_pid_m * num_pid_n
tiles_per_SM = num_tiles // NUM_SMS
if start_pid < num_tiles % NUM_SMS:
tiles_per_SM += 1
tile_id = start_pid - NUM_SMS
ki = -1
offs_k_for_mask = tl.arange(0, BLOCK_K)
num_pid_in_group = GROUP_M * num_pid_n
pid_m = 0
pid_n = 0
offs_am = tl.arange(0, BLOCK_M)
offs_bn = tl.arange(0, BLOCK_N)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
for _ in range(0, k_tiles * tiles_per_SM):
ki = tl.where(ki == k_tiles - 1, 0, ki + 1)
if ki == 0:
tile_id += NUM_SMS
group_id = tile_id // num_pid_in_group
first_pid_m = group_id * GROUP_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_M)
pid_m = first_pid_m + tile_id % group_size_m
pid_n = tile_id % num_pid_in_group // group_size_m
start_m = pid_m * BLOCK_M
start_n = pid_n * BLOCK_N
offs_am = start_m + tl.arange(0, BLOCK_M)
offs_bn = start_n + tl.arange(0, BLOCK_N)
offs_am = tl.where(offs_am < M, offs_am, 0)
offs_bn = tl.where(offs_bn < N, offs_bn, 0)
offs_am = tl.max_contiguous(tl.multiple_of(offs_am, BLOCK_M),
BLOCK_M)
offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn, BLOCK_N),
BLOCK_N)
offs_k = ki * BLOCK_K + tl.arange(0, BLOCK_K)
A = A_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak
)
B = B_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn
)
a = tl.load(A, mask=offs_k_for_mask[None, :] < K - ki * BLOCK_K,
other=0.0)
b = tl.load(B, mask=offs_k_for_mask[:, None] < K - ki * BLOCK_K,
other=0.0)
acc += tl.dot(a, b, out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
if ki == k_tiles - 1:
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
a_scale = tl.load(A_scale + rm, mask=rm < M)
b_scale = tl.load(B_scale + rn, mask=rn < N)
scale = a_scale[:, None] * b_scale[None, :]
acc *= scale
if USE_BIAS:
bias = tl.load(Bias + rn, mask=rn < N)
acc += bias[None, :]
acc = acc.to(C_ptr.dtype.element_ty)
C = C_ptr + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.store(C, acc, mask=mask)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
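`MATMUL_CONFIGS` is defined elsewhere in `fp8_gemm.py`; a minimal stand-in (an assumption, shown only so the autotune decorator above can be read in isolation):

```python
from triton import Config

# Assumed placeholder configs; the real list in fp8_gemm.py is much larger.
MATMUL_CONFIGS = [
    Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1},
           num_stages=4, num_warps=4),
    Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1},
           num_stages=4, num_warps=4),
]
```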
1c6094ba-7870-44ef-a8cd-0333dd051641 | activations.py | sustcsonglin/flash-linear-attention | fla/modules/activations.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['D'])
@triton.jit
def logsigmoid_fwd_kernel(x, y, temperature, T: tl.constexpr, D: tl.
constexpr, B: tl.constexpr):
i = tl.program_id(0)
o_i = i * B + tl.arange(0, B)
m_i = o_i < T
b_x = tl.load(x + o_i, mask=m_i, other=0.0).to(tl.float32)
b_m = tl.minimum(0.0, b_x)
b_z = 1.0 + tl.exp(-tl.abs(b_x))
b_y = (b_m - tl.log(b_z)) / temperature
tl.store(y + o_i, b_y.to(y.dtype.element_ty), mask=m_i)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/activations.py |
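A hypothetical wrapper (the real module has its own): the kernel treats the input as flat, one program per `B` elements, with `D` used only as an autotune key.

```python
import torch
import triton

def logsigmoid_fwd(x, temperature=1.0, B=1024):
    y = torch.empty_like(x)
    T = x.numel()
    logsigmoid_fwd_kernel[(triton.cdiv(T, B),)](
        x, y, temperature, T=T, D=x.shape[-1], B=B)
    return y
```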
7de4e638-bcf1-4f0d-ae73-3e9eb9ad0241 | transposable_semi_structured.py | huyz2023/2by4-pretrain | sparse/transposable_semi_structured.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _to_transposable_sparse_semi_structured_kernel(dense_ptr, sparse_ptr,
mask_raw_ptr, mask_ptr, dense_row_stride, dense_col_stride,
sparse_row_stride, sparse_col_stride, mask_raw_row_stride,
mask_raw_col_stride, mask_row_stride, mask_col_stride, m, k, n, abs,
BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0) * 32 + (tl.arange(0, 128) // 16 * 4)[None, :
] + (tl.arange(0, 16) // 4)[:, None]
col_idx = tl.program_id(1) * 64 + (tl.arange(0, 128) % 16 * 4)[None, :] + (
tl.arange(0, 16) % 4)[:, None]
dense = tl.load(dense_ptr + row_idx * dense_row_stride + col_idx *
dense_col_stride)
mask_raw = tl.load(mask_raw_ptr + tl.arange(0, 16)[None, :] *
mask_raw_col_stride + tl.arange(0, BLOCK_SIZE)[:, None] *
mask_raw_row_stride, mask=tl.arange(0, BLOCK_SIZE)[:, None] < n,
other=0)
sum = tl.dot(mask_raw, tl.abs(dense)) if abs else tl.dot(mask_raw, dense)
sum = tl.where(tl.arange(0, BLOCK_SIZE)[:, None] < n, sum, -float('inf'))
max = tl.argmax(sum, 0)
mask_idx = max[None, :] * 16 + tl.arange(0, 16)[:, None]
mask = tl.load(mask_raw_ptr + mask_idx).to(tl.int1)
tl.store(sparse_ptr + row_idx * sparse_row_stride + col_idx *
sparse_col_stride, dense, mask=mask)
tl.store(mask_ptr + row_idx * mask_row_stride + col_idx *
mask_col_stride, mask)
| {
"Data Type": [],
"Functionality": [
"Top-K Selection",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/transposable_semi_structured.py |
7b3ef975-67fb-4aaf-98b2-e7dc5ae1976a | prefix_prefill.py | IBM/vllm | vllm/attention/ops/prefix_prefill.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _fwd_kernel_flash_attn_v2(Q, K, V, K_cache, V_cache, B_Loc, sm_scale,
B_Start_Loc, B_Seqlen, B_Ctxlen, block_size, x, Out, stride_b_loc_b,
stride_b_loc_s, stride_qbs, stride_qh, stride_qd, stride_kbs, stride_kh,
stride_kd, stride_vbs, stride_vh, stride_vd, stride_obs, stride_oh,
stride_od, stride_k_cache_bs, stride_k_cache_h, stride_k_cache_d,
stride_k_cache_bl, stride_k_cache_x, stride_v_cache_bs,
stride_v_cache_h, stride_v_cache_d, stride_v_cache_bl,
num_queries_per_kv: int, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.
constexpr, BLOCK_N: tl.constexpr):
cur_batch = tl.program_id(0)
cur_head = tl.program_id(1)
start_m = tl.program_id(2)
cur_kv_head = cur_head // num_queries_per_kv
cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
block_start_loc = BLOCK_M * start_m
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
off_q = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_qbs + cur_head * stride_qh + offs_d[None, :] * stride_qd
q = tl.load(Q + off_q, mask=offs_m[:, None] < cur_batch_seq_len -
cur_batch_ctx_len, other=0.0)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
for start_n in range(0, cur_batch_ctx_len, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
bn = tl.load(B_Loc + cur_batch * stride_b_loc_b + (start_n + offs_n
) // block_size * stride_b_loc_s, mask=start_n + offs_n <
cur_batch_ctx_len, other=0)
off_k = bn[None, :
] * stride_k_cache_bs + cur_kv_head * stride_k_cache_h + offs_d[
:, None] // x * stride_k_cache_d + (start_n + offs_n[None, :]
) % block_size * stride_k_cache_bl + offs_d[:, None
] % x * stride_k_cache_x
off_v = bn[:, None
] * stride_v_cache_bs + cur_kv_head * stride_v_cache_h + offs_d[
None, :] * stride_v_cache_d + (start_n + offs_n[:, None]
) % block_size * stride_v_cache_bl
k = tl.load(K_cache + off_k, mask=start_n + offs_n[None, :] <
cur_batch_ctx_len, other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk = tl.where(start_n + offs_n[None, :] < cur_batch_ctx_len, qk,
float('-inf'))
qk *= sm_scale
m_ij = tl.max(qk, 1)
m_i_new = tl.maximum(m_i, m_ij)
p = tl.math.exp(qk - m_i_new[:, None])
l_ij = tl.sum(p, 1)
alpha = tl.math.exp(m_i - m_i_new)
l_i_new = alpha * l_i + l_ij
acc_scale = alpha
acc = acc * acc_scale[:, None]
v = tl.load(V_cache + off_v, mask=start_n + offs_n[:, None] <
cur_batch_ctx_len, other=0.0)
p = p.to(v.dtype)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
off_k = offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh + offs_d[
:, None] * stride_kd
off_v = offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh + offs_d[
None, :] * stride_vd
k_ptrs = K + off_k
v_ptrs = V + off_v
block_mask = tl.where(block_start_loc < cur_batch_seq_len -
cur_batch_ctx_len, 1, 0)
for start_n in range(0, block_mask * (start_m + 1) * BLOCK_M, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(k_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_kbs, mask=start_n + offs_n[None, :] < cur_batch_seq_len -
cur_batch_ctx_len, other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk *= sm_scale
qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk,
float('-inf'))
m_ij = tl.max(qk, 1)
m_i_new = tl.maximum(m_i, m_ij)
p = tl.math.exp(qk - m_i_new[:, None])
l_ij = tl.sum(p, 1)
alpha = tl.math.exp(m_i - m_i_new)
l_i_new = alpha * l_i + l_ij
acc_scale = alpha
acc = acc * acc_scale[:, None]
v = tl.load(v_ptrs + (cur_batch_in_all_start_index + start_n) *
stride_vbs, mask=start_n + offs_n[:, None] < cur_batch_seq_len -
cur_batch_ctx_len, other=0.0)
p = p.to(v.dtype)
acc += tl.dot(p, v)
l_i = l_i_new
m_i = m_i_new
off_o = (cur_batch_in_all_start_index + offs_m[:, None]
) * stride_obs + cur_head * stride_oh + offs_d[None, :] * stride_od
out_ptrs = Out + off_o
tl.store(out_ptrs, acc, mask=offs_m[:, None] < cur_batch_seq_len -
cur_batch_ctx_len)
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/prefix_prefill.py |
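Both inner loops above implement the online-softmax recurrence: keep a running row maximum and denominator, and rescale the output accumulator whenever the maximum grows. A plain-NumPy sketch of one block update, assuming qk is already scaled by sm_scale:

import numpy as np

def online_softmax_step(m_i, l_i, acc, qk, v):
    # One block update of the streaming softmax used in both loops above.
    m_ij = np.max(qk, axis=1)                 # block-local row max
    m_new = np.maximum(m_i, m_ij)
    p = np.exp(qk - m_new[:, None])           # safe exponentials
    alpha = np.exp(m_i - m_new)               # rescale factor for old state
    l_new = alpha * l_i + p.sum(axis=1)
    acc = acc * alpha[:, None] + p @ v
    return m_new, l_new, acc                  # normalize acc by l_new at the end
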
6d49d736-8ea9-4cd8-ba62-bcb2f44902ea | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/abc/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def chunk_abc_bwd_kernel_intra_K(v, z, do, dA, s_v_h, s_v_t, s_v_d, scale,
T: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr,
BV: tl.constexpr, NC: tl.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_t, i_i, i_j = i_c // (NC * NC), i_c % (NC * NC) // NC, i_c % (NC * NC
) % NC
n_bh = tl.num_programs(2)
if i_i > i_j:
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (
i_v * BV, i_t * BT + i_j * BC), (BV, BC), (0, 1))
p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_zn = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), ((
i_t * BT + i_i * BC) * V + i_v * BV,), (BV,), (0,))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_dA = tl.make_block_ptr(dA + (i_bh + i_v * n_bh) * T * BT, (T, BT),
(BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
b_zn = tl.load(p_zn, boundary_check=(0,))
b_z = tl.load(p_z, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_zn[None, :] - b_z) * scale).to(b_do.dtype)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_v = tl.exp(b_v - b_zn[:, None]).to(b_v.dtype)
b_dA = tl.dot(b_do, b_v, allow_tf32=False)
tl.store(p_dA, b_dA.to(dA.dtype.element_ty), boundary_check=(0, 1))
elif i_i == i_j:
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T * V,), (s_v_d,), ((i_t *
BT + i_j * BC) * V + i_v * BV,), (BV,), (0,))
p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
b_z = tl.load(p_z, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)) * scale
o_i = tl.arange(0, BC)
o_A = (i_bh + i_v * n_bh) * T * BT + (i_t * BT + i_i * BC + tl.
arange(0, BC)) * BT + i_j * BC
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
for j in range(0, BC):
b_v = tl.load(p_v, boundary_check=(0,)).to(tl.float32)
b_dA = tl.sum(b_do * tl.exp(b_v[None, :] - b_z), 1)
b_dA = tl.where(o_i >= j, b_dA, 0)
tl.store(dA + o_A + j, b_dA.to(b_do.dtype), mask=m_A)
p_v = tl.advance(p_v, (V,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py |
abbc7553-4ca9-4830-a5ff-405e42b5cd6b | rwkv_log.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_log.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def logsubexp(a, b, log_eps: tl.constexpr):
max_ab = tl.maximum(tl.maximum(a, b), log_eps)
return max_ab + tl.log(tl.exp(a - max_ab) - tl.exp(b - max_ab))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py |
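logsubexp computes log(exp(a) - exp(b)) without overflow by factoring out the running maximum; it is valid for a > b, with log_eps bounding the result from below. A scalar sketch with a quick check:

import math

def logsubexp(a, b, log_eps=-60.0):
    # Stable log(exp(a) - exp(b)) for a > b: factor out the max first.
    m = max(a, b, log_eps)
    return m + math.log(math.exp(a - m) - math.exp(b - m))

assert abs(logsubexp(1.0, 0.0) - math.log(math.e - 1.0)) < 1e-12
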
a1e6cbbe-2b59-4e14-81c5-dd969b3c4059 | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsTmaWSPersistent)), key=['N_CTX'])
@triton.jit
def _attn_fwd_tma_ws_persistent(Q, K, V, sm_scale, M, Out, desc_q, desc_k,
desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr,
STAGE: tl.constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.
constexpr, ENABLE_WS: tl.constexpr, GRID_MULTIPLE: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
n_tile_num = tl.cdiv(N_CTX, BLOCK_M)
prog_id = tl.program_id(0)
num_progs = tl.num_programs(0)
total_tiles = n_tile_num * Z * H
tiles_per_sm = total_tiles // num_progs
if prog_id < total_tiles % num_progs:
tiles_per_sm += 1
tile_idx = prog_id
for _ in range(0, tiles_per_sm):
pid = tile_idx % n_tile_num
off_hz = tile_idx // n_tile_num
_attn_fwd_compute_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k,
desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk, stride_vz,
stride_vh, stride_vk, stride_vn, stride_oz, stride_oh,
stride_om, stride_on, off_hz, pid, Z, H, N_CTX, BLOCK_M,
BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA, LOOP_SCHEDULE)
tile_idx += num_progs
| {
"Data Type": [],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
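The persistent scheduling above spreads total_tiles = n_tile_num * Z * H tiles over a fixed grid, each program striding by num_progs. A host-side sketch of the same distribution (an illustrative helper, not part of tritonbench):

def persistent_schedule(total_tiles: int, num_progs: int):
    # Each program takes total_tiles // num_progs tiles; the first
    # (total_tiles % num_progs) programs take one extra, striding by num_progs.
    assignment = {}
    for prog_id in range(num_progs):
        tiles = total_tiles // num_progs + (1 if prog_id < total_tiles % num_progs else 0)
        assignment[prog_id] = [prog_id + i * num_progs for i in range(tiles)]
    return assignment

# e.g. persistent_schedule(10, 4) -> {0: [0, 4, 8], 1: [1, 5, 9], 2: [2, 6], 3: [3, 7]}
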
4e993c1c-6857-48f0-95ea-0de66ed768a8 | avgpool.py | neuro-ml/kerops | kerops/kernels/avgpool.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _AvgPoolCeilStats_cl3d_backward_impl(Inpgrad_ptr, Outgrad_ptr,
Output_ptr, Meangrad_ptr, Sqmeangrad_ptr, h_outgrad, w_outgrad,
d_outgrad, d_inpgrad, batch_stride_outgrad, H_stride_outgrad,
W_stride_outgrad, batch_stride_inpgrad, H_stride_inpgrad,
W_stride_inpgrad, numel_no_channels_inpgrad, num_channels: tl.constexpr,
almost_half_d: tl.constexpr):
batch = tl.program_id(0)
H = tl.program_id(1)
W = tl.program_id(2)
Inpgrad_ptr += (batch * batch_stride_inpgrad + H * H_stride_inpgrad + W *
W_stride_inpgrad)
Output_ptr += (batch * batch_stride_inpgrad + H * H_stride_inpgrad + W *
W_stride_inpgrad)
pair_offset = tl.arange(0, 2)
channels_offset = tl.arange(0, num_channels)
d_offset = tl.arange(0, almost_half_d)
inpgrad_offset = d_offset[:, None, None] * num_channels + channels_offset[
None, :, None]
outgrad_offset = d_offset[:, None, None] * (2 * num_channels
) + channels_offset[None, :, None] + pair_offset[None, None, :
] * num_channels
inpgrad_mask = d_offset[:, None, None] < d_inpgrad
outgrad_mask = d_offset[:, None, None] * 2 + pair_offset[None, None, :
] < d_outgrad
inpgrad = tl.load(Inpgrad_ptr + inpgrad_offset, mask=inpgrad_mask,
other=0.0)
output = tl.load(Output_ptr + inpgrad_offset, mask=inpgrad_mask, other=0.0)
meangrad = tl.load(Meangrad_ptr + channels_offset)[None, :, None]
sqmeangrad = tl.load(Sqmeangrad_ptr + channels_offset)[None, :, None]
normalizer = tl.sum(outgrad_mask.to(tl.float16), axis=2)[:, :, None].to(tl
.float16)
W_skip = False
if 2 * (W + 1) > w_outgrad:
W_skip = True
else:
normalizer *= 2
H_skip = False
if 2 * (H + 1) > h_outgrad:
H_skip = True
else:
normalizer *= 2
meangrad = meangrad / numel_no_channels_inpgrad
sqmeangrad = 2 * output.to(tl.float32
) * sqmeangrad / numel_no_channels_inpgrad
grad = (inpgrad + meangrad + sqmeangrad) / normalizer
Tmp_ptr = (Outgrad_ptr + batch * batch_stride_outgrad + 2 * H *
H_stride_outgrad + 2 * W * W_stride_outgrad)
tl.store(Tmp_ptr + outgrad_offset, grad, mask=outgrad_mask)
if not W_skip:
Tmp_ptr = (Outgrad_ptr + batch * batch_stride_outgrad + 2 * H *
H_stride_outgrad + (2 * W + 1) * W_stride_outgrad)
tl.store(Tmp_ptr + outgrad_offset, grad, mask=outgrad_mask)
if not H_skip:
Tmp_ptr = Outgrad_ptr + batch * batch_stride_outgrad + (2 * H + 1
) * H_stride_outgrad + 2 * W * W_stride_outgrad
tl.store(Tmp_ptr + outgrad_offset, grad, mask=outgrad_mask)
if not H_skip and not W_skip:
Tmp_ptr = Outgrad_ptr + batch * batch_stride_outgrad + (2 * H + 1
) * H_stride_outgrad + (2 * W + 1) * W_stride_outgrad
tl.store(Tmp_ptr + outgrad_offset, grad, mask=outgrad_mask)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/avgpool.py |
d2863f4d-a943-457c-8fe8-8bab49d63b2d | chunk_h_parallel.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h_parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_FINAL_STATE': lambda args: args['ht'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32,
64, 128] for num_warps in [2, 4, 8, 16] for num_stages in [2, 3]], key=
['BT', 'USE_G', 'USE_GK', 'USE_GV'])
@triton.jit
def chunk_fwd_kernel_h_reduction(h, g, gk, gv, kvt, ht, offsets,
chunk_offsets, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl
.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_G:
tl.constexpr, USE_GK: tl.constexpr, USE_GV: tl.constexpr,
STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_h = tl.zeros([BK, BV], dtype=tl.float32)
for i_t in range(NT):
if HEAD_FIRST:
p_h = tl.make_block_ptr(h + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_h = tl.make_block_ptr(h + ((boh + i_t) * H + i_h) * K * V, (K,
V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_h += tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
if i_t > 0:
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
last_idx = min(i_t * BT + BT, T) - 1
if USE_G:
if HEAD_FIRST:
b_g_last = tl.load(g + i_nh * T + last_idx)
else:
b_g_last = tl.load(g + bos * H + last_idx * H + i_h)
b_h *= tl.exp(b_g_last)
if USE_GK:
if HEAD_FIRST:
p_gk_last = (gk + i_nh * T * K + last_idx * K + i_k * BK +
tl.arange(0, BK))
else:
p_gk_last = gk + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) <
K, other=0.0)
b_h *= tl.exp(b_gk_last)[:, None]
if USE_GV:
if HEAD_FIRST:
p_gv_last = (gv + i_nh * T * V + last_idx * V + i_v * BV +
tl.arange(0, BV))
else:
p_gv_last = gv + (bos + last_idx
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV)
b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) <
V, other=0.0)
b_h *= tl.exp(b_gv_last)[None, :]
if STORE_FINAL_STATE:
p_kvt = tl.make_block_ptr(kvt + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h += tl.load(p_kvt, boundary_check=(0, 1)).to(tl.float32)
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_parallel.py |
e88a9daf-61e9-4aae-bbd8-4d5f74361199 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/simple_gla/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'NV': lambda args: triton.cdiv(args['V'], args['BV'])})
@triton.jit
def parallel_simple_gla_bwd_kernel(q, k, v, g, do, dq, dk, dv, dg, s_k_h,
s_k_t, s_v_h, s_v_t, scale, B: tl.constexpr, H: tl.constexpr, T: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BS: tl.
constexpr, BK: tl.constexpr, BV: tl.constexpr, NV: tl.constexpr):
i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_k, i_v = i_kv // NV, i_kv % NV
parallel_simple_gla_bwd_kernel_dq(i_bh, i_t, i_k, i_v, i_kv, q, k, v, g,
do, dq, dg, s_k_h, s_k_t, s_v_h, s_v_t, scale, B=B, H=H, T=T, K=K,
V=V, BT=BT, BS=BS, BK=BK, BV=BV)
tl.debug_barrier()
parallel_simple_gla_bwd_kernel_dkv(i_bh, i_t, i_k, i_v, i_kv, q, k, v,
g, do, dk, dv, dg, s_k_h, s_k_t, s_v_h, s_v_t, scale, B, H, T, K, V,
BT, BS, BK, BV)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/parallel.py |
ba44918d-689f-49fc-8bc9-8cd9eb5ecc57 | gelu_and_mul.py | tascj/kaggle-lmsys-chatbot-arena | human_pref/inference/ops/gelu_and_mul.py | 83cd93d50b9283c18711e8c63e4e1c6399c7b9ce | 0 | @triton.jit
def _gelu_and_mul_kernel(input_ptr, stride_input_m, stride_input_n,
stride_output_m, stride_output_n, size_m, size_n, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr):
tid = tl.program_id(0)
input_m_offsets = tid * BLOCK_M + tl.arange(0, BLOCK_M)
output_m_offsets = tid * BLOCK_M + tl.arange(0, BLOCK_M)
pid = tl.program_id(1)
input_n_offsets = pid * BLOCK_N + tl.arange(0, BLOCK_N)
output_n_offsets = pid * BLOCK_N + tl.arange(0, BLOCK_N)
up_offsets = input_m_offsets[:, None] * stride_input_m + (input_n_offsets
[None, :] + size_n) * stride_input_n
gate_offsets = input_m_offsets[:, None] * stride_input_m + input_n_offsets[
None, :] * stride_input_n
res_offsets = output_m_offsets[:, None
] * stride_output_m + output_n_offsets[None, :] * stride_output_n
up = tl.load(input_ptr + up_offsets, mask=(input_n_offsets < size_n)[
None, :] * (input_m_offsets < size_m)[:, None], other=0.0)
gate = tl.load(input_ptr + gate_offsets, mask=(input_n_offsets < size_n
)[None, :] * (input_m_offsets < size_m)[:, None], other=0.0).to(tl.
float32)
gate = gelu(gate)
gate = gate.to(input_ptr.dtype.element_ty)
tl.store(input_ptr + res_offsets, up * gate, mask=(output_n_offsets <
size_n)[None, :] * (output_m_offsets < size_m)[:, None])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/tascj/kaggle-lmsys-chatbot-arena/blob/83cd93d50b9283c18711e8c63e4e1c6399c7b9ce/human_pref/inference/ops/gelu_and_mul.py |
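The kernel reads the gate activation from the first size_n columns and the up projection from the next size_n, then writes up * gelu(gate) back. A hedged PyTorch reference; the exact gelu variant used by the helper above is an assumption:

import torch
import torch.nn.functional as F

def gelu_and_mul_reference(x: torch.Tensor) -> torch.Tensor:
    # x: [M, 2*N] with gate in columns [:N] and up in columns [N:],
    # matching the gate_offsets / up_offsets computed in the kernel.
    gate, up = x.chunk(2, dim=-1)
    return up * F.gelu(gate.float()).to(x.dtype)  # gelu variant is an assumption
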
db060ba9-fe89-418b-807d-5beda14cd648 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'OUTPUT_ATTENTIONS': lambda args: args['attn'] is not None}
)
@triton.jit
def parallel_delta_rule_fwd_kernel(q, k, k2, v, beta, o, o_new, attn, s_k_h,
s_k_t, s_v_h, s_v_t, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr,
BT: tl.constexpr, BS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
OUTPUT_ATTENTIONS: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_t * BT,
0), (BT, BK), (1, 0))
b_q = tl.zeros([BT, BK], dtype=tl.float32)
b_q += tl.load(p_q, boundary_check=(0, 1))
b_o = tl.zeros([BT, BV], dtype=tl.float32)
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, 1), (i_t * BT,
0), (BT, BV), (1, 0))
b_o += tl.load(p_o, boundary_check=(0, 1))
for offset in range((i_t + 1) * BT - 2 * BS, i_t * BT - BS, -BS):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (1, s_k_t), (0,
offset), (BK, BS), (0, 1))
p_k2 = tl.make_block_ptr(k2 + i_bh * s_k_h, (T, K), (s_k_t, 1), (
offset, 0), (BS, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, 1), (
offset, 0), (BS, BV), (1, 0))
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (offset,),
(BS,), (0,))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_beta = tl.load(p_beta, boundary_check=(0,))
m_s = tl.arange(0, BT) >= offset - i_t * BT + BS
b_s = tl.dot(b_q.to(b_k.dtype), b_k, allow_tf32=False)
b_s = tl.where(m_s[:, None], b_s, 0)
b_o += tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
b_k2 = (tl.load(p_k2, boundary_check=(0, 1)) * b_beta[:, None]).to(b_v
.dtype)
b_q -= tl.dot(b_s.to(b_v.dtype), b_k2, allow_tf32=False)
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + i_bh * T * T, (T, T), (T, 1), (
i_t * BT, offset), (BT, BS), (1, 0))
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
for offset in range(i_t * BT - BS, -BS, -BS):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (1, s_k_t), (0,
offset), (BK, BS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, 1), (
offset, 0), (BS, BV), (1, 0))
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (offset,),
(BS,), (0,))
p_k2 = tl.make_block_ptr(k2 + i_bh * s_k_h, (T, K), (s_k_t, 1), (
offset, 0), (BS, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_s = tl.dot(b_q.to(b_k.dtype), b_k, allow_tf32=False)
b_o += tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
b_k2 = (tl.load(p_k2, boundary_check=(0, 1)) * b_beta[:, None]).to(b_v
.dtype)
b_q -= tl.dot(b_s.to(b_v.dtype), b_k2, allow_tf32=False).to(b_q.dtype)
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + i_bh * T * T, (T, T), (T, 1), (
i_t * BT, offset), (BT, BS), (1, 0))
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
p_o_new = tl.make_block_ptr(o_new + i_bh * s_v_h, (T, V), (s_v_t, 1), (
i_t * BT, 0), (BT, BV), (1, 0))
tl.store(p_o_new, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/parallel.py |
520e593c-f72d-4d79-8df6-fa4beed24ea7 | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=[Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K':
128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), Config({'BLOCK_M': 256,
'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=
8), Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1
}, num_stages=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 256,
'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({
'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1},
num_stages=4, num_warps=4), Config({'BLOCK_M': 128, 'BLOCK_N': 64,
'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), Config({
'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages
=4, num_warps=4), Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'BLOCK_K': 512,
'SPLIT_K': 1}, num_stages=3, num_warps=4)], key=['m_key', 'n_key', 'k_key']
)
@triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
args['SPLIT_K']) == 0})
@triton.jit
def _kernel_matmul_fp8_row_tma_persistent(A_ptr, B_ptr, C_ptr, M, N, K,
m_key, n_key, k_key, A_scale, B_scale, Bias, stride_am, stride_ak,
stride_bn, stride_bk, stride_cm, stride_cn, dot_out_dtype: tl.constexpr,
c_dtype: tl.constexpr, bias_dtype: tl.constexpr, allow_tf32: tl.
constexpr, fp8_fast_accum: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N:
tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, AB_DTYPE:
tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, NUM_SMS: tl.
constexpr, USE_BIAS: tl.constexpr) ->None:
"""Matmul kernel of [M, K] @ [N, K] with row-wise scales
performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles.
Args:
A (TensorWrapper): [M, K] input tensor.
B (TensorWrapper): [N, K] input tensor.
C (TensorWrapper): [M, N] output tensor.
M (int): M dimension of input tensor.
N (int): N dimension of input tensor.
K (int): K dimension of input tensor.
A_scale (TensorWrapper): [M] reciprocal scale tensor per row. A * A_scale = original A
B_scale (TensorWrapper): [N] reciprocal scale tensor per row. B * B_scale = original B
stride_am (int): Stride of M dimension of A.
stride_ak (int): Stride of K dimension of A.
stride_bn (int): Stride of N dimension of B.
stride_bk (int): Stride of K dimension of B.
stride_cm (int): Stride of M dimension of C.
stride_cn (int): Stride of N dimension of C.
dot_out_dtype (torch.dtype): Output type of tensor core.
allow_tf32 (bool): Whether to use TF32 for tensor core.
fp8_fast_accum (bool): Whether to use fast accumulation for tensor core.
BLOCK_M (int): Block size for M dimension.
BLOCK_N (int): Block size for N dimension.
BLOCK_K (int): Block size for K dimension.
GROUP_M (int): Number of groups for M dimension swizzle.
SPLIT_K (int): Number of SM's to launch per row.
EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K.
        AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
"""
start_pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M)
num_pid_n = tl.cdiv(N, BLOCK_N)
k_tiles = tl.cdiv(K, BLOCK_K)
num_tiles = num_pid_m * num_pid_n
tiles_per_SM = num_tiles // NUM_SMS
if start_pid < num_tiles % NUM_SMS:
tiles_per_SM += 1
tile_id = start_pid - NUM_SMS
ki = -1
pid_m = 0
pid_n = 0
offs_am = 0
offs_bn = 0
num_pid_in_group = GROUP_M * num_pid_n
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
dtype_fp8 = tl.float8e4nv
scale_dtype = tl.float32
for _ in range(0, k_tiles * tiles_per_SM):
ki = tl.where(ki == k_tiles - 1, 0, ki + 1)
if ki == 0:
tile_id += NUM_SMS
group_id = tile_id // num_pid_in_group
first_pid_m = group_id * GROUP_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_M)
pid_m = first_pid_m + tile_id % group_size_m
pid_n = tile_id % num_pid_in_group // group_size_m
offs_am = pid_m * BLOCK_M
offs_bn = pid_n * BLOCK_N
offs_am = tl.multiple_of(offs_am, BLOCK_M)
offs_bn = tl.multiple_of(offs_bn, BLOCK_N)
offs_k = ki * BLOCK_K
a = tl._experimental_descriptor_load(A_ptr, [offs_am, offs_k], [
BLOCK_M, BLOCK_K], dtype_fp8)
b = tl._experimental_descriptor_load(B_ptr, [offs_bn, offs_k], [
BLOCK_N, BLOCK_K], dtype_fp8)
if fp8_fast_accum:
acc = tl.dot(a, b.T, acc, out_dtype=dot_out_dtype, allow_tf32=
allow_tf32)
else:
acc += tl.dot(a, b.T, out_dtype=dot_out_dtype, allow_tf32=
allow_tf32)
if ki == k_tiles - 1:
a_scale = tl._experimental_descriptor_load(A_scale, [offs_am],
[BLOCK_M], scale_dtype)
b_scale = tl._experimental_descriptor_load(B_scale, [offs_bn],
[BLOCK_N], scale_dtype)
scale = a_scale[:, None] * b_scale[None, :]
acc *= scale
if USE_BIAS:
bias = tl._experimental_descriptor_load(Bias, [offs_bn], [
BLOCK_N], bias_dtype)
acc += bias[None, :]
acc = acc.to(c_dtype)
tl._experimental_descriptor_store(C_ptr, acc, [offs_am, offs_bn])
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
d240495b-66c0-479d-a4bb-97581826f003 | mhmoe.py | dtadpole/triton-playground | mhmoe.py | 2d317976722d63080133b1bf88b1f0cdec98f831 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_B': 64,
'BLOCK_SIZE_E': 32}, num_stages=4, num_warps=4), triton.Config({
'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 64}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_B': 64, 'BLOCK_SIZE_E': 64}, num_stages=3,
num_warps=4)], key=['B', 'D', 'E'])
@triton.jit
def mlp_wide_kernel_fwd(x_ptr, w1_ptr, w2_ptr, o_ptr, H, B, D: tl.constexpr,
E, stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d,
stride_ob, stride_od, BLOCK_SIZE_B: tl.constexpr, BLOCK_SIZE_E: tl.
constexpr, ACTIVATION: tl.constexpr):
"""Kernel for computing the mlp
Z = X @ W1, H = f(Z), O = H @ W2.
- X has shape (B, D)
- W1 has shape (D, E)
- W2 has shape (E, D)
- O has shape (B, D)
"""
pid = tl.program_id(axis=0)
batch_groups = tl.cdiv(B, BLOCK_SIZE_B)
pid_b = pid % batch_groups
pid_h = pid // batch_groups
TARGET_TYPE = x_ptr.type.element_ty
x_ptrs = tl.make_block_ptr(base=x_ptr, shape=(B * H, D), strides=(
stride_xb, stride_xd), offsets=(pid_h * B + pid_b * BLOCK_SIZE_B, 0
), block_shape=(BLOCK_SIZE_B, D), order=(1, 0))
w1_ptrs = tl.make_block_ptr(base=w1_ptr, shape=(D * H, E), strides=(
stride_w1d, stride_w1e), offsets=(pid_h * D, 0), block_shape=(D,
BLOCK_SIZE_E), order=(1, 0))
w2_ptrs = tl.make_block_ptr(base=w2_ptr, shape=(E * H, D), strides=(
stride_w2e, stride_w2d), offsets=(pid_h * E, 0), block_shape=(
BLOCK_SIZE_E, D), order=(1, 0))
o_ptrs = tl.make_block_ptr(base=o_ptr, shape=(B * H, D), strides=(
stride_ob, stride_od), offsets=(pid_h * B + pid_b * BLOCK_SIZE_B, 0
), block_shape=(BLOCK_SIZE_B, D), order=(1, 0))
x = tl.load(x_ptrs)
o = tl.zeros((BLOCK_SIZE_B, D), dtype=tl.float32)
for e in range(0, tl.cdiv(E, BLOCK_SIZE_E)):
w1 = tl.load(w1_ptrs)
w2 = tl.load(w2_ptrs)
z = tl.dot(x, w1, out_dtype=tl.float32)
if ACTIVATION == 'leaky_relu':
z = leaky_relu(z).to(TARGET_TYPE)
elif ACTIVATION == 'silu':
z = silu(z).to(TARGET_TYPE)
elif ACTIVATION == 'sigmoid':
z = tl.sigmoid(z).to(TARGET_TYPE)
else:
z = z.to(TARGET_TYPE)
o = tl.dot(z, w2, o, out_dtype=tl.float32)
w1_ptrs = tl.advance(w1_ptrs, (0, BLOCK_SIZE_E))
w2_ptrs = tl.advance(w2_ptrs, (BLOCK_SIZE_E, 0))
o = o.to(TARGET_TYPE)
tl.store(o_ptrs, o)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py |
d3df060f-25ae-4f53-8fb1-4c7cff1669ee | quantize.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/quantize.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _kernel_dequantize_mx4(A, mx4_lookup_table, out, M, GROUPS_PER_THREAD,
GROUP_SIZE: tl.constexpr, GROUP_LOAD: tl.constexpr, USE_INT64: tl.constexpr
) ->None:
"""Dequantize a packed MX4 tensor and apply scaling.
Args:
        A (Tensor): [M] MX4 tensor packed into int8; each group's int8 shared
            exponent is stored inline after the group's packed values.
        mx4_lookup_table (Tensor): Map from mx4 integer value to floating point.
        out (Tensor): Output tensor for the dequantized fp32 values.
M (int): Total number of elements in input.
GROUPS_PER_THREAD (int): Number of groups each thread is responsible for.
GROUP_SIZE (int): Size of chunks that use the same shared exponent.
GROUP_LOAD (int): Number of groups to process simultaneously.
USE_INT64 (bool): Whether to use int64 for indexing.
"""
MX4_BIT_MASK: tl.constexpr = 15
FP32_EXP_BIAS: tl.constexpr = 127
PACKED_GROUP_SIZE: tl.constexpr = GROUP_SIZE // 2 + 1
pid = tl.program_id(0)
if USE_INT64:
pid = pid.to(tl.int64)
M = tl.cast(M, tl.int64)
GROUPS_PER_THREAD = tl.cast(GROUPS_PER_THREAD, tl.int64)
INPUT_CHUNK_SIZE = GROUPS_PER_THREAD * PACKED_GROUP_SIZE
OUTPUT_CHUNK_SIZE = GROUPS_PER_THREAD * GROUP_SIZE
OUTPUT_SIZE = M // PACKED_GROUP_SIZE * GROUP_SIZE
input_start = pid * (GROUPS_PER_THREAD * PACKED_GROUP_SIZE)
exp_start = input_start + GROUP_SIZE // 2
output_start = pid * OUTPUT_CHUNK_SIZE
input_offset = tl.arange(0, GROUP_LOAD * GROUP_SIZE // 2)
exp_indices = input_offset // (GROUP_SIZE // 2)
input_offset = input_offset + exp_indices + input_start
output_offset = tl.arange(0, GROUP_LOAD * GROUP_SIZE) + output_start
exp_offset = exp_indices * PACKED_GROUP_SIZE + exp_start
for _k in range(0, tl.cdiv(GROUPS_PER_THREAD, GROUP_LOAD)):
a = tl.load(A + input_offset, mask=(input_offset < M) & (
input_offset < INPUT_CHUNK_SIZE * (pid + 1)), other=0.0)
low_mx4 = a & MX4_BIT_MASK
high_mx4 = a >> 4 & MX4_BIT_MASK
low_fp32 = tl.load(mx4_lookup_table + low_mx4)
high_fp32 = tl.load(mx4_lookup_table + high_mx4)
exp = tl.load(A + exp_offset, mask=(exp_offset < M) & (exp_offset <
INPUT_CHUNK_SIZE * (pid + 1)), other=0.0)
exp = exp.to(tl.int16) - FP32_EXP_BIAS
scale = tl.exp2(exp.to(tl.float64)).to(tl.float32)
scaled_low_fp32 = scale * low_fp32
scaled_high_fp32 = scale * high_fp32
scaled_fp32 = tl.interleave(scaled_low_fp32, scaled_high_fp32)
tl.store(out + output_offset, scaled_fp32, mask=(output_offset <
OUTPUT_SIZE) & (output_offset < OUTPUT_CHUNK_SIZE * (pid + 1)))
input_offset += GROUP_LOAD * PACKED_GROUP_SIZE
exp_offset += GROUP_LOAD * PACKED_GROUP_SIZE
output_offset += GROUP_LOAD * GROUP_SIZE
| {
"Data Type": [
"int8",
"fp32"
],
"Functionality": [
"Quantization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/quantize.py |
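A NumPy sketch of dequantizing one packed group, assuming the layout implied by PACKED_GROUP_SIZE above (GROUP_SIZE // 2 value bytes followed by one shared-exponent byte):

import numpy as np

def dequantize_mx4_group(packed: np.ndarray, lookup: np.ndarray) -> np.ndarray:
    # packed: GROUP_SIZE // 2 value bytes plus one trailing exponent byte.
    values, exp = packed[:-1].astype(np.uint8), int(packed[-1])
    low = lookup[values & 0x0F]          # low nibble -> fp32 via lookup table
    high = lookup[(values >> 4) & 0x0F]  # high nibble
    out = np.empty(2 * low.size, dtype=np.float32)
    out[0::2], out[1::2] = low, high     # interleave, as tl.interleave does
    return out * np.float32(2.0 ** (exp - 127))  # apply shared exponent
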
9f4731c2-0c44-4143-a92f-bcfed7921b0a | normalization.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/normalization.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_normalization_backward_kernel_1(grad_input_ptr, grad_output_ptr,
grad_weight_partial_ptr, grad_bias_partial_ptr, output_ptr, weight_ptr,
bias_ptr, inv_var_ptr, n_cols, n_rows, has_bias: tl.constexpr,
zero_centered: tl.constexpr, block_size: tl.constexpr, block_size_row:
tl.constexpr):
rows = tl.program_id(0) * block_size_row + tl.arange(0, block_size_row)[
:, None]
row_mask = rows < n_rows
cols = tl.arange(0, block_size)[None, :]
col_mask = cols < n_cols
mask = col_mask & row_mask
offsets = rows * n_cols + cols
output = tl.load(output_ptr + offsets, mask=mask, other=0).to(tl.float32)
grad_output = tl.load(grad_output_ptr + offsets, mask=mask, other=0).to(tl
.float32)
weight = tl.load(weight_ptr + cols, mask=col_mask).to(tl.float32)
if zero_centered:
weight += 1
inv_var = tl.load(inv_var_ptr + rows, mask=row_mask)
if has_bias:
bias = tl.load(bias_ptr + cols, mask=col_mask).to(tl.float32)
output = output - bias
input_normalized = tl.where(mask, output / weight, 0.0)
weight_grad_output = tl.where(mask, weight * grad_output * inv_var, 0.0)
grad_input = weight_grad_output - input_normalized * (tl.sum(
input_normalized * weight_grad_output, axis=1)[:, None] / n_cols)
if has_bias:
grad_input = grad_input - tl.sum(weight_grad_output, axis=1)[:, None
] / n_cols
tl.store(grad_input_ptr + offsets, grad_input, mask=mask)
parameter_offsets = tl.program_id(0) * n_cols + cols
grad_weight_partial_ptr = grad_weight_partial_ptr + parameter_offsets
grad_weight_partial = (grad_output * input_normalized).to(weight.dtype)
grad_weight_partial = tl.sum(grad_weight_partial, axis=0)[None, :]
if has_bias:
grad_bias_partial_ptr = grad_bias_partial_ptr + parameter_offsets
grad_bias_partial = tl.sum(grad_output.to(weight.dtype), axis=0)[
None, :]
tl.store(grad_weight_partial_ptr, grad_weight_partial, mask=col_mask)
if has_bias:
tl.store(grad_bias_partial_ptr, grad_bias_partial, mask=col_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/normalization.py |
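The input-gradient path above reconstructs the normalized activations from the forward output rather than reloading the input. A per-row NumPy reference of the same formula (a sketch, not Fast-LLM's API):

import numpy as np

def norm_bwd_input_reference(output, grad_output, weight, inv_var, bias=None):
    # Mirrors the kernel: recover x_hat from the forward output, then
    # subtract the two mean-projection terms of the LayerNorm backward.
    if bias is not None:
        output = output - bias
    x_hat = output / weight                        # normalized input
    wgo = weight * grad_output * inv_var[:, None]  # weight * dy * (1 / std)
    n = output.shape[-1]
    grad = wgo - x_hat * (x_hat * wgo).sum(-1, keepdims=True) / n
    if bias is not None:
        grad = grad - wgo.sum(-1, keepdims=True) / n
    return grad
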
5a3b3d77-505a-428b-9f7e-43a71233c09b | fp8_matmul.py | drisspg/transformer_nuggets | transformer_nuggets/fp8/fp8_matmul.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def matmul_kernel_tma_persistent(a_desc_ptr, a_scale_ptr, b_desc_ptr,
b_scale_ptr, c_desc_ptr, M, N, K, stride_a_scale_m, stride_b_scale_n,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE_M: tl.constexpr, NUM_SMS: tl.constexpr,
output_dtype: tl.constexpr, ROW_WISE_SCALING: tl.constexpr):
tl.inline_asm_elementwise(
'fence.proxy.tensormap::generic.acquire.gpu [$1], 128; // $0 dummy reg'
, '=r, l', [a_desc_ptr], dtype=tl.int32, is_pure=False, pack=1)
tl.inline_asm_elementwise(
'fence.proxy.tensormap::generic.acquire.gpu [$1], 128; // $0 dummy reg'
, '=r, l', [b_desc_ptr], dtype=tl.int32, is_pure=False, pack=1)
tl.inline_asm_elementwise(
'fence.proxy.tensormap::generic.acquire.gpu [$1], 128; // $0 dummy reg'
, '=r, l', [c_desc_ptr], dtype=tl.int32, is_pure=False, pack=1)
dtype = tl.float8e4nv
start_pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
k_tiles = tl.cdiv(K, BLOCK_SIZE_K)
num_tiles = num_pid_m * num_pid_n
tiles_per_SM = num_tiles // NUM_SMS
if start_pid < num_tiles % NUM_SMS:
tiles_per_SM += 1
tile_id = start_pid - NUM_SMS
ki = -1
pid_m = 0
pid_n = 0
offs_am = 0
offs_bn = 0
num_pid_in_group = GROUP_SIZE_M * num_pid_n
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
a_scale, b_scale = load_scales(a_scale_ptr, b_scale_ptr, ROW_WISE_SCALING)
for _ in range(0, k_tiles * tiles_per_SM):
ki = tl.where(ki == k_tiles - 1, 0, ki + 1)
if ki == 0:
tile_id += NUM_SMS
group_id = tile_id // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + tile_id % group_size_m
pid_n = tile_id % num_pid_in_group // group_size_m
offs_am = pid_m * BLOCK_SIZE_M
offs_bn = pid_n * BLOCK_SIZE_N
offs_k = ki * BLOCK_SIZE_K
a = tl._experimental_descriptor_load(a_desc_ptr, [offs_am, offs_k],
[BLOCK_SIZE_M, BLOCK_SIZE_K], dtype)
b = tl._experimental_descriptor_load(b_desc_ptr, [offs_bn, offs_k],
[BLOCK_SIZE_N, BLOCK_SIZE_K], dtype)
accumulator = tl.dot(a, b.T, accumulator)
if ki == k_tiles - 1:
offs_cm = offs_am + tl.arange(0, BLOCK_SIZE_M)
offs_cn = offs_bn + tl.arange(0, BLOCK_SIZE_N)
accumulator = apply_scaling(accumulator, a_scale, b_scale,
ROW_WISE_SCALING, offs_cm, offs_cn, M, N, stride_a_scale_m,
stride_b_scale_n)
c = accumulator.to(output_dtype)
tl._experimental_descriptor_store(c_desc_ptr, c, [offs_am, offs_bn]
)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.
float32)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/fp8_matmul.py |
40bd775e-ef2e-4eda-8c59-25ebdea73d19 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),
triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=2, num_warps=8), triton.Config({'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M':
32}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M':
32}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_M':
32}, num_stages=2, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def rz_linear_backward_weight_grad_kernel_tf32(a_ptr, b_ptr, c_ptr,
init_factor, M, N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7:
int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE: tl.constexpr):
rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=
c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=
stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=
stride_bn, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
allow_tf32=True, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=
BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [],
"Functionality": [
"Matrix Multiplication",
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
0657cbe4-31fe-41a2-aee4-95f95ff84d3e | masks.py | drisspg/transformer_nuggets | transformer_nuggets/flash/masks.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def causal_mask_triton(score, batch, head, seq_len_q, seq_len_kv):
score = tl.where(seq_len_q >= seq_len_kv, score, float('-inf'))
return score
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py |
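Here seq_len_q and seq_len_kv act as per-element query and key positions (score-mod style), so the mask is equivalent to this PyTorch one-liner:

import torch

def causal_mask_reference(score, q_pos, kv_pos):
    # Positions where the key is ahead of the query get -inf, as in the kernel.
    return torch.where(q_pos >= kv_pos, score, torch.full_like(score, float('-inf')))
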
40cd1d4b-831e-4e99-84f0-0996d5aa95be | bwd_split_kernel.py | ROCm/aotriton | test/bwd_split_kernel.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def dot(BLOCK_M: tl.constexpr, QDIM: tl.constexpr, KDIM: tl.constexpr, q, k):
if BLOCK_M == 1:
return tl.sum(tl.view(q, [QDIM]) * tl.view(k, [KDIM]))
else:
return tl.dot(q, k)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_split_kernel.py |
17bb60bc-fa4c-439e-b572-df79bd2eeab3 | triton_fused_attn2.py | LouChao98/vqtree | ops/triton_fused_attn2.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0})
@triton.jit
def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh,
stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh,
stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_qb + off_h *
stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1),
offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM
), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + (off_b * stride_kb + off_h *
stride_kh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_kn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h *
stride_vh), shape=(seqlen_k, BLOCK_HEADDIM), strides=(stride_vn, 1),
offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0))
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
if EVEN_M:
q = tl.load(Q_block_ptr)
else:
q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) *
BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_N & EVEN_M:
k = tl.load(K_block_ptr)
else:
k = tl.load(K_block_ptr, boundary_check=(0,), padding_option='zero'
)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
if not EVEN_N:
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, NEGINF)
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :],
0, NEGINF)
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
acc_o_scale = tl.exp(m_i - m_ij)
acc_o = acc_o * acc_o_scale[:, None]
if EVEN_N & EVEN_M:
v = tl.load(V_block_ptr)
else:
v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero'
)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
K_block_ptr = tl.advance(K_block_ptr, (BLOCK_N, 0))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
o_scale = tl.exp(m_i - lse_i)
acc_o = acc_o * o_scale[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_attn2.py |
2186ae8f-a1c1-4eda-9e86-5e9dad2b7585 | tuned_bwd.py | ROCm/aotriton | tritonsrc/tuned_bwd.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_BWD, key=['BLOCK_DMODEL',
'max_seqlen_q', 'max_seqlen_k'])
@triton.jit
def tuned_bwd_kernel_dq(Q, K, V, B, sm_scale, Out, DO, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dqz, stride_dqh, stride_dqm, stride_dqk,
stride_dbz, stride_dbh, stride_dbm, stride_dbn, cu_seqlens_q,
cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k, head_dim,
dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.
constexpr):
bare_bwd_kernel_dq(Q, K, V, B, sm_scale, Out, DO, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dqz, stride_dqh, stride_dqm,
stride_dqk, stride_dbz, stride_dbh, stride_dbm, stride_dbn,
cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k,
head_dim, dropout_p, philox_seed, philox_offset_base, BLOCK_M,
BLOCK_DMODEL, BLOCK_N, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD=
PADDED_HEAD, BIAS_TYPE=BIAS_TYPE)
| {
"Data Type": [
"fp32",
"int8"
],
"Functionality": [
"Quantization",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/tuned_bwd.py |
0e6f0fcf-9cae-4955-907d-a1026597b579 | RzLinearForward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearForward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def rz_linear_forward_core(a_ptr, b_ptr, c_ptr, init_factor, M: int, N: int,
K: int, H: int, stride_am, stride_ak, stride_cm, stride_cn, allow_tf32:
tl.constexpr, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1:
int, R0: int, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_offset = b_ptr + offs_k[:, None] * BLOCK_SIZE_N + tl.arange(0,
BLOCK_SIZE_N)[None, :]
b_ptrs = b_offset + ((0 * R3 + pid_n * R2 + R1) % R0 * R0 + (0 * R7 +
pid_n * R5 + R4) % R0) % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
a_zero = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
b_zero = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
offs_k = k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
a_mask = (offs_cm[:, None] < M) & (offs_k[None, :] < K)
b_mask = (offs_k[:, None] < K) & (offs_cn[None, :] < N)
a = tl.load(a_ptrs, mask=a_mask, other=a_zero)
b = tl.load(b_ptrs, mask=b_mask, other=b_zero)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs = b_offset + (((k + 1) * R3 + pid_n * R2 + R1) % R0 * R0 + (
(k + 1) * R7 + pid_n * R5 + R4) % R0) % (H - BLOCK_SIZE_K *
BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :
]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c * init_factor, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearForward.py |
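The prologue of rz_linear_forward_core uses the standard grouped tile ordering to improve L2 reuse of B. The same pid -> (pid_m, pid_n) arithmetic on the host:

import math

def grouped_pid_map(pid, M, N, BLOCK_M, BLOCK_N, GROUP_SIZE):
    # Tiles are visited column-major within groups of GROUP_SIZE tile-rows,
    # matching the index arithmetic at the top of the kernel.
    num_pid_m = math.ceil(M / BLOCK_M)
    num_pid_n = math.ceil(N / BLOCK_N)
    num_pid_in_group = GROUP_SIZE * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE)
    pid_m = first_pid_m + pid % group_size_m
    pid_n = (pid % num_pid_in_group) // group_size_m
    return pid_m, pid_n
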
f66b57d6-8434-4700-927a-a112b18f64e5 | cross_entropy.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/cross_entropy.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_cross_entropy_forward_backward_kernel(logits_ptr, labels_ptr,
grad_logits_ptr, losses_ptr, grad_losses, n_cols, logits_stride_0,
grad_logits_stride_0, logits_scale_factor: tl.constexpr, block_size: tl
.constexpr):
block_idx = tl.program_id(0).to(tl.int64)
col_offsets = tl.arange(0, block_size)
logits_ptr = logits_ptr + block_idx * logits_stride_0
mask = col_offsets < n_cols
logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float('inf')
).to(tl.float32)
if logits_scale_factor != 1.0:
logits *= logits_scale_factor
max_logits = tl.max(logits, 0)
exp_logits = tl.exp(logits - max_logits)
sum_exp_logits = tl.sum(exp_logits, 0)
label_idx = tl.load(labels_ptr + block_idx)
label_logits = tl.load(logits_ptr + label_idx).to(tl.float32)
if label_idx < 0:
loss = 0.0
else:
loss = tl.log(sum_exp_logits) + max_logits - label_logits
tl.store(losses_ptr + block_idx, loss)
grad_logits_ptr = grad_logits_ptr + block_idx * grad_logits_stride_0
col_offsets = tl.arange(0, block_size)
label_idx = tl.load(labels_ptr + block_idx)
exp_logits = exp_logits / sum_exp_logits
if logits_scale_factor != 1.0:
exp_logits *= logits_scale_factor
if label_idx < 0:
grad_losses = 0.0
grad_logits = grad_losses * tl.where(col_offsets == label_idx,
exp_logits - 1.0, exp_logits)
tl.store(grad_logits_ptr + col_offsets, grad_logits, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/cross_entropy.py |
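A per-row NumPy reference that mirrors the fused kernel's arithmetic, including the logits scale factor and the ignore-index behaviour for label < 0:

import numpy as np

def cross_entropy_fwd_bwd_reference(logits, label, grad_loss=1.0, scale=1.0):
    # loss = logsumexp(z) - z[label] with z = scale * logits; the gradient
    # follows the kernel's tl.where(col == label, p - 1, p) formulation.
    z = logits.astype(np.float32) * scale
    m = z.max()
    p = np.exp(z - m) / np.exp(z - m).sum()        # softmax(z)
    loss = 0.0 if label < 0 else np.log(np.exp(z - m).sum()) + m - z[label]
    g = grad_loss if label >= 0 else 0.0
    grad = g * np.where(np.arange(z.size) == label, scale * p - 1.0, scale * p)
    return loss, grad
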
dc459f02-1c1e-4b41-baca-462de7e6a012 | ops.py | srush/triton-autodiff | triton_autodiff/ops.py | f9d1a04d048e3252bfd222646db7175ad60a3c7c | 0 | @triton.jit
def zeroslike(x):
return tl.zeros(x.shape, tl.float32)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/triton_autodiff/ops.py |
2af621bf-a5fd-4ee0-a77d-7c7225403ab3 | sparse_copy.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_copy.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def copy_sparse_to_dense_kernel(input_ptr, output_ptr, scores_ptr,
sparse_rows_ptr, num_columns: tl.constexpr, num_experts_per_token: tl.
constexpr, block_size: tl.constexpr):
dense_row = tl.program_id(0)
offsets = tl.arange(0, block_size) + block_size * tl.program_id(1)
mask = None if num_columns % block_size == 0 else offsets < num_columns
out = tl.zeros((block_size,), tl.float32)
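    # MoE combine: gather this token's expert rows and accumulate their
    # (optionally score-weighted) sum into one dense output row.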
for top_index in range(num_experts_per_token):
sparse_row = tl.load(sparse_rows_ptr + dense_row *
num_experts_per_token + top_index)
input_ = tl.load(input_ptr + sparse_row * num_columns + offsets,
mask=mask)
if scores_ptr is not None:
input_ *= tl.load(scores_ptr + dense_row *
num_experts_per_token + top_index).to(tl.float32)
out += input_
tl.store(output_ptr + dense_row * num_columns + offsets, out, mask=mask)
| {
"Data Type": [
"fp32",
"int8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_copy.py |
c6143804-3cfd-4d1f-ab60-f8d9dbef8b8b | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_softmax/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_softmax_kernel_simple_fused_buffer_then_sum(input_ptr_values,
input_ptr_offsets, output_ptr, M, MAX_SEQLEN, BLOCK_SIZE_RAGGED: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
buffer_max_all = tl.full((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), value=float
('-inf'), dtype=tl.float32)
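    # Pass 1: running elementwise max over all ragged-dimension tiles.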
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
buffer_max_all = tl.maximum(buffer_max_all, input)
buffer_max = tl.max(buffer_max_all, axis=0, keep_dims=True)
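    # Pass 2: accumulate exp(x - max) to build the softmax normalizer.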
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
buffer += tl.exp(input - buffer_max)
buffer_exp_sum = tl.sum(buffer, axis=0)
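    # Pass 3: recompute exp(x - max), normalize, and write the result.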
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
output = tl.fdiv(tl.exp(input - buffer_max), buffer_exp_sum)
tl.store(output_ptr + idxs, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_softmax/kernels.py |
97976f05-d763-4664-a07a-d3f7a68ba825 | rms_norm_kernels.py | BobMcDear/attorch | attorch/rms_norm_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
def rms_norm_backward_kernel(output_grad_pointer, input_pointer,
inv_rms_pointer, weight_pointer, input_grad_pointer,
weight_grad_pointer, batch_dim, feat_dim, output_grad_batch_stride,
output_grad_feat_stride, input_batch_stride, input_feat_stride,
input_grad_batch_stride, input_grad_feat_stride,
weight_grad_batch_stride, weight_grad_feat_stride, scale_by_weight: tl.
constexpr, BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_FEAT: tl.constexpr):
"""
Calculates the input gradient of root mean square normalization.
Args:
output_grad_pointer: Pointer to root mean square normalization's output gradients.
The output gradients must be of shape [batch_dim, feat_dim].
input_pointer: Pointer to the input.
The input must be of shape [batch_dim, feat_dim].
inv_rms_pointer: Pointer to the input's inverse root mean square.
The inverse root mean square should be of shape [batch_dim].
weight_pointer: Pointer to optional weights if affine transform occurred.
The weights, if provided, must be of shape [feat_dim].
input_grad_pointer: Pointer to a container the input's gradients are written to.
The container must be of shape [batch_dim, feat_dim].
weight_grad_pointer: Pointer to an optional container the weights' row-wise gradients
are written to if scale_by_weight is True, which should later be summed.
The container, if provided, must be of shape [batch_dim/BLOCK_SIZE_BATCH, feat_dim].
batch_dim: Batch dimension.
feat_dim: Dimensionality of the features.
output_grad_batch_stride: Stride necessary to jump one element along the
output gradients' batch dimension.
output_grad_feat_stride: Stride necessary to jump one element along the
output gradients' feature dimension.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
input_grad_batch_stride: Stride necessary to jump one element along the
input gradient container's batch dimension.
input_grad_feat_stride: Stride necessary to jump one element along the
input gradient container's feature dimension.
weight_grad_batch_stride: Stride necessary to jump one element along the
weight gradient container's batch dimension.
weight_grad_feat_stride: Stride necessary to jump one element along the
weight gradient container's feature dimension.
scale_by_weight: Flag for scaling the normalized output by weights.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_FEAT: Block size across the feature dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
batch_mask = batch_offset < batch_dim
feat_mask = feat_offset < feat_dim
output_grad_pointer += output_grad_batch_stride * batch_offset[:, None
] + output_grad_feat_stride * feat_offset[None, :]
input_pointer += input_batch_stride * batch_offset[:, None
] + input_feat_stride * feat_offset[None, :]
input_grad_pointer += input_grad_batch_stride * batch_offset[:, None
] + input_grad_feat_stride * feat_offset[None, :]
output_grad = tl.load(output_grad_pointer, mask=batch_mask[:, None] &
feat_mask[None, :]).to(tl.float32)
input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[
None, :]).to(tl.float32)
inv_rms = tl.load(inv_rms_pointer + batch_offset, mask=batch_mask)
pre_lin = input * inv_rms[:, None]
if scale_by_weight:
weight = tl.load(weight_pointer + feat_offset, mask=feat_mask)
weight_output_grad_prod = weight * output_grad
else:
weight_output_grad_prod = output_grad
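    # RMSNorm backward: input_grad = rstd * (w*dy - x * sum(x * w*dy) * rstd^2 / feat_dim);
    # the row-wise sum must be broadcast back over the feature axis.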
    term1 = input * tl.sum(input * weight_output_grad_prod, axis=1)[:, None]
    term2 = inv_rms[:, None] * inv_rms[:, None]
    input_grad = inv_rms[:, None] * (weight_output_grad_prod - term1 *
        term2 / feat_dim)
tl.store(input_grad_pointer, input_grad, mask=batch_mask[:, None] &
feat_mask[None, :])
if scale_by_weight:
weight_grad_pointer += (weight_grad_batch_stride * batch_pid +
weight_grad_feat_stride * feat_offset)
tl.store(weight_grad_pointer, tl.sum(output_grad * pre_lin, axis=0),
mask=feat_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/rms_norm_kernels.py |
617a6f86-5188-4419-b333-c0d0a02f6e0f | softmax_online_v2_spec_rev_evict.py | iclementine/optimize_softmax | softmax_online_v2_spec_rev_evict.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr
):
pid_m = tl.program_id(0)
m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.
element_ty)
z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty)
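    # Online softmax: keep a running max m and normalizer z = sum(exp(x - m)),
    # rescaling z whenever the max grows.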
prev_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, prev_multiple, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs).to(output_ptr.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
for start_n in range(prev_multiple, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
final_m = tl.max(m, 0)
z = tl.sum(tl.exp(m - final_m) * z)
m = final_m
prev_multiple = prev_multiple_of(N, TILE_N)
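    # Write phase walks tiles in reverse (masked tail tile first, while it is
    # likely still cached); evict_first hints the lines are dead after use.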
for start_n in range(0, TILE_N, TILE_N):
n_offsets = prev_multiple - start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf'),
eviction_policy='evict_first').to(output_ptr.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
for start_n in range(TILE_N, N, TILE_N):
n_offsets = prev_multiple - start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs, eviction_policy='evict_first').to(output_ptr
.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out)
| {
"Data Type": [],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_spec_rev_evict.py |
ac0acde3-97ef-443c-937e-f8500c232164 | random_matrix.py | Forkxz/TritonDeepLearningKernel | kernel/dropconnect/random_matrix.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N':
4, 'BLOCK_SIZE_K': 32})], key=['M', 'N', 'K'])
@triton.jit
def random_matrix_kernel(r_ptr, seed, M, K, N, stride_dm, stride_dk,
stride_dn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_m = pid // num_pid_n
pid_n = pid % num_pid_n
offset_m = pid_m * BLOCK_SIZE_M
offset_n = pid_n * BLOCK_SIZE_N
offset_k = 0
d_offsets = block_offsets_3d(M, K, N, stride_dm, stride_dk, stride_dn,
offset_m, offset_k, offset_n, BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
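    # Draw a Bernoulli(0.5) keep mask per element; seeding by the flat offset
    # makes the mask reproducible for a given seed.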
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
k_mask = offs_k[None, :, None] < K - k * BLOCK_SIZE_K
random_masks = tl.random.rand(seed, d_offsets) > 0.5
tl.store(r_ptr + d_offsets, random_masks.to(tl.int8), mask=k_mask)
d_offsets += BLOCK_SIZE_K * stride_dk
| {
"Data Type": [
"int8"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/random_matrix.py |
6670d8a8-c520-453d-87db-e31e9f5517c5 | test_triton_basics.py | tucommenceapousser/xformers | tests/test_triton_basics.py | c97e3d917cfdad4a38acd4e9d776030d25ab9141 | 0 | @triton.jit
def k_mean(X, Mean, Var, stride, N, BLOCK_SIZE_N: tl.constexpr):
"""
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta
"""
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
x_mean = tl.sum(x, axis=0) / N
x_zm = x - x_mean
x_zm = tl.where(cols < N, x_zm, 0.0)
x_var = tl.sum(x_zm * x_zm, axis=0) / N
tl.store(Mean + row, x_mean)
tl.store(Var + row, x_var)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/tucommenceapousser/xformers/blob/c97e3d917cfdad4a38acd4e9d776030d25ab9141/tests/test_triton_basics.py |
52ce8da6-a14c-4648-8839-2497e5c7cd47 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/rebased/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def parallel_rebased_fwd_kernel(q, k, v, o, z, s_k_h, s_k_t, s_k_d, s_v_h,
s_v_t, s_v_d, scale, B, H, T, K: tl.constexpr, V: tl.constexpr, BTL: tl
.constexpr, BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr):
i_kv, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
NV = tl.cdiv(V, BV)
i_k = i_kv // NV
i_v = i_kv % NV
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTL, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, 0), (BK, BTS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (0,
i_v * BV), (BTS, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_o = tl.zeros([BTL, BV], dtype=tl.float32)
b_z = tl.zeros([BTL], dtype=tl.float32)
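    # ReBased linear attention: scores are squared dot products (q . k)^2;
    # b_z accumulates the per-query normalizer alongside the output.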
for _ in range(0, i_c * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = b_s * b_s
b_z += tl.sum(b_s, axis=1)
b_o = b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
p_k = tl.advance(p_k, (0, BTS))
p_v = tl.advance(p_v, (BTS, 0))
tl.debug_barrier()
o_q = tl.arange(0, BTL)
o_k = tl.arange(0, BTS)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, i_c * BTL), (BK, BTS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_c *
BTL, i_v * BV), (BTS, BV), (1, 0))
for _ in range(i_c * BTL, (i_c + 1) * BTL, BTS):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
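        # Causal masking inside the diagonal blocks: a query only attends
        # to keys at the same or an earlier position.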
m_s = o_q[:, None] >= o_k[None, :]
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = b_s * b_s
b_s = tl.where(m_s, b_s, 0)
b_z += tl.sum(b_s, axis=1)
b_o += tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
p_k = tl.advance(p_k, (0, BTS))
p_v = tl.advance(p_v, (BTS, 0))
o_k += BTS
p_o = tl.make_block_ptr(o + (i_bh + B * H * i_k) * s_v_h, (T, V), (
s_v_t, s_v_d), (i_c * BTL, i_v * BV), (BTL, BV), (1, 0))
p_z = z + (i_bh + B * H * i_k) * T + i_c * BTL + tl.arange(0, BTL)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_z, b_z.to(p_z.dtype.element_ty), mask=i_c * BTL + tl.arange(
0, BTL) < T)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rebased/parallel.py |
a2f2c846-80e3-4ee0-8813-8cc6f55128f3 | sparse_optimizer.py | huyz2023/2by4-pretrain | sparse/sparse_optimizer.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.autotune(configs=get_configs(), key=['m'])
@triton.jit
def _inverse(F_ptr, out_ptr, F_row_stride, out_row_stride, F_col_stride,
out_col_stride, F_page_stride, out_page_stride, m, BLOCK_SIZE: tl.constexpr
):
row_idx = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = row_idx < m
a11 = tl.load(F_ptr + row_idx * F_row_stride + 0 * F_col_stride + 0 *
F_page_stride, mask=mask)
a12 = tl.load(F_ptr + row_idx * F_row_stride + 0 * F_col_stride + 1 *
F_page_stride, mask=mask)
a13 = tl.load(F_ptr + row_idx * F_row_stride + 0 * F_col_stride + 2 *
F_page_stride, mask=mask)
a14 = tl.load(F_ptr + row_idx * F_row_stride + 0 * F_col_stride + 3 *
F_page_stride, mask=mask)
a21 = tl.load(F_ptr + row_idx * F_row_stride + 1 * F_col_stride + 0 *
F_page_stride, mask=mask)
a22 = tl.load(F_ptr + row_idx * F_row_stride + 1 * F_col_stride + 1 *
F_page_stride, mask=mask)
a23 = tl.load(F_ptr + row_idx * F_row_stride + 1 * F_col_stride + 2 *
F_page_stride, mask=mask)
a24 = tl.load(F_ptr + row_idx * F_row_stride + 1 * F_col_stride + 3 *
F_page_stride, mask=mask)
a31 = tl.load(F_ptr + row_idx * F_row_stride + 2 * F_col_stride + 0 *
F_page_stride, mask=mask)
a32 = tl.load(F_ptr + row_idx * F_row_stride + 2 * F_col_stride + 1 *
F_page_stride, mask=mask)
a33 = tl.load(F_ptr + row_idx * F_row_stride + 2 * F_col_stride + 2 *
F_page_stride, mask=mask)
a34 = tl.load(F_ptr + row_idx * F_row_stride + 2 * F_col_stride + 3 *
F_page_stride, mask=mask)
a41 = tl.load(F_ptr + row_idx * F_row_stride + 3 * F_col_stride + 0 *
F_page_stride, mask=mask)
a42 = tl.load(F_ptr + row_idx * F_row_stride + 3 * F_col_stride + 1 *
F_page_stride, mask=mask)
a43 = tl.load(F_ptr + row_idx * F_row_stride + 3 * F_col_stride + 2 *
F_page_stride, mask=mask)
a44 = tl.load(F_ptr + row_idx * F_row_stride + 3 * F_col_stride + 3 *
F_page_stride, mask=mask)
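    # Batched closed-form 4x4 inverse: Leibniz-expansion determinant, then the
    # transposed cofactor (adjugate) matrix divided by det.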
det = (a11 * a22 * a33 * a44 - a12 * a23 * a34 * a41 + a13 * a24 * a31 *
a42 - a14 * a21 * a32 * a43 + a14 * a23 * a32 * a41 - a11 * a24 *
a33 * a42 + a12 * a21 * a34 * a43 - a13 * a22 * a31 * a44 + a12 *
a23 * a31 * a44 - a13 * a21 * a34 * a42 + a11 * a24 * a32 * a43 -
a14 * a22 * a33 * a41 + a14 * a21 * a33 * a42 - a12 * a24 * a31 *
a43 + a13 * a22 * a34 * a41 - a11 * a23 * a32 * a44 + a13 * a21 *
a32 * a44 - a11 * a22 * a34 * a43 + a12 * a24 * a33 * a41 - a14 *
a23 * a31 * a42 + a14 * a22 * a31 * a43 - a13 * a24 * a32 * a41 +
a11 * a23 * a34 * a42 - a12 * a21 * a33 * a44)
c11 = (a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a24 * a33 *
a42 - a23 * a32 * a44 - a22 * a34 * a43)
c12 = -(a21 * a33 * a44 + a23 * a34 * a41 + a24 * a31 * a43 - a24 * a33 *
a41 - a23 * a31 * a44 - a21 * a34 * a43)
c13 = (a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a24 * a32 *
a41 - a22 * a31 * a44 - a21 * a34 * a42)
c14 = -(a21 * a32 * a43 + a22 * a33 * a41 + a23 * a31 * a42 - a23 * a32 *
a41 - a22 * a31 * a43 - a21 * a33 * a42)
c21 = -(a12 * a33 * a44 + a13 * a34 * a42 + a14 * a32 * a43 - a14 * a33 *
a42 - a13 * a32 * a44 - a12 * a34 * a43)
c22 = (a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a14 * a33 *
a41 - a13 * a31 * a44 - a11 * a34 * a43)
c23 = -(a11 * a32 * a44 + a12 * a34 * a41 + a14 * a31 * a42 - a14 * a32 *
a41 - a12 * a31 * a44 - a11 * a34 * a42)
c24 = (a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a13 * a32 *
a41 - a12 * a31 * a43 - a11 * a33 * a42)
c31 = (a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a14 * a23 *
a42 - a13 * a22 * a44 - a12 * a24 * a43)
c32 = -(a11 * a23 * a44 + a13 * a24 * a41 + a14 * a21 * a43 - a14 * a23 *
a41 - a13 * a21 * a44 - a11 * a24 * a43)
c33 = (a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a14 * a22 *
a41 - a12 * a21 * a44 - a11 * a24 * a42)
c34 = -(a11 * a22 * a43 + a12 * a23 * a41 + a13 * a21 * a42 - a13 * a22 *
a41 - a12 * a21 * a43 - a11 * a23 * a42)
c41 = -(a12 * a23 * a34 + a13 * a24 * a32 + a14 * a22 * a33 - a14 * a23 *
a32 - a13 * a22 * a34 - a12 * a24 * a33)
c42 = (a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a14 * a23 *
a31 - a13 * a21 * a34 - a11 * a24 * a33)
c43 = -(a11 * a22 * a34 + a12 * a24 * a31 + a14 * a21 * a32 - a14 * a22 *
a31 - a12 * a21 * a34 - a11 * a24 * a32)
c44 = (a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a13 * a22 *
a31 - a12 * a21 * a33 - a11 * a23 * a32)
tl.store(out_ptr + row_idx * out_row_stride + 0 * out_col_stride + 0 *
out_page_stride, c11 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 0 * out_col_stride + 1 *
out_page_stride, c21 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 0 * out_col_stride + 2 *
out_page_stride, c31 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 0 * out_col_stride + 3 *
out_page_stride, c41 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 1 * out_col_stride + 0 *
out_page_stride, c12 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 1 * out_col_stride + 1 *
out_page_stride, c22 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 1 * out_col_stride + 2 *
out_page_stride, c32 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 1 * out_col_stride + 3 *
out_page_stride, c42 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 2 * out_col_stride + 0 *
out_page_stride, c13 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 2 * out_col_stride + 1 *
out_page_stride, c23 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 2 * out_col_stride + 2 *
out_page_stride, c33 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 2 * out_col_stride + 3 *
out_page_stride, c43 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 3 * out_col_stride + 0 *
out_page_stride, c14 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 3 * out_col_stride + 1 *
out_page_stride, c24 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 3 * out_col_stride + 2 *
out_page_stride, c34 / det, mask=mask)
tl.store(out_ptr + row_idx * out_row_stride + 3 * out_col_stride + 3 *
out_page_stride, c44 / det, mask=mask)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/sparse_optimizer.py |
464215f1-3063-4a65-8777-6e5fd10319d0 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['dh0'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4)], key=['BK', 'BV'])
@triton.jit
def fused_recurrent_rwkv6_bwd_kernel_dkv(q, k, v, w, u, do, dk, dk1, dv,
dh0, offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
REVERSE: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, USE_OFFSETS: tl
.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.
int64), tl.program_id(2).to(tl.int64)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
o_k = i_k * BK + tl.arange(0, BK)
o_v = i_v * BV + tl.arange(0, BV)
if HEAD_FIRST:
p_q = q + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + o_k
p_k = k + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + o_k
p_v = v + i_nh * T * V + ((T - 1) * V if not REVERSE else 0) + o_v
p_w = w + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + o_k
p_do = do + i_nh * T * V + ((T - 1) * V if not REVERSE else 0) + o_v
p_dk = dk + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if not
REVERSE else 0) + o_k
p_dk1 = dk1 + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if not
REVERSE else 0) + o_k
p_dv = dv + (i_k * B * H + i_nh) * T * V + ((T - 1) * V if not
REVERSE else 0) + o_v
else:
p_q = q + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + o_k
p_k = k + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + o_k
p_v = v + (bos + (T - 1 if not REVERSE else 0)) * H * V + i_h * V + o_v
p_w = w + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + o_k
p_do = do + (bos + (T - 1 if not REVERSE else 0)
) * H * V + i_h * V + o_v
p_dk = dk + (i_v * all + bos + (T - 1 if not REVERSE else 0)
) * H * K + i_h * K + o_k
p_dk1 = dk1 + (i_v * all + bos + (T - 1 if not REVERSE else 0)
) * H * K + i_h * K + o_k
p_dv = dv + (i_k * all + bos + (T - 1 if not REVERSE else 0)
) * H * V + i_h * V + o_v
p_u = u + i_h * K + o_k
mask_k = o_k < K
mask_v = o_v < V
mask_h = mask_k[:, None] & mask_v[None, :]
b_u = tl.load(p_u, mask=mask_k, other=0).to(tl.float32)
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
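    # Run the recurrence backward in time, carrying the state gradient b_dh;
    # the decay exp(w) is applied between steps.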
for _ in range(T - 1, -1, -1):
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_w = tl.load(p_w, mask=mask_k, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
b_dkv = b_q[:, None] * b_do[None, :]
b_dk = tl.sum(b_dh * b_v[None, :], 1)
tl.store(p_dk1, b_dk.to(p_dk1.dtype.element_ty), mask=mask_k)
b_dk += tl.sum(b_dkv * b_u[:, None] * b_v[None, :], 1)
b_dv = tl.sum((b_dh + b_dkv * b_u[:, None]) * b_k[:, None], 0)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), mask=mask_k)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), mask=mask_v)
b_dh *= tl.exp(b_w)[:, None]
b_dh += b_dkv
p_q += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_k += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_v += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_w += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_do += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_dk += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_dk1 += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_dv += (-1 if not REVERSE else 1) * (1 if HEAD_FIRST else H) * V
if USE_INITIAL_STATE:
p_dh0 = dh0 + i_nh * K * V + o_k[:, None] * V + o_v[None, :]
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/fused_recurrent.py |
9233b706-1ad9-479f-95dd-2f0d42a6e5fc | flash_4.py | LitingLin/LoRAT | trackit/runner/evaluation/distributed/tracker_evaluator/components/segmentation/segment_anything_fast/flash_4.py | d7515a51174b037f122ce4ac6c56d668b0ee152b | 0 | @triton.jit
def _fwd_kernel_aligned(Q, K, V, B0, sm_scale, Out, stride_qh, stride_qm,
stride_qk, stride_kh, stride_kn, stride_kk, stride_vh, stride_vk,
stride_vn, stride_oh, stride_om, stride_on, stride_b0h, stride_b0m, Z,
H, N_CTX, P_SEQ, OUT_DTYPE: tl.constexpr, BIAS_LAST_SIZE: tl.constexpr,
B0_NUMEL: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
q_offset = off_hz * stride_qh
kv_offset = off_hz * stride_kh
Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + kv_offset, shape=(BLOCK_DMODEL,
N_CTX + P_SEQ), strides=(stride_kk, stride_kn), offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + kv_offset, shape=(N_CTX +
P_SEQ, BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0
), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale * 1.44269504
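    # 1.44269504 = log2(e): folding it into the scale lets exp2/log2 replace
    # exp/log throughout the softmax.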
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(OUT_DTYPE)
lo = 0
hi = N_CTX + P_SEQ
b_ptr_offsets_m = tl.arange(0, BLOCK_M)
b_offset = off_hz * stride_b0h
b_ptr_offsets_n_1 = tl.arange(0, BLOCK_N) % BIAS_LAST_SIZE + BIAS_LAST_SIZE
b1 = tl.load(B0 + b_offset + ((start_m * BLOCK_M + b_ptr_offsets_m) *
stride_b0m)[:, None] + b_ptr_offsets_n_1[None, :])
for start_n in range(lo, hi, BLOCK_N):
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=OUT_DTYPE)
qk += tl.dot(q, k)
b0 = tl.load(B0 + b_offset + ((start_m * BLOCK_M + b_ptr_offsets_m) *
stride_b0m)[:, None] + start_n // BLOCK_N)
qk += (b0 + b1) * 1.44269504
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
acc *= alpha[:, None]
acc += tl.dot(p.to(OUT_DTYPE), v)
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
acc = acc / l_i[:, None]
O_block_ptr = tl.make_block_ptr(base=Out + q_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
tl.store(O_block_ptr, acc.to(OUT_DTYPE))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/LitingLin/LoRAT/blob/d7515a51174b037f122ce4ac6c56d668b0ee152b/trackit/runner/evaluation/distributed/tracker_evaluator/components/segmentation/segment_anything_fast/flash_4.py |
f3027f3b-527d-4872-925c-33213b5e854d | outer_softmax_online.py | iclementine/optimize_softmax | outer_softmax_online.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online(output_ptr, input_ptr, M, N, K, TILE_N: tl.
constexpr, TILE_K: tl.constexpr):
pid_k = tl.program_id(0)
pid_m = tl.program_id(1)
k_offsets = pid_k * TILE_K + tl.arange(0, TILE_K)
m = tl.full([TILE_N, TILE_K], value=float('-inf'), dtype=tl.float32)
z = tl.full([TILE_N, TILE_K], value=0.0, dtype=tl.float32)
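    # Online softmax along the N axis of an (M, N, K) tensor; each program
    # owns a (TILE_N, TILE_K) tile of one m-slice.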
for start_n in range(0, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offsets = pid_m * N * K + n_offsets[:, None] * K + k_offsets
mask = (n_offsets[:, None] < N) & (k_offsets < K)
inp = tl.load(input_ptr + offsets, mask=mask, other=-float('inf'))
m_new = tl.maximum(m, inp)
alpha = tl.exp(m - m_new)
z = z * alpha + tl.exp(inp - m_new)
m = m_new
m_reduced = tl.max(m, 0)
z = tl.sum(z * tl.exp(m - m_reduced[None, :]), 0)
m = m_reduced
previous_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, N, TILE_N):
n_offsets = previous_multiple - start_n + tl.arange(0, TILE_N)
offsets = pid_m * N * K + n_offsets[:, None] * K + k_offsets
mask = (n_offsets[:, None] < N) & (k_offsets[None, :] < K)
inp = tl.load(input_ptr + offsets, mask=mask, other=-float('inf'))
o = tl.exp(inp - m[None, :]) / z[None, :]
tl.store(output_ptr + offsets, o, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/outer_softmax_online.py |
df855118-c357-4a08-a639-b101eb791b9c | fused_recurrent.py | sustcsonglin/hope-fla | fla/ops/hope/fused_recurrent.py | 0750c9a9a360fb72236dfaaaf21496959c5ef48d | 0 | @triton.jit
def fused_recurrent_bwd_kernel(q, k, k_l2, dq, dk, dk_l2, dk_l2_partial_fwd,
dk_l2_partial_bwd, dq_reflected, dk_reflected, T, D: tl.constexpr, BK:
tl.constexpr):
i_b, i_h = tl.program_id(0), tl.program_id(1)
d_h = tl.zeros([BK, BK], dtype=tl.float32)
offset = i_b * T * D + i_h * BK + tl.arange(0, BK) + (T - 1) * D
p_q = q + offset
p_k = k + offset
p_k_l2 = k_l2 + offset
p_dq_reflected = dq_reflected + offset
p_dk_reflected = dk_reflected + offset
p_dq = dq + offset
p_dk = dk + offset
p_dk_l2 = dk_l2 + offset
p_dk_l2_partial_fwd = dk_l2_partial_fwd + offset
p_dk_l2_partial_bwd = dk_l2_partial_bwd + offset
for _ in range(T):
b_q = tl.load(p_q).to(tl.float32)
b_k = tl.load(p_k).to(tl.float32)
b_k_l2 = tl.load(p_k_l2).to(tl.float32)
b_dq_reflected = tl.load(p_dq_reflected).to(tl.float32)
b_dk_reflected = tl.load(p_dk_reflected).to(tl.float32)
d_h += b_q[None, :] * b_dq_reflected[:, None] + b_k[None, :
] * b_dk_reflected[:, None]
b_dk_l2_partial_fwd = tl.load(p_dk_l2_partial_fwd).to(tl.float32)
b_dk_l2 = -2 * tl.sum(b_dk_l2_partial_fwd[:, None] * d_h, axis=0)
tl.store(p_dk_l2, b_dk_l2.to(p_dk_l2.dtype.element_ty))
b_dk_l2_partial = tl.sum(d_h * b_k_l2[None, :], axis=1)
d_h -= 2 * b_k_l2[None, :] * b_dk_l2_partial[:, None]
tl.store(p_dk_l2_partial_bwd, b_dk_l2_partial.to(
p_dk_l2_partial_bwd.dtype.element_ty))
p_dq_reflected -= D
p_dk_reflected -= D
p_q -= D
p_k -= D
p_k_l2 -= D
p_dk_l2_partial_fwd -= D
p_dk_l2_partial_bwd -= D
p_dk_l2 -= D
tl.debug_barrier()
offset = i_b * T * D + i_h * BK + tl.arange(0, BK)
p_q = q + offset
p_k = k + offset
p_k_l2 = k_l2 + offset
p_dq_reflected = dq_reflected + offset
p_dk_reflected = dk_reflected + offset
p_dq = dq + offset
p_dk = dk + offset
p_dk_l2 = dk_l2 + offset
p_dk_l2_partial_bwd = dk_l2_partial_bwd + offset
h = tl.zeros([BK, BK], dtype=tl.float32) + (tl.arange(0, BK)[:, None] ==
tl.arange(0, BK)[None, :])
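    # h starts as the identity and accumulates reflections h <- h @ (I - 2*v*v^T)
    # with v = k_l2 (Householder reflections, assuming the k_l2 rows are unit-norm).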
for _ in range(T):
b_k_l2 = tl.load(p_k_l2).to(tl.float32)
b_dk_l2_partial = tl.load(p_dk_l2_partial_bwd).to(tl.float32)
b_dk_l2 = -2 * tl.sum(h * b_dk_l2_partial[:, None], axis=0)
b_dk_l2 += tl.load(p_dk_l2)
tl.store(p_dk_l2, b_dk_l2.to(p_dk_l2.dtype.element_ty))
tmp = tl.sum(h * b_k_l2[None, :], axis=1)
h -= 2 * b_k_l2[None, :] * tmp[:, None]
b_dq_reflected = tl.load(p_dq_reflected).to(tl.float32)
b_dk_reflected = tl.load(p_dk_reflected).to(tl.float32)
b_dq = tl.sum(b_dq_reflected[:, None] * h, axis=0)
b_dk = tl.sum(b_dk_reflected[:, None] * h, axis=0)
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty))
p_q += D
p_k += D
p_k_l2 += D
p_dq_reflected += D
p_dk_reflected += D
p_dk_l2 += D
p_dk_l2_partial_bwd += D
p_dq += D
p_dk += D
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/hope-fla/blob/0750c9a9a360fb72236dfaaaf21496959c5ef48d/fla/ops/hope/fused_recurrent.py |
2f55269a-e5f4-4bb7-88a8-6d4fb70c6ff0 | rope.py | dame-cell/Triformer | triformer/rope.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.heuristics({'BACKWARD_PASS': lambda args: bool(args['BACKWARD_PASS'])})
@triton.jit
def _rope_embedding(Q, Q_row_stride, cos, cos_row_stride, sin,
sin_row_stride, seqlen, head_dim: tl.constexpr, n_heads: tl.constexpr,
BACKWARD_PASS: tl.constexpr, BLOCK_SIZE: tl.constexpr):
"""
Calculates the RoPE Embedding quickly
RoPE is Q * cos + rotate_half(Q) * sin
See our blog post for more info
"""
ROPE_GROUP_SIZE = 4
row_position = tl.program_id(0)
group_head_position = tl.program_id(1)
col_offsets = tl.arange(0, BLOCK_SIZE)
half_head_dim = head_dim // 2
mask = col_offsets < half_head_dim
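    # Rotate each head's two halves: q1' = q1*cos - q2*sin, q2' = q2*cos + q1*sin,
    # writing the result back into Q in place.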
sin1 = tl.load(sin + row_position % seqlen * sin_row_stride +
half_head_dim * 0 + col_offsets, mask=mask, other=0)
cos1 = tl.load(cos + row_position % seqlen * cos_row_stride +
half_head_dim * 0 + col_offsets, mask=mask, other=0)
if BACKWARD_PASS:
sin1 = -sin1
pass
head_start = group_head_position * ROPE_GROUP_SIZE
head_end = min(head_start + ROPE_GROUP_SIZE, n_heads)
for k in range(head_start, head_end):
offs_q1 = row_position * Q_row_stride + k * head_dim + col_offsets
offs_q2 = (row_position * Q_row_stride + k * head_dim + col_offsets +
half_head_dim)
Q1 = tl.load(Q + offs_q1, mask=mask, other=0).to(sin1.dtype)
Q2 = tl.load(Q + offs_q2, mask=mask, other=0).to(sin1.dtype)
tl.store(Q + offs_q1, Q1 * cos1 - Q2 * sin1, mask=mask)
tl.store(Q + offs_q2, Q2 * cos1 + Q1 * sin1, mask=mask)
pass
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/rope.py |
0556f753-1a94-4e65-a541-a0d3c5e13c41 | cumsum.py | sustcsonglin/flash-linear-attention | fla/ops/utils/cumsum.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BT': 16}, num_warps=2), triton.
Config({'BT': 32}, num_warps=4), triton.Config({'BT': 32}, num_warps=2),
triton.Config({'BT': 64}, num_warps=8), triton.Config({'BT': 64},
num_warps=4)], key=[])
@triton.jit
def chunk_global_reversed_cumsum_scalar_kernel(s, o, offsets, T: tl.
constexpr, H: tl.constexpr, BT: tl.constexpr, HEAD_FIRST: tl.constexpr,
USE_OFFSETS: tl.constexpr):
i_bh = tl.program_id(0)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_b).to(tl.int32), tl.load(offsets +
i_b + 1).to(tl.int32)
else:
bos, eos = i_b * T, i_b * T + T
T = eos - bos
b_z = tl.zeros([], dtype=tl.float32)
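    # Reversed cumsum via suffix totals: processing chunks back-to-front,
    # o_i = x_i - cumsum_i + b_z equals the sum of all x_j with j >= i.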
for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
if HEAD_FIRST:
p_s = tl.make_block_ptr(s + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_o = tl.make_block_ptr(o + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
else:
p_s = tl.make_block_ptr(s + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_o = tl.make_block_ptr(o + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
b_s = tl.load(p_s, boundary_check=(0,)).to(tl.float32)
b_zz = tl.sum(b_s, axis=0)
b_z += b_zz
b_o = b_s - tl.cumsum(b_s, axis=0) + b_z[None]
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0,))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py |
11947c31-82e7-420d-82ef-4b852131022a | rmsnorm.py | ardywibowo/triton-mode | kernels/rmsnorm.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_rmsnorm_forward(Y_ptr, Y_row_stride, X_ptr, X_row_stride, W_ptr,
RSTD_ptr, RSTD_row_stride, n_cols, eps, offset, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
Y_ptr += row_idx * Y_row_stride
X_ptr += row_idx * X_row_stride
RSTD_ptr += row_idx * RSTD_row_stride
X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0)
X_row_dtype = X_row.dtype
W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0)
X_row = X_row.to(tl.float32)
mean_square = tl.sum(X_row * X_row, axis=0) / n_cols
rstd = tl.libdevice.rsqrt(mean_square + eps)
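    # RMSNorm: y = x * rsqrt(mean(x^2) + eps) * (offset + W); the offset term
    # allows, e.g., Gemma-style (1 + w) weights.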
tl.store(RSTD_ptr, rstd)
X_row = X_row * rstd
X_row = X_row.to(X_row_dtype)
Y_row = X_row * (offset + W_row)
tl.store(Y_ptr + col_offsets, Y_row, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/rmsnorm.py |
c11fd5e3-13e4-4c89-b431-45f0b4f5fe6c | linear.py | neuro-ml/kerops | kerops/kernels/linear.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _ReLULinearAddBackward(input_ptr, grad_ptr, input_grad_ptr, weight_ptr,
weight_grad_ptr, numel_no_channels, in_channels: tl.constexpr,
out_channels: tl.constexpr, D_block: tl.constexpr, _ILP: tl.constexpr):
pid = tl.program_id(0)
input_ptr += pid * _ILP * in_channels * D_block
grad_ptr += pid * _ILP * out_channels * D_block
input_grad_ptr += pid * _ILP * in_channels * D_block
weight_grad_ptr += pid * in_channels * out_channels
in_channels_offset = tl.arange(0, in_channels)
out_channels_offset = tl.arange(0, out_channels)
d_offset = tl.arange(0, D_block)
input_offset = d_offset[:, None] * in_channels + in_channels_offset[None, :
]
output_offset = d_offset[:, None] * out_channels + out_channels_offset[
None, :]
weight_offset = out_channels_offset[:, None] + in_channels_offset[None, :
] * out_channels
weight_grad_offset = in_channels_offset[:, None
] * out_channels + out_channels_offset[None, :]
weight = tl.load(weight_ptr + weight_offset)
weight_grad = tl.zeros([in_channels, out_channels], dtype=tl.float32)
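    # dW accumulates relu(x)^T @ dy over the _ILP sub-blocks; dx is dy @ W^T
    # gated by the ReLU derivative (x > 0).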
for i in tl.static_range(0, _ILP):
mask = d_offset[:, None] < numel_no_channels - (pid * _ILP + i
) * D_block
input = tl.load(input_ptr + input_offset, mask=mask, other=0.0)
grad = tl.load(grad_ptr + output_offset, mask=mask, other=0.0)
weight_grad += tl.dot(tl.trans(tl.maximum(input, 0.0).to(tl.float16
)), grad, out_dtype=tl.float32, allow_tf32=True)
input_grad = tl.dot(grad, weight, out_dtype=tl.float32, allow_tf32=True
).to(tl.float16) * (input > 0)
tl.store(input_grad_ptr + input_offset, input_grad, mask=mask)
grad_ptr += out_channels * D_block
input_grad_ptr += in_channels * D_block
input_ptr += in_channels * D_block
tl.store(weight_grad_ptr + weight_grad_offset, weight_grad)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Backpropagation",
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/linear.py |
ffe33cc0-6fd8-45d2-bdf4-eedd126fdd10 | flash_attention_fwd_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/flash_attention_fwd_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, stride_qz: tl.constexpr, stride_qh:
tl.constexpr, stride_qm: tl.constexpr, stride_qk: tl.constexpr,
stride_kz: tl.constexpr, stride_kh: tl.constexpr, stride_kn: tl.
constexpr, stride_kk: tl.constexpr, stride_vz: tl.constexpr, stride_vh:
tl.constexpr, stride_vk: tl.constexpr, stride_vn: tl.constexpr,
stride_oz: tl.constexpr, stride_oh: tl.constexpr, stride_om: tl.
constexpr, stride_on: tl.constexpr, Z: tl.constexpr, H: tl.constexpr,
N_CTX: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr, STAGE: tl.constexpr):
start_m = tl.program_id(2)
off_z = tl.program_id(0)
off_h = tl.program_id(1)
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64
) * stride_qh
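    # Short-context special case below: the grid axes are remapped so the
    # query-block index becomes the fastest-varying program id.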
if N_CTX <= 512:
start_m = tl.program_id(0)
off_z = tl.program_id(2)
qvk_offset = off_z.to(tl.int64) * stride_qh
Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=(
BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0
), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale
qk_scale *= 1.44269504
q = tl.load(Q_block_ptr)
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr,
V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N,
4 - STAGE, offs_m, offs_n, N_CTX)
if STAGE & 2:
tl.debug_barrier()
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr,
V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N,
2, offs_m, offs_n, N_CTX)
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/flash_attention_fwd_benchmark.py |
6c2b085e-9a9b-4d4a-8365-f732191bf5c6 | y_5.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_5.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def fifth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size:
tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr,
col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
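    # Hard-coded coefficients of the degree-5 (l = 5) real spherical harmonics,
    # evaluated directly from Cartesian monomials.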
CONST000 = 1.73430461568895
CONST001 = 2.32681380862329
CONST002 = 1.60565407233314
CONST003 = 3.21130814466628
CONST004 = 3.3166247903554
CONST005 = 6.21867148191637
CONST006 = 6.21867148191637
CONST007 = 1.60565407233314
CONST009 = 11.6340690431164
CONST010 = 12.8452325786651
CONST011 = 12.4373429638327
CONST012 = 12.8452325786651
CONST013 = 13.8744369255116
CONST017 = 33.9852909359329
CONST018 = 7.35803132638072
CONST020 = -44.1481879582843
CONST021 = -41.6233107765348
CONST022 = -29.4321253055229
CONST023 = -23.2681380862329
CONST024 = -19.2678488679977
CONST025 = -19.2678488679977
CONST026 = -16.9926454679664
CONST027 = -16.9926454679664
CONST028 = -13.8744369255116
CONST029 = -16.583123951777
CONST030 = 3.4686092313779
CONST031 = -8.49632273398321
CONST032 = -5.20291384706685
CONST033 = -3.4686092313779
CONST034 = -1.73430461568895
VAR05 = x * x * x * x * x
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR14 = y * y * y * y * y
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR23 = z * z * z * z * z
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
Y00 = CONST001 * VAR05 + CONST009 * VAR24 * x + CONST023 * VAR07 * VAR26
Y01 = y * (CONST022 * VAR07 * z - CONST022 * VAR25 * x)
Y02 = CONST000 * VAR05 + VAR07 * (CONST028 * VAR17 + CONST033 * VAR26
) + x * (-CONST021 * VAR17 * VAR26 + CONST032 * VAR24)
Y03 = CONST027 * VAR07 * y * z + x * (CONST017 * VAR16 * z + CONST026 *
VAR25 * y)
Y04 = CONST002 * VAR05 + VAR07 * (CONST003 * VAR26 + CONST025 * VAR17
) + x * (CONST002 * VAR24 + CONST010 * VAR15 + CONST024 * VAR17 * VAR26
)
Y05 = CONST004 * VAR14 + VAR16 * (CONST029 * VAR08 + CONST029 * VAR26
) + y * (CONST005 * VAR06 + CONST006 * VAR24 + CONST011 * VAR08 * VAR26
)
Y06 = CONST002 * VAR23 + VAR25 * (CONST003 * VAR08 + CONST024 * VAR17
) + z * (CONST007 * VAR06 + CONST012 * VAR15 + CONST024 * VAR08 * VAR17
)
Y07 = VAR16 * (CONST026 * VAR08 - CONST026 * VAR26) + y * (-CONST031 *
VAR06 + CONST031 * VAR24)
Y08 = CONST034 * VAR23 + VAR25 * (CONST013 * VAR17 + CONST030 * VAR08
) + z * (CONST021 * VAR08 * VAR17 - CONST032 * VAR06)
Y09 = y * (CONST018 * VAR06 + CONST018 * VAR24 + CONST020 * VAR08 * VAR26)
Y10 = CONST001 * VAR23 + CONST009 * VAR06 * z + CONST023 * VAR08 * VAR25
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
tl.store(output_ptr + output_row_offset + 9, Y09, mask=
output_row_offset + 9 < output_numel)
tl.store(output_ptr + output_row_offset + 10, Y10, mask=
output_row_offset + 10 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_5.py |
a0b42e56-4bd9-44e8-83f0-ca330317f885 | fp8_matmul.py | drisspg/transformer_nuggets | transformer_nuggets/fp8/fp8_matmul.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def load_scales(a_scale_ptr, b_scale_ptr, ROW_WISE_SCALING: tl.constexpr):
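    # With row-wise scaling the per-row scales are gathered later, so return the
    # raw pointers; otherwise load the two scalar scales once here.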
if ROW_WISE_SCALING:
return a_scale_ptr, b_scale_ptr
else:
a_scale = tl.load(a_scale_ptr)
b_scale = tl.load(b_scale_ptr)
return a_scale, b_scale
| {
"Data Type": [
"fp32"
],
"Functionality": [],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/fp8_matmul.py |