Dataset schema (from the dataset-viewer header; ranges are min..max):
uuid: stringlengths 36..36
file_name: stringlengths 5..50
repo_name: stringclasses, 110 values
file_path: stringlengths 7..112
commit_hash: stringclasses, 110 values
starcount: int64 0..0
input: stringlengths 39..33.8k
category: dict
licenses: sequencelengths 1..2
github_url: stringlengths 94..193
a0a75d1e-5e2f-47cc-b051-e6ae5e19ac3a
normalization.py
rosinality/halite
src/halite/nn/normalization.py
0653355c3dac8cfa80d66ec5a82c202c49c64205
0
@triton.autotune(configs=[
    triton.Config({}, num_warps=1),
    triton.Config({}, num_warps=2),
    triton.Config({}, num_warps=4),
    triton.Config({}, num_warps=8),
    triton.Config({}, num_warps=16),
    triton.Config({}, num_warps=32),
], key=['N'])
@triton.jit
def _rms_norm_bwd_kernel_sm(X, stride_x, W, DY, stride_dy, DX, stride_dx,
                            Rstd, DW, eps, M, N, rows_per_program,
                            block_N: tl.constexpr):
    # Persistent kernel: each program walks `rows_per_program` rows and
    # accumulates a partial dW, stored per program for a host-side reduction.
    row_block_id = tl.program_id(0)
    row_start = row_block_id * rows_per_program
    cols = tl.arange(0, block_N)
    mask = cols < N
    w = tl.load(W + cols, mask=mask, other=0.0).to(tl.float32)
    dw = tl.zeros((block_N,), dtype=tl.float32)
    row_end = min(row_start + rows_per_program, M)
    for row in range(row_start, row_end):
        x = tl.load(X + row * stride_x + cols, mask=mask, other=0.0).to(tl.float32)
        dy = tl.load(DY + row * stride_dy + cols, mask=mask, other=0.0).to(tl.float32)
        rstd = tl.load(Rstd + row)
        x_hat = x * rstd
        wdy = w * dy
        dw += dy * x_hat
        c1 = tl.sum(x_hat * wdy, axis=0) / N
        dx = (wdy - x_hat * c1) * rstd
        tl.store(DX + row * stride_dx + cols, dx, mask=mask)
    tl.store(DW + row_block_id * N + cols, dw, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/rosinality/halite/blob/0653355c3dac8cfa80d66ec5a82c202c49c64205/src/halite/nn/normalization.py
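A minimal host-side launch sketch for _rms_norm_bwd_kernel_sm above. The shapes, the one-program-per-SM heuristic, and the final reduction of the per-program dW partials are illustrative assumptions, not code from the repository:

import torch
import triton

M, N, eps = 4096, 1024, 1e-6
x = torch.randn(M, N, device='cuda')
w = torch.randn(N, device='cuda')
dy = torch.randn(M, N, device='cuda')
# Rstd is assumed to be saved from the forward pass.
rstd = torch.rsqrt(x.float().pow(2).mean(dim=1) + eps)
dx = torch.empty_like(x)
num_programs = torch.cuda.get_device_properties(0).multi_processor_count
rows_per_program = triton.cdiv(M, num_programs)
dw_partial = torch.empty(num_programs, N, device='cuda', dtype=torch.float32)
_rms_norm_bwd_kernel_sm[(num_programs,)](
    x, x.stride(0), w, dy, dy.stride(0), dx, dx.stride(0),
    rstd, dw_partial, eps, M, N, rows_per_program,
    block_N=triton.next_power_of_2(N))
dw = dw_partial.sum(dim=0)  # finish the weight-gradient reduction on the host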
69397af9-0c6f-44c6-9f5b-a774c749f5b4
linear.py
ai-compiler-study/triton-kernels
triton_kernels/ops/linear.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit
def triton_linear(a_ptr, b_ptr, c_ptr, out_ptr, M, N, K, stride_am,
                  stride_ak, stride_bk, stride_bn, GROUP_M: tl.constexpr,
                  EVEN_K: tl.constexpr, ALLOW_TF32: tl.constexpr,
                  ACC_TYPE: tl.constexpr, B_PROLOGUE_CAST_TYPE: tl.constexpr,
                  BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
                  BLOCK_K: tl.constexpr):
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N
    # Grouped (swizzled) program ordering: GROUP_M row-tiles share a column
    # sweep, improving L2 reuse.
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + pid % group_size
    pid_n = pid % width // group_size
    offset_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offset_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    # Hint contiguity to the compiler when the operand is densely laid out.
    if stride_am == 1 and stride_ak == M or stride_am == K and stride_ak == 1:
        offset_am = tl.max_contiguous(tl.multiple_of(offset_m % M, BLOCK_M), BLOCK_M)
    else:
        offset_am = offset_m % M
    if stride_bk == 1 and stride_bn == K or stride_bk == N and stride_bn == 1:
        offset_bn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
    else:
        offset_bn = offset_n % N
    offset_k = tl.arange(0, BLOCK_K)
    a_ptrs = a_ptr + (offset_am[:, None] * stride_am + offset_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offset_k[:, None] * stride_bk + offset_bn[None, :] * stride_bn)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(a_ptrs)
            b = tl.load(b_ptrs)
        else:
            a = tl.load(a_ptrs, mask=offset_k[None, :] < k, other=0.0)
            b = tl.load(b_ptrs, mask=offset_k[:, None] < k, other=0.0)
        if B_PROLOGUE_CAST_TYPE is not None:
            b = b.to(B_PROLOGUE_CAST_TYPE)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        a_ptrs += BLOCK_K * stride_ak
        b_ptrs += BLOCK_K * stride_bk
    offset_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offset_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_m = offset_m[:, None]
    idx_n = offset_n[None, :]
    mask = (idx_m < M) & (idx_n < N)
    xindex = idx_n + N * idx_m
    # Epilogue: add a length-N bias row and store to a row-major output.
    c = tl.load(c_ptr + tl.broadcast_to(idx_n, mask.shape), mask,
                eviction_policy='evict_last').to(tl.float32)
    out = acc + c
    tl.store(out_ptr + tl.broadcast_to(xindex, mask.shape), out, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/linear.py
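A hypothetical launch for triton_linear above, sketching the epilogue contract (c_ptr is a length-N bias row; the output is assumed contiguous row-major, since the kernel takes no output strides). Block sizes and dtypes are assumptions:

import torch
import triton
import triton.language as tl

M, N, K = 256, 512, 128
a = torch.randn(M, K, device='cuda', dtype=torch.float16)
b = torch.randn(K, N, device='cuda', dtype=torch.float16)
bias = torch.randn(N, device='cuda', dtype=torch.float16)
out = torch.empty(M, N, device='cuda', dtype=torch.float16)
BLOCK_M, BLOCK_N, BLOCK_K, GROUP_M = 64, 64, 32, 8
grid = (triton.cdiv(M, BLOCK_M) * triton.cdiv(N, BLOCK_N),)
triton_linear[grid](
    a, b, bias, out, M, N, K,
    a.stride(0), a.stride(1), b.stride(0), b.stride(1),
    GROUP_M=GROUP_M, EVEN_K=K % BLOCK_K == 0, ALLOW_TF32=False,
    ACC_TYPE=tl.float32, B_PROLOGUE_CAST_TYPE=None,
    BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_K=BLOCK_K)
# out ~= a @ b + bias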
ec7504ed-22fc-448a-af45-338b981af454
slstm_bw.py
NX-AI/flashrnn
flashrnn/flashrnn/triton_fused/slstm_bw.py
3fca666a81c8740af4878d7bc5e2a51900e4fe14
0
@triton.jit
def _backward_sequence_kernel(delta_states_all_outside,
        delta_states_last_outside, R, states_all, gates_all,
        delta_states_initial, delta_Wx, delta_R, delta_b,
        T: tl.constexpr, NS: tl.constexpr, B: tl.constexpr,
        NH: tl.constexpr, DH: tl.constexpr, NGI: tl.constexpr,
        NGR: tl.constexpr, siz_B: tl.constexpr,
        DTYPE: tl.constexpr=tl.float32,
        backward_recurrent_clip_val: tl.constexpr=-1.0):
    # Grid: axis 0 = head (NH), axis 1 = batch block of siz_B rows.
    # State slots along NS are H=0, C=1, N=2, M=3.
    idx_b_NH, idx_b_B = tl.program_id(0), tl.program_id(1)
    str_matR_B = NH * NGR * DH * DH
    str_matR_NH = NGR * DH * DH
    str_matR_NGR = DH * DH
    str_matStatesAll_NH = (T + 1) * NS * B * DH
    str_matStatesAll_T = NS * B * DH
    str_matGatesAll_NH = T * NGI * B * DH
    str_matGatesAll_T = NGI * B * DH
    str_delta_states_all_outside_NH = T * NS * B * DH
    str_delta_states_all_outside_T = NS * B * DH
    str_matDeltaWx_NH = T * NGI * B * DH
    str_matDeltaWx_T = NGI * B * DH
    matDeltaHtrans_last_ptr = tl.make_block_ptr(
        base=delta_states_last_outside + idx_b_NH * NS * B * DH + 0 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    matDeltaH_tplus1 = tl.load(matDeltaHtrans_last_ptr).to(tl.float32)
    matDeltaCtrans_last_ptr = tl.make_block_ptr(
        base=delta_states_last_outside + idx_b_NH * NS * B * DH + 1 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    matDeltaC_tplus1 = tl.load(matDeltaCtrans_last_ptr).to(tl.float32)
    matDeltaNtrans_last_ptr = tl.make_block_ptr(
        base=delta_states_last_outside + idx_b_NH * NS * B * DH + 2 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    matDeltaN_tplus1 = tl.load(matDeltaNtrans_last_ptr).to(tl.float32)
    matR_i_ptr = tl.make_block_ptr(
        base=R + idx_b_NH * str_matR_NH + 0 * str_matR_NGR,
        shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    matR_i = tl.load(matR_i_ptr)
    matR_f_ptr = tl.make_block_ptr(
        base=R + idx_b_NH * str_matR_NH + 1 * str_matR_NGR,
        shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    matR_f = tl.load(matR_f_ptr)
    matR_z_ptr = tl.make_block_ptr(
        base=R + idx_b_NH * str_matR_NH + 2 * str_matR_NGR,
        shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    matR_z = tl.load(matR_z_ptr)
    matR_o_ptr = tl.make_block_ptr(
        base=R + idx_b_NH * str_matR_NH + 3 * str_matR_NGR,
        shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    matR_o = tl.load(matR_o_ptr)
    matDeltaR_i = tl.zeros((DH, DH), dtype=tl.float32)
    matDeltaR_f = tl.zeros((DH, DH), dtype=tl.float32)
    matDeltaR_z = tl.zeros((DH, DH), dtype=tl.float32)
    matDeltaR_o = tl.zeros((DH, DH), dtype=tl.float32)
    vecDeltaB_i = tl.zeros((DH,), dtype=tl.float32)
    vecDeltaB_f = tl.zeros((DH,), dtype=tl.float32)
    vecDeltaB_z = tl.zeros((DH,), dtype=tl.float32)
    vecDeltaB_o = tl.zeros((DH,), dtype=tl.float32)
    for idx_t in range(T - 1, -1, -1):
        matG_i_ptr = tl.make_block_ptr(
            base=gates_all + idx_b_NH * str_matGatesAll_NH +
            idx_t * str_matGatesAll_T + 0 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matG_ibar = tl.load(matG_i_ptr).to(tl.float32)
        matG_f_ptr = tl.make_block_ptr(
            base=gates_all + idx_b_NH * str_matGatesAll_NH +
            idx_t * str_matGatesAll_T + 1 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matG_fbar = tl.load(matG_f_ptr).to(tl.float32)
        matG_z_ptr = tl.make_block_ptr(
            base=gates_all + idx_b_NH * str_matGatesAll_NH +
            idx_t * str_matGatesAll_T + 2 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matG_z = tl.load(matG_z_ptr)
        matG_o_ptr = tl.make_block_ptr(
            base=gates_all + idx_b_NH * str_matGatesAll_NH +
            idx_t * str_matGatesAll_T + 3 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matG_o = tl.load(matG_o_ptr)
        matC_t_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            (idx_t + 1) * str_matStatesAll_T + 1 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matC_t = tl.load(matC_t_ptr)
        matN_t_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            (idx_t + 1) * str_matStatesAll_T + 2 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matN_t = tl.load(matN_t_ptr)
        matM_t_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            (idx_t + 1) * str_matStatesAll_T + 3 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matM_t = tl.load(matM_t_ptr).to(tl.float32)
        matH_tminus1_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            idx_t * str_matStatesAll_T + 0 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matH_tminus1 = tl.load(matH_tminus1_ptr)
        matC_tminus1_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            idx_t * str_matStatesAll_T + 1 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matC_tminus1 = tl.load(matC_tminus1_ptr)
        matN_tminus1_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            idx_t * str_matStatesAll_T + 2 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matN_tminus1 = tl.load(matN_tminus1_ptr)
        matM_tminus1_ptr = tl.make_block_ptr(
            base=states_all + idx_b_NH * str_matStatesAll_NH +
            idx_t * str_matStatesAll_T + 3 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matM_tminus1 = tl.load(matM_tminus1_ptr).to(tl.float32)
        matDeltaHtrans_out_t_ptr = tl.make_block_ptr(
            base=delta_states_all_outside +
            idx_b_NH * str_delta_states_all_outside_NH +
            idx_t * str_delta_states_all_outside_T + 0 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matDeltaHtrans_out_t = tl.load(matDeltaHtrans_out_t_ptr)
        matDeltaCtrans_out_t_ptr = tl.make_block_ptr(
            base=delta_states_all_outside +
            idx_b_NH * str_delta_states_all_outside_NH +
            idx_t * str_delta_states_all_outside_T + 1 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matDeltaCtrans_out_t = tl.load(matDeltaCtrans_out_t_ptr)
        # Delta-N lives in state slot 2 (H=0, C=1, N=2, M=3).
        matDeltaNtrans_out_t_ptr = tl.make_block_ptr(
            base=delta_states_all_outside +
            idx_b_NH * str_delta_states_all_outside_NH +
            idx_t * str_delta_states_all_outside_T + 2 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        matDeltaNtrans_out_t = tl.load(matDeltaNtrans_out_t_ptr)
        matDeltaH_t = matDeltaHtrans_out_t + matDeltaH_tplus1
        matDeltaC_t = matDeltaCtrans_out_t + matDeltaC_tplus1
        matDeltaN_t = matDeltaNtrans_out_t + matDeltaN_tplus1
        matDeltaC_t = matDeltaC_t + matDeltaH_t * (matG_o / matN_t)
        matDeltaN_t = matDeltaN_t - matDeltaH_t * (matG_o * matC_t /
            (matN_t * matN_t))
        matG_i = tl.exp(matG_ibar - matM_t)
        matG_logfplusm = matM_tminus1 + tl.log(tl.sigmoid(matG_fbar))
        matG_f = tl.exp(matG_logfplusm - matM_t)
        matDeltaGI = (matDeltaC_t * matG_z + matDeltaN_t) * matG_i
        matDeltaGF = (matDeltaC_t * matC_tminus1 + matDeltaN_t *
            matN_tminus1) * matG_f * tl.sigmoid(-matG_fbar)
        matDeltaGZ = matDeltaC_t * matG_i * (1 - matG_z * matG_z)
        matDeltaGO = matDeltaH_t * (matC_t / matN_t) * (1 - matG_o) * matG_o
        matDeltaC_tminus1 = matDeltaC_t * matG_f
        matDeltaN_tminus1 = matDeltaN_t * matG_f
        matDeltaH_tminus1 = tl.dot(matDeltaGI.to(DTYPE), matR_i)
        matDeltaH_tminus1 += tl.dot(matDeltaGF.to(DTYPE), matR_f)
        matDeltaH_tminus1 += tl.dot(matDeltaGZ.to(DTYPE), matR_z)
        matDeltaH_tminus1 += tl.dot(matDeltaGO.to(DTYPE), matR_o)
        matDeltaR_i += tl.dot(tl.trans(matDeltaGI.to(DTYPE)), matH_tminus1)
        matDeltaR_f += tl.dot(tl.trans(matDeltaGF.to(DTYPE)), matH_tminus1)
        matDeltaR_z += tl.dot(tl.trans(matDeltaGZ.to(DTYPE)), matH_tminus1)
        matDeltaR_o += tl.dot(tl.trans(matDeltaGO.to(DTYPE)), matH_tminus1)
        vecDeltaB_i += tl.sum(matDeltaGI, axis=0)
        vecDeltaB_f += tl.sum(matDeltaGF, axis=0)
        vecDeltaB_z += tl.sum(matDeltaGZ, axis=0)
        vecDeltaB_o += tl.sum(matDeltaGO, axis=0)
        matDeltaGI_ptr = tl.make_block_ptr(
            base=delta_Wx + idx_b_NH * str_matDeltaWx_NH +
            idx_t * str_matDeltaWx_T + 0 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        tl.store(matDeltaGI_ptr, matDeltaGI.to(DTYPE))
        matDeltaGF_ptr = tl.make_block_ptr(
            base=delta_Wx + idx_b_NH * str_matDeltaWx_NH +
            idx_t * str_matDeltaWx_T + 1 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        tl.store(matDeltaGF_ptr, matDeltaGF.to(DTYPE))
        matDeltaGZ_ptr = tl.make_block_ptr(
            base=delta_Wx + idx_b_NH * str_matDeltaWx_NH +
            idx_t * str_matDeltaWx_T + 2 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        tl.store(matDeltaGZ_ptr, matDeltaGZ.to(DTYPE))
        matDeltaGO_ptr = tl.make_block_ptr(
            base=delta_Wx + idx_b_NH * str_matDeltaWx_NH +
            idx_t * str_matDeltaWx_T + 3 * B * DH,
            shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
            block_shape=(siz_B, DH), order=(0, 1))
        tl.store(matDeltaGO_ptr, matDeltaGO.to(DTYPE))
        matDeltaH_tplus1 = matDeltaH_tminus1
        matDeltaC_tplus1 = matDeltaC_tminus1
        matDeltaN_tplus1 = matDeltaN_tminus1
    matDeltaHtrans_initial_ptr = tl.make_block_ptr(
        base=delta_states_initial + idx_b_NH * NS * B * DH + 0 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    tl.store(matDeltaHtrans_initial_ptr, matDeltaH_tplus1.to(DTYPE))
    matDeltaCtrans_initial_ptr = tl.make_block_ptr(
        base=delta_states_initial + idx_b_NH * NS * B * DH + 1 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    tl.store(matDeltaCtrans_initial_ptr, matDeltaC_tplus1.to(DTYPE))
    matDeltaNtrans_initial_ptr = tl.make_block_ptr(
        base=delta_states_initial + idx_b_NH * NS * B * DH + 2 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    tl.store(matDeltaNtrans_initial_ptr, matDeltaN_tplus1.to(DTYPE))
    matDeltaMtrans_initial_ptr = tl.make_block_ptr(
        base=delta_states_initial + idx_b_NH * NS * B * DH + 3 * B * DH,
        shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
        block_shape=(siz_B, DH), order=(0, 1))
    tl.store(matDeltaMtrans_initial_ptr, tl.zeros((siz_B, DH), dtype=DTYPE))
    matDeltaR_i_ptr = tl.make_block_ptr(
        base=delta_R + idx_b_B * str_matR_B + idx_b_NH * str_matR_NH +
        0 * str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    tl.store(matDeltaR_i_ptr, matDeltaR_i.to(DTYPE))
    matDeltaR_f_ptr = tl.make_block_ptr(
        base=delta_R + idx_b_B * str_matR_B + idx_b_NH * str_matR_NH +
        1 * str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    tl.store(matDeltaR_f_ptr, matDeltaR_f.to(DTYPE))
    matDeltaR_z_ptr = tl.make_block_ptr(
        base=delta_R + idx_b_B * str_matR_B + idx_b_NH * str_matR_NH +
        2 * str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    tl.store(matDeltaR_z_ptr, matDeltaR_z.to(DTYPE))
    matDeltaR_o_ptr = tl.make_block_ptr(
        base=delta_R + idx_b_B * str_matR_B + idx_b_NH * str_matR_NH +
        3 * str_matR_NGR, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
        block_shape=(DH, DH), order=(0, 1))
    tl.store(matDeltaR_o_ptr, matDeltaR_o.to(DTYPE))
    vecDeltaB_i_ptr = (delta_b + idx_b_B * NH * NGI * DH +
        idx_b_NH * NGI * DH + 0 * DH + tl.arange(0, DH))
    tl.store(vecDeltaB_i_ptr, vecDeltaB_i.to(DTYPE))
    vecDeltaB_f_ptr = (delta_b + idx_b_B * NH * NGI * DH +
        idx_b_NH * NGI * DH + 1 * DH + tl.arange(0, DH))
    tl.store(vecDeltaB_f_ptr, vecDeltaB_f.to(DTYPE))
    vecDeltaB_z_ptr = (delta_b + idx_b_B * NH * NGI * DH +
        idx_b_NH * NGI * DH + 2 * DH + tl.arange(0, DH))
    tl.store(vecDeltaB_z_ptr, vecDeltaB_z.to(DTYPE))
    vecDeltaB_o_ptr = (delta_b + idx_b_B * NH * NGI * DH +
        idx_b_NH * NGI * DH + 3 * DH + tl.arange(0, DH))
    tl.store(vecDeltaB_o_ptr, vecDeltaB_o.to(DTYPE))
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT", "BSD" ]
https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/slstm_bw.py
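A shape-and-grid sketch for _backward_sequence_kernel above, under layouts inferred from its stride arithmetic (states_all as (NH, T+1, NS, B, DH), gates as (NH, T, NGI, B, DH), per-batch-block dR/db partials reduced on the host). All sizes are illustrative assumptions, not repository code:

import torch

T, NS, B, NH, DH, NGI, NGR, siz_B = 16, 4, 32, 4, 64, 4, 4, 16
dev = 'cuda'
R = torch.randn(NH, NGR, DH, DH, device=dev)
states_all = torch.randn(NH, T + 1, NS, B, DH, device=dev)
gates_all = torch.randn(NH, T, NGI, B, DH, device=dev)
d_states_all = torch.randn(NH, T, NS, B, DH, device=dev)
d_states_last = torch.randn(NH, NS, B, DH, device=dev)
d_states_init = torch.empty(NH, NS, B, DH, device=dev)
dWx = torch.empty(NH, T, NGI, B, DH, device=dev)
num_b_blocks = B // siz_B
dR = torch.empty(num_b_blocks, NH, NGR, DH, DH, device=dev)
db = torch.empty(num_b_blocks, NH, NGI, DH, device=dev)
grid = (NH, num_b_blocks)  # axis 0: heads, axis 1: batch blocks
_backward_sequence_kernel[grid](
    d_states_all, d_states_last, R, states_all, gates_all,
    d_states_init, dWx, dR, db,
    T=T, NS=NS, B=B, NH=NH, DH=DH, NGI=NGI, NGR=NGR, siz_B=siz_B)
dR_total, db_total = dR.sum(0), db.sum(0)  # fold batch-block partials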
ca9df654-133c-4ad7-bf52-bab6ba0855c7
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit
def _bwd_preprocess_do_o_dot(o_ptr, do_ptr, delta_ptr, T, stride_ob,
                             stride_ot, stride_od, stride_do_b,
                             stride_do_t, stride_do_d,
                             BLOCK_T: tl.constexpr, BLOCK_D: tl.constexpr):
    start_t = tl.program_id(0)
    offs_t = start_t * BLOCK_T + tl.arange(0, BLOCK_T)
    pid_b = tl.program_id(1)
    offs_d = tl.arange(0, BLOCK_D)
    o_ptrs = o_ptr + pid_b * stride_ob + offs_t[:, None] * stride_ot + offs_d[None, :] * stride_od
    do_ptrs = do_ptr + pid_b * stride_do_b + offs_t[:, None] * stride_do_t + offs_d[None, :] * stride_do_d
    o = tl.load(o_ptrs, mask=offs_t[:, None] < T, other=0.0)
    do = tl.load(do_ptrs, mask=offs_t[:, None] < T, other=0.0)
    delta = tl.sum(o * do, axis=1)
    delta_ptrs = delta_ptr + pid_b * T + offs_t
    tl.store(delta_ptrs, delta, mask=offs_t < T)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
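A minimal launch sketch for _bwd_preprocess_do_o_dot above. Shapes are illustrative, and BLOCK_D is assumed to cover the whole head dimension since the load is unmasked along it:

import torch
import triton

B, T, D = 8, 1024, 64
BLOCK_T, BLOCK_D = 128, 64
o = torch.randn(B, T, D, device='cuda')
do = torch.randn(B, T, D, device='cuda')
delta = torch.empty(B, T, device='cuda')
grid = (triton.cdiv(T, BLOCK_T), B)
_bwd_preprocess_do_o_dot[grid](
    o, do, delta, T,
    o.stride(0), o.stride(1), o.stride(2),
    do.stride(0), do.stride(1), do.stride(2),
    BLOCK_T=BLOCK_T, BLOCK_D=BLOCK_D)
# delta[b, t] == (o[b, t] * do[b, t]).sum(-1)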
89cd787f-1e2c-4865-a904-bc0d36218c36
flash_attention_nopad.py
tascj/kaggle-lmsys-chatbot-arena
human_pref/inference/ops/flash_attention_nopad.py
83cd93d50b9283c18711e8c63e4e1c6399c7b9ce
0
@triton.jit
def _fwd_kernel(Q, K, V, sm_scale, B_Start_Loc, B_Seqlen, Out, stride_qbs,
                stride_qh, stride_qd, stride_kbs, stride_kh, stride_kd,
                stride_vbs, stride_vh, stride_vd, stride_obs, stride_oh,
                stride_od, kv_group_num, logit_softcapping: tl.constexpr,
                BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
                BLOCK_N: tl.constexpr):
    """flash attention forward triton kernel."""
    cur_batch = tl.program_id(0)
    cur_head = tl.program_id(1)
    start_m = tl.program_id(2)
    cur_kv_head = cur_head // kv_group_num
    cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
    cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
    block_start_loc = BLOCK_M * start_m
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_DMODEL)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    off_q = (cur_batch_in_all_start_index + offs_m[:, None]) * stride_qbs + cur_head * stride_qh + offs_d[None, :] * stride_qd
    off_k = offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh + offs_d[:, None] * stride_kd
    off_v = offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh + offs_d[None, :] * stride_vd
    q = tl.load(Q + off_q, mask=offs_m[:, None] < cur_batch_seq_len, other=0.0)
    k_ptrs = K + off_k
    v_ptrs = V + off_v
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
    acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
    block_mask = tl.where(block_start_loc < cur_batch_seq_len, 1, 0)
    end_n = tl.minimum((start_m + 1) * BLOCK_M, cur_batch_seq_len)
    for start_n in range(0, block_mask * end_n, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        k = tl.load(k_ptrs + (cur_batch_in_all_start_index + start_n) * stride_kbs,
                    mask=start_n + offs_n[None, :] < cur_batch_seq_len, other=0.0)
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk = tl.dot(q, k, qk)
        qk *= sm_scale
        if logit_softcapping > 0.0:
            qk = qk / logit_softcapping
            qk = tl.math.tanh(qk)
            qk = qk * logit_softcapping
        qk_mask = offs_m[:, None] >= start_n + offs_n[None, :]
        qk = tl.where(qk_mask, qk, float('-inf'))
        m_i_new = tl.maximum(m_i, tl.max(qk, 1))
        p = tl.exp(qk - m_i_new[:, None])
        alpha = tl.exp(m_i - m_i_new)
        l_i_new = alpha * l_i + tl.sum(p, 1)
        acc = acc * alpha[:, None]
        v = tl.load(v_ptrs + (cur_batch_in_all_start_index + start_n) * stride_vbs,
                    mask=start_n + offs_n[:, None] < cur_batch_seq_len, other=0.0)
        p = p.to(v.dtype)
        acc += tl.dot(p, v)
        l_i = l_i_new
        m_i = m_i_new
    acc = acc / l_i[:, None]
    off_o = (cur_batch_in_all_start_index + offs_m[:, None]) * stride_obs + cur_head * stride_oh + offs_d[None, :] * stride_od
    out_ptrs = Out + off_o
    tl.store(out_ptrs, acc.to(Out.type.element_ty),
             mask=offs_m[:, None] < cur_batch_seq_len)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Softmax", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/tascj/kaggle-lmsys-chatbot-arena/blob/83cd93d50b9283c18711e8c63e4e1c6399c7b9ce/human_pref/inference/ops/flash_attention_nopad.py
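A hypothetical varlen launch for _fwd_kernel above, with two packed sequences and grouped KV heads. Shapes, block sizes, and the start-location layout are assumptions:

import torch
import triton

# Sequences are concatenated along dim 0 (no padding).
seqlens = torch.tensor([100, 60], device='cuda', dtype=torch.int32)
b_start_loc = torch.tensor([0, 100], device='cuda', dtype=torch.int32)
total, H, H_KV, D = int(seqlens.sum()), 8, 2, 64
q = torch.randn(total, H, D, device='cuda', dtype=torch.float16)
k = torch.randn(total, H_KV, D, device='cuda', dtype=torch.float16)
v = torch.randn(total, H_KV, D, device='cuda', dtype=torch.float16)
o = torch.empty_like(q)
BLOCK_M = BLOCK_N = 64
grid = (len(seqlens), H, triton.cdiv(int(seqlens.max()), BLOCK_M))
_fwd_kernel[grid](
    q, k, v, D ** -0.5, b_start_loc, seqlens, o,
    q.stride(0), q.stride(1), q.stride(2),
    k.stride(0), k.stride(1), k.stride(2),
    v.stride(0), v.stride(1), v.stride(2),
    o.stride(0), o.stride(1), o.stride(2),
    H // H_KV,  # kv_group_num maps query heads onto shared KV heads
    logit_softcapping=0.0,
    BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N)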
3b340b98-2a14-4946-85d4-8529289fd141
mhmoe_bwd.py
dtadpole/triton-playground
mhmoe_bwd.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.autotune(configs=[
    triton.Config({'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 32}, num_stages=3, num_warps=4),
    triton.Config({'BLOCK_SIZE_B': 64, 'BLOCK_SIZE_E': 32}, num_stages=2, num_warps=4),
    triton.Config({'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 64}, num_stages=2, num_warps=4),
    triton.Config({'BLOCK_SIZE_B': 64, 'BLOCK_SIZE_E': 64}, num_stages=2, num_warps=4),
], key=['H', 'B', 'D', 'E'])
@triton.jit
def mlp_wide_kernel_bwd2(x_ptr, w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr,
                         dw2_ptr, do_ptr, H, B, D: tl.constexpr, E,
                         stride_xb, stride_xd, stride_w1d, stride_w1e,
                         stride_w2e, stride_w2d, stride_ob, stride_od,
                         stride_dxb, stride_dxd, stride_dw1d, stride_dw1e,
                         stride_dw2e, stride_dw2d, stride_dob, stride_dod,
                         BLOCK_SIZE_B: tl.constexpr,
                         BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr):
    """Kernel for computing the mlp
    Z = X @ W1, H = f(Z), O = H @ W2
    - X has shape (B, D)
    - W1 has shape (D, E)
    - W2 has shape (E, D)
    - O has shape (B, D)
    - dX has shape (B, D)
    - dW1 has shape (D, E)
    - dW2 has shape (E, D)
    - dO has shape (B, D)
    """
    pid = tl.program_id(axis=0)
    pid_x_w = 0
    batch_groups_e = tl.cdiv(E, BLOCK_SIZE_E)
    batch_groups_b = tl.cdiv(B, BLOCK_SIZE_B)
    idx = pid % (batch_groups_e + batch_groups_b)
    pid_h = pid // (batch_groups_e + batch_groups_b)
    TARGET_TYPE = x_ptr.type.element_ty
    offs_b = tl.arange(0, BLOCK_SIZE_B)
    offs_d = tl.arange(0, D)
    offs_e = tl.arange(0, BLOCK_SIZE_E)
    if idx >= batch_groups_e:
        pid_b = idx - batch_groups_e
        dx_ptrs = dx_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:, None]) * stride_dxb + offs_d[None, :] * stride_dxd)
        dx_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[None, :] < D)
        dx = tl.zeros((BLOCK_SIZE_B, D), dtype=tl.float32)
        dx = _mlp_wide_kernel_bwd_dx(pid_h, pid_b, x_ptr, w1_ptr, w2_ptr,
            o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D, E, stride_xb,
            stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d,
            stride_ob, stride_od, stride_dxb, stride_dxd, stride_dw1d,
            stride_dw1e, stride_dw2e, stride_dw2d, stride_dob, stride_dod,
            BLOCK_SIZE_B, BLOCK_SIZE_E, ACTIVATION)
        tl.store(dx_ptrs, dx.to(TARGET_TYPE), mask=dx_mask)
    else:
        pid_e = idx
        dw1_ptrs = dw1_ptr + ((pid_h * D + offs_d[:, None]) * stride_dw1d + (pid_e * BLOCK_SIZE_E + offs_e[None, :]) * stride_dw1e)
        dw1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - pid_e * BLOCK_SIZE_E)
        dw2_ptrs = dw2_ptr + ((pid_h * E + pid_e * BLOCK_SIZE_E + offs_e[:, None]) * stride_dw2e + offs_d[None, :] * stride_dw2d)
        dw2_mask = (offs_e[:, None] < E - pid_e * BLOCK_SIZE_E) & (offs_d[None, :] < D)
        dw1, dw2 = _mlp_wide_kernel_bwd_dw1w2(pid_h, pid_e, x_ptr, w1_ptr,
            w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D, E,
            stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e,
            stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd,
            stride_dw1d, stride_dw1e, stride_dw2e, stride_dw2d, stride_dob,
            stride_dod, BLOCK_SIZE_B, BLOCK_SIZE_E, ACTIVATION)
        tl.store(dw1_ptrs, dw1.to(TARGET_TYPE), mask=dw1_mask)
        tl.store(dw2_ptrs, dw2.to(TARGET_TYPE), mask=dw2_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe_bwd.py
1d20f406-94e0-403c-a1e0-92ea5edee22d
rwkv_vanilla.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/rwkv_vanilla.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit
def wkv_triton_vanilla_forward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
        k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr,
        state_s_b, state_s_ab, state_s_c, wkv_ptr, wkv_s_b, wkv_s_t,
        wkv_s_c, state_out_ptr, state_out_s_b, state_out_s_ab,
        state_out_s_t, state_out_s_c, chans, tsz, BLOCK_SIZE_C: tl.constexpr):
    b_idx = tl.program_id(0)
    c_idx = tl.program_id(1)
    cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
    cmask = cs < chans
    k_ptr = k_ptr + b_idx * k_s_b
    v_ptr = v_ptr + b_idx * v_s_b
    alpha_ptr = state_ptr + b_idx * state_s_b
    beta_ptr = state_ptr + b_idx * state_s_b + state_s_ab
    wkv_ptr = wkv_ptr + b_idx * wkv_s_b
    alpha_out_ptr = state_out_ptr + b_idx * state_out_s_b
    beta_out_ptr = state_out_ptr + b_idx * state_out_s_b + state_out_s_ab
    alpha = tl.load(alpha_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
    beta = tl.load(beta_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
    w = tl.load(w_ptr + cs * w_s_c, mask=cmask).to(tl.float32)
    u = tl.load(u_ptr + cs * u_s_c, mask=cmask).to(tl.float32)
    ew = tl.exp(w)
    for t in range(tsz):
        kt = tl.load(k_ptr + t * k_s_t + cs * k_s_c, mask=cmask).to(tl.float32)
        vt = tl.load(v_ptr + t * v_s_t + cs * v_s_c, mask=cmask).to(tl.float32)
        euk = tl.exp(u + kt)
        wkv = (alpha + euk * vt) / (beta + euk)
        tl.store(wkv_ptr + t * wkv_s_t + cs * wkv_s_c, wkv, mask=cmask)
        ek = tl.exp(kt)
        alpha = ew * alpha + ek * vt
        beta = ew * beta + ek
        tl.store(alpha_out_ptr + t * state_out_s_t + cs * state_out_s_c, alpha, mask=cmask)
        tl.store(beta_out_ptr + t * state_out_s_t + cs * state_out_s_c, beta, mask=cmask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_vanilla.py
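A minimal launch sketch for wkv_triton_vanilla_forward_kernel above, assuming state holds (alpha, beta) pairs as (B, 2, C) and state_out records them per step as (B, 2, T, C). Sizes and the negative decay initialization are illustrative assumptions:

import torch
import triton

B, T, C = 2, 8, 64
BLOCK_SIZE_C = 32
w = -torch.rand(C, device='cuda')  # decay is typically negative
u = torch.randn(C, device='cuda')
k = torch.randn(B, T, C, device='cuda')
v = torch.randn(B, T, C, device='cuda')
state = torch.randn(B, 2, C, device='cuda')      # (alpha, beta)
wkv = torch.empty(B, T, C, device='cuda')
state_out = torch.empty(B, 2, T, C, device='cuda')
grid = (B, triton.cdiv(C, BLOCK_SIZE_C))
wkv_triton_vanilla_forward_kernel[grid](
    w, w.stride(0), u, u.stride(0),
    k, k.stride(0), k.stride(1), k.stride(2),
    v, v.stride(0), v.stride(1), v.stride(2),
    state, state.stride(0), state.stride(1), state.stride(2),
    wkv, wkv.stride(0), wkv.stride(1), wkv.stride(2),
    state_out, state_out.stride(0), state_out.stride(1),
    state_out.stride(2), state_out.stride(3),
    C, T, BLOCK_SIZE_C=BLOCK_SIZE_C)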
ebfa7825-dfff-4555-b37d-85b4f1aa9d91
seqlen_utils.py
Kitsunetic/kitsu
kitsu/nn/seqlen_utils.py
826967a493c89753ac2cf1e28b52b79998fc9076
0
@triton.jit
def clamp(x, amin, amax):
    x = tl.where(x < amin, amin, x)
    x = tl.where(x >= amax, amax, x)
    return x
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py
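clamp above is a @triton.jit helper rather than a launchable kernel; a small hypothetical elementwise kernel (assuming the helper is in scope) shows how it would be called from device code:

import torch
import triton
import triton.language as tl

@triton.jit
def clamp_kernel(x_ptr, out_ptr, n, amin, amax, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, clamp(x, amin, amax), mask=mask)

x = torch.randn(1000, device='cuda')
out = torch.empty_like(x)
clamp_kernel[(triton.cdiv(x.numel(), 256),)](x, out, x.numel(), -0.5, 0.5, BLOCK=256)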
5655fee2-cd43-4aa1-9458-4f9a099947b6
y_2.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_2.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit
def second_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
                     block_size: tl.constexpr, coord_numel: tl.constexpr,
                     output_numel: tl.constexpr, col_offset: tl.constexpr,
                     output_stride: tl.constexpr):
    coord_stride = 3
    block_id = tl.program_id(0)
    coord_striding = tl.arange(0, block_size) * coord_stride
    coord_row_offset = coord_striding + block_size * coord_stride * block_id
    x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel)
    y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel)
    z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel)
    CONST_00 = 3.87298334620742
    CONST_01 = 2.23606797749979
    CONST_02 = -1.11803398874989
    CONST_03 = 1.93649167310371
    Y20 = CONST_00 * x * z
    Y21 = CONST_00 * x * y
    Y23 = CONST_00 * y * z
    Y22 = CONST_02 * x * x + CONST_01 * y * y + CONST_02 * z * z
    Y24 = -CONST_03 * x * x + CONST_03 * z * z
    output_striding = tl.arange(0, block_size) * output_stride
    output_row_offset = output_striding + block_size * output_stride * block_id + col_offset
    tl.store(output_ptr + output_row_offset, Y20, mask=output_row_offset < output_numel)
    tl.store(output_ptr + output_row_offset + 1, Y21, mask=output_row_offset + 1 < output_numel)
    tl.store(output_ptr + output_row_offset + 2, Y22, mask=output_row_offset + 2 < output_numel)
    tl.store(output_ptr + output_row_offset + 3, Y23, mask=output_row_offset + 3 < output_numel)
    tl.store(output_ptr + output_row_offset + 4, Y24, mask=output_row_offset + 4 < output_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_2.py
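A minimal launch sketch for second_order_fwd above, assuming packed (N, 3) coordinates and a contiguous (N, 5) output for the five order-2 harmonics (output_stride 5, col_offset 0):

import torch
import triton

N, block_size = 1000, 128
coords = torch.randn(N, 3, device='cuda')
out = torch.zeros(N, 5, device='cuda')
grid = (triton.cdiv(N, block_size),)
second_order_fwd[grid](
    coords, out, block_size=block_size,
    coord_numel=coords.numel(), output_numel=out.numel(),
    col_offset=0, output_stride=out.stride(0))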
83df5d43-d1a9-47bb-98c2-6ef4b7de675b
blocksparse_sum.py
kimiasa/Experiments
src/models/attention/blocksparse_sum.py
c4e73bfefd8290695ec52b6386b6b81838ca94a1
0
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3]) * meta['BLOCK']})
@triton.jit
def _backward(DX, DOUT, LUT, sizemax, stride_zdx, stride_zdout,
              stride_hdout, **meta):
    pidhm = tl.program_id(0)
    pidz = tl.program_id(1)
    TN = meta['TN']
    BLOCK = meta['BLOCK']
    rxm = pidhm % BLOCK
    rbm = pidhm // BLOCK
    rxn = tl.arange(0, TN) % BLOCK
    rbn = tl.arange(0, TN) // BLOCK
    # LUT header: (size, offset) per block-row; entries are 4-int records
    # holding (blockid, _, rowid, headid).
    header = LUT + rbm * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    check = rbn < size
    rbmn = tl.where(check, rbn, size - 1)
    blockid = tl.load(LUT + offset + rbmn * 4)
    rowid = tl.load(LUT + offset + rbmn * 4 + 2)
    headid = tl.load(LUT + offset + rbmn * 4 + 3)
    pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    pdout = DOUT + pidz * stride_zdout + headid * stride_hdout + rowid * BLOCK + rxm
    dx_zeros = tl.load(pdx, mask=check, other=0)
    dout = tl.load(pdout)
    dx = dout - dx_zeros
    tl.store(pdx, dx, mask=check)
{ "Data Type": [], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/models/attention/blocksparse_sum.py
f22d7c5d-4b22-44c8-bd17-71abf32be596
softmax_split.py
iclementine/optimize_softmax
softmax_split.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit
def combine_logsumexp_kernel(out_ptr, inp_ptr, M, N, TILE_N: tl.constexpr):
    pid_m = tl.program_id(0)
    n_offsets = tl.arange(0, TILE_N)
    mask = n_offsets < N
    logzs = tl.load(inp_ptr + pid_m * N + n_offsets, other=-float('inf'),
                    mask=mask).to(out_ptr.dtype.element_ty)
    m = tl.max(logzs, 0)
    e = tl.exp(logzs - m)
    z = tl.sum(e, 0)
    logz = m + tl.log(z)
    tl.store(out_ptr + pid_m, logz)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_split.py
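A minimal launch sketch for combine_logsumexp_kernel above: each row of the input holds per-tile partial logsumexp values from a split first pass, and one program folds them into the final value. Shapes are illustrative:

import torch
import triton

M, N = 4, 1000  # N partial logsumexp values per row
partial = torch.randn(M, N, device='cuda')
out = torch.empty(M, device='cuda')
combine_logsumexp_kernel[(M,)](out, partial, M, N,
                               TILE_N=triton.next_power_of_2(N))
# reference: torch.logsumexp(partial, dim=1)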
3f75f29d-097d-46b4-a75c-c46d48ca63f5
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def chunk_linear_attn_fwd_kernel_o(q, k, v, h, o, s_k_h, s_k_t, s_k_d,
        s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, scale, T: tl.constexpr,
        K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr,
        BK: tl.constexpr, BV: tl.constexpr):
    i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_i = tl.arange(0, BT)
    m_s = o_i[:, None] >= o_i[None, :]
    b_o = tl.zeros([BT, BV], dtype=tl.float32)
    b_s = tl.zeros([BT, BT], dtype=tl.float32)
    for i_k in range(tl.cdiv(K, BK)):
        p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d),
            (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t),
            (i_k * BK, i_t * BT), (BK, BT), (0, 1))
        p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V),
            (s_h_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_h = tl.load(p_h, boundary_check=(0, 1))
        b_o += tl.dot(b_q, b_h, allow_tf32=False)
        b_s += tl.dot(b_q, b_k, allow_tf32=False)
    b_s = tl.where(m_s, b_s, 0)
    p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
        (i_t * BT, i_v * BV), (BT, BV), (1, 0))
    p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
        (i_t * BT, i_v * BV), (BT, BV), (1, 0))
    b_v = tl.load(p_v, boundary_check=(0, 1))
    b_o = (b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) * scale
    tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py
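A hypothetical launch for chunk_linear_attn_fwd_kernel_o above, with layouts inferred from the stride arithmetic (q/k as (B*H, T, K), v/o as (B*H, T, V), h as (B*H, NT, K, V) per-chunk running states). All sizes are assumptions:

import torch
import triton

B, H, T, K, V = 2, 4, 256, 64, 64
BT, BK, BV = 64, 64, 64
NT = T // BT
q = torch.randn(B * H, T, K, device='cuda')
k = torch.randn(B * H, T, K, device='cuda')
v = torch.randn(B * H, T, V, device='cuda')
h = torch.randn(B * H, NT, K, V, device='cuda')  # per-chunk state
o = torch.empty(B * H, T, V, device='cuda')
grid = (triton.cdiv(V, BV), triton.cdiv(T, BT), B * H)
chunk_linear_attn_fwd_kernel_o[grid](
    q, k, v, h, o,
    q.stride(0), q.stride(1), q.stride(2),
    v.stride(0), v.stride(1), v.stride(2),
    h.stride(0), h.stride(2), K ** -0.5,
    T=T, K=K, V=V, BT=BT, BK=BK, BV=BV)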
feb07ab0-66fa-4032-b855-37ff437994a7
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit
def jagged_dense_elementwise_mul_jagged_out_kernel(a_ptr, b_ptr, c_ptr,
        a_seq_lengths_ptr, a_offsets_ptr, stride_a, stride_bm, stride_bn,
        max_seq_len, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    pid_batch = tl.program_id(0)
    pid_row_block = tl.program_id(1)
    batch_offset = tl.load(a_offsets_ptr + pid_batch)
    batch_seq_len = tl.load(a_seq_lengths_ptr + pid_batch)
    truncated_seq_len = tl.minimum(batch_seq_len, max_seq_len)
    offs_row = tl.arange(0, BLOCK_M)
    offs_col = tl.arange(0, BLOCK_N)
    rows = pid_row_block * BLOCK_M + offs_row
    a_ptrs = a_ptr + batch_offset * stride_a + rows[:, None] * truncated_seq_len + offs_col[None, :]
    b_ptrs = b_ptr + rows[:, None] * stride_bm + offs_col[None, :] * stride_bn
    c_ptrs = c_ptr + batch_offset + rows[:, None] * truncated_seq_len + offs_col[None, :]
    for block_start in range(0, truncated_seq_len, BLOCK_N):
        cols = block_start + offs_col
        mask = (rows[:, None] < truncated_seq_len) & (cols[None, :] < truncated_seq_len)
        a = tl.load(a_ptrs, mask=mask)
        a_ptrs += BLOCK_N
        b = tl.load(b_ptrs, mask=mask)
        b_ptrs += BLOCK_N
        c = a * b
        tl.store(c_ptrs, c, mask=mask)
        c_ptrs += BLOCK_N
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
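A hypothetical launch for jagged_dense_elementwise_mul_jagged_out_kernel above, assuming each jagged batch stores one L_i x L_i square block and that offsets accumulate L_i**2 with stride_a equal to 1 (inferred from the pointer arithmetic). Sizes are illustrative:

import torch
import triton

lengths = torch.tensor([3, 5], device='cuda')
offsets = torch.cat([torch.zeros(1, dtype=torch.int64, device='cuda'),
                     (lengths ** 2).cumsum(0)])
a = torch.randn(int((lengths ** 2).sum()), device='cuda')  # packed squares
max_seq_len = 8
b = torch.randn(max_seq_len, max_seq_len, device='cuda')
c = torch.empty_like(a)
BLOCK_M, BLOCK_N = 8, 8
grid = (len(lengths), triton.cdiv(max_seq_len, BLOCK_M))
jagged_dense_elementwise_mul_jagged_out_kernel[grid](
    a, b, c, lengths, offsets, 1, b.stride(0), b.stride(1),
    max_seq_len, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)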
3b3299a2-36e9-47f7-8c4e-3fab53ca5277
rotary.py
sustcsonglin/flash-linear-attention
fla/modules/rotary.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps)
                          for num_warps in [2, 4, 8, 16, 32]],
                 key=['BLOCK_K', 'BLOCK_M', 'INTERLEAVED'])
@triton.jit
def rotary_embedding_kernel(X, COS, SIN, OUT, CU_SEQLENS, SEQLEN_OFFSETS,
        seqlen, rotary_dim, seqlen_ro, stride_out_batch, stride_out_seqlen,
        stride_out_nheads, stride_out_headdim, stride_x_batch,
        stride_x_seqlen, stride_x_nheads, stride_x_headdim,
        BLOCK_K: tl.constexpr, BLOCK_M: tl.constexpr,
        IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr, IS_VARLEN: tl.constexpr,
        INTERLEAVED: tl.constexpr, CONJUGATE: tl.constexpr):
    pid_m = tl.program_id(axis=0)
    pid_batch = tl.program_id(axis=1)
    pid_head = tl.program_id(axis=2)
    rotary_dim_half = rotary_dim // 2
    if not IS_VARLEN:
        X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads
        OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads
    else:
        start_idx = tl.load(CU_SEQLENS + pid_batch)
        seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx
        X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads
        OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads
    if pid_m * BLOCK_M >= seqlen:
        return
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    if not IS_SEQLEN_OFFSETS_TENSOR:
        rm_cs = rm + SEQLEN_OFFSETS
    else:
        rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch)
    rk = tl.arange(0, BLOCK_K)
    rk_half = tl.arange(0, BLOCK_K // 2)
    if not INTERLEAVED:
        X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :])
        cos = tl.load(COS, mask=(rm_cs[:, None] < seqlen_ro) &
                      (rk_half[None, :] < rotary_dim_half), other=1.0).to(tl.float32)
        sin = tl.load(SIN, mask=(rm_cs[:, None] < seqlen_ro) &
                      (rk_half[None, :] < rotary_dim_half), other=0.0).to(tl.float32)
        x0 = tl.load(X, mask=(rm[:, None] < seqlen) &
                     (rk_half[None, :] < rotary_dim_half), other=0.0).to(tl.float32)
        x1 = tl.load(X + rotary_dim_half * stride_x_headdim,
                     mask=(rm[:, None] < seqlen) &
                     (rk_half[None, :] < rotary_dim_half), other=0.0).to(tl.float32)
        if CONJUGATE:
            sin = -sin
        o0 = x0 * cos - x1 * sin
        o1 = x0 * sin + x1 * cos
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim)
        tl.store(OUT, o0, mask=(rm[:, None] < seqlen) &
                 (rk_half[None, :] < rotary_dim_half))
        tl.store(OUT + rotary_dim_half * stride_out_headdim, o1,
                 mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half))
    else:
        rk_swap = rk + (rk + 1) % 2 * 2 - 1
        rk_repeat = tl.arange(0, BLOCK_K) // 2
        X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim)
        X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim)
        COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :])
        cos = tl.load(COS, mask=(rm_cs[:, None] < seqlen_ro) &
                      (rk_repeat[None, :] < rotary_dim_half), other=1.0).to(tl.float32)
        sin = tl.load(SIN, mask=(rm_cs[:, None] < seqlen_ro) &
                      (rk_repeat[None, :] < rotary_dim_half), other=0.0).to(tl.float32)
        x0 = tl.load(X0, mask=(rm[:, None] < seqlen) &
                     (rk[None, :] < rotary_dim), other=0.0).to(tl.float32)
        x1 = tl.load(X1, mask=(rm[:, None] < seqlen) &
                     (rk_swap[None, :] < rotary_dim), other=0.0).to(tl.float32)
        if CONJUGATE:
            sin = -sin
        x0_cos = x0 * cos
        x1_sin = x1 * sin
        out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin)
        OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim)
        tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/rotary.py
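A minimal fixed-length launch sketch for rotary_embedding_kernel above (IS_VARLEN=False, non-interleaved halves). The cos/sin table construction and block sizes are assumptions:

import torch
import triton

batch, seqlen, nheads, headdim = 2, 128, 8, 64
rotary_dim = 64
x = torch.randn(batch, seqlen, nheads, headdim, device='cuda')
out = torch.empty_like(x)
pos = torch.arange(seqlen, device='cuda', dtype=torch.float32)
inv_freq = 1.0 / 10000 ** (torch.arange(0, rotary_dim, 2, device='cuda') / rotary_dim)
freqs = torch.outer(pos, inv_freq)            # (seqlen, rotary_dim // 2)
cos, sin = freqs.cos(), freqs.sin()
BLOCK_M = 32
BLOCK_K = max(triton.next_power_of_2(rotary_dim), 32)
grid = (triton.cdiv(seqlen, BLOCK_M), batch, nheads)
rotary_embedding_kernel[grid](
    x, cos, sin, out, None, 0, seqlen, rotary_dim, seqlen,
    out.stride(0), out.stride(1), out.stride(2), out.stride(3),
    x.stride(0), x.stride(1), x.stride(2), x.stride(3),
    BLOCK_K=BLOCK_K, BLOCK_M=BLOCK_M,
    IS_SEQLEN_OFFSETS_TENSOR=False, IS_VARLEN=False,
    INTERLEAVED=False, CONJUGATE=False)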
53d194b5-4f30-42c5-bcdc-aa0d961e4d3f
k_dropout.py
cpuhrsch/torchfused
torchfused/triton/k_dropout.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.autotune(configs=_k_configs, key=['N'])
@triton.jit
def k_dropout_bw(GRAD_IN, GRAD_OUT, INPUTS, BIAS, SEEDS, stride_grad,
                 stride_inputs, N, p, **META):
    """
    Apply the dropout backward pass: mask and rescale the incoming gradients.

    GRAD_OUT    (M, N)
    GRAD_IN     (M, N)
    BIAS        (N,)
    SEEDS       (M,)
    p : dropout probability
    """
    BLOCK_SIZE = META['BLOCK_SIZE']
    row = tl.program_id(axis=0)
    col = tl.program_id(axis=1)
    grad_offsets = row * stride_grad + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) < N
    grad_out_ptrs = GRAD_OUT + grad_offsets
    grad_out = tl.load(grad_out_ptrs, mask=mask)
    if META['ACTIVATION_GRAD']:
        input_ptrs = INPUTS + row * stride_inputs + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        inputs = tl.load(input_ptrs, mask=mask)
        if META['USE_BIAS']:
            b_ptrs = BIAS + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
            b = tl.load(b_ptrs, mask=mask)
            inputs += b
        act_grad = META['ACTIVATION_GRAD'](inputs)
        grad_out *= act_grad
    if p > 0.0:
        output = _drop_and_scale(SEEDS, row, p, grad_offsets, grad_out)
    else:
        output = grad_out
    y_ptrs = GRAD_IN + grad_offsets
    tl.store(y_ptrs, output, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_dropout.py
212d49aa-c7b6-41bc-b921-3cda1e94187e
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/launch_latency/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.jit
def nop_with_args_kernel(t1, t2, t3, t4, t5, i1, i2, i3, i4, i5, i6, i7,
                         i8, i9, c1: tl.constexpr, c2: tl.constexpr,
                         c3: tl.constexpr, c4: tl.constexpr,
                         c5: tl.constexpr):
    pass
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/launch_latency/kernels.py
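The kernel body above is empty on purpose: tritonbench uses it to measure launch overhead as a function of argument count. A hypothetical timing snippet using triton.testing.do_bench:

import torch
from triton.testing import do_bench

tensors = [torch.empty(1, device='cuda') for _ in range(5)]
ints = list(range(9))
ms = do_bench(lambda: nop_with_args_kernel[(1,)](
    *tensors, *ints, c1=1, c2=2, c3=3, c4=4, c5=5))
print(f'launch overhead: {ms * 1e3:.1f} us')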
1dacd6b7-d7ac-4c8b-9b58-afc4b5e496bd
fp8_gemm.py
pytorch/FBGEMM
fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.autotune(configs=MATMUL_CONFIGS, key=['m_key', 'n_key', 'k_key'])
@triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] *
    args['SPLIT_K']) == 0})
@triton.jit
def _kernel_matmul_fp8_row_imprecise_acc(A, B, C, M, N, K, m_key, n_key,
        k_key, A_scale, B_scale, Bias, stride_am, stride_ak, stride_bn,
        stride_bk, stride_cm, stride_cn, dot_out_dtype: tl.constexpr,
        allow_tf32: tl.constexpr, fp8_fast_accum: tl.constexpr,
        BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
        GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr,
        USE_BIAS: tl.constexpr, AB_DTYPE: tl.constexpr) -> None:
    """Matmul kernel of [M, K] @ [N, K] with row-wise scales

    performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles.

    Args:
        A (TensorWrapper): [M, K] input tensor.
        B (TensorWrapper): [N, K] input tensor.
        C (TensorWrapper): [M, N] output tensor.
        M (int): M dimension of input tensor.
        N (int): N dimension of input tensor.
        K (int): K dimension of input tensor.
        m_key (int): Autotuning key for M dimension of input tensor.
        n_key (int): Autotuning key for N dimension of input tensor.
        k_key (int): Autotuning key for K dimension of input tensor.
        A_scale (TensorWrapper): [M] reciprocal scale tensor per row. A * A_scale = original A
        B_scale (TensorWrapper): [N] reciprocal scale tensor per row. B * B_scale = original B
        Bias (TensorWrapper): [N] Optional bias tensor.
        stride_am (int): Stride of M dimension of A.
        stride_ak (int): Stride of K dimension of A.
        stride_bn (int): Stride of N dimension of B.
        stride_bk (int): Stride of K dimension of B.
        stride_cm (int): Stride of M dimension of C.
        stride_cn (int): Stride of N dimension of C.
        dot_out_dtype (torch.dtype): Output type of tensor core.
        allow_tf32 (bool): Whether to use TF32 for tensor core.
        fp8_fast_accum (bool): Whether to use fast accumulation for tensor core.
        BLOCK_M (int): Block size for M dimension.
        BLOCK_N (int): Block size for N dimension.
        BLOCK_K (int): Block size for K dimension.
        GROUP_M (int): Number of groups for M dimension swizzle.
        SPLIT_K (int): Number of SMs to launch per row.
        EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K.
        USE_BIAS (bool): Whether to use bias.
        AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
    """
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    grid_m = tl.cdiv(M, BLOCK_M)
    grid_n = tl.cdiv(N, BLOCK_N)
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + pid % group_size
    pid_n = pid % width // group_size
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
    for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            k_remaining = K - k * (BLOCK_K * SPLIT_K)
            _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty)
            a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0)
            b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0)
        if AB_DTYPE:
            a = a.to(C.dtype.element_ty)
            b = b.to(C.dtype.element_ty)
        if fp8_fast_accum:
            acc = tl.dot(a, b, acc, max_num_imprecise_acc=32,
                out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
        else:
            acc += tl.dot(a, b, out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    a_scale = tl.load(A_scale + rm, mask=rm < M)
    b_scale = tl.load(B_scale + rn, mask=rn < N)
    scale = a_scale[:, None] * b_scale[None, :]
    acc *= scale
    if USE_BIAS:
        bias = tl.load(Bias + rn, mask=rn < N)
        acc += bias[None, :]
    acc = acc.to(C.dtype.element_ty)
    C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
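A hypothetical launch for _kernel_matmul_fp8_row_imprecise_acc above, assuming the kernel and its MATMUL_CONFIGS are importable. The grid lambda pulls block sizes and SPLIT_K from whichever autotune config runs, the output is zero-initialized so the SPLIT_K atomic-add path stays correct, and passing M/N/K directly as the autotune keys is an assumption:

import torch
import triton
import triton.language as tl

M, N, K = 1024, 1024, 512
a = torch.randn(M, K, device='cuda').to(torch.float8_e4m3fn)
b = torch.randn(N, K, device='cuda').to(torch.float8_e4m3fn)  # [N, K]
a_scale = torch.rand(M, device='cuda') + 0.5
b_scale = torch.rand(N, device='cuda') + 0.5
c = torch.zeros(M, N, device='cuda', dtype=torch.bfloat16)
grid = lambda META: (
    triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']),
    META['SPLIT_K'])
_kernel_matmul_fp8_row_imprecise_acc[grid](
    a, b, c, M, N, K, M, N, K, a_scale, b_scale, None,
    a.stride(0), a.stride(1), b.stride(0), b.stride(1),
    c.stride(0), c.stride(1),
    dot_out_dtype=tl.float32, allow_tf32=True, fp8_fast_accum=True,
    USE_BIAS=False, AB_DTYPE=False)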
f3407da0-33a9-4a5e-bade-84ebd8b023fd
fwd_kernel.py
ROCm/aotriton
test/fwd_kernel.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit
def attn_fwd(Q, K, V, sm_scale, M, Out, stride_qz, stride_qh, stride_qm,
             stride_qk, stride_kz, stride_kh, stride_kn, stride_kk,
             stride_vz, stride_vh, stride_vk, stride_vn, stride_oz,
             stride_oh, stride_om, stride_on, seqlen_q, seqlen_k,
             dropout_p, philox_seed, philox_offset_base, encoded_softmax,
             STAGE: tl.constexpr, BLOCK_M: tl.constexpr,
             BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
             pre_load_v: tl.constexpr, ENABLE_DROPOUT: tl.constexpr,
             RETURN_ENCODED_SOFTMAX: tl.constexpr):
    start_m = tl.program_id(0)
    off_h = tl.program_id(1)
    off_z = tl.program_id(2)
    num_h = tl.num_programs(1)
    num_z = tl.num_programs(2)
    q_offset = off_h * stride_qh + off_z * stride_qz
    Q_block_ptr = tl.make_block_ptr(base=Q + q_offset,
        shape=(seqlen_q, BLOCK_DMODEL), strides=(stride_qm, stride_qk),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
    k_offset = off_h * stride_kh + off_z * stride_kz
    K_block_ptr = tl.make_block_ptr(base=K + k_offset,
        shape=(BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn),
        offsets=(0, 0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
    v_offset = off_h * stride_vh + off_z * stride_vz
    V_block_ptr = tl.make_block_ptr(base=V + v_offset,
        shape=(seqlen_k, BLOCK_DMODEL), strides=(stride_vk, stride_vn),
        offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
    qk_scale = sm_scale * 1.44269504
    q = tl.load(Q_block_ptr)
    q = (q * qk_scale).to(Q_block_ptr.type.element_ty)
    off_zh = off_z * num_h + off_h * 1
    if ENABLE_DROPOUT:
        batch_philox_offset = philox_offset_base + off_zh * seqlen_q * seqlen_k
    else:
        batch_philox_offset = 0
    if RETURN_ENCODED_SOFTMAX:
        encoded_softmax_block_ptr = tl.make_block_ptr(
            base=encoded_softmax + off_zh * seqlen_q * seqlen_k,
            shape=(seqlen_q, seqlen_k), strides=(seqlen_k, 1),
            offsets=(start_m * BLOCK_M, 0),
            block_shape=(BLOCK_M, BLOCK_N), order=(1, 0))
    else:
        encoded_softmax_block_ptr = 0
    if STAGE & 1:
        acc, l_i, m_i = attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr,
            V_block_ptr, start_m, seqlen_q, seqlen_k, dropout_p,
            philox_seed, batch_philox_offset, encoded_softmax_block_ptr,
            BLOCK_M, BLOCK_DMODEL, BLOCK_N, 4 - STAGE, offs_m, offs_n,
            pre_load_v, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX)
    if STAGE & 2:
        tl.debug_barrier()
        acc, l_i, m_i = attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr,
            V_block_ptr, start_m, seqlen_q, seqlen_k, dropout_p,
            philox_seed, batch_philox_offset, encoded_softmax_block_ptr,
            BLOCK_M, BLOCK_DMODEL, BLOCK_N, 2, offs_m, offs_n, pre_load_v,
            ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX)
    acc = acc / l_i[:, None]
    if ENABLE_DROPOUT:
        acc = acc / (1 - dropout_p)
    m_ptrs = M + off_zh * seqlen_q + offs_m
    tl.store(m_ptrs, m_i + tl.math.log2(l_i))
    o_offset = off_h * stride_oh + off_z * stride_oz
    O_block_ptr = tl.make_block_ptr(base=Out + o_offset,
        shape=(seqlen_q, BLOCK_DMODEL), strides=(stride_om, stride_on),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
    tl.store(O_block_ptr, acc.to(Out.type.element_ty))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/fwd_kernel.py
23435036-debc-4e75-8236-37b373cdcf8f
_semi_structured_conversions.py
huyz2023/2by4-pretrain
sparse/_semi_structured_conversions.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.autotune(configs=get_configs(), key=['m', 'k'])
@triton.jit
def _sparse_semi_structured_from_dense_triton_16(dense_ptr, sparse_ptr,
        meta_reordered_ptr, mask_ptr, dense_row_stride, sparse_row_stride,
        mask_row_stride, dense_col_stride, sparse_col_stride,
        mask_col_stride, m, k, seed, BLOCK_SIZE: tl.constexpr,
        PRUNE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr):
    if ARRAY_LAYOUT == 'row':
        row_idx = tl.program_id(0)
        col_idx = tl.program_id(1) * 16 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) * 16
        mask = col_idx < k
    elif ARRAY_LAYOUT == 'col':
        row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE
        col_idx = tl.program_id(1) * 16
        mask = row_idx < m
    dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 0) * dense_col_stride, mask=mask)
    dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 1) * dense_col_stride, mask=mask)
    dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 2) * dense_col_stride, mask=mask)
    dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 3) * dense_col_stride, mask=mask)
    dense_44 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 4) * dense_col_stride, mask=mask)
    dense_45 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 5) * dense_col_stride, mask=mask)
    dense_46 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 6) * dense_col_stride, mask=mask)
    dense_47 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 7) * dense_col_stride, mask=mask)
    dense_48 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 8) * dense_col_stride, mask=mask)
    dense_49 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 9) * dense_col_stride, mask=mask)
    dense_4A = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 10) * dense_col_stride, mask=mask)
    dense_4B = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 11) * dense_col_stride, mask=mask)
    dense_4C = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 12) * dense_col_stride, mask=mask)
    dense_4D = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 13) * dense_col_stride, mask=mask)
    dense_4E = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 14) * dense_col_stride, mask=mask)
    dense_4F = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 15) * dense_col_stride, mask=mask)
    if PRUNE == 'mse':
        if dense_ptr.type.element_ty == tl.bfloat16:
            (_dense_40, _dense_41, _dense_42, _dense_43, _dense_44,
             _dense_45, _dense_46, _dense_47, _dense_48, _dense_49,
             _dense_4A, _dense_4B, _dense_4C, _dense_4D, _dense_4E,
             _dense_4F) = (dense_40.to(tl.float32),
                dense_41.to(tl.float32), dense_42.to(tl.float32),
                dense_43.to(tl.float32), dense_44.to(tl.float32),
                dense_45.to(tl.float32), dense_46.to(tl.float32),
                dense_47.to(tl.float32), dense_48.to(tl.float32),
                dense_49.to(tl.float32), dense_4A.to(tl.float32),
                dense_4B.to(tl.float32), dense_4C.to(tl.float32),
                dense_4D.to(tl.float32), dense_4E.to(tl.float32),
                dense_4F.to(tl.float32))
        else:
            (_dense_40, _dense_41, _dense_42, _dense_43, _dense_44,
             _dense_45, _dense_46, _dense_47, _dense_48, _dense_49,
             _dense_4A, _dense_4B, _dense_4C, _dense_4D, _dense_4E,
             _dense_4F) = (dense_40, dense_41, dense_42, dense_43,
                dense_44, dense_45, dense_46, dense_47, dense_48,
                dense_49, dense_4A, dense_4B, dense_4C, dense_4D,
                dense_4E, dense_4F)
        x1, x2, x3, x4, x5, x6 = (tl.abs(_dense_40) > tl.abs(_dense_41),
            tl.abs(_dense_40) > tl.abs(_dense_42),
            tl.abs(_dense_40) > tl.abs(_dense_43),
            tl.abs(_dense_41) > tl.abs(_dense_42),
            tl.abs(_dense_41) > tl.abs(_dense_43),
            tl.abs(_dense_42) > tl.abs(_dense_43))
        m0, m1, m2, m3 = (x2 & x3 | x1 & x2 | x1 & x3,
            ~x1 & x5 | x4 & x5 | ~x1 & x4,
            ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6,
            ~x3 & ~x5 | ~x3 & ~x6 | ~x5 & ~x6)
        x1, x2, x3, x4, x5, x6 = (tl.abs(_dense_44) > tl.abs(_dense_45),
            tl.abs(_dense_44) > tl.abs(_dense_46),
            tl.abs(_dense_44) > tl.abs(_dense_47),
            tl.abs(_dense_45) > tl.abs(_dense_46),
            tl.abs(_dense_45) > tl.abs(_dense_47),
            tl.abs(_dense_46) > tl.abs(_dense_47))
        m4, m5, m6, m7 = (x2 & x3 | x1 & x2 | x1 & x3,
            ~x1 & x5 | x4 & x5 | ~x1 & x4,
            ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6,
            ~x3 & ~x5 | ~x3 & ~x6 | ~x5 & ~x6)
        x1, x2, x3, x4, x5, x6 = (tl.abs(_dense_48) > tl.abs(_dense_49),
            tl.abs(_dense_48) > tl.abs(_dense_4A),
            tl.abs(_dense_48) > tl.abs(_dense_4B),
            tl.abs(_dense_49) > tl.abs(_dense_4A),
            tl.abs(_dense_49) > tl.abs(_dense_4B),
            tl.abs(_dense_4A) > tl.abs(_dense_4B))
        m8, m9, mA, mB = (x2 & x3 | x1 & x2 | x1 & x3,
            ~x1 & x5 | x4 & x5 | ~x1 & x4,
            ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6,
            ~x3 & ~x5 | ~x3 & ~x6 | ~x5 & ~x6)
        x1, x2, x3, x4, x5, x6 = (tl.abs(_dense_4C) > tl.abs(_dense_4D),
            tl.abs(_dense_4C) > tl.abs(_dense_4E),
            tl.abs(_dense_4C) > tl.abs(_dense_4F),
            tl.abs(_dense_4D) > tl.abs(_dense_4E),
            tl.abs(_dense_4D) > tl.abs(_dense_4F),
            tl.abs(_dense_4E) > tl.abs(_dense_4F))
        mC, mD, mE, mF = (x2 & x3 | x1 & x2 | x1 & x3,
            ~x1 & x5 | x4 & x5 | ~x1 & x4,
            ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6,
            ~x3 & ~x5 | ~x3 & ~x6 | ~x5 & ~x6)
    elif PRUNE == 'mask':
        m0 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 0) * mask_col_stride, mask=mask).to(tl.int1)
        m1 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 1) * mask_col_stride, mask=mask).to(tl.int1)
        m2 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 2) * mask_col_stride, mask=mask).to(tl.int1)
        m3 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 3) * mask_col_stride, mask=mask).to(tl.int1)
        m4 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 4) * mask_col_stride, mask=mask).to(tl.int1)
        m5 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 5) * mask_col_stride, mask=mask).to(tl.int1)
        m6 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 6) * mask_col_stride, mask=mask).to(tl.int1)
        m7 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 7) * mask_col_stride, mask=mask).to(tl.int1)
        m8 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 8) * mask_col_stride, mask=mask).to(tl.int1)
        m9 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 9) * mask_col_stride, mask=mask).to(tl.int1)
        mA = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 10) * mask_col_stride, mask=mask).to(tl.int1)
        mB = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 11) * mask_col_stride, mask=mask).to(tl.int1)
        mC = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 12) * mask_col_stride, mask=mask).to(tl.int1)
        mD = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 13) * mask_col_stride, mask=mask).to(tl.int1)
        mE = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 14) * mask_col_stride, mask=mask).to(tl.int1)
        mF = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 15) * mask_col_stride, mask=mask).to(tl.int1)
    elif PRUNE == 'mvue':
        if ARRAY_LAYOUT == 'row':
            seed0 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2
            seed1 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2 + 1
        else:
            seed0 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2
            seed1 = seed + (tl.program_id(0) * k // 16 + tl.program_id(1)) * 2 + 1
        random0, random1, random2, random3 = tl.rand4x(seed0,
            tl.arange(0, BLOCK_SIZE), n_rounds=5)
        random4, random5, random6, random7 = tl.rand4x(seed1,
            tl.arange(0, BLOCK_SIZE), n_rounds=5)
        dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = (
            _MVUE24_approx(dense_40, dense_41, dense_42, dense_43,
                random0, random1))
        dense_44, dense_45, dense_46, dense_47, m4, m5, m6, m7 = (
            _MVUE24_approx(dense_44, dense_45, dense_46, dense_47,
                random2, random3))
        dense_48, dense_49, dense_4A, dense_4B, m8, m9, mA, mB = (
            _MVUE24_approx(dense_48, dense_49, dense_4A, dense_4B,
                random4, random5))
        dense_4C, dense_4D, dense_4E, dense_4F, mC, mD, mE, mF = (
            _MVUE24_approx(dense_4C, dense_4D, dense_4E, dense_4F,
                random6, random7))
    else:
        m0 = dense_40 != 0
        m1 = dense_41 != 0
        m2 = dense_42 != 0
        m3 = dense_43 != 0
        m4 = dense_44 != 0
        m5 = dense_45 != 0
        m6 = dense_46 != 0
        m7 = dense_47 != 0
        m8 = dense_48 != 0
        m9 = dense_49 != 0
        mA = dense_4A != 0
        mB = dense_4B != 0
        mC = dense_4C != 0
        mD = dense_4D != 0
        mE = dense_4E != 0
        mF = dense_4F != 0
    bit0 = ~m0 & m1
    bit1 = ~m0 & ~m1
    bit2 = bit1 | ~m2
    bit3 = bit0 | ~m1 | m2
    idxs0 = bit0 | bit1.to(tl.int64) << 1
    idxs1 = bit2 | bit3.to(tl.int64) << 1
    sparse0 = tl.where(bit1, tl.where(bit0, dense_43, dense_42),
        tl.where(bit0, dense_41, dense_40))
    sparse1 = tl.where(bit3, tl.where(bit2, dense_43, dense_42),
        tl.where(bit2, dense_41, dense_40))
    bit4 = ~m4 & m5
    bit5 = ~m4 & ~m5
    bit6 = bit5 | ~m6
    bit7 = bit4 | ~m5 | m6
    idxs2 = bit4 | bit5.to(tl.int64) << 1
    idxs3 = bit6 | bit7.to(tl.int64) << 1
    sparse2 = tl.where(bit5, tl.where(bit4, dense_47, dense_46),
        tl.where(bit4, dense_45, dense_44))
    sparse3 = tl.where(bit7, tl.where(bit6, dense_47, dense_46),
        tl.where(bit6, dense_45, dense_44))
    bit8 = ~m8 & m9
    bit9 = ~m8 & ~m9
    bitA = bit9 | ~mA
    bitB = bit8 | ~m9 | mA
    idxs4 = bit8 | bit9.to(tl.int64) << 1
    idxs5 = bitA | bitB.to(tl.int64) << 1
    sparse4 = tl.where(bit9, tl.where(bit8, dense_4B, dense_4A),
        tl.where(bit8, dense_49, dense_48))
    sparse5 = tl.where(bitB, tl.where(bitA, dense_4B, dense_4A),
        tl.where(bitA, dense_49, dense_48))
    bitC = ~mC & mD
    bitD = ~mC & ~mD
    bitE = bitD | ~mE
    bitF = bitC | ~mD | mE
    idxs6 = bitC | bitD.to(tl.int64) << 1
    idxs7 = bitE | bitF.to(tl.int64) << 1
    sparse6 = tl.where(bitD, tl.where(bitC, dense_4F, dense_4E),
        tl.where(bitC, dense_4D, dense_4C))
    sparse7 = tl.where(bitF, tl.where(bitE, dense_4F, dense_4E),
        tl.where(bitE, dense_4D, dense_4C))
    col_idx = tl.program_id(1) * 8
    if ARRAY_LAYOUT == 'row':
        col_idx = tl.program_id(1) * 8 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) * 8
        mask = col_idx < k // 2
    else:
        col_idx = tl.program_id(1) * 8
        mask = row_idx < m
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) * sparse_col_stride, sparse0, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) * sparse_col_stride, sparse1, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) * sparse_col_stride, sparse2, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) * sparse_col_stride, sparse3, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 4) * sparse_col_stride, sparse4, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 5) * sparse_col_stride, sparse5, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 6) * sparse_col_stride, sparse6, mask=mask)
    tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 7) * sparse_col_stride, sparse7, mask=mask)
    meta_40 = idxs0 | idxs1 << 2
    meta_41 = idxs2 | idxs3 << 2
    meta_42 = idxs4 | idxs5 << 2
    meta_43 = idxs6 | idxs7 << 2
    meta = meta_40 | meta_41 << 4 | meta_42 << 8 | meta_43 << 12
    if ARRAY_LAYOUT == 'row':
        col_idx = tl.program_id(1) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    elif ARRAY_LAYOUT == 'col':
        col_idx = tl.program_id(1)
    group, interweave = 32, 4
    dest_row = row_idx // 32 * 32 + row_idx % 8 * 4 + row_idx % group // 8
    dest_col = col_idx
    topright = ((dest_row % 2 == 0) & (dest_col % 2 == 1)).to(tl.int8)
    bottomleft = ((dest_row % 2 == 1) & (dest_col % 2 == 0)).to(tl.int8)
    dest_row = dest_row + topright - bottomleft
    dest_col = dest_col - topright + bottomleft
    interleave = 2
    cols_maj = dest_col // interleave
    cols_min = dest_col % interleave
    meta_reordered_offsets = (cols_maj * m * interleave +
        dest_row * interleave + cols_min)
    if ARRAY_LAYOUT == 'row':
        mask = col_idx < k // 16
    elif ARRAY_LAYOUT == 'col':
        mask = row_idx < m
    tl.store(meta_reordered_ptr + meta_reordered_offsets, meta, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/_semi_structured_conversions.py
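Note: the kernel's 'mse' branch picks, within every group of four values, the two entries of largest magnitude using branch-free boolean masks, then packs their positions as two 2-bit indices (4 bits per group, 16 bits per 16 columns). A plain-Python reference of that selection and packing, for orientation only (`prune_2of4` is a hypothetical helper, not part of the repo above):

def prune_2of4(group):
    # Keep the two largest-magnitude entries of a 4-element group and
    # pack their positions as two 2-bit indices (4 bits per group).
    keep = sorted(range(4), key=lambda i: abs(group[i]), reverse=True)[:2]
    keep.sort()
    sparse = [group[i] for i in keep]
    meta = keep[0] | keep[1] << 2
    return sparse, meta

# e.g. prune_2of4([0.1, -3.0, 0.2, 2.5]) -> ([-3.0, 2.5], 0b1101)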
5539577c-6764-48b6-859f-2234d6a9e634
normalization.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/normalization.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit
def triton_normalization_forward_kernel(input_ptr, output_ptr, weight_ptr,
    bias_ptr, inv_var_ptr, n_cols, eps, has_bias: tl.constexpr,
    zero_centered: tl.constexpr, block_size: tl.constexpr):
    row = tl.program_id(0).to(tl.int64)
    cols = tl.arange(0, block_size)
    mask = cols < n_cols
    offsets = row * n_cols + cols
    input_ = tl.load(input_ptr + offsets, mask=mask, other=0.0).to(tl.float32)
    if has_bias:
        mean = tl.sum(input_, axis=0) / n_cols
        input_ = tl.where(mask, input_ - mean, 0.0)
    inv_var = 1 / tl.sqrt(tl.sum(input_ * input_, axis=0) / n_cols + eps)
    tl.store(inv_var_ptr + row, inv_var)
    weight = tl.load(weight_ptr + cols, mask=mask)
    if zero_centered:
        weight += 1
    output = input_ * inv_var * weight
    if has_bias:
        bias = tl.load(bias_ptr + cols, mask=mask)
        output = output + bias
    tl.store(output_ptr + offsets, output, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/normalization.py
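Note: one program normalizes one row, so block_size must cover a whole row. A minimal launch sketch, assuming a contiguous (n_rows, n_cols) input and that passing bias as None is acceptable when has_bias is False (the pointer is then never dereferenced):

import torch
import triton

def normalization_forward(x, weight, bias=None, eps=1e-5, zero_centered=False):
    n_rows, n_cols = x.shape
    out = torch.empty_like(x)
    inv_var = torch.empty(n_rows, dtype=torch.float32, device=x.device)
    block_size = triton.next_power_of_2(n_cols)
    triton_normalization_forward_kernel[(n_rows,)](
        x, out, weight, bias, inv_var, n_cols, eps,
        has_bias=bias is not None, zero_centered=zero_centered,
        block_size=block_size)
    return out, inv_var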
69b84f1a-f834-4d8d-8fa9-a4d114df2847
mlstm_matmul.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_matmul.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit
def sign(x):
    return (x > 0).to(tl.float32) - (x < 0).to(tl.float32)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py
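Note: `sign` is a device-side helper; Triton inlines any @triton.jit function called from another kernel. A minimal sketch of how it composes into a full kernel, assuming it lives in the same module as `sign`:

import triton
import triton.language as tl

@triton.jit
def sign_kernel(x_ptr, out_ptr, n, BLOCK: tl.constexpr):
    # Elementwise sign over a 1D tensor; `sign` is inlined by the compiler.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, sign(x), mask=mask)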
fa8e1c2a-4c55-4f07-97c6-e7f805a3a487
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit
def _jagged_dense_flash_attention_bwd_dv_db_dq_kernel(q_ptr, k_ptr, v_ptr,
    ab_ptr, jagged_offsets_ptr, out_ptr, do_ptr, lse_ptr, delta_ptr, dq_ptr,
    dk_ptr, dv_ptr, dbias_ptr, max_seq_len, stride_ql, stride_qd, stride_kb,
    stride_kd, stride_kt, stride_vl, stride_vd, stride_ab_b, stride_ab_l,
    stride_ab_t, stride_ob, stride_ot, stride_od, stride_dq_l, stride_dq_d,
    stride_dv_l, stride_dv_d, stride_db_b, stride_db_l, stride_db_t,
    stride_do_b, stride_do_t, stride_do_d, T: tl.constexpr,
    BLOCK_T: tl.constexpr, BLOCK_L: tl.constexpr, BLOCK_D: tl.constexpr,
    allow_tf32: tl.constexpr):
    pid_l = tl.program_id(0)
    pid_b = tl.program_id(1)
    begin = tl.load(jagged_offsets_ptr + pid_b)
    end = tl.load(jagged_offsets_ptr + pid_b + 1)
    seqlen = end - begin
    seqlen = tl.minimum(seqlen, max_seq_len)
    if seqlen == 0:
        return
    q_start_ptr = q_ptr + begin * stride_ql
    k_start_ptr = k_ptr + pid_b * stride_kb
    ab_start_ptr = ab_ptr + pid_b * stride_ab_b
    v_start_ptr = v_ptr + begin * stride_vl
    do_start_ptr = do_ptr + pid_b * stride_do_b
    dq_start_ptr = dq_ptr + begin * stride_dq_l
    dv_start_ptr = dv_ptr + begin * stride_dv_l
    dbias_start_ptr = dbias_ptr + pid_b * stride_db_b
    delta_ptrs = delta_ptr + pid_b * T
    lse_ptrs = lse_ptr + pid_b * T
    start_l = pid_l * BLOCK_L
    offs_l_curr = start_l + tl.arange(0, BLOCK_L)
    offs_d = tl.arange(0, BLOCK_D)
    offs_t = tl.arange(0, BLOCK_T)
    q_ptrs = q_start_ptr + offs_l_curr[:, None] * stride_ql + offs_d[None, :] * stride_qd
    k_ptrs = k_start_ptr + offs_d[:, None] * stride_kd + offs_t[None, :] * stride_kt
    v_ptrs = v_start_ptr + offs_l_curr[:, None] * stride_vl + offs_d[None, :] * stride_vd
    do_ptrs = do_start_ptr + offs_t[:, None] * stride_do_t + offs_d[None, :] * stride_do_d
    dq = tl.zeros([BLOCK_L, BLOCK_D], dtype=tl.float32)
    dv = tl.zeros([BLOCK_L, BLOCK_D], dtype=tl.float32)
    q = tl.load(q_ptrs, mask=(offs_l_curr[:, None] < seqlen) & (offs_d[None, :] < BLOCK_D), other=0.0)
    v = tl.load(v_ptrs, mask=offs_l_curr[:, None] < seqlen, other=0.0)
    start_t = 0
    while start_t < T:
        offs_t_curr = start_t + tl.arange(0, BLOCK_T)
        k = tl.load(k_ptrs, mask=(offs_t_curr[None, :] < T) & (offs_d[:, None] < BLOCK_D), other=0.0)
        qk = tl.zeros([BLOCK_L, BLOCK_T], dtype=tl.float32)
        qk += tl.dot(q, k, allow_tf32=allow_tf32)
        ab_ptrs = ab_start_ptr + offs_l_curr[:, None] * stride_ab_l + offs_t_curr[None, :] * stride_ab_t
        ab = tl.load(ab_ptrs, mask=(offs_l_curr[:, None] < seqlen) & (offs_t_curr[None, :] < T), other=0.0)
        qk = qk + ab
        qk_mask = (offs_l_curr[:, None] < seqlen) & (offs_t_curr[None, :] < T)
        qk = tl.where(qk_mask, qk, float('-inf'))
        lse_t = tl.load(lse_ptrs + offs_t_curr, mask=offs_t_curr < T, other=float('inf'))
        p = tl.exp(qk - lse_t[None, :])
        p = tl.where(qk_mask, p, 0.0)
        do = tl.load(do_ptrs, mask=offs_t_curr[:, None] < T, other=0.0)
        dv += tl.dot(p, do, allow_tf32=allow_tf32)
        delta = tl.load(delta_ptrs + offs_t_curr, mask=offs_t_curr < T)
        dp = tl.zeros([BLOCK_L, BLOCK_T], dtype=tl.float32)
        dp += tl.trans(tl.dot(do, tl.trans(v), allow_tf32=allow_tf32))
        ds = p * (dp - delta[None, :])
        dbias_ptrs = dbias_start_ptr + offs_l_curr[:, None] * stride_db_l + offs_t_curr[None, :] * stride_db_t
        tl.store(dbias_ptrs, ds, mask=(offs_l_curr[:, None] < seqlen) & (offs_t_curr[None, :] < T))
        dq += tl.dot(ds, tl.trans(k), allow_tf32=allow_tf32)
        k_ptrs += BLOCK_T * stride_kt
        do_ptrs += BLOCK_T * stride_do_t
        start_t += BLOCK_T
    dq_ptrs = dq_start_ptr + offs_l_curr[:, None] * stride_dq_l + offs_d[None, :] * stride_dq_d
    dv_ptrs = dv_start_ptr + offs_l_curr[:, None] * stride_dv_l + offs_d[None, :] * stride_dv_d
    tl.store(dq_ptrs, dq, mask=offs_l_curr[:, None] < seqlen)
    tl.store(dv_ptrs, dv, mask=offs_l_curr[:, None] < seqlen)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
ade66aeb-e4c3-493d-8ecf-bf05180889aa
dropout.py
daemyung/practice-triton
dropout.py
27f727726f1507c8380a1c11751d851c7c4a07ce
0
@staticmethod
@triton.jit
def forward(output_ptr, input_ptr, size, p, seed, block_size: tl.constexpr):
    pid = tl.program_id(0)
    offset = pid * block_size
    input_block_ptr = tl.make_block_ptr(input_ptr, shape=(size,), strides=(1,),
        offsets=(offset,), block_shape=(block_size,), order=(0,))
    output_block_ptr = tl.make_block_ptr(output_ptr, shape=(size,), strides=(1,),
        offsets=(offset,), block_shape=(block_size,), order=(0,))
    offsets = tl.arange(0, block_size) + offset
    random_values = tl.rand(seed, offsets)
    condition = random_values > p
    input = tl.load(input_block_ptr, boundary_check=(0,))
    output = tl.where(condition, input * (1 / (1 - p)), 0.0)
    tl.store(output_block_ptr, output, boundary_check=(0,))
{ "Data Type": [], "Functionality": [ "Elementwise Operations", "Activation Functions" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/dropout.py
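Note: the kernel keeps each element with probability 1 - p and rescales survivors by 1 / (1 - p) (inverted dropout). A hedged launch sketch: `forward` above is a @staticmethod, so it is reached through its enclosing class, whose name is not shown in this record; `Dropout` below is an assumed name for illustration, and p must be strictly less than 1:

import torch
import triton

def dropout(x: torch.Tensor, p: float, seed: int, block_size: int = 1024):
    out = torch.empty_like(x)
    size = x.numel()
    grid = (triton.cdiv(size, block_size),)  # one program per block of elements
    Dropout.forward[grid](out, x, size, p, seed, block_size=block_size)
    return out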
b04ed8dc-c834-4c02-bbc5-5fe077ac9864
gemm_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[
    triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2, 3]
] + [
    triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2]
] + [
    triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 1024, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2, 3]
] + [
    triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2]
] + [
    triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2]
] + [
    triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages=s, num_warps=4) for s in [2]
], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers_batched(a_ptr, b_ptr, c_ptr,
    B: tl.constexpr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
    stride_az: tl.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr,
    stride_bz: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.constexpr,
    stride_cz: tl.constexpr, stride_cm: tl.constexpr, stride_cn: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
    BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    bid = tl.program_id(axis=1)
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + pid % group_size_m
    pid_n = pid % num_pid_in_group // group_size_m
    offset_a = bid.to(tl.int64) * stride_az
    offset_b = bid.to(tl.int64) * stride_bz
    a_block_ptr = tl.make_block_ptr(base=a_ptr + offset_a, shape=(M, K),
        strides=(stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
        block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
    b_block_ptr = tl.make_block_ptr(base=b_ptr + offset_b, shape=(K, N),
        strides=(stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
        block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for _ in range(0, K, BLOCK_SIZE_K):
        a = tl.load(a_block_ptr, boundary_check=(0, 1))
        b = tl.load(b_block_ptr, boundary_check=(0, 1))
        accumulator += tl.dot(a, b)
        a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
        b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
    c = accumulator.to(tl.float32)
    offset_c = bid.to(tl.int64) * stride_cz
    c_block_ptr = tl.make_block_ptr(base=c_ptr + offset_c, shape=(M, N),
        strides=(stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M,
        pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N),
        order=(1, 0))
    tl.store(c_block_ptr, c, boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_benchmark.py
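Note: grid axis 0 enumerates the M-by-N output tiles (with grouped ordering for L2 reuse) and axis 1 enumerates the batch; the 'grf_mode' config knob is specific to the Intel XPU backend. A minimal launch sketch under those assumptions, for contiguous (B, M, K) and (B, K, N) inputs:

import torch
import triton

def batched_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    B, M, K = a.shape
    N = b.shape[-1]
    c = torch.empty((B, M, N), device=a.device, dtype=torch.float32)
    # Block sizes come from the autotuner, so the grid is a lambda over meta.
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE_M']) *
                         triton.cdiv(N, meta['BLOCK_SIZE_N']), B)
    matmul_kernel_with_block_pointers_batched[grid](
        a, b, c, B, M, N, K,
        a.stride(0), a.stride(1), a.stride(2),
        b.stride(0), b.stride(1), b.stride(2),
        c.stride(0), c.stride(1), c.stride(2))
    return c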
76abb677-7e00-4012-bc67-1b2087e786e1
ops.py
srush/triton-autodiff
triton_autodiff/ops.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit
def triton_unbroadcast(array, other):
    l: tl.constexpr = tl.constexpr(shape_l(array.shape))
    ol: tl.constexpr = tl.constexpr(shape_l(other.value))
    for i in tl.static_range(0, l):
        if i >= ol:
            array = tl.sum(array, l - (1 + i))
            array = tl.expand_dims(array, l - (1 + i))
        elif array.shape[l - (1 + i)] > other.value[ol - (1 + i)]:
            array = tl.sum(array, l - (1 + i))
            array = tl.expand_dims(array, l - (1 + i))
    tl.static_assert(tl.constexpr(shape_l(array.shape)) == l)
    return tl.view(array, other.value)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/triton_autodiff/ops.py
8294d9f6-80bb-4440-80cc-6db0df32303a
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK}, num_warps=num_warps,
    num_stages=num_stages) for BK in [32, 64] for num_warps in [1, 2, 4, 8]
    for num_stages in [2, 3, 4]], key=['BC'])
@triton.jit
def chunk_gla_fwd_A_kernel_intra_sub_inter(q, k, g, A, offsets, indices,
    scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr,
    BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC: tl.constexpr,
    USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
    i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    i_i, i_j = i_c // NC, i_c % NC
    if USE_OFFSETS:
        i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    if i_t * BT + i_i * BC >= T:
        return
    if i_i <= i_j:
        return
    b_A = tl.zeros([BC, BC], dtype=tl.float32)
    for i_k in range(tl.cdiv(K, BK)):
        o_k = i_k * BK + tl.arange(0, BK)
        m_k = o_k < K
        if HEAD_FIRST:
            p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
            p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
            p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
            p_gk = tl.make_block_ptr(g + i_bh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
            p_gn = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK)
        else:
            p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
            p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
            p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
            p_gk = tl.make_block_ptr(g + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
            p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK)
        b_gn = tl.load(p_gn, mask=m_k, other=0)
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_g = tl.load(p_g, boundary_check=(0, 1))
        b_qg = b_q * tl.exp(b_g - b_gn[None, :]) * scale
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_gk = tl.load(p_gk, boundary_check=(0, 1))
        b_kg = b_k * tl.exp(b_gn[:, None] - b_gk)
        b_A += tl.dot(b_qg, b_kg)
    if HEAD_FIRST:
        p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
    else:
        p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
    tl.store(p_A, b_A.to(A.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
fe206ade-10f1-48be-9faf-065054bbea19
softmax_split.py
iclementine/optimize_softmax
softmax_split.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit
def softmax_kernel(out_ptr, in_ptr, logz_ptr, M, N, TILE_N: tl.constexpr):
    pid_n = tl.program_id(0)
    pid_m = tl.program_id(1)
    n_offsets = pid_n * TILE_N + tl.arange(0, TILE_N)
    offset = pid_m * N + n_offsets
    mask = n_offsets < N
    inp = tl.load(in_ptr + offset, mask=mask, other=-float('inf')).to(out_ptr.dtype.element_ty)
    logz = tl.load(logz_ptr + pid_m).to(out_ptr.dtype.element_ty)
    out = tl.exp(inp - logz)
    tl.store(out_ptr + offset, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_split.py
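Note: this is the final pass of a split softmax; it assumes the per-row log-normalizer logz (the row-wise log-sum-exp) has already been computed. A minimal launch sketch, assuming a contiguous (M, N) input and computing logz with PyTorch for illustration:

import torch
import triton

def softmax_from_logz(x: torch.Tensor, TILE_N: int = 128) -> torch.Tensor:
    M, N = x.shape
    logz = torch.logsumexp(x, dim=1)      # stand-in for the split reduction pass
    out = torch.empty_like(x)
    grid = (triton.cdiv(N, TILE_N), M)    # tiles along N, one row per pid_m
    softmax_kernel[grid](out, x, logz, M, N, TILE_N=TILE_N)
    return out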
a47ca1c7-56ce-48bc-b2f1-eb0c70e769d9
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/jagged_mean/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
    'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
    itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
    NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_mean_kernel_simple_fused_buffer_then_sum(input_ptr_values,
    input_ptr_offsets, output_ptr, M, MAX_SEQLEN,
    BLOCK_SIZE_RAGGED: tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
    pid = tl.program_id(axis=0)
    pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
    pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
    buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32)
    block_start_m = pid_m * BLOCK_SIZE_M
    offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
    mask_m = offsets_m < M
    ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(input_ptr_offsets + (pid_b + 1))
    ragged_len = ragged_end - ragged_start
    for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
        block_start_ragged = ragged_start + block_pos
        offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
        mask_ragged = offsets_ragged < ragged_end
        idxs = offsets_ragged[:, None] * M + offsets_m
        mask = mask_ragged[:, None] & mask_m
        buffer += tl.load(input_ptr_values + idxs, mask=mask, other=0)
    buffer_sum = tl.sum(buffer, axis=0)
    buffer_view = buffer_sum.reshape((BLOCK_SIZE_M,))
    buffer_view_mean = buffer_view * (1 / ragged_len)
    output_offsets = offsets_m + pid_b * M
    output_mask = output_offsets < M * (pid_b + 1)
    tl.store(output_ptr + output_offsets, buffer_view_mean, mask=output_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py
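Note: the flat grid interleaves the batch index and the M-tile index, so the launch grid must be B * cdiv(M, BLOCK_SIZE_M) programs. A minimal launch sketch, assuming row-major jagged values of shape (sum of sequence lengths, M) and a (B + 1,)-long offsets tensor:

import torch
import triton

def jagged_mean(values: torch.Tensor, offsets: torch.Tensor,
                M: int, max_seqlen: int) -> torch.Tensor:
    B = offsets.numel() - 1
    out = torch.empty((B, M), dtype=torch.float32, device=values.device)
    # BLOCK_SIZE_M is chosen by the autotuner, so the grid is a lambda.
    grid = lambda meta: (B * triton.cdiv(M, meta['BLOCK_SIZE_M']),)
    triton_jagged_mean_kernel_simple_fused_buffer_then_sum[grid](
        values, offsets, out, M, max_seqlen)
    return out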
a1761742-5783-4551-93c3-9a127846b9fc
01-vector-add.py
kiwik/os-version-checker
ai/Triton/scripts/01-vector-add.py
65ebf607e0b4bb26c64a025d13e087200517b78c
0
@triton.autotune(configs=[
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 4096}, num_threads=1),
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 4096}, num_threads=0),
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 8192}, num_threads=0),
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 16384}, num_threads=0),
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 32768}, num_threads=0),
    triton.Config({'TILE_SIZE': 16, 'BLOCK_SIZE': 65536}, num_threads=0)],
    key=['n_elements'])
@triton.jit
def add_kernel_tiled_autotuned(x_ptr, y_ptr, output_ptr, n_elements,
    BLOCK_SIZE: tl.constexpr, TILE_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    for i in range(0, tl.cdiv(BLOCK_SIZE, TILE_SIZE)):
        offsets = block_start + i * TILE_SIZE + tl.arange(0, TILE_SIZE)
        mask = offsets < n_elements
        x = tl.load(x_ptr + offsets, mask=mask)
        y = tl.load(y_ptr + offsets, mask=mask)
        output = x + y
        tl.store(output_ptr + offsets, output, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/kiwik/os-version-checker/blob/65ebf607e0b4bb26c64a025d13e087200517b78c/ai/Triton/scripts/01-vector-add.py
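Note: each program walks its BLOCK_SIZE-element block in TILE_SIZE-wide strips; the num_threads config knob targets the CPU backend of Triton. A minimal launch sketch with an autotuner-driven grid:

import torch
import triton

def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    n = x.numel()
    # BLOCK_SIZE is picked by the autotuner, so size the grid via meta.
    grid = lambda meta: (triton.cdiv(n, meta['BLOCK_SIZE']),)
    add_kernel_tiled_autotuned[grid](x, y, out, n)
    return out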
bbab54f5-e0f5-45d1-a33b-6925fcd3c225
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/common/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None,
    'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'] is not None,
    'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None,
    'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps)
    for num_warps in [1, 2, 4]], key=['BK', 'BV', 'USE_GK', 'USE_GV', 'USE_G'])
@triton.jit
def fused_recurrent_bwd_kernel(q, k, v, g, gk, gv, h0, do, dq, dk, dv, dht,
    dh0, offsets, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr,
    K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
    REVERSE: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr,
    USE_GV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
    STORE_INITIAL_STATE_GRADIENT: tl.constexpr,
    USE_FINAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.constexpr,
    HEAD_FIRST: tl.constexpr):
    i_v, i_k, i_nh = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.int64), tl.program_id(2).to(tl.int64)
    i_n, i_h = i_nh // H, i_nh % H
    if USE_OFFSETS:
        bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets + i_n + 1).to(tl.int64)
        all = T
        T = eos - bos
    else:
        bos, eos = i_n * T, i_n * T + T
        all = B * T
    if HEAD_FIRST:
        p_k = k + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        p_v = v + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + i_v * BV + tl.arange(0, BV)
        p_do = do + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + i_v * BV + tl.arange(0, BV)
        p_dq = dq + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        if USE_G:
            p_g = g + i_nh * T + (T - 1 if REVERSE else 0)
        if USE_GK:
            p_gk = gk + i_nh * T * K + ((T - 1) * K if REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + i_nh * T * V + ((T - 1) * V if REVERSE else 0) + i_v * BV + tl.arange(0, BV)
    else:
        p_k = k + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_v = v + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        p_do = do + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        p_dq = dq + (i_v * all + bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        if USE_G:
            p_g = g + (bos + (T - 1 if REVERSE else 0)) * H + i_h
        if USE_GK:
            p_gk = gk + (bos + (T - 1 if REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + (bos + (T - 1 if REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
    mask_k = i_k * BK + tl.arange(0, BK) < K
    mask_v = i_v * BV + tl.arange(0, BV) < V
    mask_h = mask_k[:, None] & mask_v[None, :]
    b_h = tl.zeros([BK, BV], dtype=tl.float32)
    if USE_INITIAL_STATE:
        p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]) * V + (i_v * BV + tl.arange(0, BV)[None, :])
        b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
    for _ in range(0, T):
        b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
        b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
        if USE_G:
            b_g = tl.load(p_g).to(tl.float32)
            b_h = b_h * tl.exp(b_g)
        if USE_GK:
            b_gk = tl.load(p_gk, mask=mask_k, other=0).to(tl.float32)
            b_h = b_h * tl.exp(b_gk[:, None])
        if USE_GV:
            b_gv = tl.load(p_gv, mask=mask_v, other=0).to(tl.float32)
            b_h = b_h * tl.exp(b_gv[None, :])
        b_h += b_k[:, None] * b_v[None, :]
        b_dq = b_h * b_do[None, :]
        b_dq = tl.sum(b_dq, axis=1) * scale
        tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), mask=mask_k)
        p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
        p_do += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
        p_dq += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        if USE_G:
            p_g += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H)
        if USE_GK:
            p_gk += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        if USE_GV:
            p_gv += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
    tl.debug_barrier()
    if HEAD_FIRST:
        p_q = q + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        p_k = k + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        p_v = v + i_nh * T * V + ((T - 1) * V if not REVERSE else 0) + i_v * BV + tl.arange(0, BV)
        p_do = do + i_nh * T * V + ((T - 1) * V if not REVERSE else 0) + i_v * BV + tl.arange(0, BV)
        p_dk = dk + (i_v * B * H + i_nh) * T * K + ((T - 1) * K if not REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        p_dv = dv + (i_k * B * H + i_nh) * T * V + ((T - 1) * V if not REVERSE else 0) + i_v * BV + tl.arange(0, BV)
        if USE_G:
            p_g = g + i_nh * T + (T - 1 if not REVERSE else 0)
        if USE_GK:
            p_gk = gk + i_nh * T * K + ((T - 1) * K if not REVERSE else 0) + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + i_nh * T * V + ((T - 1) * V if not REVERSE else 0) + i_v * BV + tl.arange(0, BV)
    else:
        p_q = q + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_k = k + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_v = v + (bos + (T - 1 if not REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        p_do = do + (bos + (T - 1 if not REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        p_dk = dk + (i_v * all + bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_dv = dv + (i_k * all + bos + (T - 1 if not REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        if USE_G:
            p_g = g + (bos + (T - 1 if not REVERSE else 0)) * H + i_h
        if USE_GK:
            p_gk = gk + (bos + (T - 1 if not REVERSE else 0)) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + (bos + (T - 1 if not REVERSE else 0)) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
    b_dh = tl.zeros([BK, BV], dtype=tl.float32)
    if USE_FINAL_STATE_GRADIENT:
        p_dht = dht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]) * V + (i_v * BV + tl.arange(0, BV)[None, :])
        b_dh += tl.load(p_dht, mask=mask_h, other=0).to(tl.float32)
    for _ in range(T):
        b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
        b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
        b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask_v, other=0).to(tl.float32)
        b_dh += b_q[:, None] * b_do[None, :]
        b_dk = tl.sum(b_dh * b_v[None, :], axis=1)
        b_dv = tl.sum(b_dh * b_k[:, None], axis=0)
        if USE_G:
            b_g = tl.load(p_g).to(tl.float32)
            b_dh *= tl.exp(b_g)
        if USE_GK:
            b_gk = tl.load(p_gk, mask=mask_k, other=0).to(tl.float32)
            b_dh *= tl.exp(b_gk)[:, None]
        if USE_GV:
            b_gv = tl.load(p_gv, mask=mask_v, other=0).to(tl.float32)
            b_dh *= tl.exp(b_gv)[None, :]
        tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), mask=mask_k)
        tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), mask=mask_v)
        p_q += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
        p_k += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
        p_v += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
        p_do += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
        p_dk += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
        p_dv += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
        if USE_G:
            p_g += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H)
        if USE_GK:
            p_gk += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * K
        if USE_GV:
            p_gv += (1 if REVERSE else -1) * (1 if HEAD_FIRST else H) * V
    if STORE_INITIAL_STATE_GRADIENT:
        p_dh0 = dh0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]) * V + (i_v * BV + tl.arange(0, BV)[None, :])
        tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), mask=mask_h)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/fused_recurrent.py
a8445512-cbd6-47e9-9aaf-77d03f212f4e
complex_rnn.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/complex_rnn.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit
def fwd_sequential_scan_complex(v_real, v_imag, decay_real, decay_imag,
    hidden_real, hidden_imag, B, L, C, BLOCK_M: tl.constexpr):
    offset_b = tl.program_id(0)
    if offset_b >= B:
        return
    offset_n = tl.program_id(1)
    ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + offset_n * BLOCK_M
    h_real = tl.zeros([BLOCK_M], dtype=tl.float32)
    h_imag = tl.zeros([BLOCK_M], dtype=tl.float32)
    for _ in range(L):
        x_real = tl.load(v_real + ptr).to(tl.float32)
        x_imag = tl.load(v_imag + ptr).to(tl.float32)
        f_real = tl.load(decay_real + ptr).to(tl.float32)
        f_imag = tl.load(decay_imag + ptr).to(tl.float32)
        h_real_new = h_real * f_real - h_imag * f_imag + x_real
        h_imag_new = h_real * f_imag + h_imag * f_real + x_imag
        tl.store(hidden_real + ptr, h_real_new.to(hidden_real.dtype.element_ty))
        tl.store(hidden_imag + ptr, h_imag_new.to(hidden_imag.dtype.element_ty))
        h_real = h_real_new
        h_imag = h_imag_new
        ptr += C
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/complex_rnn.py
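Note: the recurrence h_t = f_t * h_{t-1} + x_t is complex-valued, carried as separate real/imaginary planes; each program scans one BLOCK_M-wide channel slice over the full length L. A minimal launch sketch, assuming contiguous (B, L, C) tensors and C divisible by BLOCK_M:

import torch

def fwd_scan_complex(v_real, v_imag, decay_real, decay_imag, BLOCK_M=128):
    B, L, C = v_real.shape
    assert C % BLOCK_M == 0  # each program owns a whole channel slice
    h_real = torch.empty_like(v_real)
    h_imag = torch.empty_like(v_imag)
    grid = (B, C // BLOCK_M)
    fwd_sequential_scan_complex[grid](v_real, v_imag, decay_real, decay_imag,
                                      h_real, h_imag, B, L, C, BLOCK_M=BLOCK_M)
    return h_real, h_imag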
7518b7e9-9f9a-4687-8c6c-2c18f6325875
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None,
    'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
    'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps)
    for num_warps in [1, 2, 4, 8, 16]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_delta_rule_fwd_kernel_h(k, v, d, v_new, h, h0, ht, offsets,
    chunk_offsets, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr,
    V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr,
    BV: tl.constexpr, NT: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
    STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr,
    HEAD_FIRST: tl.constexpr):
    i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_n, i_h = i_nh // H, i_nh % H
    if USE_OFFSETS:
        bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32)
        T = eos - bos
        NT = tl.cdiv(T, BT)
        boh = tl.load(chunk_offsets + i_n).to(tl.int32)
    else:
        bos, eos = i_n * T, i_n * T + T
        NT = tl.cdiv(T, BT)
        boh = i_n * NT
    b_h = tl.zeros([BK, BV], dtype=tl.float32)
    if USE_INITIAL_STATE:
        p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32)
    for i_t in range(NT):
        if HEAD_FIRST:
            p_h = tl.make_block_ptr(h + (i_nh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        else:
            p_h = tl.make_block_ptr(h + ((boh + i_t) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
        b_hc = tl.zeros([BK, BV], dtype=tl.float32)
        for i_c in range(tl.cdiv(min(BT, T - i_t * BT), BC)):
            if HEAD_FIRST:
                p_k = tl.make_block_ptr(k + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_d = tl.make_block_ptr(d + i_nh * T * K, (T, K), (K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
                p_v = tl.make_block_ptr(v + i_nh * T * V, (T, V), (V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_v_new = tl.make_block_ptr(v_new + i_nh * T * V, (T, V), (V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
            else:
                p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_d = tl.make_block_ptr(d + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
                p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_v_new = tl.make_block_ptr(v_new + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
            b_k = tl.load(p_k, boundary_check=(0, 1))
            b_d = tl.load(p_d, boundary_check=(0, 1))
            b_v = tl.load(p_v, boundary_check=(0, 1))
            b_v -= tl.dot(b_d, b_h.to(b_k.dtype))
            tl.store(p_v_new, b_v.to(p_v_new.dtype.element_ty), boundary_check=(0, 1))
            b_hc += tl.dot(b_k, b_v.to(b_k.dtype), allow_tf32=False)
        b_h += b_hc
    if STORE_FINAL_STATE:
        p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py
09a0bee9-c6e5-453a-99e4-ba49fd05641a
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None,
    'USE_INITIAL_STATE': lambda args: args['dh0'] is not None,
    'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps)
    for num_warps in [1, 2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_gated_delta_rule_bwd_kernel_dhu(q, k, d, g, dht, dh0, do, dh, dv,
    dv2, offsets, c_offsets, scale, T: tl.constexpr, H: tl.constexpr,
    K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr,
    BK: tl.constexpr, BV: tl.constexpr,
    USE_FINAL_STATE_GRADIENT: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
    USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
    i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_n, i_h = i_nh // H, i_nh % H
    if USE_OFFSETS:
        bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32)
        T = eos - bos
        NT = tl.cdiv(T, BT)
        boh = tl.load(c_offsets + i_n).to(tl.int32)
    else:
        bos, eos = i_n * T, i_n * T + T
        NT = tl.cdiv(T, BT)
        boh = i_n * NT
    b_dh = tl.zeros([BK, BV], dtype=tl.float32)
    if USE_FINAL_STATE_GRADIENT:
        p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        b_dh += tl.load(p_dht, boundary_check=(0, 1))
    for i_t in range(NT - 1, -1, -1):
        if HEAD_FIRST:
            p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        else:
            p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
        b_dh_tmp = tl.zeros([BK, BV], dtype=tl.float32)
        last_idx = min((i_t + 1) * BT, T) - 1
        if HEAD_FIRST:
            bg_last = tl.load(g + i_nh * T + last_idx)
        else:
            bg_last = tl.load(g + (bos + last_idx) * H + i_h)
        for i_c in range(tl.cdiv(BT, BC) - 1, -1, -1):
            if HEAD_FIRST:
                p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_k = tl.make_block_ptr(k + i_nh * T * K, (T, K), (K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
                p_d = tl.make_block_ptr(d + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_dv = tl.make_block_ptr(dv + i_nh * T * V, (T, V), (V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_g = tl.make_block_ptr(g + i_nh * T, (T,), (1,), (i_t * BT + i_c * BC,), (BC,), (0,))
                p_dv2 = tl.make_block_ptr(dv2 + i_nh * T * V, (T, V), (V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
            else:
                p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
                p_d = tl.make_block_ptr(d + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
                p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
                p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT + i_c * BC,), (BC,), (0,))
                p_dv2 = tl.make_block_ptr(dv2 + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
            b_g = tl.load(p_g, boundary_check=(0,))
            b_q = tl.load(p_q, boundary_check=(0, 1))
            b_q = (b_q * scale * tl.exp(b_g)[None, :]).to(b_q.dtype)
            b_k = tl.load(p_k, boundary_check=(0, 1))
            b_d = tl.load(p_d, boundary_check=(0, 1))
            b_k = (b_k * tl.exp(bg_last - b_g)[:, None]).to(b_k.dtype)
            b_d = (b_d * tl.exp(b_g)[None, :]).to(b_d.dtype)
            b_do = tl.load(p_do, boundary_check=(0, 1))
            b_dv = tl.load(p_dv, boundary_check=(0, 1))
            b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False)
            tl.store(p_dv2, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
            b_dh_tmp += tl.dot(b_q, b_do.to(b_q.dtype), allow_tf32=False)
            b_dh_tmp -= tl.dot(b_d, b_dv.to(b_q.dtype), allow_tf32=False)
        b_dh *= tl.exp(bg_last)
        b_dh += b_dh_tmp
    if USE_INITIAL_STATE:
        p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
        tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py
2b6f63b8-ff56-4ab9-8423-39194f903638
dynamic_quant.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/dynamic_quant.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.autotune(configs=_get_autotune_configs(), key=['M', 'N'])
@triton.jit
def _triton_dynamic_quantize_kernel(output_ptr, input_ptr, scale_ptr,
    stride_outputm, stride_outputn, stride_inputm, stride_inputn,
    n_elements, M: tl.constexpr, N: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = tl.arange(0, N)
    mask = offsets < n_elements
    input_ptrs = input_ptr + pid * stride_inputm + offsets
    input_vals = tl.load(input_ptrs, mask=mask, other=1e-06)
    abs_max_f = tl.reduce(input_vals, 0, _abs_max)
    dynamic_per_token_scale = 127.0 / abs_max_f
    # Round-half-away-from-zero before the int8 cast truncates.
    precision_mask = tl.where(input_vals > 0, 0.5, -0.5)
    output_vals = (input_vals * dynamic_per_token_scale + precision_mask).to(tl.int8)
    output_ptrs = output_ptr + pid * stride_outputm + offsets
    tl.store(output_ptrs, output_vals, mask=mask)
    tl.store(scale_ptr + pid, abs_max_f / 127.0)
{ "Data Type": [ "fp32", "int8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/dynamic_quant.py
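Note: one program quantizes one row to int8 with a per-token scale abs_max / 127. A minimal launch sketch, assuming a 2D (M, K) float input and that N (the constexpr tile width, a power of two covering K) is padded with next_power_of_2:

import torch
import triton

def dynamic_quantize(x: torch.Tensor):
    M, K = x.shape
    out = torch.empty((M, K), dtype=torch.int8, device=x.device)
    scales = torch.empty(M, dtype=x.dtype, device=x.device)
    N = triton.next_power_of_2(K)  # tl.arange needs a power-of-two width
    _triton_dynamic_quantize_kernel[(M,)](
        out, x, scales,
        out.stride(0), out.stride(1), x.stride(0), x.stride(1),
        K, M=M, N=N)
    return out, scales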
cfb5d0c5-f872-468f-8407-d69c5eed5e51
05-layer-norm.py
triton-lang/triton
python/tutorials/05-layer-norm.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit
def _layer_norm_fwd_fused(X, Y, W, B, Mean, Rstd, stride, N, eps,
    BLOCK_SIZE: tl.constexpr):
    row = tl.program_id(0)
    Y += row * stride
    X += row * stride
    mean = 0
    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
        _mean += a
    mean = tl.sum(_mean, axis=0) / N
    _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
        x = tl.where(cols < N, x - mean, 0.0)
        _var += x * x
    var = tl.sum(_var, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    tl.store(Mean + row, mean)
    tl.store(Rstd + row, rstd)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        w = tl.load(W + cols, mask=mask)
        b = tl.load(B + cols, mask=mask)
        x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32)
        x_hat = (x - mean) * rstd
        y = x_hat * w + b
        tl.store(Y + cols, y, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/05-layer-norm.py
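Note: because the kernel loops over the row in BLOCK_SIZE chunks, any power-of-two block works even when N exceeds it. A minimal launch sketch for a contiguous (M, N) input, one program per row:

import torch
import triton

def layer_norm_fwd(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor,
                   eps: float = 1e-5):
    M, N = x.shape
    y = torch.empty_like(x)
    mean = torch.empty(M, dtype=torch.float32, device=x.device)
    rstd = torch.empty(M, dtype=torch.float32, device=x.device)
    BLOCK_SIZE = min(triton.next_power_of_2(N), 4096)
    _layer_norm_fwd_fused[(M,)](x, y, weight, bias, mean, rstd,
                                x.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE)
    return y, mean, rstd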
790d51bc-f5f5-494f-b80a-9596d37fd44f
lao.py
MayDomine/Burst-Attention
burst_attn/lao.py
b088c554072935074ea9c643de5ee363be5ab1f6
0
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0,
    'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0,
    'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM']})
@triton.jit
def _fwd_kernel(Q, K, V, Bias, Out, M_in, Lse_in, O_in, Lse, M_out, TMP,
    softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh,
    stride_kn, stride_vb, stride_vh, stride_vn, stride_bb, stride_bh,
    stride_bm, stride_ob, stride_oh, stride_om, nheads, seqlen_q, seqlen_k,
    seqlen_q_rounded, headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
    k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
    v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
    if BIAS_TYPE == 'vector':
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
    elif BIAS_TYPE == 'matrix':
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
    t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
    lin_ptrs = Lse_in + off_hb * seqlen_q_rounded + offs_m
    acc_o_ptrs = O_in + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
    lse_i = tl.load(lin_ptrs)
    m_ptrs = M_in + off_hb * seqlen_q_rounded + offs_m
    m_i = tl.load(m_ptrs)
    acc_o = tl.load(acc_o_ptrs)
    if EVEN_M & EVEN_N:
        if EVEN_HEADDIM:
            q = tl.load(q_ptrs)
        else:
            q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
    elif EVEN_HEADDIM:
        q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
    else:
        q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0)
    end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
    for start_n in range(0, end_n, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                k = tl.load(k_ptrs + start_n * stride_kn)
            else:
                k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
        elif EVEN_HEADDIM:
            k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
        else:
            k = tl.load(k_ptrs + start_n * stride_kn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk += tl.dot(q, k, trans_b=True)
        if not EVEN_N:
            qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float('-inf'))
        if IS_CAUSAL:
            qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float('-inf'))
        if BIAS_TYPE != 'none':
            if BIAS_TYPE == 'vector':
                if EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0).to(tl.float32)
                bias = bias[None, :]
            elif BIAS_TYPE == 'matrix':
                if EVEN_M & EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(b_ptrs + start_n, mask=(offs_m[:, None] < seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k), other=0.0).to(tl.float32)
            qk = qk * softmax_scale + bias
            m_ij = tl.maximum(tl.max(qk, 1), lse_i)
            p = tl.exp(qk - m_ij[:, None])
        else:
            m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
            p = tl.exp(qk * softmax_scale - m_ij[:, None])
        l_ij = tl.sum(p, 1)
        acc_o_scale = tl.exp(m_i - m_ij)
        tl.store(t_ptrs, acc_o_scale)
        acc_o_scale = tl.load(t_ptrs)
        acc_o = acc_o * acc_o_scale[:, None]
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                v = tl.load(v_ptrs + start_n * stride_vn)
            else:
                v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
        elif EVEN_HEADDIM:
            v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k, other=0.0)
        else:
            v = tl.load(v_ptrs + start_n * stride_vn, mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0)
        p = p.to(v.dtype)
        acc_o += tl.dot(p, v)
        m_i = m_ij
        l_i_new = tl.exp(lse_i - m_ij) + l_ij
        lse_i = m_ij + tl.log(l_i_new)
    start_m = tl.program_id(0)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
    m_ptrs = M_out + off_hb * seqlen_q_rounded + offs_m
    tl.store(m_ptrs, m_i)
    tl.store(lse_ptrs, lse_i)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
    if EVEN_M:
        if EVEN_HEADDIM:
            tl.store(out_ptrs, acc_o)
        else:
            tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
    elif EVEN_HEADDIM:
        tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
    else:
        tl.store(out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/lao.py
5a8dca50-9d6e-4735-be6c-03ed94a672f6
positional_embedding.py
sjjeong94/ai_compiler_study
aicom/positional_embedding.py
e87284aab74acab704e2d192190be446e328e1c6
0
@triton.jit
def rope_fw(t_ptr, f_ptr, o_ptr, t_s_stride, f_s_stride, o_s_stride, d, d2,
    BLOCK_SIZE: tl.constexpr):
    s_idx = tl.program_id(0)
    bh_idx = tl.program_id(1)
    t_start_ptr = t_ptr + s_idx * t_s_stride
    f_start_ptr = f_ptr + s_idx * f_s_stride
    o_start_ptr = o_ptr + s_idx * o_s_stride
    d2_half = d2 // 2
    col_offsets = tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < d2_half
    f0_ptrs = f_start_ptr + col_offsets
    f1_ptrs = f_start_ptr + col_offsets + d2_half
    f0 = tl.load(f0_ptrs, mask=mask, other=0.0)
    cos0 = tl.cos(f0)
    sin0 = tl.sin(f0)
    f1 = tl.load(f1_ptrs, mask=mask, other=0.0)
    cos1 = tl.cos(f1)
    sin1 = tl.sin(f1)
    t0_ptrs = t_start_ptr + bh_idx * d + col_offsets
    t1_ptrs = t_start_ptr + bh_idx * d + col_offsets + d2_half
    t0 = tl.load(t0_ptrs, mask=mask, other=0.0)
    t1 = tl.load(t1_ptrs, mask=mask, other=0.0)
    o0 = t0 * cos0 - t1 * sin0
    o1 = t1 * cos1 + t0 * sin1
    o0_ptrs = o_start_ptr + bh_idx * d + col_offsets
    o1_ptrs = o_start_ptr + bh_idx * d + col_offsets + d2_half
    tl.store(o0_ptrs, o0, mask=mask)
    tl.store(o1_ptrs, o1, mask=mask)
    if d2 < d:
        remainder = d - d2
        q, r = remainder // BLOCK_SIZE, remainder % BLOCK_SIZE
        for i in range(q):
            t2_ptrs = t_start_ptr + bh_idx * d + col_offsets + d2 + BLOCK_SIZE * i
            o2_ptrs = o_start_ptr + bh_idx * d + col_offsets + d2 + BLOCK_SIZE * i
            t2 = tl.load(t2_ptrs)
            tl.store(o2_ptrs, t2)
        if r > 0:
            t2_ptrs = t_start_ptr + bh_idx * d + col_offsets + d2 + BLOCK_SIZE * q
            o2_ptrs = o_start_ptr + bh_idx * d + col_offsets + d2 + BLOCK_SIZE * q
            mask = col_offsets < r
            t2 = tl.load(t2_ptrs, mask=mask, other=0.0)
            tl.store(o2_ptrs, t2, mask=mask)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Transposed Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sjjeong94/ai_compiler_study/blob/e87284aab74acab704e2d192190be446e328e1c6/aicom/positional_embedding.py
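Note: rotation is applied to the first d2 channels and the remaining d - d2 channels are copied through unchanged. A minimal launch sketch, assuming t is a contiguous (seq, batch*heads, d) tensor (so t.stride(1) == d, as the kernel's bh_idx * d indexing requires) and freqs is (seq, d2):

import torch
import triton

def rope_forward(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
    s, bh, d = t.shape
    d2 = freqs.shape[-1]
    o = torch.empty_like(t)
    BLOCK_SIZE = triton.next_power_of_2(d2 // 2)
    rope_fw[(s, bh)](t, freqs, o, t.stride(0), freqs.stride(0), o.stride(0),
                     d, d2, BLOCK_SIZE=BLOCK_SIZE)
    return o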
fca61cfe-da61-4c1c-bf97-ac5bad29149b
test_addptr.py
microsoft/triton-shared
python/examples/test_addptr.py
d5b7bee73b5b12f09906e88f300c0d83b0022753
0
@triton.jit
def addptr(in0, out0):
    for i in range(0, 10, 2):
        in1 = in0 + 1 + i
        in2 = in1 + 1
        out1 = out0 + 1 + i
        out2 = out1 + 1
        a1 = tl.load(in1)
        a2 = tl.load(in2)
        tl.store(out1, a1)
        tl.store(out2, a2)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/microsoft/triton-shared/blob/d5b7bee73b5b12f09906e88f300c0d83b0022753/python/examples/test_addptr.py
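Note: this is a pointer-arithmetic test case, not a performance kernel: a single program loads and stores two scalars per loop iteration, copying elements 1 through 10. A usage sketch; device placement depends on the backend (this repo targets triton-shared, whose examples can run outside CUDA):

import torch

src = torch.arange(16, dtype=torch.float32)
dst = torch.zeros_like(src)
addptr[(1,)](src, dst)           # one program, scalar loads/stores
assert torch.equal(dst[1:11], src[1:11])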
5b77e44e-f459-4f02-bbc2-d49523a36ffa
gemm_a16w4.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/gemm_a16w4.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit
def _triton_gemm_a16w4_sub_channel_kernel(A, B, C, scale_b, bias,
    zero_points, M, N, K, rescale_m, rescale_n, rescale_k, stride_am,
    stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, stride_zpk,
    stride_zpn, stride_scalek, stride_scalen, add_bias: tl.constexpr,
    add_zero_points: tl.constexpr, BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr,
    SPLIT_K: tl.constexpr):
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    grid_m = tl.cdiv(M, BLOCK_M)
    grid_n = tl.cdiv(N, BLOCK_N)
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + pid % group_size
    pid_n = pid % width // group_size
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rbn[:, None] * stride_bn + rk[None, :] * stride_bk)
    acc_l = tl.zeros((BLOCK_N, BLOCK_M), dtype=tl.float32)
    acc_h = tl.zeros((BLOCK_N, BLOCK_M), dtype=tl.float32)
    _A0 = tl.zeros((1, 1), dtype=A.dtype.element_ty)
    _B0 = tl.zeros((1, 1), dtype=B.dtype.element_ty)
    if add_zero_points:
        zero_points_offs = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
        _ZERO_POINT0 = tl.zeros([1], dtype=zero_points.dtype.element_ty)
    scale_offs = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
    _SCALE0 = tl.zeros([1], dtype=scale_b.dtype.element_ty)
    for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
        k_remaining = K - k * (BLOCK_K * SPLIT_K)
        b_int4_two = tl.load(B, mask=rk[None, :] < k_remaining, other=_B0)
        b_int4_l = b_int4_two.__lshift__(4).to(tl.int8).__rshift__(4)
        b_int4_h = b_int4_two.__rshift__(4)
        if add_zero_points:
            zero_points_ptrs = (zero_points + k * SPLIT_K * stride_zpk +
                pid_z * stride_zpk + zero_points_offs)
            zero_points_vals = tl.load(zero_points_ptrs,
                mask=zero_points_offs < 2 * N, other=_ZERO_POINT0)
            zero_points_vals = tl.reshape(zero_points_vals, (BLOCK_N, 2))
            zp_l, zp_h = tl.split(zero_points_vals)
            b_int4_l -= zp_l[:, None]
            b_int4_h -= zp_h[:, None]
        scales_val = tl.load(scale_b + k * SPLIT_K * stride_scalek +
            pid_z * stride_scalek + scale_offs, mask=scale_offs < 2 * N,
            other=_SCALE0)
        scales_val = tl.reshape(scales_val, (BLOCK_N, 2))
        scale_l, scale_h = tl.split(scales_val)
        b_int4_l = b_int4_l * scale_l[:, None]
        b_int4_h = b_int4_h * scale_h[:, None]
        a = tl.load(A, mask=rk[None, :] < k_remaining, other=_A0)
        a = tl.trans(a)
        acc_l += tl.dot(b_int4_l, a, out_dtype=tl.float32, allow_tf32=True)
        acc_h += tl.dot(b_int4_h, a, out_dtype=tl.float32, allow_tf32=True)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk
    acc_l = tl.trans(acc_l)
    acc_h = tl.trans(acc_h)
    acc = tl.interleave(acc_l, acc_h)
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
    mask = (rm < M)[:, None] & (rn < 2 * N)[None, :]
    if add_bias:
        offs_bias = pid_n * BLOCK_N * 2 + tl.arange(0, 2 * BLOCK_N)
        bias_ptrs = bias + offs_bias
        _BIAS0 = tl.zeros([1], dtype=bias.dtype.element_ty)
        bias_vals = tl.load(bias_ptrs, mask=offs_bias < 2 * N, other=_BIAS0)
        if pid_z == 0:
            acc += bias_vals[None, :]
    if SPLIT_K == 1:
        tl.store(C + rm[:, None] * stride_cm + rn[None, :], acc, mask=mask)
    else:
        tl.atomic_add(C + rm[:, None] * stride_cm + rn[None, :], acc, mask=mask)
{ "Data Type": [ "fp32", "int8" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w4.py
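A minimal host-side sketch (assuming PyTorch; values are illustrative) of the sign-extending int4 unpack that the kernel performs with __lshift__(4).to(tl.int8).__rshift__(4): each byte packs two 4-bit weights, the low nibble is recovered by shifting left then arithmetic-shifting right, and the high nibble by a plain arithmetic right shift.

import torch

# Hypothetical packed int4 weights: one int8 byte holds two 4-bit values.
packed = torch.tensor([0x2F, 0x81], dtype=torch.uint8).view(torch.int8)

low = (packed << 4) >> 4   # low nibble, sign-extended: 0xF -> -1, 0x1 -> 1
high = packed >> 4         # high nibble, sign-extended: 0x2 -> 2, 0x8 -> -8

print(low.tolist(), high.tolist())  # [-1, 1] [2, -8]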
e9b25d3b-8143-4146-a7a0-9427ecb0c33d
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_jagged_bmm_kernel(a_ptr, a_offset_ptr, b_ptr, c_ptr, M, N, stride_am, stride_ak, stride_bk, stride_bn, stride_cl, stride_cm, stride_cn, max_seq_len, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl. constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr): """ Kernel for computing the matmul C = A x B. A has shape (M, sum_B(Ki)), B has shape (sum_B(Ki), N) and C has shape (B, M, N) """ pid_batch = tl.program_id(0) pid = tl.program_id(1) begin = tl.load(a_offset_ptr + pid_batch) end = tl.load(a_offset_ptr + pid_batch + 1) K = end - begin K = tl.minimum(K, max_seq_len) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) pid_m = pid // num_pid_n pid_n = pid % num_pid_n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) + begin * stride_ak b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) + begin * stride_bk c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, K, BLOCK_SIZE_K): updated_offset = k + offs_k a = tl.load(a_ptrs, mask=(updated_offset[None, :] < K) & (offs_am[:, None] < M), other=0.0) b = tl.load(b_ptrs, mask=(updated_offset[:, None] < K) & (offs_bn[ None, :] < N), other=0.0) c += tl.dot(a, b, allow_tf32=allow_tf32) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) mask = (offs_m[:, None] < M) & (offs_n[None, :] < N) c_ptrs = c_ptr + stride_cm * offs_m[:, None] + stride_cn * offs_n[None, : ] + stride_cl * pid_batch tl.store(c_ptrs, c, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
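A hedged host-side launch sketch for this kernel (the wrapper name and tile sizes are illustrative, not the FBGEMM API; it assumes the kernel above is in scope): the grid has one program per (batch, M-tile x N-tile) pair, and a_offsets delimits each batch's jagged K.

import torch
import triton

def jagged_jagged_bmm(a, a_offsets, b, M, N, max_seq_len):
    # a: (M, sum_B(K_i)), b: (sum_B(K_i), N), a_offsets: (B + 1,) prefix sums.
    B = a_offsets.numel() - 1
    c = torch.empty((B, M, N), device=a.device, dtype=torch.float32)
    BLOCK_M, BLOCK_N, BLOCK_K = 32, 32, 32  # illustrative tile sizes
    grid = (B, triton.cdiv(M, BLOCK_M) * triton.cdiv(N, BLOCK_N))
    jagged_jagged_bmm_kernel[grid](
        a, a_offsets, b, c, M, N,
        a.stride(0), a.stride(1), b.stride(0), b.stride(1),
        c.stride(0), c.stride(1), c.stride(2),
        max_seq_len, allow_tf32=False,
        BLOCK_SIZE_M=BLOCK_M, BLOCK_SIZE_N=BLOCK_N, BLOCK_SIZE_K=BLOCK_K,
    )
    return c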
a5c53b73-1cef-4764-bfc4-9437dd79e4c5
bwd_split_kernel.py
ROCm/aotriton
test/bwd_split_kernel.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def bwd_kernel_dk_dv(Q, K, V, sm_scale, Out, DO, DK, DV, L, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, seqlen_q, seqlen_k, dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl. constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr, ENABLE_DROPOUT: tl.constexpr): start_m = tl.program_id(0) * BLOCK_N off_h = tl.program_id(1) off_z = tl.program_id(2) num_h = tl.num_programs(1) num_z = tl.num_programs(2) offs_m = start_m + tl.arange(0, BLOCK_N) offs_n = tl.arange(0, BLOCK_M) q_offset = off_h * stride_qh + off_z * stride_qz Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(seqlen_q, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) k_offset = off_h * stride_kh + off_z * stride_kz K_block_ptr = tl.make_block_ptr(base=K + k_offset, shape=(BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn), offsets=(0, start_m), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) v_offset = off_h * stride_vh + off_z * stride_vz VT_block_ptr = tl.make_block_ptr(base=V + v_offset, shape=(BLOCK_DMODEL, seqlen_k), strides=(stride_vn, stride_vk), offsets=(0, start_m), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) do_offset = q_offset DO_block_ptr = tl.make_block_ptr(base=DO + do_offset, shape=(seqlen_q, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) off_zh = off_z * num_h + off_h * 1 D_ptrs = D + off_zh * seqlen_q l_ptrs = L + off_zh * seqlen_q qk_scale = sm_scale * 1.44269504 k = tl.load(K_block_ptr) k = (k * qk_scale).to(K_block_ptr.type.element_ty) vt = tl.load(VT_block_ptr) dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) lo = start_m // BLOCK_M * BLOCK_M if CAUSAL else 0 hi = seqlen_q Q_block_ptr = tl.advance(Q_block_ptr, (lo, 0)) DO_block_ptr = tl.advance(DO_block_ptr, (lo, 0)) batch_philox_offset = philox_offset_base + off_zh * seqlen_q * seqlen_k """ K1 K2 (d)V dO Q1 qk11 qk12 (d)v1 dO1 Q2 qk21 qk22 (d)v2 dO2 QK: (seqlen_q, seqlen_k) dO: (seqlen_q, hdim) dV: (seqlen_k, hdim) dV = (QK)^T dO dV1 = qk11 dO1 + qk21 dO2 = q1 k1 dO1 + q2 k1 dO2 dV2 = qk12 dO1 + qk22 dO2 = q1 k2 dO1 + q2 k2 dO2 ~~~~~ = 0 start_m: select k and dV start_n: select q and dO """ for start_n in range(lo, hi, BLOCK_M): offs_m_curr = offs_n[:, None] + start_n q = tl.load(Q_block_ptr) do = tl.load(DO_block_ptr) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, q, k) if CAUSAL: qk = tl.where(offs_m_curr >= offs_m[None, :], qk, float('-inf')) l_i = tl.load(l_ptrs + offs_m_curr) p = tl.math.exp2(qk - l_i) if ENABLE_DROPOUT: philox_offset = batch_philox_offset + start_n * seqlen_k + start_m keep = dropout_mask(philox_seed, philox_offset, dropout_p, BLOCK_M, BLOCK_N, seqlen_k) if BLOCK_M == 1: dv += tl.where(keep, p / (1 - dropout_p), 0.0).to(Q.dtype. 
element_ty) * do else: dv += tl.dot(tl.where(tl.trans(keep), tl.trans(p) / (1 - dropout_p), 0.0).to(Q.dtype.element_ty), do) elif BLOCK_M == 1: dv += p.to(Q.dtype.element_ty) * do else: dv += tl.dot(tl.trans(p).to(do.dtype), do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) dp += tl.dot(do, vt) if ENABLE_DROPOUT: dp = tl.where(keep, dp / (1 - dropout_p), 0) ds = p * (dp - Di) if BLOCK_M == 1: dk += ds.to(Q.dtype.element_ty) * q else: dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q) Q_block_ptr = tl.advance(Q_block_ptr, (BLOCK_M, 0)) DO_block_ptr = tl.advance(DO_block_ptr, (BLOCK_M, 0)) DK_block_ptr = tl.make_block_ptr(base=DK + k_offset, shape=(seqlen_k, BLOCK_DMODEL), strides=(stride_kn, stride_kk), offsets=(start_m, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) DV_block_ptr = tl.make_block_ptr(base=DV + v_offset, shape=(seqlen_k, BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(start_m, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) tl.store(DK_block_ptr, (dk * sm_scale).to(DK.type.element_ty)) tl.store(DV_block_ptr, dv.to(DV.type.element_ty))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_split_kernel.py
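The qk_scale = sm_scale * 1.44269504 line folds log2(e) into the scale so the kernel can use exp2 (typically cheaper on GPUs) instead of exp, which is why the running logsumexp l_i is consumed via tl.math.exp2. A quick identity check (assuming PyTorch):

import torch

x = torch.randn(8)
LOG2_E = 1.44269504  # log2(e), the constant folded into qk_scale
# exp(x) == 2**(x * log2(e)), so the softmax can be computed entirely in base 2.
assert torch.allclose(torch.exp(x), torch.exp2(x * LOG2_E), atol=1e-6)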
5e173f19-b632-4e9b-8079-ecbf1eeba1e1
k_softmax.py
kimiasa/Experiments
src/ops/triton/k_softmax.py
c4e73bfefd8290695ec52b6386b6b81838ca94a1
0
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['K']) @triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])}) @triton.heuristics({'IS_FP16': lambda nargs: nargs['GradIn'].dtype == torch .float16}) @triton.jit def _softmax_backward(GradIn, GradOut, Out, stride_bm, stride_bn, stride_gm, stride_gn, stride_om, stride_on, K, LOG: tl.constexpr, CAUSAL: tl. constexpr, DEPTH: tl.constexpr, IS_FP16: tl.constexpr): """ Compute the softmax gradients. ..Note: Not autotuning for now because this would lead to broken accumulated gradients """ m = tl.program_id(0) n = tl.program_id(1) k = tl.arange(0, DEPTH) grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k out_ptrs = Out + m * stride_om + n * stride_on + k io_mask = k < K if CAUSAL: io_mask = io_mask & (k <= n) g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0)) o = tl.load(out_ptrs, mask=io_mask, other=float(0)) if CAUSAL: zero = float(0) zero = zero.to(g.dtype) g = tl.where(k > n, zero, g) o = tl.where(k > n, zero, o) if LOG: s = tl.sum(g, 0) if IS_FP16: o = o.to(tl.float32) grad_in = g - tl.exp(o) * s else: s = tl.sum(g * o, 0) grad_in = o * (g - s) grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k tl.store(grad_in_ptrs, grad_in, mask=k < K)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/ops/triton/k_softmax.py
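The two branches of this kernel implement the standard softmax and log-softmax backward formulas; a sketch checking them against autograd (assuming PyTorch; this is not the repo's own test):

import torch

g = torch.randn(5)

x = torch.randn(5, requires_grad=True)
y = torch.softmax(x, dim=0)
y.backward(g)
# LOG=False branch: grad_in = out * (g - sum(g * out))
assert torch.allclose(x.grad, y.detach() * (g - (g * y.detach()).sum()), atol=1e-6)

x2 = torch.randn(5, requires_grad=True)
y2 = torch.log_softmax(x2, dim=0)
y2.backward(g)
# LOG=True branch: grad_in = g - exp(out) * sum(g)
assert torch.allclose(x2.grad, g - torch.exp(y2.detach()) * g.sum(), atol=1e-6)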
ab5cdfd9-a6c2-4c41-ada3-c603bb44fb3a
paged_attn.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.autotune(configs=[triton.Config({}, num_warps=warps) for warps in [ 4, 8, 16]], key=['HEAD_SIZE', 'PADDED_NUM_SPLITS', 'PARTITION_SIZE']) @triton.jit def _paged_attn_wo_mma_v2_reduce_kernel(out, exp_sums, max_logits, tmp_out, context_lens, stride_exp_m, stride_exp_n, stride_out_m, stride_out_n, stride_tmp_m, stride_tmp_n, stride_tmp_k, HEAD_SIZE: tl.constexpr, PADDED_NUM_SPLITS: tl.constexpr, PARTITION_SIZE: tl.constexpr): seq_idx = tl.program_id(axis=1) head_idx = tl.program_id(axis=0) context_len = tl.load(context_lens + seq_idx) num_partitions = tl.cdiv(context_len, PARTITION_SIZE) max_logit = float('-inf') offs_logit = seq_idx * stride_exp_m + head_idx * stride_exp_n head_size_offs = tl.arange(0, HEAD_SIZE) tmp_out_ptr = seq_idx * stride_tmp_m + head_idx * stride_tmp_n out_ptr = seq_idx * stride_out_m + head_idx * stride_out_n + head_size_offs acc = tl.zeros([HEAD_SIZE], dtype=tl.float32) global_exp_sum = tl.zeros([1], dtype=tl.float32) logits = tl.load(max_logits + offs_logit + tl.arange(0, PADDED_NUM_SPLITS), mask=tl.arange(0, PADDED_NUM_SPLITS) < num_partitions, other=float('-inf')) max_logit = tl.max(logits, axis=0) exp_sum = tl.load(exp_sums + offs_logit + tl.arange(0, PADDED_NUM_SPLITS), mask=tl.arange(0, PADDED_NUM_SPLITS) < num_partitions, other=0.0) rescaled_exp_sum = exp_sum * tl.exp(logits - max_logit) global_exp_sum += tl.sum(rescaled_exp_sum, axis=0) tmp = tl.load(tmp_out + tmp_out_ptr + tl.arange(0, PADDED_NUM_SPLITS)[:, None] * stride_tmp_k + head_size_offs) acc += tl.sum(tmp * rescaled_exp_sum[:, None], axis=0) inv_sum = 1.0 / (global_exp_sum + 1e-06) tl.store(out + out_ptr, acc * inv_sum)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py
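What the reduce step computes, as a PyTorch sketch with illustrative shapes: each partition of the first pass contributes (max_logit, exp_sum, partial output), and they are merged under a shared maximum, matching the rescaled_exp_sum logic above.

import torch

P, HEAD_SIZE = 4, 8  # partitions per sequence, head size (illustrative)
max_logits = torch.randn(P)          # per-partition running max
exp_sums = torch.rand(P) + 0.1       # per-partition softmax denominators
tmp_out = torch.randn(P, HEAD_SIZE)  # per-partition unnormalized outputs

m = max_logits.max()
rescaled = exp_sums * torch.exp(max_logits - m)  # rebase every partition to m
out = (tmp_out * rescaled[:, None]).sum(0) / (rescaled.sum() + 1e-6)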
0c45df3d-8dd1-4102-b17a-cee67e7c7218
sparse_linear.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/sparse_linear.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.autotune(configs=autotune_configs, key=['row_dim', 'col_dim', 'inner_dim']) @triton.jit def dense_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr, row_dim: tl.constexpr, col_dim: tl.constexpr, inner_dim: tl.constexpr, lhs_stride_row: tl. constexpr, lhs_stride_inner: tl.constexpr, rhs_stride_inner: tl. constexpr, rhs_stride_col: tl.constexpr, out_stride_row: tl.constexpr, out_stride_col: tl.constexpr, accumulate: tl.constexpr, masked: tl. constexpr, block_size_row: tl.constexpr, block_size_col: tl.constexpr, block_size_inner: tl.constexpr, group_size_row: tl.constexpr): if not masked: tl.static_assert(row_dim % block_size_row == 0) tl.static_assert(col_dim % block_size_col == 0) tl.static_assert(inner_dim % block_size_inner == 0) pid_row, pid_col = tl.swizzle2d(tl.program_id(axis=0), tl.program_id( axis=1), tl.cdiv(row_dim, block_size_row), tl.cdiv(col_dim, block_size_col), group_size_row) row_offset = pid_row * block_size_row col_offset = pid_col * block_size_col row_range = tl.arange(0, block_size_row)[:, None] + row_offset col_range = tl.arange(0, block_size_col)[None, :] + col_offset inner_range = tl.arange(0, block_size_inner) lhs_ptr += row_range * lhs_stride_row + inner_range[None, : ] * lhs_stride_inner rhs_ptr += inner_range[:, None ] * rhs_stride_inner + col_range * rhs_stride_col out_ptr += row_range * out_stride_row + col_range * out_stride_col if masked: row_mask = row_range < row_dim col_mask = col_range < col_dim inner_mask = inner_range < inner_dim out = tl.dot(tl.load(lhs_ptr, mask=row_mask * inner_mask[None, :], other=0), tl.load(rhs_ptr, mask=inner_mask[:, None] * col_mask, other=0), out_dtype=tl.float32) else: out = tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr), out_dtype=tl.float32) for k in range(1, inner_dim // block_size_inner): lhs_ptr += block_size_inner * lhs_stride_inner rhs_ptr += block_size_inner * rhs_stride_inner if masked: inner_range += block_size_inner inner_mask = inner_range < inner_dim out += tl.dot(tl.load(lhs_ptr, mask=row_mask & inner_mask[None, :], other=0), tl.load(rhs_ptr, mask=inner_mask[:, None] & col_mask, other=0), out_dtype=tl.float32) else: out += tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr)) if masked: out_mask = row_mask & col_mask if accumulate: out += tl.load(out_ptr, mask=out_mask) tl.store(out_ptr, out, mask=out_mask) else: if accumulate: out += tl.load(out_ptr) tl.store(out_ptr, out)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py
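A pure-Python model of the tl.swizzle2d remapping used here for L2-friendly tile ordering (my reading of Triton's documented semantics; treat it as an assumption): the flat program index is walked column-major inside successive groups of group_size_row row-tiles.

def swizzle2d(i, j, size_i, size_j, size_g):
    # Flatten (i, j) row-major, then renumber column-major within each
    # group of size_g row-tiles so neighboring programs share B columns.
    ij = i * size_j + j
    size_gj = size_g * size_j             # flat indices per full group
    group_id = ij // size_gj
    off_i = group_id * size_g             # first row-tile of this group
    size_g = min(size_i - off_i, size_g)  # last group may have fewer rows
    new_i = off_i + ij % size_g
    new_j = ij % size_gj // size_g
    return new_i, new_j

# Example: on a 4x4 grid with size_g=2, programs sweep 2-row bands column-major.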
a20761f1-48a0-41de-8397-039ab5f0ba71
bgmv_expand_slice.py
IBM/vllm
vllm/lora/ops/bgmv_expand_slice.py
99523dd62be2ecf6c6db15e8133aaaf7855e7e86
0
@triton.jit def _bgmv_expand_slice_kernel(input_ptr, lora_ptr, out_ptr, N, K, lora_indices, xm_stride, xk_stride, l0_stride, lora_k_stride, lora_n_stride, cm_stride, cn_stride, slice_offset, BLOCK_N: tl. constexpr, BLOCK_K: tl.constexpr, SPLIT_N: tl.constexpr, EVEN_K: tl. constexpr, ADD_INPUTS: tl.constexpr, CAST_TYPE: tl.constexpr): """ GroupGEMV, additionally, introducing SPLIT_N can improve large hidden_size's performance """ pid_sn = tl.program_id(axis=0) cur_batch = tl.program_id(axis=1) lora_index = tl.load(lora_indices + cur_batch) if lora_index == -1: return offset_k = tl.arange(0, BLOCK_K) offset_n = tl.arange(0, BLOCK_N) if EVEN_K: tiled_a = tl.load(input_ptr + cur_batch * xm_stride + offset_k * xk_stride) else: tiled_a = tl.load(input_ptr + cur_batch * xm_stride + offset_k * xk_stride, mask=offset_k < K, other=0) split_n_length = tl.cdiv(N, SPLIT_N) if CAST_TYPE: tiled_a = tiled_a.to(lora_ptr.dtype.element_ty) b_ptr = (lora_ptr + l0_stride * lora_index + pid_sn * split_n_length * lora_k_stride) c_ptr = (out_ptr + cur_batch * cm_stride + pid_sn * split_n_length + slice_offset * cn_stride) for n in range(0, split_n_length, BLOCK_N): current_n = n + offset_n b_ptr_mask = (current_n[:, None] < split_n_length) & (offset_k[None, :] < K) c_mask = current_n < split_n_length tiled_b = tl.load(b_ptr + current_n[:, None] * lora_k_stride + offset_k[None, :] * lora_n_stride, mask=b_ptr_mask, other=0.0) if ADD_INPUTS: tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask, other=None) accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out else: accumulator = tl.sum(tiled_a * tiled_b, 1) tl.store(c_ptr + current_n * cn_stride, accumulator, mask=c_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/bgmv_expand_slice.py
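Reference semantics as a PyTorch sketch (shapes and names are illustrative): each batch row selects a LoRA expansion matrix by index, computes x @ B^T, and writes or accumulates it into an output slice starting at slice_offset; index -1 skips the row, mirroring the early return above.

import torch

B, K, N, num_loras, offset = 4, 16, 32, 3, 8
x = torch.randn(B, K)
lora = torch.randn(num_loras, N, K)   # (lora_id, out_features, in_features)
y = torch.zeros(B, offset + N)
idx = torch.tensor([0, 2, -1, 1])     # -1 means "no LoRA" for that row
for b in range(B):
    if idx[b] >= 0:
        y[b, offset:offset + N] += x[b] @ lora[idx[b]].T  # ADD_INPUTS=True path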
1d21c4d6-1ec8-482a-8d2e-484bbc9cdf08
y_9.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_9.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def ninth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_striding = tl.arange(0, block_size) * coord_stride coord_row_offset = coord_striding + block_size * coord_stride * block_id x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel) z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel) output_striding = tl.arange(0, block_size) * output_stride output_row_offset = (output_striding + block_size * output_stride * block_id + col_offset) g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset < output_numel) g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask= output_row_offset + 1 < output_numel) g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask= output_row_offset + 2 < output_numel) g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask= output_row_offset + 3 < output_numel) g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask= output_row_offset + 4 < output_numel) g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask= output_row_offset + 5 < output_numel) g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask= output_row_offset + 6 < output_numel) g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask= output_row_offset + 7 < output_numel) g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask= output_row_offset + 8 < output_numel) g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask= output_row_offset + 9 < output_numel) g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask= output_row_offset + 10 < output_numel) g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask= output_row_offset + 11 < output_numel) g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask= output_row_offset + 12 < output_numel) g_13 = tl.load(sph_grad_ptr + output_row_offset + 13, mask= output_row_offset + 13 < output_numel) g_14 = tl.load(sph_grad_ptr + output_row_offset + 14, mask= output_row_offset + 14 < output_numel) g_15 = tl.load(sph_grad_ptr + output_row_offset + 15, mask= output_row_offset + 15 < output_numel) g_16 = tl.load(sph_grad_ptr + output_row_offset + 16, mask= output_row_offset + 16 < output_numel) g_17 = tl.load(sph_grad_ptr + output_row_offset + 17, mask= output_row_offset + 17 < output_numel) g_18 = tl.load(sph_grad_ptr + output_row_offset + 18, mask= output_row_offset + 18 < output_numel) CONST000 = 1.59908344719522 CONST001 = 2.0 CONST002 = 3.0 CONST003 = 4.0 CONST004 = 5.0 CONST005 = 6.39633378878088 CONST006 = 7.0 CONST007 = 8.63855507530412 CONST008 = 9.59450068317133 CONST009 = 6.39633378878088 CONST011 = 12.7926675775618 CONST012 = 12.7926675775618 CONST014 = 15.5493991355474 CONST015 = 14.391751024757 CONST017 = 15.0007324039945 CONST018 = 14.45506743704 CONST019 = 14.45506743704 CONST020 = 13.3827919767794 CONST021 = 23.8930627690618 CONST022 = 23.8930627690618 CONST023 = 27.0429549260581 CONST024 = 29.2403830344269 CONST025 = 30.001464807989 CONST027 = 29.2403830344269 CONST028 = 38.3780027326853 CONST031 = 39.2300904918661 CONST032 = 42.9079114754785 CONST033 = 10.7269778688696 CONST034 = 54.0859098521163 CONST036 = 58.9217071894985 CONST037 = 57.8202697481601 CONST038 = 60.0029296159779 CONST039 = 62.4530292249704 CONST040 = 64.3618672132178 CONST042 
= 69.1084406024329 CONST044 = 78.5622762526647 CONST045 = 85.815822950957 CONST046 = 85.815822950957 CONST050 = 107.062335814235 CONST052 = 108.171819704233 CONST053 = -1935.03633686812 CONST055 = 115.64053949632 CONST056 = 117.843414378997 CONST057 = 117.843414378997 CONST059 = 120.005859231956 CONST060 = 2176.91587897664 CONST061 = 2176.91587897664 CONST064 = 150.007324039945 CONST065 = -1892.23403121978 CONST066 = -1885.49463006395 CONST067 = 173.46080924448 CONST068 = -1873.59087674911 CONST070 = 10.7269778688696 CONST071 = 180.008788847934 CONST074 = 13.5214774630291 CONST076 = 205.957975082297 CONST078 = 216.343639408465 CONST079 = 4326.8727881693 CONST080 = 233.923064275415 CONST081 = 233.923064275415 CONST082 = 240.011718463912 CONST083 = 241.879542108515 CONST085 = 255.853351551235 CONST086 = 255.853351551235 CONST087 = 257.447468852871 CONST088 = 257.447468852871 CONST090 = 270.429549260581 CONST091 = 289.101348740801 CONST093 = 300.01464807989 CONST097 = 13.0937127087774 CONST099 = -3747.18175349822 CONST100 = 6.39633378878088 CONST103 = 374.718175349822 CONST105 = 404.741888237121 CONST106 = 411.915950164594 CONST107 = 412.45195032649 CONST108 = 432.68727881693 CONST109 = 435.383175795328 CONST110 = 435.383175795327 CONST112 = 462.562157985281 CONST113 = -1571.24552505329 CONST114 = 483.759084217031 CONST115 = 511.706703102471 CONST116 = 562.077263024733 CONST117 = 578.202697481601 CONST119 = -1451.27725265109 CONST121 = -1451.27725265109 CONST123 = 600.029296159779 CONST124 = -1440.07031078347 CONST129 = -1387.68647395584 CONST130 = -1387.68647395584 CONST131 = -1373.05316721531 CONST132 = -1338.01151506746 CONST133 = 725.638626325546 CONST134 = -1298.06183645079 CONST137 = 788.430846341574 CONST138 = -1249.06058449941 CONST139 = -1228.09608744593 CONST140 = -1228.09608744593 CONST141 = 823.831900329187 CONST142 = -3245.15459112698 CONST143 = -1178.43414378997 CONST144 = 870.766351590655 CONST145 = 870.766351590655 CONST147 = -1124.15452604947 CONST149 = -3153.7233853663 CONST150 = 960.046873855647 CONST151 = 960.046873855647 CONST152 = 967.518168434061 CONST153 = -1081.71819704233 CONST154 = 967.518168434061 CONST155 = -1060.59072941097 CONST156 = 1023.41340620494 CONST157 = 1023.41340620494 CONST159 = -967.518168434061 CONST160 = 1081.71819704233 CONST161 = -960.046873855647 CONST163 = -936.795438374555 CONST165 = -900.043944239669 CONST166 = 1156.4053949632 CONST168 = -2902.55450530218 CONST170 = 11.2632978048796 CONST171 = -785.622762526647 CONST172 = -785.622762526647 CONST173 = -767.560054653706 CONST175 = 1338.01151506746 CONST176 = -693.843236977922 CONST177 = -693.843236977921 CONST178 = -686.526583607656 CONST179 = -669.005757533731 CONST180 = -669.005757533731 CONST182 = -649.030918225395 CONST183 = -630.744677073259 CONST184 = -628.498210021318 CONST185 = -628.498210021317 CONST186 = -600.029296159779 CONST187 = -589.217071894985 CONST188 = -578.202697481601 CONST189 = 15.5493991355474 CONST190 = -562.077263024733 CONST191 = 1500.07324039945 CONST192 = -480.023436927823 CONST193 = -480.023436927823 CONST195 = -462.562157985281 CONST196 = -450.021972119834 CONST197 = -412.45195032649 CONST198 = -409.365362481977 CONST199 = -409.365362481976 CONST200 = -404.741888237121 CONST201 = -392.811381263323 CONST202 = -383.780027326853 CONST203 = -383.780027326853 CONST204 = 1672.51439383433 CONST205 = -374.718175349822 CONST206 = -353.530243136991 CONST207 = -2400.11718463912 CONST209 = -346.921618488961 CONST210 = -346.921618488961 CONST211 = -343.263291803828 CONST212 = 
-338.631358951921 CONST213 = -338.631358951921 CONST214 = -324.515459112698 CONST215 = -315.37233853663 CONST216 = -314.249105010659 CONST217 = -2356.86828757994 CONST218 = -300.01464807989 CONST219 = -294.608535947493 CONST220 = -289.101348740801 CONST221 = -270.013183271901 CONST222 = -2312.81078992641 CONST223 = 1800.08788847934 CONST224 = -241.879542108515 CONST225 = -240.011718463912 CONST226 = -241.879542108515 CONST227 = -4326.8727881693 CONST228 = -216.343639408465 CONST229 = -210.010253655923 CONST230 = -204.682681240988 CONST231 = -204.682681240988 CONST232 = -204.682681240988 CONST233 = -196.405690631662 CONST234 = -191.144502152495 CONST235 = -191.890013663426 CONST236 = -191.890013663427 CONST237 = -187.359087674911 CONST238 = -180.008788847934 CONST239 = -176.765121568496 CONST241 = 1873.59087674911 CONST242 = -173.46080924448 CONST244 = -162.257729556349 CONST245 = -156.920361967464 CONST246 = -156.920361967464 CONST248 = -150.007324039945 CONST249 = -144.5506743704 CONST250 = -137.14955340795 CONST251 = -135.214774630291 CONST252 = -127.926675775618 CONST253 = -127.926675775618 CONST254 = -120.939771054258 CONST255 = -120.005859231956 CONST256 = -120.939771054258 CONST257 = -117.843414378997 CONST258 = -117.843414378997 CONST259 = -115.64053949632 CONST260 = -115.64053949632 CONST261 = 1935.03633686812 CONST262 = -2163.43639408465 CONST263 = -114.421097267943 CONST264 = -108.171819704233 CONST265 = -107.062335814235 CONST266 = -108.171819704233 CONST267 = -104.74970167022 CONST268 = -96.7518168434061 CONST269 = -96.7518168434061 CONST270 = -90.0043944239669 CONST271 = -90.106382439037 CONST272 = -80.2967518606762 CONST273 = -78.4601809837321 CONST274 = -78.4601809837321 CONST275 = -77.2655855030233 CONST276 = -78.5622762526647 CONST277 = -68.5747767039748 CONST278 = -63.9633378878088 CONST279 = -62.4530292249704 CONST280 = -61.8124684024186 CONST281 = -60.0029296159779 CONST282 = -63.9633378878088 CONST283 = -58.9217071894985 CONST284 = -57.8202697481601 CONST285 = -57.8202697481601 CONST286 = -48.375908421703 CONST287 = -48.3759084217031 CONST288 = -39.2811381263323 CONST289 = -38.6327927515116 CONST290 = -39.2811381263323 CONST291 = -30.9062342012093 CONST292 = -30.001464807989 CONST293 = -30.001464807989 CONST294 = -27.6433762409732 CONST295 = -17.3847567381802 CONST296 = -15.0007324039945 CONST297 = -14.7304267973746 CONST298 = -13.5214774630291 CONST299 = -13.0937127087774 CONST300 = -13.3827919767794 CONST301 = -9.82028453158308 CONST302 = -4.91014226579154 CONST303 = 2046.82681240988 VAR06 = x * x * x * x VAR07 = x * x * x VAR08 = x * x VAR02 = VAR06 * VAR06 VAR03 = VAR06 * VAR07 VAR04 = VAR07 * VAR07 VAR05 = VAR07 * VAR08 VAR15 = y * y * y * y VAR16 = y * y * y VAR17 = y * y VAR11 = VAR15 * VAR15 VAR12 = VAR15 * VAR16 VAR13 = VAR16 * VAR16 VAR14 = VAR16 * VAR17 VAR24 = z * z * z * z VAR25 = z * z * z VAR26 = z * z VAR20 = VAR24 * VAR24 VAR21 = VAR24 * VAR25 VAR22 = VAR25 * VAR25 VAR23 = VAR25 * VAR26 g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask= coord_row_offset + 1 < coord_numel) g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask= coord_row_offset + 2 < coord_numel) g_x += g_0 * (CONST021 * VAR20 + CONST022 * VAR02 + CONST179 * VAR04 * VAR26 + CONST180 * VAR08 * VAR22 + CONST204 * VAR06 * VAR24 ) + g_1 * y * (CONST065 * VAR08 * VAR23 - CONST149 * VAR06 * VAR25 + CONST183 * VAR04 * z - CONST271 * VAR21) + g_10 * (CONST012 * VAR21 * x + VAR23 * (CONST028 * VAR07 
+ CONST203 * VAR17 * x) + VAR25 * ( CONST028 * VAR05 + CONST157 * VAR15 * x + CONST173 * VAR07 * VAR17) + z * (CONST011 * VAR03 + CONST157 * VAR07 * VAR15 + CONST198 * VAR13 * x + CONST202 * VAR05 * VAR17)) + g_11 * (CONST150 * VAR07 * VAR14 + CONST250 * VAR12 * x + VAR16 * (CONST093 * VAR24 * x + CONST165 * VAR05 + CONST186 * VAR07 * VAR26) + y * (CONST059 * VAR03 + CONST071 * VAR05 * VAR26 + CONST281 * VAR22 * x)) + g_12 * (VAR23 * (CONST257 * VAR17 * x - CONST290 * VAR07) + VAR25 * (CONST044 * VAR05 + CONST143 * VAR07 * VAR17 - CONST172 * VAR15 * x) + z * ( CONST155 * VAR05 * VAR17 + CONST184 * VAR13 * x - CONST217 * VAR07 * VAR15 - CONST288 * VAR03)) + g_13 * (VAR14 * (CONST129 * VAR26 * x - CONST195 * VAR07) + VAR16 * (CONST166 * VAR24 * x + CONST176 * VAR05 - CONST222 * VAR07 * VAR26) + y * (CONST188 * VAR07 * VAR24 + CONST209 * VAR05 * VAR26 - CONST259 * VAR03 + CONST259 * VAR22 * x) ) + g_14 * (CONST042 * VAR03 * z + CONST268 * VAR07 * VAR23 + CONST294 * VAR21 * x + VAR15 * (CONST053 * VAR25 * x + CONST261 * VAR07 * z) + VAR17 * (CONST119 * VAR05 * z + CONST144 * VAR23 * x + CONST152 * VAR07 * VAR25)) + g_15 * (VAR16 * (CONST068 * VAR24 * x - CONST099 * VAR07 * VAR26 + CONST205 * VAR05) + y * (CONST050 * VAR03 + CONST147 * VAR05 * VAR26 - CONST205 * VAR22 * x)) + g_16 * ( CONST214 * VAR05 * VAR25 - CONST264 * VAR03 * z + CONST264 * VAR07 * VAR23 - CONST275 * VAR21 * x + VAR17 * (CONST079 * VAR07 * VAR25 + CONST134 * VAR05 * z + CONST134 * VAR23 * x)) + g_17 * y * ( CONST065 * VAR05 * VAR26 - CONST149 * VAR07 * VAR24 + CONST183 * VAR22 * x - CONST271 * VAR03) + g_18 * (CONST132 * VAR05 * VAR25 + CONST175 * VAR07 * VAR23 - CONST234 * VAR03 * z + CONST234 * VAR21 * x ) + g_2 * (CONST002 * VAR08 * (CONST034 * VAR22 + CONST153 * VAR17 * VAR24) + CONST004 * VAR06 * (CONST023 * VAR24 - CONST182 * VAR17 * VAR26) + CONST006 * VAR04 * (CONST289 * VAR26 + CONST291 * VAR17) - CONST228 * VAR17 * VAR22 - CONST295 * VAR02 + CONST298 * VAR20 ) + g_3 * (VAR16 * (-CONST068 * VAR06 * z + CONST099 * VAR08 * VAR25 + CONST103 * VAR23) + y * (CONST116 * VAR08 * VAR23 - CONST163 * VAR06 * VAR25 + CONST190 * VAR04 * z + CONST272 * VAR21) ) + g_4 * (CONST007 * VAR20 + CONST014 * VAR02 + CONST254 * VAR06 * VAR24 + CONST269 * VAR04 * VAR26 + VAR15 * (CONST114 * VAR06 + CONST114 * VAR24 + CONST168 * VAR08 * VAR26) + VAR17 * (CONST060 * VAR06 * VAR26 + CONST133 * VAR08 * VAR24 + CONST212 * VAR04 + CONST224 * VAR22)) + g_5 * (VAR14 * (CONST130 * VAR08 * z - CONST195 * VAR25) + VAR16 * (CONST195 * VAR23 - CONST222 * VAR06 * z) + y * (CONST067 * VAR08 * VAR23 + CONST200 * VAR04 * z + CONST220 * VAR06 * VAR25 - CONST284 * VAR21)) + g_6 * (CONST002 * VAR08 * (CONST201 * VAR15 * VAR26 - CONST219 * VAR17 * VAR24 + CONST267 * VAR13 + CONST299 * VAR22) + CONST004 * VAR06 * (CONST036 * VAR17 * VAR26 - CONST233 * VAR15 + CONST301 * VAR24) + CONST187 * VAR15 * VAR24 + CONST197 * VAR04 * VAR17 - CONST216 * VAR13 * VAR26 - CONST239 * VAR17 * VAR22 - CONST297 * VAR02 + CONST302 * VAR20 ) + g_7 * (CONST002 * VAR08 * (-CONST186 * VAR16 * VAR25 + CONST192 * VAR14 * z + CONST270 * VAR23 * y) + CONST004 * VAR06 * (-CONST218 * VAR16 * z + CONST270 * VAR25 * y) + CONST193 * VAR14 * VAR25 - CONST218 * VAR16 * VAR23 + CONST229 * VAR04 * y * z - CONST250 * VAR12 * z + CONST292 * VAR21 * y) + g_8 * (CONST000 * VAR20 + CONST002 * VAR08 * (CONST005 * VAR22 + CONST115 * VAR15 * VAR26 + CONST230 * VAR13 + CONST235 * VAR17 * VAR24) + CONST004 * VAR06 * ( CONST008 * VAR24 + CONST085 * VAR15 + CONST235 * VAR17 * VAR26) + CONST006 * VAR04 * 
(CONST009 * VAR26 + CONST278 * VAR17) + CONST015 * VAR02 + CONST024 * VAR11 + CONST085 * VAR15 * VAR24 + CONST231 * VAR13 * VAR26 + CONST278 * VAR17 * VAR22) + g_9 * (CONST245 * VAR12 * x + VAR14 * (CONST141 * VAR07 + CONST141 * VAR26 * x) + VAR16 * ( CONST131 * VAR07 * VAR26 + CONST178 * VAR05 + CONST178 * VAR24 * x) + y * (CONST045 * VAR03 + CONST046 * VAR22 * x + CONST087 * VAR05 * VAR26 + CONST088 * VAR07 * VAR24)) g_y += CONST001 * g_16 * y * (CONST160 * VAR06 * VAR25 + CONST182 * VAR08 * VAR23 + CONST228 * VAR04 * z - CONST291 * VAR21) + g_1 * (- CONST183 * VAR05 * VAR25 + CONST183 * VAR07 * VAR23 + CONST271 * VAR03 * z - CONST271 * VAR21 * x) + g_10 * (CONST252 * VAR21 * y + VAR23 * (CONST157 * VAR16 + CONST203 * VAR08 * y) + VAR25 * ( CONST140 * VAR14 + CONST202 * VAR06 * y + CONST303 * VAR08 * VAR16) + z * (CONST080 * VAR12 + CONST139 * VAR08 * VAR14 + CONST157 * VAR06 * VAR16 + CONST252 * VAR04 * y)) + g_11 * (CONST002 * VAR17 * ( CONST064 * VAR08 * VAR24 + CONST248 * VAR04 + CONST248 * VAR06 * VAR26 - CONST248 * VAR22) + CONST004 * VAR15 * (CONST082 * VAR06 + CONST225 * VAR24) + CONST006 * VAR13 * (CONST277 * VAR08 - CONST277 * VAR26) + CONST017 * VAR02 + CONST025 * VAR04 * VAR26 + CONST293 * VAR08 * VAR22 + CONST296 * VAR20) + g_12 * (CONST056 * VAR21 * y + VAR23 * (CONST171 * VAR16 + CONST257 * VAR08 * y) + VAR25 * (- CONST113 * VAR08 * VAR16 - CONST185 * VAR14 + CONST187 * VAR06 * y) + z * (CONST066 * VAR08 * VAR14 + CONST206 * VAR04 * y - CONST217 * VAR06 * VAR16)) + g_13 * (CONST002 * VAR17 * (CONST117 * VAR06 * VAR26 + CONST117 * VAR08 * VAR24 + CONST259 * VAR04 + CONST260 * VAR22) + CONST004 * VAR15 * (CONST055 * VAR06 + CONST055 * VAR24 + CONST176 * VAR08 * VAR26) + CONST018 * VAR20 + CONST019 * VAR02 + CONST249 * VAR06 * VAR24 + CONST284 * VAR04 * VAR26 + CONST285 * VAR08 * VAR22) + g_14 * (CONST001 * y * (CONST083 * VAR06 * VAR25 + CONST109 * VAR08 * VAR23 + CONST226 * VAR04 * z + CONST286 * VAR21) + CONST003 * VAR16 * (CONST114 * VAR06 * z + CONST159 * VAR08 * VAR25 - CONST269 * VAR23)) + g_15 * (CONST002 * VAR17 * (CONST039 * VAR22 - CONST163 * VAR06 * VAR26 + CONST163 * VAR08 * VAR24 + CONST279 * VAR04) + CONST020 * VAR02 + CONST237 * VAR04 * VAR26 - CONST237 * VAR08 * VAR22 + CONST300 * VAR20) + g_17 * (CONST137 * VAR06 * VAR24 + CONST170 * VAR02 + CONST170 * VAR20 + CONST215 * VAR04 * VAR26 + CONST215 * VAR08 * VAR22) + g_2 * (CONST108 * VAR22 * x * y - CONST134 * VAR05 * VAR26 * y + CONST262 * VAR07 * VAR24 * y + CONST280 * VAR03 * y) + g_3 * (CONST002 * VAR17 * (CONST103 * VAR23 * x + CONST138 * VAR07 * VAR25 - CONST205 * VAR05 * z) - CONST237 * VAR05 * VAR25 - CONST237 * VAR07 * VAR23 + CONST272 * VAR03 * z + CONST272 * VAR21 * x) + g_4 * (CONST001 * y * (CONST110 * VAR05 * VAR26 - CONST224 * VAR07 * VAR24 + CONST224 * VAR22 * x + CONST287 * VAR03) + CONST003 * VAR16 * (CONST114 * VAR24 * x + CONST159 * VAR07 * VAR26 - CONST269 * VAR05)) + g_5 * (CONST002 * VAR17 * ( CONST112 * VAR05 * z + CONST195 * VAR23 * x) + CONST004 * VAR15 * ( CONST195 * VAR07 * z - CONST195 * VAR25 * x) + CONST037 * VAR07 * VAR23 + CONST284 * VAR05 * VAR25 - CONST284 * VAR21 * x + CONST285 * VAR03 * z) + g_6 * (CONST258 * VAR03 * y + VAR05 * (CONST057 * VAR26 * y - CONST171 * VAR16) + VAR07 * (CONST113 * VAR16 * VAR26 + CONST185 * VAR14 - CONST187 * VAR24 * y) + x * (-CONST066 * VAR14 * VAR26 - CONST206 * VAR22 * y + CONST217 * VAR16 * VAR24)) + g_7 * ( CONST292 * VAR03 * z + VAR05 * (-CONST165 * VAR17 * z + CONST270 * VAR25) + VAR07 * (CONST207 * VAR15 * z + CONST223 * VAR17 * VAR25 + 
CONST270 * VAR23) + x * (CONST151 * VAR13 * z - CONST165 * VAR17 * VAR23 + CONST207 * VAR15 * VAR25 + CONST292 * VAR21)) + g_8 * ( CONST253 * VAR03 * y + VAR05 * (CONST156 * VAR16 + CONST202 * VAR26 * y) + VAR07 * (CONST139 * VAR14 + CONST202 * VAR24 * y + CONST303 * VAR16 * VAR26) + x * (CONST081 * VAR12 + CONST140 * VAR14 * VAR26 + CONST156 * VAR16 * VAR24 + CONST253 * VAR22 * y)) + g_9 * (CONST002 * VAR17 * (CONST211 * VAR06 * VAR26 + CONST211 * VAR08 * VAR24 + CONST263 * VAR04 + CONST263 * VAR22) + CONST004 * VAR15 * (CONST076 * VAR06 + CONST076 * VAR24 + CONST106 * VAR08 * VAR26) + CONST006 * VAR13 * (CONST273 * VAR26 + CONST274 * VAR08) + CONST031 * VAR11 + CONST032 * VAR04 * VAR26 + CONST032 * VAR08 * VAR22 + CONST033 * VAR20 + CONST040 * VAR06 * VAR24 + CONST070 * VAR02) g_z += g_0 * (CONST132 * VAR07 * VAR23 + CONST175 * VAR05 * VAR25 + CONST234 * VAR03 * z - CONST234 * VAR21 * x) + g_1 * y * (-CONST065 * VAR05 * VAR26 + CONST149 * VAR07 * VAR24 - CONST183 * VAR22 * x + CONST271 * VAR03) + g_10 * (CONST000 * VAR02 + CONST002 * VAR26 * ( CONST100 * VAR04 + CONST115 * VAR08 * VAR15 + CONST231 * VAR13 + CONST235 * VAR06 * VAR17) + CONST004 * VAR24 * (CONST008 * VAR06 + CONST086 * VAR15 + CONST236 * VAR08 * VAR17) + CONST006 * VAR22 * ( CONST005 * VAR08 + CONST282 * VAR17) + CONST015 * VAR20 + CONST027 * VAR11 + CONST086 * VAR06 * VAR15 + CONST232 * VAR08 * VAR13 + CONST282 * VAR04 * VAR17) + g_11 * (CONST161 * VAR14 * VAR25 - CONST250 * VAR12 * z + VAR16 * (CONST123 * VAR08 * VAR25 - CONST165 * VAR23 + CONST218 * VAR06 * z) + y * (CONST038 * VAR04 * z + CONST238 * VAR08 * VAR23 + CONST255 * VAR21)) + g_12 * (CONST002 * VAR26 * (CONST097 * VAR04 - CONST201 * VAR08 * VAR15 + CONST219 * VAR06 * VAR17 - CONST267 * VAR13) + CONST004 * VAR24 * (CONST233 * VAR15 + CONST283 * VAR08 * VAR17 - CONST301 * VAR06) + CONST107 * VAR17 * VAR22 - CONST187 * VAR06 * VAR15 + CONST216 * VAR08 * VAR13 + CONST239 * VAR04 * VAR17 + CONST297 * VAR20 - CONST302 * VAR02 ) + g_13 * (VAR14 * (CONST129 * VAR08 * z - CONST195 * VAR25) + VAR16 * (CONST166 * VAR06 * z + CONST177 * VAR23 - CONST222 * VAR08 * VAR25) + y * (CONST188 * VAR06 * VAR25 + CONST210 * VAR08 * VAR23 + CONST260 * VAR04 * z - CONST260 * VAR21)) + g_14 * (CONST007 * VAR02 + CONST189 * VAR20 + CONST256 * VAR06 * VAR24 + CONST269 * VAR08 * VAR22 + VAR15 * (CONST114 * VAR06 + CONST114 * VAR24 + CONST168 * VAR08 * VAR26) + VAR17 * (CONST061 * VAR08 * VAR24 + CONST133 * VAR06 * VAR26 + CONST213 * VAR22 + CONST226 * VAR04) ) + g_15 * (VAR16 * (-CONST068 * VAR06 * z + CONST099 * VAR08 * VAR25 + CONST103 * VAR23) + y * (-CONST147 * VAR08 * VAR23 + CONST205 * VAR04 * z + CONST265 * VAR21)) + g_16 * (CONST074 * VAR02 + CONST090 * VAR08 * VAR22 + CONST244 * VAR04 * VAR26 + CONST251 * VAR06 * VAR24 + CONST295 * VAR20 + VAR17 * (CONST078 * VAR22 - CONST142 * VAR06 * VAR26 + CONST142 * VAR08 * VAR24 + CONST228 * VAR04)) + g_17 * y * (CONST065 * VAR08 * VAR23 - CONST149 * VAR06 * VAR25 + CONST183 * VAR04 * z - CONST271 * VAR21 ) + g_18 * (CONST021 * VAR02 + CONST022 * VAR20 + CONST179 * VAR08 * VAR22 + CONST180 * VAR04 * VAR26 + CONST204 * VAR06 * VAR24) + g_2 * ( CONST275 * VAR03 * z + VAR05 * (CONST052 * VAR25 - CONST134 * VAR17 * z) + VAR07 * (-CONST214 * VAR23 + CONST227 * VAR17 * VAR25) + x * ( -CONST134 * VAR17 * VAR23 + CONST266 * VAR21)) + g_3 * (VAR16 * ( CONST099 * VAR07 * VAR26 - CONST205 * VAR05 + CONST241 * VAR24 * x) + y * (CONST116 * VAR05 * VAR26 - CONST163 * VAR07 * VAR24 + CONST190 * VAR22 * x + CONST272 * VAR03)) + g_4 * (CONST042 * VAR21 * 
x + CONST269 * VAR05 * VAR25 + CONST294 * VAR03 * z + VAR15 * (CONST053 * VAR07 * z + CONST261 * VAR25 * x) + VAR17 * (CONST121 * VAR23 * x + CONST145 * VAR05 * z + CONST154 * VAR07 * VAR25)) + g_5 * (VAR14 * (-CONST130 * VAR26 * x + CONST195 * VAR07) + VAR16 * (CONST112 * VAR05 + CONST222 * VAR24 * x) + y * (CONST091 * VAR07 * VAR24 + CONST105 * VAR22 * x + CONST242 * VAR05 * VAR26 + CONST285 * VAR03) ) + g_6 * (VAR05 * (CONST057 * VAR17 * z + CONST290 * VAR25) + VAR07 * (-CONST143 * VAR17 * VAR25 + CONST172 * VAR15 * z + CONST276 * VAR23) + x * (-CONST155 * VAR17 * VAR23 - CONST184 * VAR13 * z + CONST217 * VAR15 * VAR25 + CONST288 * VAR21)) + g_7 * ( CONST292 * VAR03 * y + VAR05 * (-CONST218 * VAR16 + CONST221 * VAR26 * y) + VAR07 * (CONST192 * VAR14 + CONST196 * VAR24 * y + CONST223 * VAR16 * VAR26) + x * (CONST124 * VAR14 * VAR26 + CONST191 * VAR16 * VAR24 + CONST229 * VAR22 * y - CONST250 * VAR12) ) + g_8 * (CONST011 * VAR03 * z + VAR05 * (CONST028 * VAR25 + CONST202 * VAR17 * z) + VAR07 * (CONST028 * VAR23 + CONST157 * VAR15 * z + CONST173 * VAR17 * VAR25) + x * (CONST011 * VAR21 + CONST156 * VAR15 * VAR25 + CONST199 * VAR13 * z + CONST202 * VAR17 * VAR23)) + g_9 * (CONST246 * VAR12 * z + VAR14 * (CONST141 * VAR08 * z + CONST141 * VAR25) + VAR16 * (CONST131 * VAR08 * VAR25 + CONST178 * VAR06 * z + CONST178 * VAR23) + y * (CONST046 * VAR04 * z + CONST046 * VAR21 + CONST087 * VAR08 * VAR23 + CONST088 * VAR06 * VAR25)) tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask= coord_row_offset + 1 < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask= coord_row_offset + 2 < coord_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_9.py
16125124-4587-4526-acdf-7107ff2c9612
FleetAttention_triton.py
Computational-Machine-Intelligence/LeetDecoding
leetDecoding/methods/FleetAttention_triton.py
1b545c2f5bacc155255250d1f70ac9484744559a
0
@triton.jit def FleetAttention_with_decay_kernel(B_ptr, C_ptr, V_ptr, gamma_ptr, ans_ptr, heads: tl.constexpr, seqlen: tl.constexpr, dim: tl.constexpr, rank: tl.constexpr, stride_vbh: tl.constexpr, stride_bbh: tl.constexpr, dim_BLOCK: tl.constexpr): rank_idx = tl.program_id(axis=0) bz = tl.program_id(axis=1) dim_block_idx = tl.program_id(axis=2) off_b = tl.arange(0, 1) off_dim = tl.arange(0, dim_BLOCK) off_gamma = tl.full((1,), bz % heads, dtype=tl.int32) cv = tl.zeros([1, dim_BLOCK], dtype=tl.float32) o = tl.zeros([1, dim_BLOCK], dtype=tl.float32) gamma = tl.load(gamma_ptr + off_gamma, mask=off_gamma < heads, other=0) for seq_idx in range(seqlen): offs_bc = bz * stride_bbh + seq_idx * rank + rank_idx + off_b[None, :] offs_v = (bz * stride_vbh + seq_idx * dim + dim_block_idx * dim_BLOCK + off_dim[None, :]) ans_ptrs = (ans_ptr + bz * stride_vbh + seq_idx * dim + dim_block_idx * dim_BLOCK + off_dim[None, :]) v_ptrs = V_ptr + offs_v b_ptr = B_ptr + offs_bc c_ptr = C_ptr + offs_bc b = tl.load(b_ptr, mask=off_b[None, :] < 1, other=0) c = tl.load(c_ptr, mask=off_b[None, :] < 1, other=0) v = tl.load(v_ptrs, mask=off_dim[None, :] < dim, other=0) cv = c * v + cv * gamma o = b * cv ans = tl.load(ans_ptrs, mask=off_dim[None, :] < dim, other=0) tl.store(ans_ptrs, ans + o, mask=off_dim[None, :] < dim)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Transposed Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/Computational-Machine-Intelligence/LeetDecoding/blob/1b545c2f5bacc155255250d1f70ac9484744559a/leetDecoding/methods/FleetAttention_triton.py
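The per-(batch-head, rank) recurrence that the sequential loop implements, as a PyTorch sketch with illustrative sizes: a decayed rank-1 state is updated once per timestep and modulated by b before being accumulated into the answer.

import torch

seqlen, dim, gamma = 6, 8, 0.9  # gamma is the per-head decay (illustrative)
b = torch.randn(seqlen, 1)
c = torch.randn(seqlen, 1)
v = torch.randn(seqlen, dim)

state = torch.zeros(dim)
out = torch.zeros(seqlen, dim)
for t in range(seqlen):
    state = c[t] * v[t] + gamma * state  # cv = c * v + cv * gamma
    out[t] += b[t] * state               # o = b * cv, accumulated into ans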
9b808ef9-4ee5-48d1-af63-3b5d5a3d4067
attn_qk_int8_per_block_h64.py
rodjjo/editorium
editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_h64.py
7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694
0
@triton.jit def _attn_fwd(Q, K, V, Q_scale, K_scale, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, HEAD_DIM: tl.constexpr, BLOCK_M: tl. constexpr, BLOCK_N: tl.constexpr, STAGE: tl.constexpr): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_z = off_hz // H off_h = off_hz % H qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64 ) * stride_qh vk_offset = qvk_offset // stride_qm q_scale_offset = off_hz * tl.cdiv(N_CTX, BLOCK_M) k_scale_offset = off_hz * tl.cdiv(N_CTX, BLOCK_N) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_k = tl.arange(0, HEAD_DIM) Q_ptrs = Q + qvk_offset + offs_m[:, None] * stride_qm + offs_k[None, : ] * stride_qk Q_scale_ptr = Q_scale + q_scale_offset + start_m K_ptrs = K + qvk_offset + offs_k[:, None] + offs_n[None, :] * stride_kn K_scale_ptr = K_scale + k_scale_offset V_ptrs = V + qvk_offset + offs_n[:, None] * stride_qm + offs_k[None, : ] * stride_qk O_block_ptr = Out + qvk_offset + offs_m[:, None] * stride_qm + offs_k[ None, :] * stride_qk m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0 acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) q = tl.load(Q_ptrs, mask=offs_m[:, None] < N_CTX) q_scale = tl.load(Q_scale_ptr) acc, l_i = _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs, K_scale_ptr, V_ptrs, start_m, BLOCK_M, HEAD_DIM, BLOCK_N, 4 - STAGE, offs_m, offs_n, N_CTX) acc = acc / l_i[:, None] tl.store(O_block_ptr, acc.to(Out.type.element_ty), mask=offs_m[:, None] < N_CTX)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_h64.py
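A hedged launch sketch (the per-block scale layouts are inferred from q_scale_offset/k_scale_offset above and the wrapper is not the repo's public API): one program per (M-tile, batch-head) pair.

import torch
import triton

def attn_fwd(q, k, v, q_scale, k_scale, BLOCK_M=128, BLOCK_N=64, STAGE=3):
    # q, k, v: (Z, H, N_CTX, HEAD_DIM); assumed scale layouts:
    # q_scale: (Z*H, cdiv(N_CTX, BLOCK_M)), k_scale: (Z*H, cdiv(N_CTX, BLOCK_N))
    Z, H, N_CTX, HEAD_DIM = q.shape
    o = torch.empty_like(v)
    grid = (triton.cdiv(N_CTX, BLOCK_M), Z * H)
    _attn_fwd[grid](
        q, k, v, q_scale, k_scale, o,
        *q.stride(), *k.stride(), *v.stride(), *o.stride(),
        Z, H, N_CTX,
        HEAD_DIM=HEAD_DIM, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, STAGE=STAGE,
    )
    return o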
2a6747c9-0f4a-4d02-8dbd-aa17a83f609c
layernorm.py
dame-cell/Triformer
triformer/layernorm.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit def layernorm_backward(dY, dY_row_stride, X, X_row_stride, W, b, r, mu, n_cols, eps, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols dY += row_idx * dY_row_stride X += row_idx * X_row_stride r += row_idx mu += row_idx dY_row = tl.load(dY + col_offsets, mask=mask, other=0).to(tl.float32) X_row = tl.load(X + col_offsets, mask=mask, other=0).to(tl.float32) W_row = tl.load(W + col_offsets, mask=mask, other=0).to(tl.float32) b_row = tl.load(b + col_offsets, mask=mask, other=0).to(tl.float32) inv_var = tl.load(r).to(tl.float32) mean = tl.load(mu).to(tl.float32) normed = (X_row - mean) * inv_var dY_W = dY_row * W_row dX_row = dY_W - tl.sum(dY_W, axis=0) / n_cols - normed * tl.sum(dY_W * normed, axis=0) / n_cols dX_row = dX_row * inv_var tl.store(dY + col_offsets, dX_row, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/layernorm.py
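The dX formula this kernel fuses, as a per-row PyTorch reference (a sketch; sizes and eps are illustrative). Note that the kernel writes dX back over the dY buffer in place.

import torch

x = torch.randn(16)
w = torch.randn(16)
dy = torch.randn(16)

mu = x.mean()
var = x.var(unbiased=False)
inv_var = 1.0 / torch.sqrt(var + 1e-5)
x_hat = (x - mu) * inv_var          # "normed" in the kernel
dyw = dy * w                        # dY_W
# dX = (dY_W - mean(dY_W) - x_hat * mean(dY_W * x_hat)) * inv_var
dx = (dyw - dyw.mean() - x_hat * (dyw * x_hat).mean()) * inv_var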
c2355f8d-2629-4b64-9d2f-7bb6c69f238d
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit def store_full_1d(vals, ptr, sz: tl.constexpr, stride=1): """Store 1d block into vector (defined by ptr)""" offs = get_1d_offest(sz) mask = get_1d_mask(offs, sz) tl.store(ptr + offs, vals, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
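get_1d_offest and get_1d_mask are repo-local helpers not shown in this record; plausible definitions consistent with this call site (an assumption on my part, keeping the repo's "offest" spelling) would be:

import triton
import triton.language as tl

@triton.jit
def get_1d_offest(sz: tl.constexpr):
    # Presumed definition (assumption): offsets 0..sz-1 for a 1-D block.
    return tl.arange(0, sz)

@triton.jit
def get_1d_mask(offs, max: tl.constexpr):
    # Presumed definition (assumption): guard the tail when fewer than
    # `max` elements remain.
    return offs < max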
ef1a3b80-243d-4b09-9484-b0a123fda695
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/sum/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_NON_REDUCE_DIM': b_nr, 'BLOCK_SIZE_REDUCE_DIM': b_r}, num_warps=w) for b_nr, b_r, w in itertools.product([2, 4, 8, 16], [2, 4, 8, 16], [2, 4, 8])], key=['M', 'N'] ) @triton.jit def triton_sum_kernel_1D_result_sum_then_buffer(input_ptr, output_ptr, M, N, BLOCK_SIZE_NON_REDUCE_DIM: tl.constexpr, BLOCK_SIZE_REDUCE_DIM: tl. constexpr, dim: tl.constexpr): """ Sum blocks of input using Triton and store in buffer """ pid = tl.program_id(axis=0) reduce_dim_len = M if dim == 0 else N non_reduce_dim_len = N if dim == 0 else M buffer = tl.zeros((1, BLOCK_SIZE_NON_REDUCE_DIM), dtype=tl.float32) block_start_non_reduce_dim = pid * BLOCK_SIZE_NON_REDUCE_DIM offsets_non_reduce_dim = block_start_non_reduce_dim + tl.arange(0, BLOCK_SIZE_NON_REDUCE_DIM) mask_non_reduce_dim = offsets_non_reduce_dim < non_reduce_dim_len for block_start_reduce_dim in range(0, reduce_dim_len, BLOCK_SIZE_REDUCE_DIM): offsets_reduce_dim = block_start_reduce_dim + tl.arange(0, BLOCK_SIZE_REDUCE_DIM) mask_reduce_dim = offsets_reduce_dim < reduce_dim_len idxs, mask = None, None if dim == 0: idxs = offsets_reduce_dim[:, None ] * non_reduce_dim_len + offsets_non_reduce_dim mask = mask_reduce_dim[:, None] & mask_non_reduce_dim elif dim == 1: idxs = offsets_non_reduce_dim[:, None ] * reduce_dim_len + offsets_reduce_dim mask = mask_non_reduce_dim[:, None] & mask_reduce_dim input = tl.load(input_ptr + idxs, mask=mask, other=mask) buffer += tl.sum(input, axis=dim) buffer_view = buffer.reshape((BLOCK_SIZE_NON_REDUCE_DIM,)) tl.store(output_ptr + offsets_non_reduce_dim, buffer_view, mask= mask_non_reduce_dim)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py
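A hedged wrapper sketch for launching this kernel: the autotuner picks the block sizes, so the grid reads them from meta; dim selects which axis of the (M, N) input is reduced. Names are illustrative.

import torch
import triton

def sum_dim(x, dim):
    # Reduce a 2-D tensor along `dim` (0 or 1); output has the other length.
    M, N = x.shape
    out_len = N if dim == 0 else M
    out = torch.empty(out_len, device=x.device, dtype=torch.float32)
    grid = lambda meta: (triton.cdiv(out_len, meta['BLOCK_SIZE_NON_REDUCE_DIM']),)
    triton_sum_kernel_1D_result_sum_then_buffer[grid](x, out, M, N, dim=dim)
    return out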
377b8e8a-7e71-4aa1-89e1-79323130a64c
test_autodiff.py
srush/triton-autodiff
tests/test_autodiff.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit def ub2(X, Y): r = tl.arange(0, 16) r2 = tl.arange(0, 32) x = tl.load(X + 16 * r2[:, None] + r) y = triton_unbroadcast(x, tl.arange(0, 32)[:, None].shape) tl.store(Y + r2[:, None], y)
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py
bc5f4d34-0861-44b0-9b72-4824516d105b
causal_product.py
calclavia/Triton-Transformer
ttx/attention/causal_product.py
d1d1e5b5651cf7959866b0198d90a665e1f45354
0
@triton.jit def causal_product_kernel(q_ptr, k_ptr, v_ptr, output_ptr, batch, length, dim, vdim, **meta): BLOCK_SIZE = meta['BLOCK_SIZE'] pid = tl.program_id(axis=0) state = tl.zeros((BLOCK_SIZE, BLOCK_SIZE), dtype=tl.float32) cur_qk_pos = pid * length * dim cur_v_pos = pid * length * vdim dim_ptrs = tl.arange(0, BLOCK_SIZE) qk_mask = dim_ptrs < dim v_mask = dim_ptrs < vdim for _ in range(0, length, 1): qk_row_offsets = cur_qk_pos + dim_ptrs v_row_offsets = cur_v_pos + dim_ptrs k = tl.load(k_ptr + qk_row_offsets, mask=qk_mask, other=0) v = tl.load(v_ptr + v_row_offsets, mask=v_mask, other=0) context = tl.dot(k[:, None], v[None, :]) state += context q = tl.load(q_ptr + qk_row_offsets, mask=qk_mask, other=0) output = tl.dot(q[None, :], state) tl.store(output_ptr + v_row_offsets[None, :], output, mask=v_mask[ None, :]) cur_qk_pos += dim cur_v_pos += vdim
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/calclavia/Triton-Transformer/blob/d1d1e5b5651cf7959866b0198d90a665e1f45354/ttx/attention/causal_product.py
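The recurrence the kernel streams, as a per-(batch, head) PyTorch reference: a running dim x vdim state accumulates the outer product k_t (x) v_t and is queried by q_t at every step, which is exactly causal linear attention without a normalizer.

import torch

length, dim, vdim = 5, 4, 6
q = torch.randn(length, dim)
k = torch.randn(length, dim)
v = torch.randn(length, vdim)

state = torch.zeros(dim, vdim)
out = torch.zeros(length, vdim)
for t in range(length):
    state += torch.outer(k[t], v[t])  # state += k[:, None] @ v[None, :]
    out[t] = q[t] @ state             # output = q @ state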
2cccc53e-fdd8-4955-b9a0-e935a64f8577
bucketed_argmax.py
graphcore-research/pytorch-approx-topk
approx_topk/experimental/bucketed_argmax.py
339eea971f17bf810e2eec746a06b9c93dc4cce0
0
@triton.jit def _topk_triton_kernel__parallel_bkn(xs_ptr, values_out_ptr, indices_out_ptr, xs_stride: int, n_stride: int, b: int, k: int, n: int, BLOCK_BK: tl.constexpr, BLOCK_N: tl.constexpr, PAD_VALUE: tl.constexpr, INTERLEAVED: tl.constexpr): idx = tl.program_id(axis=0) * BLOCK_BK + tl.arange(0, BLOCK_BK) b_idx, k_idx = idx // k, idx % k if INTERLEAVED: k_stride, i_stride = 1, k else: k_stride, i_stride = n_stride, 1 ni = tl.arange(0, BLOCK_N) n_idx = k_idx[:, None] * k_stride + ni[None, :] * i_stride data = tl.load(xs_ptr + b_idx[:, None] * xs_stride + n_idx, mask=(b_idx [:, None] < b) & (n_idx < n) & (ni < n_stride), other=PAD_VALUE) max_value, max_i = tl.max(data, axis=1, return_indices=True) max_index = k_idx * k_stride + max_i * i_stride tl.store(values_out_ptr + b_idx * k + k_idx, max_value, mask=b_idx < b) tl.store(indices_out_ptr + b_idx * k + k_idx, max_index, mask=b_idx < b)
{ "Data Type": [ "fp32" ], "Functionality": [ "Top-K Selection" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Batch-Oriented" ] }
[ "MIT" ]
https://github.com/graphcore-research/pytorch-approx-topk/blob/339eea971f17bf810e2eec746a06b9c93dc4cce0/approx_topk/experimental/bucketed_argmax.py
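A reference for the non-interleaved (sequential-bucket) layout, assuming n is a multiple of the bucket size so PAD_VALUE is never needed: each of the k buckets contributes its own argmax, giving an approximate top-k in one parallel pass.

import torch

b, n, k = 2, 12, 3
bucket = n // k                        # n_stride in the kernel
xs = torch.randn(b, n)
vals, idx = xs.view(b, k, bucket).max(dim=2)  # one max per bucket
idx = idx + torch.arange(k) * bucket   # map back to positions within the row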
7924e701-dd48-4f1d-bf9f-b59ffeb4ff7a
ln_linear_triton.py
ethansmith2000/fused-layer-norm
ln_linear_triton.py
84fe243a829364acdcfd7cd70b699db04838af0f
0
@triton.jit def _layer_norm_bwd_dx_fused(DX, DY, DSc, DSh, Y, Sc, Sh, Mean, Rstd, Lock, stride, N, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr): row = tl.program_id(0) cols = tl.arange(0, BLOCK_SIZE_N) mask = cols < N Y += row * stride DY += row * stride DX += row * stride lock_id = row % GROUP_SIZE_M Lock += lock_id Count = Lock + GROUP_SIZE_M DSc = DSc + lock_id * N + cols DSh = DSh + lock_id * N + cols y = tl.load(Y + cols, mask=mask, other=0).to(tl.float32) dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) sc = tl.load(Sc + cols, mask=mask).to(tl.float32) sh = tl.load(Sh + cols, mask=mask).to(tl.float32) rstd = tl.load(Rstd + row) xhat = (y - sh) / sc scdy = sc * dy xhat = tl.where(mask, xhat, 0.0) scdy = tl.where(mask, scdy, 0.0) c1 = tl.sum(xhat * scdy, axis=0) / N c2 = tl.sum(scdy, axis=0) / N dx = (scdy - (xhat * c1 + c2)) * rstd tl.store(DX + cols, dx, mask=mask) partial_dsc = (dy * xhat).to(sc.dtype) partial_dsh = dy.to(sc.dtype) while tl.atomic_cas(Lock, 0, 1) == 1: pass count = tl.load(Count) if count == 0: tl.atomic_xchg(Count, 1) else: partial_dsc += tl.load(DSc, mask=mask) partial_dsh += tl.load(DSh, mask=mask) tl.store(DSc, partial_dsc, mask=mask) tl.store(DSh, partial_dsh, mask=mask) tl.atomic_xchg(Lock, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/ethansmith2000/fused-layer-norm/blob/84fe243a829364acdcfd7cd70b699db04838af0f/ln_linear_triton.py
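This kernel follows the standard Triton layer-norm tutorial pattern: rows hash into GROUP_SIZE_M lock-protected partial buffers (the atomic_cas spin lock serializes writers sharing a lock_id), and a final reduction over the group axis, shown here as a PyTorch sketch and often done by a second small kernel, yields the scale/shift gradients.

import torch

GROUP_SIZE_M, N = 64, 256  # illustrative sizes
dsc_partial = torch.randn(GROUP_SIZE_M, N)  # DSc buffer written under the lock
dsh_partial = torch.randn(GROUP_SIZE_M, N)  # DSh buffer written under the lock
dsc = dsc_partial.sum(dim=0)  # final dScale
dsh = dsh_partial.sum(dim=0)  # final dShift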
62aa8411-9a66-4488-be2f-bd3fdaec1510
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BV', 'BT']) @triton.jit def chunk_gla_bwd_kernel_dA(v, do, dA, offsets, indices, scale, T: tl. constexpr, H: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BV: tl. constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_t, i_bh = tl.program_id(0), tl.program_id(1) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) else: bos, eos = i_b * T, i_b * T + T T = eos - bos b_dA = tl.zeros([BT, BT], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): if HEAD_FIRST: p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * T * V, (V, T), (1, V), (i_v * BV, i_t * BT), (BV, BT), (0, 1)) else: p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (V, T), (1, H * V), (i_v * BV, i_t * BT), (BV, BT), (0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_dA += tl.dot(b_do, b_v) if HEAD_FIRST: p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) else: p_dA = tl.make_block_ptr(dA + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) m_s = tl.arange(0, BT)[:, None] >= tl.arange(0, BT)[None, :] b_dA = tl.where(m_s, b_dA * scale, 0.0) tl.store(p_dA, b_dA.to(p_dA.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
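Per-chunk semantics as a PyTorch sketch (sizes illustrative): dA is the lower-triangular (causal, diagonal included, since the mask uses >=) part of do @ v^T accumulated over V-tiles, then scaled.

import torch

BT, V, scale = 16, 64, 0.5
do = torch.randn(BT, V)
v = torch.randn(BT, V)
# m_s keeps entries with row >= col, i.e. torch.tril including the diagonal.
dA = torch.tril(do @ v.T) * scale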
7f214f7f-42e1-4445-be83-9e13d10d6055
fused_kl_div.py
sustcsonglin/flash-linear-attention
fla/modules/fused_kl_div.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def kl_div_kernel(logits, target_logits, loss, s_logits, s_loss, reduction: tl.constexpr, N: tl.constexpr, V: tl.constexpr, BV: tl.constexpr): i_n = tl.program_id(0).to(tl.int64) logits += i_n * s_logits target_logits += i_n * s_logits sm, tm = float('-inf'), float('-inf') sd, td = 0.0, 0.0 NV = tl.cdiv(V, BV) for iv in range(0, NV): o_x = iv * BV + tl.arange(0, BV) b_sl = tl.load(logits + o_x, mask=o_x < V, other=float('-inf')) b_sm = tl.max(b_sl) m_new = tl.maximum(sm, b_sm) sd = sd * tl.exp(sm - m_new) + tl.sum(tl.exp(b_sl - m_new)) sm = m_new b_tl = tl.load(target_logits + o_x, mask=o_x < V, other=float('-inf')) b_tm = tl.max(b_tl) m_new = tl.maximum(tm, b_tm) td = td * tl.exp(tm - m_new) + tl.sum(tl.exp(b_tl - m_new)) tm = m_new b_loss = 0.0 for iv in range(0, NV): o_x = iv * BV + tl.arange(0, BV) b_sl = tl.load(logits + o_x, mask=o_x < V, other=float('-inf')) b_tl = tl.load(target_logits + o_x, mask=o_x < V, other=float('-inf')) b_sp_log = b_sl - sm - tl.log(sd) b_tp_log = b_tl - tm - tl.log(td) b_sp = tl.exp(b_sp_log) b_tp = tl.exp(b_tp_log) b_kl = tl.where(o_x < V, b_tp * (b_tp_log - b_sp_log), 0) b_dl = -b_tp + b_sp b_loss += tl.sum(b_kl) if reduction == 'batchmean': b_dl = b_dl / N tl.store(logits + o_x, b_dl, mask=o_x < V) if reduction == 'batchmean': b_loss = b_loss / N tl.store(loss + i_n * s_loss, b_loss)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_kl_div.py
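Reference semantics for kl_div_kernel, reconstructed from the kernel body (an illustration, not the repo's public API): the forward value per row is KL(target || student), and the gradient softmax(student) - softmax(target) is written back into logits in place.

import torch
import torch.nn.functional as F

def kl_div_ref(logits, target_logits, reduction='batchmean'):
    sp = F.log_softmax(logits, dim=-1)          # student log-probs
    tp = F.log_softmax(target_logits, dim=-1)   # target log-probs
    # Matches b_tp * (b_tp_log - b_sp_log) summed over the vocabulary;
    # 'batchmean' divides by the batch size N, as the kernel does.
    return F.kl_div(sp, tp, log_target=True, reduction=reduction)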
b8cbf987-fa5e-4eb6-a8ce-1e45c7cfcc13
layer_norm.py
jiweibo/MMA
bench/layer_norm.py
f8df6f8e3e9095110b651c31b081e39b2713a7c9
0
@triton.jit def _layer_norm_fwd_fused(Out, A, Weight, Bias, Mean, Rstd, stride, N, eps, BLOCK_SIZE: tl.constexpr): row = tl.program_id(0) Out += row * stride A += row * stride mean = 0 _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) a = tl.load(A + cols, mask=cols < N, other=0.0, eviction_policy= 'evict_last').to(tl.float32) _mean += a mean = tl.sum(_mean, axis=0) / N _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) a = tl.load(A + cols, mask=cols < N, other=0.0, eviction_policy= 'evict_last').to(tl.float32) a = tl.where(cols < N, a - mean, 0.0) _var += a * a var = tl.sum(_var, axis=0) / N rstd = 1 / tl.sqrt(var + eps) tl.store(Mean + row, mean) tl.store(Rstd + row, rstd) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N weight = tl.load(Weight + cols, mask=mask) bias = tl.load(Bias + cols, mask=mask) a = tl.load(A + cols, mask=mask, other=0.0, eviction_policy= 'evict_first').to(tl.float32) a_hat = (a - mean) * rstd out = a_hat * weight + bias tl.store(Out + cols, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/jiweibo/MMA/blob/f8df6f8e3e9095110b651c31b081e39b2713a7c9/bench/layer_norm.py
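A minimal host-side launcher for _layer_norm_fwd_fused, assuming a contiguous 2D input; the wrapper name and the block-size cap are my assumptions, not part of the benchmark file.

import torch
import triton

def layer_norm_fwd(a, weight, bias, eps=1e-5):
    M, N = a.shape
    out = torch.empty_like(a)
    mean = torch.empty(M, dtype=torch.float32, device=a.device)
    rstd = torch.empty(M, dtype=torch.float32, device=a.device)
    # Rows wider than BLOCK_SIZE are handled by the kernel's column loop.
    BLOCK_SIZE = min(triton.next_power_of_2(N), 4096)
    _layer_norm_fwd_fused[(M,)](out, a, weight, bias, mean, rstd,
                                a.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE)
    return out, mean, rstd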
ecdab7b6-1233-4dcc-807a-beba3c0d7bbb
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/simple_gla/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def parallel_simple_gla_bwd_kernel_dq(i_bh, i_t, i_k, i_v, i_kv, q, k, v, g, do, dq, dg, s_k_h, s_k_t, s_v_h, s_v_t, scale, B: tl.constexpr, H: tl. constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl. constexpr, BS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr): p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_dq = tl.zeros([BT, BK], dtype=tl.float32) for i_s in range(0, i_t * BT, BS): p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_s, i_k * BK), (BS, BK), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (1, s_v_t), (i_v * BV, i_s), (BV, BS), (0, 1)) p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_g = tl.load(p_g, boundary_check=(0,)) b_gn = tl.load(g + i_bh * T + min(i_s + BS, T) - 1) b_gp = tl.load(g + i_bh * T + i_s - 1) if i_s % BT > 0 else 0.0 b_ds = tl.dot(b_do, b_v, allow_tf32=False) * tl.exp(b_gn - b_g)[None, : ] if i_s > 0: b_dq *= tl.exp(b_gn - b_gp) b_dq += tl.dot(b_ds.to(b_v.dtype), b_k, allow_tf32=False) p_gq = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,) ) b_gq = tl.load(p_gq, boundary_check=(0,)) b_dq *= tl.exp(b_gq)[:, None] * scale o_q = i_t * BT + tl.arange(0, BT) o_k = i_t * BT + tl.arange(0, BS) for i_s in range(i_t * BT, min((i_t + 1) * BT, T), BS): p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_s, i_k * BK), (BS, BK), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (1, s_v_t), (i_v * BV, i_s), (BV, BS), (0, 1)) p_gk = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0,)) m_s = o_q[:, None] >= o_k[None, :] b_ds = tl.where(m_s, tl.dot(b_do, b_v, allow_tf32=False) * tl.exp( b_gq[:, None] - b_gk[None, :]), 0) * scale b_dq += tl.dot(b_ds.to(b_k.dtype), b_k, allow_tf32=False) o_k += BS p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dq = tl.make_block_ptr(dq + (i_v * B * H + i_bh) * s_k_h, (T, K), ( s_k_t, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dg = tl.make_block_ptr(dg + (i_kv * B * H + i_bh) * T, (T,), (1,), ( i_t * BT,), (BT,), (0,)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_dg = tl.sum(b_dq * b_q, 1) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/parallel.py
d1e0e5b8-b819-414f-9789-a449f97c5c56
kernels.py
ShenzheZhu/sparse_autoencoder
sparse_autoencoder/kernels.py
afef049c905fda5b0f69729127ce0d3a42399152
0
@triton.jit def triton_add_mul_kernel(x_ptr, a_ptr, b_ptr, c, stride_x0, stride_x1, stride_a0, stride_a1, stride_b0, stride_b1, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, M: tl.constexpr, N: tl.constexpr): pid_m = tl.program_id(0) pid_n = tl.program_id(1) offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N x = tl.load(x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1, mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N)) a = tl.load(a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1, mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N)) b = tl.load(b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1, mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N)) x_dtype = x.dtype x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype ) tl.store(x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1, x, mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/ShenzheZhu/sparse_autoencoder/blob/afef049c905fda5b0f69729127ce0d3a42399152/sparse_autoencoder/kernels.py
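A sketch of how this kernel might be launched (the wrapper name and tile sizes are assumptions): it fuses x += a * b * c in place over 2D tensors with a 2D grid of tiles.

import torch
import triton

def add_mul_(x, a, b, c, BLOCK_M=32, BLOCK_N=32):
    M, N = x.shape
    grid = (triton.cdiv(M, BLOCK_M), triton.cdiv(N, BLOCK_N))
    triton_add_mul_kernel[grid](x, a, b, c,
                                x.stride(0), x.stride(1),
                                a.stride(0), a.stride(1),
                                b.stride(0), b.stride(1),
                                BLOCK_SIZE_M=BLOCK_M, BLOCK_SIZE_N=BLOCK_N,
                                M=M, N=N)
    return x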
f8a04198-8560-4bd6-84b9-8bb4719d3310
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def dense_jagged_cat_jagged_out_kernel(a_ptr, b_ptr, c_ptr, b_offsets_ptr, c_offsets_ptr, max_seq_len, BLOCK_SIZE: tl.constexpr): pid_batch = tl.program_id(0) b_start = tl.load(b_offsets_ptr + pid_batch) b_end = tl.load(b_offsets_ptr + pid_batch + 1) c_start = b_start + pid_batch N = b_end - b_start N = tl.minimum(N, max_seq_len) a = tl.load(a_ptr + pid_batch) tl.store(c_ptr + c_start, a) offs_k = tl.arange(0, BLOCK_SIZE) for k in range(0, N, BLOCK_SIZE): b_offset = k + offs_k b_ptrs = b_ptr + b_start + b_offset b = tl.load(b_ptrs, mask=b_offset < N, other=0.0) tl.store(c_ptr + c_start + 1 + b_offset, b, mask=b_offset < N) tl.store(c_offsets_ptr + pid_batch, b_start + pid_batch)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
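An assumed host-side setup for dense_jagged_cat_jagged_out_kernel (FBGEMM's real wrapper may differ): `a` holds one scalar per batch that is prepended to the corresponding jagged row of `b`.

import torch

def dense_jagged_cat(a, b, b_offsets, max_seq_len, BLOCK_SIZE=128):
    # Every row is assumed to fit within max_seq_len, so the output offsets
    # are simply the input offsets shifted by the batch index.
    B = a.numel()
    c = torch.empty(b.numel() + B, dtype=b.dtype, device=b.device)
    c_offsets = torch.empty(B + 1, dtype=b_offsets.dtype, device=b.device)
    dense_jagged_cat_jagged_out_kernel[(B,)](a, b, c, b_offsets, c_offsets,
                                             max_seq_len,
                                             BLOCK_SIZE=BLOCK_SIZE)
    c_offsets[-1] = b_offsets[-1] + B  # the kernel fills entries 0..B-1 only
    return c, c_offsets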
9c3d59d9-a8a8-42ff-a0c4-03f9a9c1683a
test_autodiff.py
srush/triton-autodiff
tests/test_autodiff.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit def tr1(X, Y): r = tl.arange(0, 16) x = tl.load(X + r) y = comp2tt(x) tl.store(Y + 16 * r[:, None] + r, y)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py
bf9fed28-eaf5-46e1-8969-eec1c9a5c2f7
06-fused-attention.py
2lambda123/triton
python/tutorials/06-fused-attention.py
09e27725b89043a07f49c440db6a9aedcfba8432
0
@triton.jit def max_fn(x, y): return tl.math.max(x, y)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/2lambda123/triton/blob/09e27725b89043a07f49c440db6a9aedcfba8432/python/tutorials/06-fused-attention.py
313cbd67-cc75-4672-a355-e8c80754facc
ops.py
shawntan/scattermoe
scattermoe/kernels/ops.py
63b76a2f5f28c052fb4cd7c34479a54158354052
0
@triton.autotune(configs=_config_grouping(), key=['K']) @triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] == 0} ) @triton.jit def _group(src_ptr, stride_sn, stride_sk, has_coeff: tl.constexpr, coeff_ptr, FAN_OUT: tl.constexpr, tgt_ptr, stride_tn, stride_ti, grouped_idx_ptr, N, K: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl .constexpr, NO_K_MASK: tl.constexpr): pid = tl.program_id(axis=0) N_block_id = pid N_blk = N_block_id * BLOCK_N + tl.arange(0, BLOCK_N) N_mask = N_blk < N N_blk = tl.max_contiguous(tl.multiple_of(N_blk % N, BLOCK_N), BLOCK_N) N_idx = tl.load(grouped_idx_ptr + N_blk, mask=N_mask, other=0) K_blk = tl.arange(0, BLOCK_K) src_blk_ptrs = src_ptr + (N_idx // FAN_OUT)[:, None] * stride_sn + K_blk[ None, :] * stride_sk tgt_blk_ptrs = tgt_ptr + N_blk[:, None] * stride_tn + K_blk[None, : ] * stride_ti if has_coeff: c = tl.load(coeff_ptr + N_idx, mask=N_mask)[:, None] iters = tl.cdiv(K, BLOCK_K) for i in range(0, iters): if NO_K_MASK or i < iters - 1: block = tl.load(src_blk_ptrs, mask=N_mask[:, None]) if has_coeff: block *= c tl.store(tgt_blk_ptrs, block, mask=N_mask[:, None]) else: K_mask = i * BLOCK_K + K_blk < K mask = N_mask[:, None] & K_mask[None, :] block = tl.load(src_blk_ptrs, mask=mask) if has_coeff: block *= c tl.store(tgt_blk_ptrs, block, mask=mask) src_blk_ptrs += BLOCK_K * stride_sk tgt_blk_ptrs += BLOCK_K * stride_ti
{ "Data Type": [], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py
e9ec4ca4-67b0-4b4c-ae46-1b511c456193
_semi_structured_conversions.py
huyz2023/2by4-pretrain
sparse/_semi_structured_conversions.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.autotune(configs=get_configs(), key=['m', 'k']) @triton.jit def _sparse_semi_structured_from_dense_triton_8(dense_ptr, sparse_ptr, meta_reordered_ptr, mask_ptr, dense_row_stride, sparse_row_stride, mask_row_stride, dense_col_stride, sparse_col_stride, mask_col_stride, m, k, seed, BLOCK_SIZE: tl.constexpr, PRUNE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr): if ARRAY_LAYOUT == 'row': row_idx = tl.program_id(0) col_idx = tl.program_id(1) * 32 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE ) * 32 mask = col_idx < k elif ARRAY_LAYOUT == 'col': row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE col_idx = tl.program_id(1) * 32 mask = row_idx < m dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 0) * dense_col_stride, mask=mask) dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 1) * dense_col_stride, mask=mask) dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 2) * dense_col_stride, mask=mask) dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 3) * dense_col_stride, mask=mask) dense_44 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 4) * dense_col_stride, mask=mask) dense_45 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 5) * dense_col_stride, mask=mask) dense_46 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 6) * dense_col_stride, mask=mask) dense_47 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 7) * dense_col_stride, mask=mask) dense_48 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 8) * dense_col_stride, mask=mask) dense_49 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 9) * dense_col_stride, mask=mask) dense_4A = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 10) * dense_col_stride, mask=mask) dense_4B = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 11) * dense_col_stride, mask=mask) dense_4C = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 12) * dense_col_stride, mask=mask) dense_4D = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 13) * dense_col_stride, mask=mask) dense_4E = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 14) * dense_col_stride, mask=mask) dense_4F = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 15) * dense_col_stride, mask=mask) dense_4G = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 16) * dense_col_stride, mask=mask) dense_4H = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 17) * dense_col_stride, mask=mask) dense_4I = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 18) * dense_col_stride, mask=mask) dense_4J = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 19) * dense_col_stride, mask=mask) dense_4K = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 20) * dense_col_stride, mask=mask) dense_4L = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 21) * dense_col_stride, mask=mask) dense_4M = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 22) * dense_col_stride, mask=mask) dense_4N = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 23) * dense_col_stride, mask=mask) dense_4O = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 24) * dense_col_stride, mask=mask) dense_4P = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 25) * dense_col_stride, mask=mask) dense_4Q = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 26) * dense_col_stride, mask=mask) dense_4R = tl.load(dense_ptr + row_idx * 
dense_row_stride + (col_idx + 27) * dense_col_stride, mask=mask) dense_4S = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 28) * dense_col_stride, mask=mask) dense_4T = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 29) * dense_col_stride, mask=mask) dense_4U = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 30) * dense_col_stride, mask=mask) dense_4V = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 31) * dense_col_stride, mask=mask) if PRUNE == 'mse': (_dense_40, _dense_41, _dense_42, _dense_43, _dense_44, _dense_45, _dense_46, _dense_47, _dense_48, _dense_49, _dense_4A, _dense_4B, _dense_4C, _dense_4D, _dense_4E, _dense_4F) = ( dense_40, dense_41, dense_42, dense_43, dense_44, dense_45, dense_46, dense_47, dense_48, dense_49, dense_4A, dense_4B, dense_4C, dense_4D, dense_4E, dense_4F) (_dense_4G, _dense_4H, _dense_4I, _dense_4J, _dense_4K, _dense_4L, _dense_4M, _dense_4N, _dense_4O, _dense_4P, _dense_4Q, _dense_4R, _dense_4S, _dense_4T, _dense_4U, _dense_4V) = ( dense_4G, dense_4H, dense_4I, dense_4J, dense_4K, dense_4L, dense_4M, dense_4N, dense_4O, dense_4P, dense_4Q, dense_4R, dense_4S, dense_4T, dense_4U, dense_4V) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_40) > tl.abs(_dense_41), tl.abs( _dense_40) > tl.abs(_dense_42), tl.abs(_dense_40) > tl.abs( _dense_43), tl.abs(_dense_41) > tl.abs(_dense_42), tl.abs(_dense_41 ) > tl.abs(_dense_43), tl.abs(_dense_42) > tl.abs(_dense_43) m0, m1, m2, m3 = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_44) > tl.abs(_dense_45), tl.abs( _dense_44) > tl.abs(_dense_46), tl.abs(_dense_44) > tl.abs( _dense_47), tl.abs(_dense_45) > tl.abs(_dense_46), tl.abs(_dense_45 ) > tl.abs(_dense_47), tl.abs(_dense_46) > tl.abs(_dense_47) m4, m5, m6, m7 = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_48) > tl.abs(_dense_49), tl.abs( _dense_48) > tl.abs(_dense_4A), tl.abs(_dense_48) > tl.abs( _dense_4B), tl.abs(_dense_49) > tl.abs(_dense_4A), tl.abs(_dense_49 ) > tl.abs(_dense_4B), tl.abs(_dense_4A) > tl.abs(_dense_4B) m8, m9, mA, mB = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_4C) > tl.abs(_dense_4D), tl.abs( _dense_4C) > tl.abs(_dense_4E), tl.abs(_dense_4C) > tl.abs( _dense_4F), tl.abs(_dense_4D) > tl.abs(_dense_4E), tl.abs(_dense_4D ) > tl.abs(_dense_4F), tl.abs(_dense_4E) > tl.abs(_dense_4F) mC, mD, mE, mF = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_4G) > tl.abs(_dense_4H), tl.abs( _dense_4G) > tl.abs(_dense_4I), tl.abs(_dense_4G) > tl.abs( _dense_4J), tl.abs(_dense_4H) > tl.abs(_dense_4I), tl.abs(_dense_4H ) > tl.abs(_dense_4J), tl.abs(_dense_4I) > tl.abs(_dense_4J) mG, mH, mI, mJ = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_4K) > tl.abs(_dense_4L), tl.abs( _dense_4K) > tl.abs(_dense_4M), tl.abs(_dense_4K) > tl.abs( _dense_4N), tl.abs(_dense_4L) > tl.abs(_dense_4M), tl.abs(_dense_4L ) > tl.abs(_dense_4N), tl.abs(_dense_4M) > tl.abs(_dense_4N) mK, mL, mM, mN = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & 
x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_4O) > tl.abs(_dense_4P), tl.abs( _dense_4O) > tl.abs(_dense_4Q), tl.abs(_dense_4O) > tl.abs( _dense_4R), tl.abs(_dense_4P) > tl.abs(_dense_4Q), tl.abs(_dense_4P ) > tl.abs(_dense_4R), tl.abs(_dense_4Q) > tl.abs(_dense_4R) mO, mP, mQ, mR = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) x1, x2, x3, x4, x5, x6 = tl.abs(_dense_4S) > tl.abs(_dense_4T), tl.abs( _dense_4S) > tl.abs(_dense_4U), tl.abs(_dense_4S) > tl.abs( _dense_4V), tl.abs(_dense_4T) > tl.abs(_dense_4U), tl.abs(_dense_4T ) > tl.abs(_dense_4V), tl.abs(_dense_4U) > tl.abs(_dense_4V) mS, mT, mU, mV = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~ x6 | ~x5 & ~x6) elif PRUNE == 'mask': m0 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 0) * mask_col_stride, mask=mask).to(tl.int1) m1 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 1) * mask_col_stride, mask=mask).to(tl.int1) m2 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 2) * mask_col_stride, mask=mask).to(tl.int1) m3 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 3) * mask_col_stride, mask=mask).to(tl.int1) m4 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 4) * mask_col_stride, mask=mask).to(tl.int1) m5 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 5) * mask_col_stride, mask=mask).to(tl.int1) m6 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 6) * mask_col_stride, mask=mask).to(tl.int1) m7 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 7) * mask_col_stride, mask=mask).to(tl.int1) m8 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 8) * mask_col_stride, mask=mask).to(tl.int1) m9 = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 9) * mask_col_stride, mask=mask).to(tl.int1) mA = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 10) * mask_col_stride, mask=mask).to(tl.int1) mB = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 11) * mask_col_stride, mask=mask).to(tl.int1) mC = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 12) * mask_col_stride, mask=mask).to(tl.int1) mD = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 13) * mask_col_stride, mask=mask).to(tl.int1) mE = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 14) * mask_col_stride, mask=mask).to(tl.int1) mF = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 15) * mask_col_stride, mask=mask).to(tl.int1) mG = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 16) * mask_col_stride, mask=mask).to(tl.int1) mH = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 17) * mask_col_stride, mask=mask).to(tl.int1) mI = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 18) * mask_col_stride, mask=mask).to(tl.int1) mJ = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 19) * mask_col_stride, mask=mask).to(tl.int1) mK = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 20) * mask_col_stride, mask=mask).to(tl.int1) mL = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 21) * mask_col_stride, mask=mask).to(tl.int1) mM = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 22) * mask_col_stride, mask=mask).to(tl.int1) mN = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 23) * mask_col_stride, mask=mask).to(tl.int1) mO = tl.load(mask_ptr + 
row_idx * mask_row_stride + (col_idx + 24) * mask_col_stride, mask=mask).to(tl.int1) mP = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 25) * mask_col_stride, mask=mask).to(tl.int1) mQ = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 26) * mask_col_stride, mask=mask).to(tl.int1) mR = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 27) * mask_col_stride, mask=mask).to(tl.int1) mS = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 28) * mask_col_stride, mask=mask).to(tl.int1) mT = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 29) * mask_col_stride, mask=mask).to(tl.int1) mU = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 30) * mask_col_stride, mask=mask).to(tl.int1) mV = tl.load(mask_ptr + row_idx * mask_row_stride + (col_idx + 31) * mask_col_stride, mask=mask).to(tl.int1) elif PRUNE == 'mvue': if ARRAY_LAYOUT == 'row': seed0 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2 seed1 = seed + (tl.program_id(0) + tl.program_id(1) * m) * 2 + 1 else: seed0 = seed + (tl.program_id(0) * k // 32 + tl.program_id(1)) * 2 seed1 = seed + (tl.program_id(0) * k // 32 + tl.program_id(1) ) * 2 + 1 random0, random1, random2, random3 = tl.rand4x(seed0, tl.arange(0, BLOCK_SIZE), n_rounds=5) random4, random5, random6, random7 = tl.rand4x(seed1, tl.arange(0, BLOCK_SIZE), n_rounds=5) dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = ( _MVUE24_approx(dense_40, dense_41, dense_42, dense_43, random0, random1)) dense_44, dense_45, dense_46, dense_47, m4, m5, m6, m7 = ( _MVUE24_approx(dense_44, dense_45, dense_46, dense_47, random2, random3)) dense_48, dense_49, dense_4A, dense_4B, m8, m9, mA, mB = ( _MVUE24_approx(dense_48, dense_49, dense_4A, dense_4B, random4, random5)) dense_4C, dense_4D, dense_4E, dense_4F, mC, mD, mE, mF = ( _MVUE24_approx(dense_4C, dense_4D, dense_4E, dense_4F, random6, random7)) else: m0 = dense_40 != 0 m1 = dense_41 != 0 m2 = dense_42 != 0 m3 = dense_43 != 0 m4 = dense_44 != 0 m5 = dense_45 != 0 m6 = dense_46 != 0 m7 = dense_47 != 0 m8 = dense_48 != 0 m9 = dense_49 != 0 mA = dense_4A != 0 mB = dense_4B != 0 mC = dense_4C != 0 mD = dense_4D != 0 mE = dense_4E != 0 mF = dense_4F != 0 mG = dense_4G != 0 mH = dense_4H != 0 mI = dense_4I != 0 mJ = dense_4J != 0 mK = dense_4K != 0 mL = dense_4L != 0 mM = dense_4M != 0 mN = dense_4N != 0 mO = dense_4O != 0 mP = dense_4P != 0 mQ = dense_4Q != 0 mR = dense_4R != 0 mS = dense_4S != 0 mT = dense_4T != 0 mU = dense_4U != 0 mV = dense_4V != 0 bit0 = ~m0 & m1 bit1 = ~m0 & ~m1 bit2 = bit1 | ~m2 bit3 = bit0 | ~m1 | m2 idxs0 = bit0 | bit1.to(tl.int64) << 1 idxs1 = bit2 | bit3.to(tl.int64) << 1 sparse0 = tl.where(bit1, tl.where(bit0, dense_43, dense_42), tl.where( bit0, dense_41, dense_40)) sparse1 = tl.where(bit3, tl.where(bit2, dense_43, dense_42), tl.where( bit2, dense_41, dense_40)) bit4 = ~m4 & m5 bit5 = ~m4 & ~m5 bit6 = bit5 | ~m6 bit7 = bit4 | ~m5 | m6 idxs2 = bit4 | bit5.to(tl.int64) << 1 idxs3 = bit6 | bit7.to(tl.int64) << 1 sparse2 = tl.where(bit5, tl.where(bit4, dense_47, dense_46), tl.where( bit4, dense_45, dense_44)) sparse3 = tl.where(bit7, tl.where(bit6, dense_47, dense_46), tl.where( bit6, dense_45, dense_44)) bit8 = ~m8 & m9 bit9 = ~m8 & ~m9 bitA = bit9 | ~mA bitB = bit8 | ~m9 | mA idxs4 = bit8 | bit9.to(tl.int64) << 1 idxs5 = bitA | bitB.to(tl.int64) << 1 sparse4 = tl.where(bit9, tl.where(bit8, dense_4B, dense_4A), tl.where( bit8, dense_49, dense_48)) sparse5 = tl.where(bitB, tl.where(bitA, dense_4B, dense_4A), tl.where( bitA, dense_49, dense_48)) bitC = ~mC & mD 
bitD = ~mC & ~mD bitE = bitD | ~mE bitF = bitC | ~mD | mE idxs6 = bitC | bitD.to(tl.int64) << 1 idxs7 = bitE | bitF.to(tl.int64) << 1 sparse6 = tl.where(bitD, tl.where(bitC, dense_4F, dense_4E), tl.where( bitC, dense_4D, dense_4C)) sparse7 = tl.where(bitF, tl.where(bitE, dense_4F, dense_4E), tl.where( bitE, dense_4D, dense_4C)) bitG = ~mG & mH bitH = ~mG & ~mH bitI = bitH | ~mI bitJ = bitG | ~mH | mI idxs8 = bitG | bitH.to(tl.int64) << 1 idxs9 = bitI | bitJ.to(tl.int64) << 1 sparse8 = tl.where(bitH, tl.where(bitG, dense_4J, dense_4I), tl.where( bitG, dense_4H, dense_4G)) sparse9 = tl.where(bitJ, tl.where(bitI, dense_4J, dense_4I), tl.where( bitI, dense_4H, dense_4G)) bitK = ~mK & mL bitL = ~mK & ~mL bitM = bitL | ~mM bitN = bitK | ~mL | mM idxsA = bitK | bitL.to(tl.int64) << 1 idxsB = bitM | bitN.to(tl.int64) << 1 sparseA = tl.where(bitL, tl.where(bitK, dense_4N, dense_4M), tl.where( bitK, dense_4L, dense_4K)) sparseB = tl.where(bitN, tl.where(bitM, dense_4N, dense_4M), tl.where( bitM, dense_4L, dense_4K)) bitO = ~mO & mP bitP = ~mO & ~mP bitQ = bitP | ~mQ bitR = bitO | ~mP | mQ idxsC = bitO | bitP.to(tl.int64) << 1 idxsD = bitQ | bitR.to(tl.int64) << 1 sparseC = tl.where(bitP, tl.where(bitO, dense_4R, dense_4Q), tl.where( bitO, dense_4P, dense_4O)) sparseD = tl.where(bitR, tl.where(bitQ, dense_4R, dense_4Q), tl.where( bitQ, dense_4P, dense_4O)) bitS = ~mS & mT bitT = ~mS & ~mT bitU = bitT | ~mU bitV = bitS | ~mT | mU idxsE = bitS | bitT.to(tl.int64) << 1 idxsF = bitU | bitV.to(tl.int64) << 1 sparseE = tl.where(bitT, tl.where(bitS, dense_4V, dense_4U), tl.where( bitS, dense_4T, dense_4S)) sparseF = tl.where(bitV, tl.where(bitU, dense_4V, dense_4U), tl.where( bitU, dense_4T, dense_4S)) col_idx = tl.program_id(1) * 16 if ARRAY_LAYOUT == 'row': col_idx = tl.program_id(1) * 16 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE ) * 16 mask = col_idx < k // 2 else: col_idx = tl.program_id(1) * 16 mask = row_idx < m tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) * sparse_col_stride, sparse0, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) * sparse_col_stride, sparse1, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) * sparse_col_stride, sparse2, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) * sparse_col_stride, sparse3, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 4) * sparse_col_stride, sparse4, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 5) * sparse_col_stride, sparse5, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 6) * sparse_col_stride, sparse6, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 7) * sparse_col_stride, sparse7, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 8) * sparse_col_stride, sparse8, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 9) * sparse_col_stride, sparse9, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 10) * sparse_col_stride, sparseA, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 11) * sparse_col_stride, sparseB, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 12) * sparse_col_stride, sparseC, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 13) * sparse_col_stride, sparseD, mask=mask) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 14) * sparse_col_stride, sparseE, mask=mask) 
tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 15) * sparse_col_stride, sparseF, mask=mask) meta_40 = idxs0 | idxs1 << 2 meta_41 = idxs2 | idxs3 << 2 meta_42 = idxs4 | idxs5 << 2 meta_43 = idxs6 | idxs7 << 2 meta_44 = idxs8 | idxs9 << 2 meta_45 = idxsA | idxsB << 2 meta_46 = idxsC | idxsD << 2 meta_47 = idxsE | idxsF << 2 meta = (meta_40 | meta_41 << 4 | meta_42 << 8 | meta_43 << 12 | meta_44 << 16 | meta_45 << 20 | meta_46 << 24 | meta_47 << 28) if ARRAY_LAYOUT == 'row': col_idx = tl.program_id(1) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) elif ARRAY_LAYOUT == 'col': col_idx = tl.program_id(1) group, interweave = 16, 2 dest_row = (row_idx // group * group + row_idx % 8 * interweave + row_idx % group // 8) dest_col = col_idx topright = ((dest_row % 2 == 0) & (dest_col % 2 == 1)).to(tl.int8) bottomleft = ((dest_row % 2 == 1) & (dest_col % 2 == 0)).to(tl.int8) dest_row = dest_row + topright - bottomleft dest_col = dest_col - topright + bottomleft interleave = 2 cols_maj = dest_col // interleave cols_min = dest_col % interleave meta_reordered_offsets = (cols_maj * m * interleave + dest_row * interleave + cols_min) if ARRAY_LAYOUT == 'row': mask = col_idx < k // 32 elif ARRAY_LAYOUT == 'col': mask = row_idx < m tl.store(meta_reordered_ptr + meta_reordered_offsets, meta, mask=mask)
{ "Data Type": [ "int8" ], "Functionality": [ "Quantization", "Top-K Selection" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/_semi_structured_conversions.py
0f3aabee-e682-46a8-89dc-314404372b6b
scaled_quant.py
drisspg/transformer_nuggets
transformer_nuggets/fp8/scaled_quant.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit
def scaled_cast(inpt_ptr: torch.Tensor, output_ptr: torch.Tensor,
                scale_ptr: torch.Tensor, abs_max_ptr: torch.Tensor,
                numel: int, XBLOCK: tl.constexpr,
                float8_dtype: tl.constexpr, max_val: tl.constexpr):
    """Quantize a tensor to fp8 using a delayed scale and record the abs max."""
    offset = tl.program_id(0) * XBLOCK
    index = offset + tl.arange(0, XBLOCK)[:]
    index = tl.max_contiguous(tl.multiple_of(index, XBLOCK), XBLOCK)
    mask = index < numel
    inpt = tl.load(inpt_ptr + index, mask=mask)
    block_max = tl.max(tl.abs(inpt))
    tl.atomic_max(abs_max_ptr, block_max)
    scale = tl.load(scale_ptr)
    scaled_inpt = inpt * scale
    if max_val != 0.0:
        # Saturate to the representable range; the tl.where results must be
        # assigned back, otherwise the clamp is a no-op.
        scaled_inpt = tl.where(scaled_inpt > max_val, max_val, scaled_inpt)
        scaled_inpt = tl.where(scaled_inpt < -max_val, -max_val, scaled_inpt)
    tl.store(output_ptr + index, scaled_inpt.to(float8_dtype), mask=mask)
{ "Data Type": [], "Functionality": [ "Quantization" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/scaled_quant.py
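A hedged usage sketch for scaled_cast (the fp8 dtype plumbing and the default max_val here are assumptions, not the repo's API): cast to e4m3 with a precomputed scale while accumulating the global abs-max for the next step's delayed scale.

import torch
import triton
import triton.language as tl

def cast_to_fp8(x, scale, abs_max, max_val=448.0, XBLOCK=1024):
    # scale and abs_max are 1-element fp32 tensors; abs_max is updated
    # atomically across all blocks.
    out = torch.empty(x.shape, dtype=torch.float8_e4m3fn, device=x.device)
    numel = x.numel()
    grid = (triton.cdiv(numel, XBLOCK),)
    scaled_cast[grid](x, out, scale, abs_max, numel, XBLOCK=XBLOCK,
                      float8_dtype=tl.float8e4nv, max_val=max_val)
    return out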
85beb12c-9aca-4963-98a0-73a6a62a12a3
rms_norm.py
tascj/kaggle-lmsys-chatbot-arena
human_pref/inference/ops/rms_norm.py
83cd93d50b9283c18711e8c63e4e1c6399c7b9ce
0
@wrap_jit_func(type_hint=dict(input=Tensor, weight=Tensor, output=Tensor, input_row_stride=int, eps=float, N_COLS=torch.int32, BLOCK_N=torch.int32)) @triton.jit def rms_norm_kernel(input, weight, output, input_row_stride: tl.constexpr, eps: tl.constexpr, N_COLS: tl.constexpr, BLOCK_N: tl.constexpr): """rms norm kernel.""" prog_id = tl.program_id(0) offsets = tl.arange(0, BLOCK_N) w = tl.load(weight + offsets, mask=offsets < N_COLS) x_ptr = input + prog_id * input_row_stride x = tl.load(x_ptr + offsets, mask=offsets < N_COLS) xf = x.to(tl.float32) var = tl.sum(xf * xf, 0) * float(1.0 / N_COLS) out = xf / tl.sqrt(var + eps) out = (w * out).to(x.dtype) out_ptr = output + prog_id * input_row_stride tl.store(out_ptr + offsets, out, mask=offsets < N_COLS)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [] }
[ "Apache" ]
https://github.com/tascj/kaggle-lmsys-chatbot-arena/blob/83cd93d50b9283c18711e8c63e4e1c6399c7b9ce/human_pref/inference/ops/rms_norm.py
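A minimal launcher sketch for rms_norm_kernel (the wrapper name is assumed, and the wrap_jit_func type hints are ignored here): one program per row, with BLOCK_N spanning the whole row since the kernel has no column loop.

import torch
import triton

def rms_norm(x, weight, eps=1e-6):
    M, N = x.shape
    out = torch.empty_like(x)
    BLOCK_N = triton.next_power_of_2(N)  # a single tile must cover the row
    rms_norm_kernel[(M,)](x, weight, out, input_row_stride=x.stride(0),
                          eps=eps, N_COLS=N, BLOCK_N=BLOCK_N)
    return out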
db2799a9-b71d-4ed2-84ba-ff3e85441eca
multi_head_attention_kernels.py
BobMcDear/attorch
attorch/multi_head_attention_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def _bwd_kernel(Q, K, V, sm_scale, Out, DO, DQ, DK, DV, L, D, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, Z, H, N_CTX, Z_H_N_CTX, SQ_Z_H_N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl .constexpr, BLOCK_N: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, CAUSAL: tl.constexpr, MMA_V3: tl.constexpr): qk_scale = sm_scale * 1.44269504 off_hz = tl.program_id(0) off_z = off_hz // H off_h = off_hz % H Q_block_ptr = tl.make_block_ptr(base=Q, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=( BLOCK_M, BLOCK_DMODEL), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_kn, stride_kk), offsets=(0, 0), block_shape=( BLOCK_M, BLOCK_DMODEL), order=(1, 0)) V_block_ptr = tl.make_block_ptr(base=V, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_vn, stride_vk), offsets=(0, 0), block_shape=( BLOCK_M, BLOCK_DMODEL), order=(1, 0)) DO_block_ptr = tl.make_block_ptr(base=DO, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) if SEQUENCE_PARALLEL: DQ_block_ptr = tl.make_block_ptr(base=DQ, shape=(SQ_Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) else: DQ_block_ptr = tl.make_block_ptr(base=DQ, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) DK_block_ptr = tl.make_block_ptr(base=DK, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_kn, stride_kk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) DV_block_ptr = tl.make_block_ptr(base=DV, shape=(Z_H_N_CTX, BLOCK_DMODEL), strides=(stride_vn, stride_vk), offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) num_block_n = tl.cdiv(N_CTX, BLOCK_N) if not SEQUENCE_PARALLEL: for start_n in range(0, num_block_n): _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK, DV, L, D, Q_block_ptr, K_block_ptr, V_block_ptr, DO_block_ptr, DQ_block_ptr, DK_block_ptr, DV_block_ptr, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, Z, H, N_CTX, off_h, off_z, off_hz, start_n, num_block_n, BLOCK_M=BLOCK_M, BLOCK_DMODEL =BLOCK_DMODEL, BLOCK_N=BLOCK_N, SEQUENCE_PARALLEL= SEQUENCE_PARALLEL, CAUSAL=CAUSAL, MMA_V3=MMA_V3) else: start_n = tl.program_id(1) _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, Out, DO, DQ, DK, DV, L, D, Q_block_ptr, K_block_ptr, V_block_ptr, DO_block_ptr, DQ_block_ptr, DK_block_ptr, DV_block_ptr, stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, Z, H, N_CTX, off_h, off_z, off_hz, start_n, num_block_n, BLOCK_M=BLOCK_M, BLOCK_DMODEL= BLOCK_DMODEL, BLOCK_N=BLOCK_N, SEQUENCE_PARALLEL= SEQUENCE_PARALLEL, CAUSAL=CAUSAL, MMA_V3=MMA_V3)
{ "Data Type": [ "fp32", "fp16", "bf16" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/multi_head_attention_kernels.py
db218ad8-20e0-48b5-b390-052c8b786f91
softmax_online_v2.py
iclementine/optimize_softmax
softmax_online_v2.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr ): pid_m = tl.program_id(0) m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype. element_ty) z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty) for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) new_m = tl.maximum(m, inp) new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m) m = new_m z = new_z final_m = tl.max(m, 0) z = tl.sum(tl.exp(m - final_m) * z) m = final_m for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) e = tl.exp(inp - m) out = e / z output_ptrs = output_ptr + offset tl.store(output_ptrs, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2.py
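An assumed launcher for the online-softmax kernel: one program per row. The first loop maintains the online (max, normalizer) pair; the second pass normalizes. TILE_N trades register pressure against the number of passes over the row.

import torch

def softmax_online(x, TILE_N=1024):
    M, N = x.shape
    out = torch.empty_like(x)
    softmax_kernel_online_v2[(M,)](out, x, M, N, TILE_N=TILE_N)
    return out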
73a1b7d4-10ae-42d1-829c-fc9da1265666
quant_per_block.py
rodjjo/editorium
editorium/app/server/pipelines/cogvideo/sageattention/quant_per_block.py
7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694
0
@triton.jit def k_kernel_per_block_int8(X, X_int8, BLK: tl.constexpr, Scale, L, C: tl. constexpr, scale_stride): off_b = tl.program_id(1) off_blk = tl.program_id(0) x_offset = off_b * L * C offs_m = off_blk * BLK + tl.arange(0, BLK) offs_k = tl.arange(0, C) x_ptrs = X + x_offset + offs_m[:, None] * C + offs_k[None, :] x_int8_ptrs = X_int8 + x_offset + offs_m[:, None] * C + offs_k[None, :] scale_ptrs = Scale + off_b * scale_stride + off_blk x = tl.load(x_ptrs, mask=offs_m[:, None] < L) scale = tl.max(tl.abs(x)) / 127.0 x_int8 = x / scale x_int8 += 0.5 * tl.where(x_int8 >= 0, 1, -1) x_int8 = x_int8.to(tl.int8) tl.store(x_int8_ptrs, x_int8, mask=offs_m[:, None] < L) tl.store(scale_ptrs, scale)
{ "Data Type": [ "int8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/quant_per_block.py
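A hedged launch sketch for the per-block int8 quantizer; the (B, L, C) layout with batch and heads folded into B is an assumption read off x_offset = off_b * L * C in the kernel.

import torch
import triton

def quant_k_int8(x, BLK=64):
    B, L, C = x.shape
    n_blocks = triton.cdiv(L, BLK)
    x_int8 = torch.empty_like(x, dtype=torch.int8)
    scale = torch.empty(B, n_blocks, dtype=torch.float32, device=x.device)
    # One scale per (batch, block) pair; grid axis 0 walks blocks, axis 1 batches.
    k_kernel_per_block_int8[(n_blocks, B)](x, x_int8, BLK, scale, L, C=C,
                                           scale_stride=scale.stride(0))
    return x_int8, scale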
e208a539-0dcc-4c48-b62a-23278be7b325
test_triton_varargs.py
facebookresearch/xformers
tests/test_triton_varargs.py
a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc
0
@triton.jit def weighted_sumN(output_ptr, a_ptr: 'VAR_ARGS_ARRAY', b: 'VAR_ARGS_ARRAY', BLOCK_SIZE: tl.constexpr): offset = tl.arange(0, BLOCK_SIZE) output = tl.zeros([BLOCK_SIZE], tl.float32) for i in range(len(a_ptr)): output = output + tl.load(a_ptr[i] + offset) * b[i] tl.store(output_ptr + offset, output)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py
940875b4-4e20-421f-a86c-5e25ff28a855
02-fused-softmax.py
triton-lang/triton
python/tutorials/02-fused-softmax.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def softmax_kernel(output_ptr, input_ptr, input_row_stride, output_row_stride, n_rows, n_cols, BLOCK_SIZE: tl.constexpr, num_stages: tl.constexpr): row_start = tl.program_id(0) row_step = tl.num_programs(0) for row_idx in tl.range(row_start, n_rows, row_step, num_stages=num_stages ): row_start_ptr = input_ptr + row_idx * input_row_stride col_offsets = tl.arange(0, BLOCK_SIZE) input_ptrs = row_start_ptr + col_offsets mask = col_offsets < n_cols row = tl.load(input_ptrs, mask=mask, other=-float('inf')) row_minus_max = row - tl.max(row, axis=0) numerator = tl.exp(row_minus_max) denominator = tl.sum(numerator, axis=0) softmax_output = numerator / denominator output_row_start_ptr = output_ptr + row_idx * output_row_stride output_ptrs = output_row_start_ptr + col_offsets tl.store(output_ptrs, softmax_output, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/02-fused-softmax.py
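A sketch of a persistent launch for the tutorial kernel: fewer programs than rows, each striding ahead by tl.num_programs(0). The tutorial sizes the grid from an occupancy calculation; the 8-programs-per-SM heuristic below is a stand-in, not the tutorial's code.

import torch
import triton

def softmax(x, num_stages=4):
    n_rows, n_cols = x.shape
    out = torch.empty_like(x)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    num_sms = torch.cuda.get_device_properties(x.device).multi_processor_count
    grid = (min(n_rows, 8 * num_sms),)
    softmax_kernel[grid](out, x, x.stride(0), out.stride(0), n_rows, n_cols,
                         BLOCK_SIZE=BLOCK_SIZE, num_stages=num_stages)
    return out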
bd846196-d65e-4894-a342-0d208017c704
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8]], key=['BT']) @triton.jit def chunk_local_reversed_cumsum_scalar_kernel(s, o, offsets, indices, T: tl .constexpr, H: tl.constexpr, BT: tl.constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr): i_t, i_bh = tl.program_id(0), tl.program_id(1) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos else: bos, eos = i_b * T, i_b * T + T if HEAD_FIRST: p_s = tl.make_block_ptr(s + i_bh * T, (T,), (1,), (i_t * BT,), (BT, ), (0,)) p_o = tl.make_block_ptr(o + i_bh * T, (T,), (1,), (i_t * BT,), (BT, ), (0,)) else: p_s = tl.make_block_ptr(s + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) p_o = tl.make_block_ptr(o + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) b_s = tl.load(p_s, boundary_check=(0,)).to(tl.float32) b_z = tl.sum(b_s, axis=0) b_o = b_z[None] - tl.cumsum(b_s, axis=0) + b_s tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0,))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
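For orientation, the per-chunk math of this kernel in plain torch (a reference reconstruction, not repo code): a reversed inclusive cumulative sum.

import torch

def reversed_cumsum_ref(s):
    # o[t] = sum_{i >= t} s[i]; the kernel computes it as
    # total - cumsum(s) + s, which is the same quantity.
    return s.flip(-1).cumsum(-1).flip(-1)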
d8ccdd9a-8a45-418c-8e08-65a619303b4e
triton_attn_torch_function.py
ROCm/aotriton
test/triton_attn_torch_function.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 0, 'pre_load_v': True}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 'pre_load_v': True}, num_stages=1, num_warps=4), triton.Config({ 'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 2, 'pre_load_v': True}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'pre_load_v': True}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 4, 'pre_load_v': True}, num_stages=1, num_warps=4), triton.Config({ 'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 0, 'pre_load_v': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 'pre_load_v': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 2, 'pre_load_v': False}, num_stages=1, num_warps=4), triton.Config({ 'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'pre_load_v': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 4, 'pre_load_v': False}, num_stages=1, num_warps=4) ], key=['seqlen_q', 'seqlen_k', 'STAGE']) @triton.jit def tuned_attn_fwd(Q, K, V, sm_scale, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, seqlen_q, seqlen_k, dropout_p, philox_seed, philox_offset_base, encoded_softmax, STAGE: tl.constexpr, BLOCK_M: tl. constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, pre_load_v: tl.constexpr, ENABLE_DROPOUT: tl.constexpr, RETURN_ENCODED_SOFTMAX: tl.constexpr): bare_attn_fwd(Q, K, V, sm_scale, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, seqlen_q, seqlen_k, dropout_p, philox_seed, philox_offset_base, encoded_softmax, STAGE, BLOCK_M, BLOCK_DMODEL, BLOCK_N, pre_load_v, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Low Latency", "High Throughput" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/triton_attn_torch_function.py
feca8afb-e281-4618-b17b-e142d9d07e8c
softmax_online_v2_rev.py
iclementine/optimize_softmax
softmax_online_v2_rev.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr ): pid_m = tl.program_id(0) m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype. element_ty) z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty) for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) new_m = tl.maximum(m, inp) new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m) m = new_m z = new_z final_m = tl.max(m, 0) z = tl.sum(tl.exp(m - final_m) * z) m = final_m previous_multiple = prev_multiple_of(N, TILE_N) for start_n in range(0, N, TILE_N): n_offsets = previous_multiple - start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) e = tl.exp(inp - m) out = e / z output_ptrs = output_ptr + offset tl.store(output_ptrs, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency", "High Throughput" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_rev.py
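This reversed variant calls prev_multiple_of, which is defined elsewhere in the repo and not shown here; a compatible definition, reconstructed from how the second loop indexes tiles, would be:

import triton
import triton.language as tl

@triton.jit
def prev_multiple_of(a, b):
    # Largest multiple of b strictly below a, e.g. (10, 4) -> 8, (8, 4) -> 4,
    # so the partially-masked tail tile is visited on the first iteration.
    return tl.cdiv(a, b) * b - b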
dbe1ee83-8582-4032-942f-933e819075c9
1_linear_trident_debug.py
gmgu/study-triton
toy_example/1_linear_trident_debug.py
3a9a24fd3f1de3e7465535ffe72f6deac8a419bd
0
@staticmethod @triton.jit def forward(input_ptr: tl.tensor, weight_ptr: tl.tensor, bias_ptr: tl. tensor, m_size: tl.int32, n_size: tl.int32, k_size: tl.int32, input_m_stride: tl.int32, input_k_stride: tl.int32, weight_n_stride: tl .int32, weight_k_stride: tl.int32, m_offset: tl.int32, n_offset: tl. int32, use_accelerator: tl.constexpr, m_block_size: tl.constexpr, n_block_size: tl.constexpr, k_block_size: tl.constexpr, dtype: tl.constexpr ): input_block_ptr = tl.make_block_ptr(input_ptr, shape=(m_size, k_size), strides=(input_m_stride, input_k_stride), offsets=(m_offset, 0), block_shape=(m_block_size, k_block_size), order=(1, 0)) weight_block_ptr = tl.make_block_ptr(weight_ptr, shape=(k_size, n_size), strides=(weight_k_stride, weight_n_stride), offsets=(0, n_offset), block_shape=(k_block_size, n_block_size), order=(0, 1)) output = tl.zeros((m_block_size, n_block_size), dtype) for k_offset in range(0, k_size, k_block_size): input = tl.load(input_block_ptr, boundary_check=(0, 1), padding_option='zero') weight = tl.load(weight_block_ptr, boundary_check=(0, 1), padding_option='zero') output += tl.dot(input, weight, use_accelerator).to(dtype) input_block_ptr = tl.advance(input_block_ptr, (0, k_block_size)) weight_block_ptr = tl.advance(weight_block_ptr, (k_block_size, 0)) if bias_ptr is not None: bias_block_ptr = tl.make_block_ptr(bias_ptr, shape=(n_size,), strides=(1,), offsets=(n_offset,), block_shape=(n_block_size,), order=(0,)) bias = tl.load(bias_block_ptr, boundary_check=(0,), padding_option= 'zero') output += bias return output
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Coalesced", "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/toy_example/1_linear_trident_debug.py
54490e5d-07a7-43ff-af70-75332c3c39e7
kernels.py
ShenzheZhu/sparse_autoencoder
sparse_autoencoder/kernels.py
afef049c905fda5b0f69729127ce0d3a42399152
0
@triton.jit def triton_sum_dim0_in_fp32_kernel(xs_ptr, out_ptr, stride_a, a, b, BLOCK_SIZE_A: tl.constexpr, BLOCK_SIZE_B: tl.constexpr): pid = tl.program_id(0) offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32) for i in range(0, a, BLOCK_SIZE_A): offsets_a = tl.arange(0, BLOCK_SIZE_A) + i xs = tl.load(xs_ptr + offsets_a[:, None] * stride_a + offsets_b[ None, :], mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :], other=0) xs = xs.to(tl.float32) out = tl.sum(xs, axis=0) all_out += out tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/ShenzheZhu/sparse_autoencoder/blob/afef049c905fda5b0f69729127ce0d3a42399152/sparse_autoencoder/kernels.py
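An assumed launcher for the dim-0 reduction: one program per column tile; the kernel itself loops over row tiles and accumulates in fp32 regardless of the input dtype.

import torch
import triton

def sum_dim0_fp32(xs, BLOCK_A=32, BLOCK_B=128):
    a, b = xs.shape
    out = torch.empty(b, dtype=torch.float32, device=xs.device)
    grid = (triton.cdiv(b, BLOCK_B),)
    triton_sum_dim0_in_fp32_kernel[grid](xs, out, xs.stride(0), a, b,
                                         BLOCK_SIZE_A=BLOCK_A,
                                         BLOCK_SIZE_B=BLOCK_B)
    return out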
a4954557-c50d-4f29-8e9a-cc269e7c427f
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [2, 4, 8]], key=['BT', 'BK', 'BV']) @triton.jit def chunk_gated_delta_rule_fwd_kernel_h(k, v, d, v_new, g, h, h0, ht, offsets, c_offsets, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_n, i_h = i_nh // H, i_nh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NT = tl.cdiv(T, BT) boh = tl.load(c_offsets + i_n).to(tl.int32) else: bos, eos = i_n * T, i_n * T + T NT = tl.cdiv(T, BT) boh = i_n * NT b_h = tl.zeros([BK, BV], dtype=tl.float32) if USE_INITIAL_STATE: p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32) for i_t in range(NT): if HEAD_FIRST: p_h = tl.make_block_ptr(h + (i_nh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_h = tl.make_block_ptr(h + ((boh + i_t) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1)) b_hc = tl.zeros([BK, BV], dtype=tl.float32) last_idx = min((i_t + 1) * BT, T) - 1 if HEAD_FIRST: b_g_last = tl.load(g + i_nh * T + last_idx) else: b_g_last = tl.load(g + bos * H + last_idx * H + i_h) for i_c in range(tl.cdiv(min(BT, T - i_t * BT), BC)): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_nh * T * K, (K, T), (1, K), ( i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1)) p_d = tl.make_block_ptr(d + i_nh * T * K, (T, K), (K, 1), ( i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0)) p_v = tl.make_block_ptr(v + i_nh * T * V, (T, V), (V, 1), ( i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0)) p_v_new = tl.make_block_ptr(v_new + i_nh * T * V, (T, V), ( V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0)) p_g = tl.make_block_ptr(g + i_nh * T, (T,), (1,), (i_t * BT + i_c * BC,), (BC,), (0,)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1)) p_d = tl.make_block_ptr(d + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0)) p_v_new = tl.make_block_ptr(v_new + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0)) p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT + i_c * BC,), (BC,), (0,)) b_g = tl.load(p_g, boundary_check=(0,)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_k = (b_k * tl.exp(b_g_last - b_g)[None, :]).to(b_k.dtype) b_d = tl.load(p_d, boundary_check=(0, 1)) b_d = (b_d * tl.exp(b_g)[:, None]).to(b_d.dtype) b_v = tl.load(p_v, boundary_check=(0, 1)) b_v -= tl.dot(b_d, b_h.to(b_k.dtype)) tl.store(p_v_new, b_v.to(p_v_new.dtype.element_ty), boundary_check=(0, 1)) b_hc += tl.dot(b_k, b_v.to(b_k.dtype), allow_tf32=False) b_h *= tl.exp(b_g_last) b_h += b_hc if STORE_FINAL_STATE: p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), 
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py
9158e059-6a68-45a0-bb6e-187574d65a8e
RzLinearBackward.py
apd10/RzLinear
python/rz_linear/impl/RzLinearBackward.py
eb56657b2de0a97f398f88af421b0fbcbc5469c9
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4)], key=['M', 'N', 'K']) @triton.jit def rz_linear_backward_input_grad_kernel_fp32(a_ptr, b_ptr, c_ptr, init_factor, M, N, K, H, stride_am, stride_an, stride_cm, stride_ck, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr): rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr= c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am= stride_am, stride_an=stride_an, stride_cm=stride_cm, stride_ck= stride_ck, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=False, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N= BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Coalesced", "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py
9e9e1c3a-4a92-4686-a3c3-09f634021f06
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit
def chunk_abc_fwd_kernel_cum(s, r, c, p, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.
    constexpr, BM: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr):
    i_m, i_bh = tl.program_id(0), tl.program_id(1)
    p_s = tl.make_block_ptr(s + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
        (0, i_m * BM), (BT, BM), (1, 0))
    p_r = tl.make_block_ptr(r + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,),
        (i_m * BM,), (BM,), (0,))
    p_c = tl.make_block_ptr(c + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
        (0, i_m * BM), (BT, BM), (1, 0))
    p_p = tl.make_block_ptr(p + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
        (0, i_m * BM), (BT, BM), (1, 0))
    b_mp = tl.zeros([BM], dtype=tl.float32)
    b_zp = tl.zeros([BM], dtype=tl.float32)
    for i in range(NT):
        b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
        b_m = tl.max(b_s, 0)
        if i == 0:
            b_r = tl.exp(-b_m)
        else:
            b_m = tl.maximum(b_mp, b_m)
            b_r = tl.exp(b_mp - b_m)
        b_c = tl.exp(b_s - b_m[None, :])
        b_z = tl.cumsum(b_c, 0) + (b_zp * b_r)[None, :]
        b_p = tl.exp(-tl.log(b_z))
        b_mp = b_m
        b_zp = tl.max(b_z, 0)
        tl.store(p_r, b_r.to(p_r.dtype.element_ty), boundary_check=(0,))
        tl.store(p_c, b_c.to(p_c.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_p, b_p.to(p_p.dtype.element_ty), boundary_check=(0, 1))
        p_s = tl.advance(p_s, (BT, 0))
        p_r = tl.advance(p_r, (DM,))
        p_c = tl.advance(p_c, (BT, 0))
        p_p = tl.advance(p_p, (BT, 0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
7671d636-5551-4193-b0be-489e180535f4
lightning_attn2_no_decay.py
OpenNLPLab/lightning-attention
lightning_attn/ops/triton/lightning_attn2_no_decay.py
d7439519541e966084eeaaf3ffd63eecc216f414
0
@triton.jit
def _bwd_intra_kernel(Q, K, V, DO, DQ, DK, DV, b: tl.constexpr, h: tl.
    constexpr, n: tl.constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK:
    tl.constexpr, NUM_BLOCK: tl.constexpr, CBLOCK: tl.constexpr,
    NUM_CBLOCK: tl.constexpr):
    off_bh = tl.program_id(0)
    off_block = tl.program_id(1)
    off_bh % h
    qk_offset = off_bh * n * d
    v_offset = off_bh * n * e
    o_offset = off_bh * n * e
    block_offset = off_block * BLOCK + tl.arange(0, BLOCK)
    Q_trans_block_ptr = Q + qk_offset + block_offset[None, :] * d + tl.arange(0, d)[:, None]
    K_block_ptr = K + qk_offset + block_offset[:, None] * d + tl.arange(0, d)[None, :]
    V_trans_block_ptr = V + v_offset + block_offset[None, :] * e + tl.arange(0, e)[:, None]
    DQ_block_ptr = DQ + qk_offset + block_offset[:, None] * d + tl.arange(0, d)[None, :]
    DK_trans_block_ptr = DK + qk_offset + block_offset[None, :] * d + tl.arange(0, d)[:, None]
    DV_block_ptr = DV + v_offset + block_offset[:, None] * e + tl.arange(0, e)[None, :]
    DO_block_ptr = DO + o_offset + block_offset[:, None] * e + tl.arange(0, e)[None, :]
    array = tl.arange(0, BLOCK).to(tl.float32)
    index = array[:, None] - array[None, :]
    k = tl.load(K_block_ptr, mask=block_offset[:, None] < n, other=0.0).to(tl.float32)
    v_trans = tl.load(V_trans_block_ptr, mask=block_offset[None, :] < n,
        other=0.0).to(tl.float32)
    do = tl.load(DO_block_ptr, mask=block_offset[:, None] < n, other=0.0).to(tl.float32)
    q_trans = tl.load(Q_trans_block_ptr, mask=block_offset[None, :] < n,
        other=0.0).to(tl.float32)
    dqk = tl.dot(do, v_trans)
    dqk = tl.where(index >= 0, dqk, 0)
    dq_intra = tl.dot(dqk, k)
    dk_intra_trans = tl.dot(q_trans, dqk)
    qk_trans = tl.dot(k, q_trans)
    qk_trans = tl.where(index <= 0, qk_trans, 0)
    dv_intra = tl.dot(qk_trans, do)
    dq = dq_intra
    dk_trans = dk_intra_trans
    dv = dv_intra
    tl.store(DQ_block_ptr, dq.to(DQ_block_ptr.dtype.element_ty), mask=
        block_offset[:, None] < n)
    tl.store(DK_trans_block_ptr, dk_trans.to(DK_trans_block_ptr.dtype.
        element_ty), mask=block_offset[None, :] < n)
    tl.store(DV_block_ptr, dv.to(DV_block_ptr.dtype.element_ty), mask=
        block_offset[:, None] < n)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/OpenNLPLab/lightning-attention/blob/d7439519541e966084eeaaf3ffd63eecc216f414/lightning_attn/ops/triton/lightning_attn2_no_decay.py
46ac2c79-450e-48fc-8437-4e363de32217
grid.py
daemyung/practice-triton
grid.py
27f727726f1507c8380a1c11751d851c7c4a07ce
0
@triton.jit
def print_grid():
    pid = tl.program_id(0)
    tl.device_print('pid: ', pid)
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/grid.py
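Note: print_grid takes no tensor arguments, so a sketch of running it is one line; torch.cuda.synchronize() flushes the device-side prints (a CUDA device is assumed).

import torch

print_grid[(4,)]()          # prints pid: 0 ... pid: 3, one line per program
torch.cuda.synchronize()    # make sure device prints reach the console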
0d1dcbf9-4918-4161-b36a-9394e15bb253
mlstm_scan.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_scan.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit
def precompute_mlstm_triton_scan(K, V, F, I, F_REDUCED, C, N, NH: tl.
    constexpr, S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr, VB: tl.
    constexpr):
    bh_id = tl.program_id(0)
    sb_id = tl.program_id(1)
    vb_id = tl.program_id(2)
    batch_id = bh_id // NH
    head_id = bh_id % NH
    num_sequence_blocks = tl.num_programs(1)
    v_range = tl.arange(0, VB) + vb_id * VB
    v_range_2d = v_range[None, :]
    v_range_3d = v_range[None, :, None]
    k_range = tl.arange(0, VB)
    sb_range_2d = tl.arange(0, SB)[:, None]
    sb_range_3d = tl.arange(0, SB)[:, None, None]
    sb_range_offset = tl.arange(0, SB) + sb_id * SB
    sb_range_offset_2d = sb_range_offset[:, None]
    batch_offset_fi = batch_id * NH * S + head_id * S
    batch_offset_qkv = batch_id * NH * S * D + head_id * S * D
    batch_offset_n = (batch_id * NH * num_sequence_blocks * D + head_id *
        num_sequence_blocks * D)
    batch_offset_c = (batch_id * NH * num_sequence_blocks * D * D +
        head_id * num_sequence_blocks * D * D)
    f = tl.load(F + sb_range_offset + batch_offset_fi, sb_range_offset < S)
    i = tl.load(I + sb_range_offset + batch_offset_fi, sb_range_offset < S)
    v_range_ = batch_offset_qkv + sb_range_offset_2d * D + v_range_2d
    v_mask = (sb_range_offset_2d < S) & (v_range_2d < D)
    v = tl.load(V + v_range_, v_mask)
    k_scale_factor = tl.sqrt(tl.full((1,), D, dtype=tl.float32))
    for j in tl.range(tl.cdiv(D, VB)):
        k_range_ = batch_offset_qkv + sb_range_offset_2d * D + k_range[None, :]
        k_mask = (sb_range_offset_2d < S) & (k_range[None, :] < D)
        k = tl.load(K + k_range_, k_mask) / k_scale_factor
        vk = v[:, :, None] * k[:, None, :] * i[:, None, None]
        _, c = tl.associative_scan((tl.broadcast_to(f[:, None, None], (SB,
            VB, VB)), vk), 0, scan_op)
        c_range = (batch_offset_c + sb_range_3d * 0 + sb_id * D * D +
            v_range_3d * D + k_range[None, None, :])
        c_mask = (sb_range_3d == SB - 1) & (v_range_3d < D) & (k_range[
            None, None, :] < D)
        tl.store(C + c_range, c, c_mask)
        f_reduced, n = tl.associative_scan((tl.broadcast_to(f[:, None], (
            SB, VB)), i[:, None] * k), 0, scan_op)
        n_range = batch_offset_n + sb_range_2d * 0 + sb_id * D + k_range[
            None, :]
        n_mask = (sb_range_2d == SB - 1) & (k_range[None, :] < D)
        tl.store(N + n_range, n, n_mask)
        if j == 0:
            f_range = batch_offset_fi + sb_range_2d * 0 + sb_id + tl.arange(
                0, VB)[None, :]
            f_mask = (sb_range_2d == SB - 1) & (tl.arange(0, VB)[None, :] == 0)
            tl.store(F_REDUCED + f_range, f_reduced, f_mask)
        k_range += VB
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py
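Note: precompute_mlstm_triton_scan calls a scan_op combiner that is not part of this record. A plausible definition (an assumption, not copied from the repo) is the standard first-order-recurrence combiner for h_t = f_t * h_{t-1} + u_t, which is associative over (gate, update) pairs:

import triton
import triton.language as tl

@triton.jit
def scan_op(f1, u1, f2, u2):
    # combining (f1, u1) then (f2, u2): gates multiply, the earlier update decays
    return f1 * f2, u1 * f2 + u2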
2396ab23-b768-4d96-8878-5a6ea4683b65
triton_fused_local_attn_rerope.py
LouChao98/vqtree
ops/triton_fused_local_attn_rerope.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q1, q2, sm_scale, K1_block_ptr,
    K2_block_ptr, V_block_ptr, start_m, offs_m, offs_n, SEQLEN_K: tl.
    constexpr, WINDOW_SIZE: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N:
    tl.constexpr, EVEN_MN: tl.constexpr, STAGE: tl.constexpr):
    if STAGE == 1:
        hi = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
        lo = start_m * BLOCK_M - WINDOW_SIZE
        if hi < 0:
            hi = 0
        if lo < 0:
            lo = 0
    elif STAGE == 2:
        hi = start_m * BLOCK_M
        lo = start_m * BLOCK_M - WINDOW_SIZE + BLOCK_M
        if lo < 0:
            lo = 0
    else:
        lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
        lo = tl.multiple_of(lo, BLOCK_M)
        hi = min(hi, SEQLEN_K)
    EVEN_MASK_FREE = EVEN_MN & ((STAGE == 1) | (STAGE == 2))
    K1_block_ptr = tl.advance(K1_block_ptr, (0, lo))
    K2_block_ptr = tl.advance(K2_block_ptr, (0, lo))
    V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
    for start_n in range(lo, hi, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        if EVEN_MASK_FREE:
            k1 = tl.load(K1_block_ptr)
        else:
            k1 = tl.load(K1_block_ptr, boundary_check=(1,),
                padding_option='zero')
        qk = tl.dot(q1, k1) * (sm_scale * RCP_LN2)
        if STAGE == 1:
            if EVEN_MN:
                k2 = tl.load(K2_block_ptr)
            else:
                k2 = tl.load(K2_block_ptr, boundary_check=(1,),
                    padding_option='zero')
            qk2 = tl.dot(q2, k2) * (sm_scale * RCP_LN2)
            mask = offs_m[:, None] <= start_n + WINDOW_SIZE + offs_n[None, :]
            qk = tl.where(mask, qk, qk2)
        elif STAGE == 3:
            mask = offs_m[:, None] >= start_n + offs_n[None, :]
            qk += tl.where(mask, 0, NEGINF)
        if not EVEN_MASK_FREE:
            qk += tl.where((start_n + offs_n)[None, :] < SEQLEN_K, 0, NEGINF)
        m_i_new = tl.maximum(m_i, tl.max(qk, 1))
        alpha = tl.math.exp2(m_i - m_i_new)
        p = tl.math.exp2(qk - m_i_new[:, None])
        acc *= alpha[:, None]
        if EVEN_MASK_FREE:
            v = tl.load(V_block_ptr)
        else:
            v = tl.load(V_block_ptr, boundary_check=(1,),
                padding_option='zero')
        acc += tl.dot(p.to(V_block_ptr.dtype.element_ty), v)
        l_i = l_i * alpha + tl.sum(p, 1)
        m_i = m_i_new
        K1_block_ptr = tl.advance(K1_block_ptr, (0, BLOCK_N))
        K2_block_ptr = tl.advance(K2_block_ptr, (0, BLOCK_N))
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
    return acc, l_i, m_i
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn_rerope.py
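Note: _attn_fwd_inner references two module-level constants that this record does not include. Plausible definitions (assumed, not copied from the vqtree source) are:

import math

RCP_LN2 = 1.0 / math.log(2.0)  # 1/ln(2): lets exp(x) be computed as exp2(x * RCP_LN2)
NEGINF = float('-inf')         # additive mask value for disallowed key positions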
0fa50a32-0fe2-47d6-882e-e43af53f8157
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv4/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def fused_recurrent_rwkv4_forward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
    k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b,
    state_s_abe, state_s_c, wkv_ptr, wkv_s_b, wkv_s_t, wkv_s_c,
    state_out_ptr, state_out_s_b, state_out_s_abe, state_out_s_t,
    state_out_s_c, chans, tsz, BLOCK_SIZE_C: tl.constexpr):
    b_idx = tl.program_id(0)
    c_idx = tl.program_id(1)
    cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
    cmask = cs < chans
    k_ptr = k_ptr + b_idx * k_s_b
    v_ptr = v_ptr + b_idx * v_s_b
    alpha_ptr = state_ptr + b_idx * state_s_b
    beta_ptr = state_ptr + b_idx * state_s_b + state_s_abe
    eps_ptr = state_ptr + b_idx * state_s_b + 2 * state_s_abe
    wkv_ptr = wkv_ptr + b_idx * wkv_s_b
    alpha_out_ptr = state_out_ptr + b_idx * state_out_s_b
    beta_out_ptr = state_out_ptr + b_idx * state_out_s_b + state_out_s_abe
    eps_out_ptr = state_out_ptr + b_idx * state_out_s_b + 2 * state_out_s_abe
    alpha = tl.load(alpha_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
    beta = tl.load(beta_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
    eps = tl.load(eps_ptr + cs * state_s_c, mask=cmask).to(tl.float32)
    w = tl.load(w_ptr + cs * w_s_c, mask=cmask).to(tl.float32)
    u = tl.load(u_ptr + cs * u_s_c, mask=cmask).to(tl.float32)
    for t in range(tsz):
        kt = tl.load(k_ptr + t * k_s_t + cs * k_s_c, mask=cmask).to(tl.float32)
        vt = tl.load(v_ptr + t * v_s_t + cs * v_s_c, mask=cmask).to(tl.float32)
        ukt = u + kt
        tau = tl.maximum(ukt, eps)
        e1a = tl.exp(eps - tau)
        e2a = tl.exp(ukt - tau)
        wkv = (e1a * alpha + e2a * vt) / (e1a * beta + e2a)
        tl.store(wkv_ptr + t * wkv_s_t + cs * wkv_s_c, wkv, mask=cmask)
        w_eps = w + eps
        eps = tl.maximum(w_eps, kt)
        e1b = tl.exp(w_eps - eps)
        e2b = tl.exp(kt - eps)
        alpha = e1b * alpha + e2b * vt
        beta = e1b * beta + e2b
        tl.store(alpha_out_ptr + t * state_out_s_t + cs * state_out_s_c,
            alpha, mask=cmask)
        tl.store(beta_out_ptr + t * state_out_s_t + cs * state_out_s_c,
            beta, mask=cmask)
        tl.store(eps_out_ptr + t * state_out_s_t + cs * state_out_s_c,
            eps, mask=cmask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Elementwise Operations" ], "Memory Access Pattern": [ "Register Intensive", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv4/fused_recurrent.py
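Note: an illustrative launch of the RWKV4 kernel above. Shapes and the zero-initialized (alpha, beta, eps) state are assumptions for a shape check, not the repo's own wrapper.

import torch
import triton

B, T, C = 2, 128, 512
BLOCK_SIZE_C = 64
w = torch.randn(C, device='cuda')
u = torch.randn(C, device='cuda')
k = torch.randn(B, T, C, device='cuda')
v = torch.randn(B, T, C, device='cuda')
state = torch.zeros(B, 3, C, device='cuda')       # alpha, beta, eps stacked
wkv = torch.empty(B, T, C, device='cuda')
state_out = torch.empty(B, 3, T, C, device='cuda')
grid = (B, triton.cdiv(C, BLOCK_SIZE_C))
fused_recurrent_rwkv4_forward_kernel[grid](
    w, w.stride(0), u, u.stride(0),
    k, k.stride(0), k.stride(1), k.stride(2),
    v, v.stride(0), v.stride(1), v.stride(2),
    state, state.stride(0), state.stride(1), state.stride(2),
    wkv, wkv.stride(0), wkv.stride(1), wkv.stride(2),
    state_out, state_out.stride(0), state_out.stride(1),
    state_out.stride(2), state_out.stride(3),
    C, T, BLOCK_SIZE_C=BLOCK_SIZE_C)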
eac3fea0-6ec0-4a47-aee5-7bbb6b64ef29
addition.py
neuro-ml/kerops
kerops/kernels/addition.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit
def _AddStats_cl3d_impl(X_ptr, Y_ptr, Out_ptr, Mean_ptr, Sqmean_ptr,
    numel, numel_no_channels, BLOCK_SIZE: tl.constexpr, num_channels: tl.
    constexpr, block_other: tl.constexpr):
    pid = tl.program_id(0)
    X_ptr += pid * BLOCK_SIZE
    Y_ptr += pid * BLOCK_SIZE
    Out_ptr += pid * BLOCK_SIZE
    channels_offset = tl.arange(0, num_channels)
    other_offset = tl.arange(0, block_other)
    offset = channels_offset[None, :] + other_offset[:, None] * num_channels
    mask = (other_offset < numel_no_channels - pid * block_other)[:, None]
    x = tl.load(X_ptr + offset, mask=mask, other=0)
    y = tl.load(Y_ptr + offset, mask=mask, other=0)
    output = (x + y).to(tl.float32)
    tl.store(Out_ptr + offset, output, mask=mask)
    mean = tl.sum(output, axis=0) / numel_no_channels
    sqmean = tl.sum(output * output, axis=0) / numel_no_channels
    tl.atomic_add(Mean_ptr + channels_offset, mean)
    tl.atomic_add(Sqmean_ptr + channels_offset, sqmean)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/addition.py
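Note: an illustrative launch for _AddStats_cl3d_impl, treating the input as (numel_no_channels, num_channels) channels-last rows; block sizes are assumptions. Mean/Sqmean buffers must be zeroed beforehand because the kernel accumulates with atomic_add.

import torch
import triton

num_channels, numel_no_channels = 32, 4096
numel = numel_no_channels * num_channels
BLOCK_SIZE = 2048
block_other = BLOCK_SIZE // num_channels
x = torch.randn(numel_no_channels, num_channels, device='cuda')
y = torch.randn_like(x)
out = torch.empty_like(x)
mean = torch.zeros(num_channels, device='cuda')
sqmean = torch.zeros(num_channels, device='cuda')
grid = (triton.cdiv(numel, BLOCK_SIZE),)
_AddStats_cl3d_impl[grid](x, y, out, mean, sqmean, numel, numel_no_channels,
    BLOCK_SIZE=BLOCK_SIZE, num_channels=num_channels, block_other=block_other)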
221c6145-dd84-45c7-9c0e-18b5a05dcdca
complex_rnn.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/complex_rnn.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit
def bwd_sequential_scan_complex(grad_output_real, grad_output_imag, v_real,
    v_imag, f_real, f_imag, hidden_real, hidden_imag, B, L, C, BLOCK_M: tl.
    constexpr):
    offset_b = tl.program_id(0)
    if offset_b >= B:
        return
    offset_n = tl.program_id(1)
    ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + (L - 1
        ) * C + offset_n * BLOCK_M
    grad_h_real = tl.zeros([BLOCK_M], dtype=tl.float32)
    grad_h_imag = tl.zeros([BLOCK_M], dtype=tl.float32)
    for time_step in range(L - 1, -1, -1):
        grad_real = tl.load(grad_output_real + ptr).to(tl.float32)
        grad_imag = tl.load(grad_output_imag + ptr).to(tl.float32)
        grad_h_real += grad_real
        grad_h_imag += grad_imag
        decay_real = tl.load(f_real + ptr).to(tl.float32)
        decay_imag = tl.load(f_imag + ptr).to(tl.float32)
        h_real = tl.load(hidden_real + ptr - C, mask=ptr >= offset_b * L *
            C + C, other=0.0).to(tl.float32)
        h_imag = tl.load(hidden_imag + ptr - C, mask=ptr >= offset_b * L *
            C + C, other=0.0).to(tl.float32)
        grad_f_real = grad_h_real * h_real + grad_h_imag * h_imag
        grad_f_imag = grad_h_imag * h_real - grad_h_real * h_imag
        tl.store(f_real + ptr, grad_f_real.to(f_real.dtype.element_ty))
        tl.store(f_imag + ptr, grad_f_imag.to(f_real.dtype.element_ty))
        tl.store(v_real + ptr, grad_h_real.to(v_real.dtype.element_ty))
        tl.store(v_imag + ptr, grad_h_imag.to(v_real.dtype.element_ty))
        grad_h_real_new = grad_h_real * decay_real + grad_h_imag * decay_imag
        grad_h_imag_new = grad_h_imag * decay_real - grad_h_real * decay_imag
        grad_h_real = grad_h_real_new
        grad_h_imag = grad_h_imag_new
        ptr -= C
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/complex_rnn.py
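Note: an illustrative launch of the complex scan backward; contiguous (B, L, C) tensors and a BLOCK_M that divides C are assumptions. Gradients are written in place over f_*/v_*, exactly as the kernel does.

import torch
import triton

B, L, C = 2, 64, 256
BLOCK_M = 128          # must divide C: the kernel loads without channel masks
def t():
    return torch.randn(B, L, C, device='cuda')
grad_real, grad_imag = t(), t()
v_real, v_imag, f_real, f_imag = t(), t(), t(), t()
hidden_real, hidden_imag = t(), t()
grid = (B, C // BLOCK_M)
bwd_sequential_scan_complex[grid](grad_real, grad_imag, v_real, v_imag,
    f_real, f_imag, hidden_real, hidden_imag, B, L, C, BLOCK_M=BLOCK_M)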
8974a9b5-f6d2-496c-b6b2-4fdfa904b752
dw_conv.py
neuro-ml/kerops
kerops/kernels/dw_conv.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit
def _DWConv_cl3d_impl(input_ptr, weight_ptr, output_ptr, H, W, D,
    H_stride, W_stride, ACCTYPE: tl.constexpr, channels: tl.constexpr,
    D_block: tl.constexpr):
    H_cell = tl.program_id(0)
    W_cell = tl.program_id(1)
    D_cell = tl.program_id(2)
    output_ptr += D_cell * D_block * channels
    input_ptr += D_cell * D_block * channels
    channels_offset = tl.arange(0, channels)
    channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset,
        channels), channels)
    d_offset = tl.arange(0, D_block)
    near_offset = tl.arange(0, 4) - 1
    offset = d_offset[:, None, None] * channels + channels_offset[None, :,
        None] + near_offset[None, None, :] * channels
    mask = d_offset[:, None, None] + near_offset[None, None, :
        ] < D - D_block * D_cell
    mask = mask and d_offset[:, None, None] + near_offset[None, None, :
        ] >= 0 - D_block * D_cell
    mask = mask and near_offset[None, None, :] != 2
    weight_offset = channels_offset[None, :, None] + tl.arange(0, 4)[None,
        None, :] * channels
    weight_mask = tl.arange(0, 4)[None, None, :] != 3
    weight_h0_w0 = tl.load(weight_ptr + weight_offset, mask=weight_mask,
        other=0.0)
    weight_h0_w1 = tl.load(weight_ptr + 3 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h0_w2 = tl.load(weight_ptr + 6 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h1_w0 = tl.load(weight_ptr + 9 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h1_w1 = tl.load(weight_ptr + 12 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h1_w2 = tl.load(weight_ptr + 15 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h2_w0 = tl.load(weight_ptr + 18 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h2_w1 = tl.load(weight_ptr + 21 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    weight_h2_w2 = tl.load(weight_ptr + 24 * channels + weight_offset,
        mask=weight_mask, other=0.0)
    h0_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
    h0_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
    h1_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
    h1_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
    out_mask = d_offset[:, None] < D - D_block * D_cell
    out_offset = d_offset[:, None] * channels + channels_offset[None, :]
    H1_store = 2 * H_cell + 1 < H
    W1_store = 2 * W_cell + 1 < W
    load_all = (H_cell > 0 and H_cell < tl.cdiv(H, 2) - 1) and (W_cell > 0 and
        W_cell < tl.cdiv(W, 2) - 1)
    i = -1
    j = -1
    load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 *
        W_cell + j < W and 2 * W_cell + j >= 0)
    tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j
        ) * W_stride
    x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
    for k in tl.static_range(0, 16):
        if k == 0:
            h0_w0 += tl.sum(x * weight_h0_w0, axis=2)
        elif k == 1:
            h0_w0 += tl.sum(x * weight_h1_w0, axis=2)
            h1_w0 += tl.sum(x * weight_h0_w0, axis=2)
        elif k == 2:
            h0_w0 += tl.sum(x * weight_h2_w0, axis=2)
            h1_w0 += tl.sum(x * weight_h1_w0, axis=2)
        elif k == 3:
            h1_w0 += tl.sum(x * weight_h2_w0, axis=2)
        elif k == 4:
            h0_w0 += tl.sum(x * weight_h0_w1, axis=2)
            h0_w1 += tl.sum(x * weight_h0_w0, axis=2)
        elif k == 5:
            h0_w0 += tl.sum(x * weight_h1_w1, axis=2)
            h0_w1 += tl.sum(x * weight_h1_w0, axis=2)
            h1_w0 += tl.sum(x * weight_h0_w1, axis=2)
            h1_w1 += tl.sum(x * weight_h0_w0, axis=2)
        elif k == 6:
            h0_w0 += tl.sum(x * weight_h2_w1, axis=2)
            h0_w1 += tl.sum(x * weight_h2_w0, axis=2)
            h1_w0 += tl.sum(x * weight_h1_w1, axis=2)
            h1_w1 += tl.sum(x * weight_h1_w0, axis=2)
        elif k == 7:
            h1_w0 += tl.sum(x * weight_h2_w1, axis=2)
            h1_w1 += tl.sum(x * weight_h2_w0, axis=2)
        elif k == 8:
            h0_w0 += tl.sum(x * weight_h0_w2, axis=2)
            h0_w1 += tl.sum(x * weight_h0_w1, axis=2)
        elif k == 9:
            h0_w0 += tl.sum(x * weight_h1_w2, axis=2)
            h0_w1 += tl.sum(x * weight_h1_w1, axis=2)
            h1_w0 += tl.sum(x * weight_h0_w2, axis=2)
            h1_w1 += tl.sum(x * weight_h0_w1, axis=2)
        elif k == 10:
            h0_w0 += tl.sum(x * weight_h2_w2, axis=2)
            h0_w1 += tl.sum(x * weight_h2_w1, axis=2)
            h1_w0 += tl.sum(x * weight_h1_w2, axis=2)
            h1_w1 += tl.sum(x * weight_h1_w1, axis=2)
        elif k == 11:
            h1_w0 += tl.sum(x * weight_h2_w2, axis=2)
            h1_w1 += tl.sum(x * weight_h2_w1, axis=2)
        elif k == 12:
            h0_w1 += tl.sum(x * weight_h0_w2, axis=2)
        elif k == 13:
            h0_w1 += tl.sum(x * weight_h1_w2, axis=2)
            h1_w1 += tl.sum(x * weight_h0_w2, axis=2)
        elif k == 14:
            h0_w1 += tl.sum(x * weight_h2_w2, axis=2)
            h1_w1 += tl.sum(x * weight_h1_w2, axis=2)
        else:
            h1_w1 += tl.sum(x * weight_h2_w2, axis=2)
        k_ = k + 1
        i = k_ % 4 - 1
        j = k_ // 4 - 1
        load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 *
            W_cell + j < W and 2 * W_cell + j >= 0)
        tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 *
            W_cell + j) * W_stride
        x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and
            mask)
    tmp_output_ptr = output_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
    tl.store(tmp_output_ptr + out_offset, h0_w0, mask=out_mask)
    tmp_output_ptr = output_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1
        ) * W_stride
    tl.store(tmp_output_ptr + out_offset, h0_w1, mask=out_mask and W1_store)
    tmp_output_ptr = output_ptr + (2 * H_cell + 1
        ) * H_stride + 2 * W_cell * W_stride
    tl.store(tmp_output_ptr + out_offset, h1_w0, mask=out_mask and H1_store)
    tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1
        ) * W_stride
    tl.store(tmp_output_ptr + out_offset, h1_w1, mask=out_mask and (
        H1_store and W1_store))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/dw_conv.py
570a1e21-423e-48b9-9625-0e664ec62aae
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/common/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None,
    'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
    'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
    num_warps in [1, 2, 4]], key=['BK', 'BV', 'USE_GK', 'USE_GV', 'USE_G'])
@triton.jit
def fused_recurrent_fwd_kernel(q, k, v, g, gk, gv, o, h0, ht, offsets,
    scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K: tl.
    constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
    REVERSE: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr,
    USE_GV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr,
    STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr,
    HEAD_FIRST: tl.constexpr):
    i_v, i_k, i_nh = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.
        int64), tl.program_id(2).to(tl.int64)
    i_n, i_h = i_nh // H, i_nh % H
    if USE_OFFSETS:
        bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
            i_n + 1).to(tl.int64)
        all = T
        T = eos - bos
    else:
        bos, eos = i_n * T, i_n * T + T
        all = B * T
    if HEAD_FIRST:
        p_q = q + i_nh * T * K + ((T - 1) * K if REVERSE else 0
            ) + i_k * BK + tl.arange(0, BK)
        p_k = k + i_nh * T * K + ((T - 1) * K if REVERSE else 0
            ) + i_k * BK + tl.arange(0, BK)
        p_v = v + i_nh * T * V + ((T - 1) * V if REVERSE else 0
            ) + i_v * BV + tl.arange(0, BV)
        p_o = o + (i_k * B * H + i_nh) * T * V + ((T - 1) * V if REVERSE else
            0) + i_v * BV + tl.arange(0, BV)
        if USE_G:
            p_g = g + i_nh * T + (T - 1 if REVERSE else 0)
        if USE_GK:
            p_gk = gk + i_nh * T * K + ((T - 1) * K if REVERSE else 0
                ) + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + i_nh * T * V + ((T - 1) * V if REVERSE else 0
                ) + i_v * BV + tl.arange(0, BV)
    else:
        p_q = q + (bos + (T - 1 if REVERSE else 0)
            ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_k = k + (bos + (T - 1 if REVERSE else 0)
            ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        p_v = v + (bos + (T - 1 if REVERSE else 0)
            ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        p_o = o + (i_k * all + bos + (T - 1 if REVERSE else 0)
            ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
        if USE_G:
            p_g = g + (bos + (T - 1 if REVERSE else 0)) * H + i_h
        if USE_GK:
            p_gk = gk + (bos + (T - 1 if REVERSE else 0)
                ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
        if USE_GV:
            p_gv = gv + (bos + (T - 1 if REVERSE else 0)
                ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
    mask_k = i_k * BK + tl.arange(0, BK) < K
    mask_v = i_v * BV + tl.arange(0, BV) < V
    mask_h = mask_k[None, :] & mask_v[:, None]
    b_h = tl.zeros([BV, BK], dtype=tl.float32)
    if USE_INITIAL_STATE:
        p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
            ) * V + (i_v * BV + tl.arange(0, BV)[:, None])
        b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
    for _ in range(0, T):
        b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
        b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
        b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
        if USE_GK:
            b_gk = tl.load(p_gk, mask=mask_k, other=0).to(tl.float32)
            b_h = b_h * tl.exp(b_gk[None, :])
        if USE_GV:
            b_gv = tl.load(p_gv, mask=mask_v, other=0).to(tl.float32)
            b_h = b_h * tl.exp(b_gv[:, None])
        if USE_G:
            b_g = tl.load(p_g).to(tl.float32)
            b_h = b_h * tl.exp(b_g)
        b_h += b_k[None, :] * b_v[:, None]
        b_o = b_h * b_q[None, :]
        b_o = tl.sum(b_o, axis=1)
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_v)
        p_q += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
        p_o += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
        if USE_GK:
            p_gk += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
        if USE_GV:
            p_gv += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
        if USE_G:
            p_g += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H)
    if STORE_FINAL_STATE:
        p_ht = ht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
            ) * V + (i_v * BV + tl.arange(0, BV)[:, None])
        tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings", "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/fused_recurrent.py
a44d6bb9-9a39-4506-bfa3-b7ebd9fa2abb
test_matmul.py
triton-lang/kernels
test/test_matmul.py
eeeebdd8be7d13629de22d600621e6234057eed3
0
@triton.jit
def kernel(Y, X, N, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offs < N
    x = tl.load(X + offs, mask=mask)
    tl.store(Y + offs, x, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/test/test_matmul.py
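Note: the copy kernel above follows the canonical one-block-per-program launch pattern; a minimal sketch:

import torch
import triton

N, BLOCK_SIZE = 10000, 1024
x = torch.randn(N, device='cuda')
y = torch.empty_like(x)
grid = (triton.cdiv(N, BLOCK_SIZE),)   # one program per BLOCK_SIZE chunk
kernel[grid](y, x, N, BLOCK_SIZE=BLOCK_SIZE)
assert torch.equal(x, y)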
ddf1f588-9692-47ab-b525-919d573b3c8a
softmax.py
dame-cell/Triformer
triformer/softmax.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit
def softmax_kernel_forward(out_ptr, inp_ptr, inp_stride, out_stride,
    seq_len, is_causal, BLOCK_SIZE: tl.constexpr, num_warps: tl.constexpr):
    batch_idx = tl.program_id(0)
    batch_start_ptr = inp_ptr + batch_idx * inp_stride
    pos_offsets = tl.arange(0, BLOCK_SIZE)
    batch_ptrs = batch_start_ptr + pos_offsets
    valid_mask = pos_offsets < seq_len
    logits = tl.load(batch_ptrs, mask=valid_mask, other=-float('inf'))
    if is_causal:
        attn_mask = pos_offsets > batch_idx % seq_len
        logits = logits + tl.where(attn_mask, -float('inf'), 0.0)
    shifted_logits = logits - tl.max(logits, axis=0)
    exp_logits = tl.exp(shifted_logits)
    sum_exp = tl.sum(exp_logits, axis=0)
    probs = exp_logits / sum_exp
    out_batch_ptr = out_ptr + batch_idx * out_stride
    out_ptrs = out_batch_ptr + pos_offsets
    tl.store(out_ptrs, probs, mask=valid_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Latency Sensitive" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/softmax.py
54ba9a77-a828-4180-a238-3b81400afbb1
1_linear_trident_debug.py
gmgu/study-triton
toy_example/1_linear_trident_debug.py
3a9a24fd3f1de3e7465535ffe72f6deac8a419bd
0
@staticmethod
@util.autotune(configs=linear_configs(), key=['m_size', 'n_size', 'k_size'])
@triton.jit
def forward(output_ptr: tl.tensor, input_ptr: tl.tensor, weight_ptr: tl.
    tensor, bias_ptr: tl.tensor, m_size: tl.int32, n_size: tl.int32,
    k_size: tl.int32, input_batch_stride: tl.int32, input_m_stride: tl.
    int32, input_k_stride: tl.int32, weight_n_stride: tl.int32,
    weight_k_stride: tl.int32, use_accelerator: tl.constexpr, dtype: tl.
    constexpr, m_block_size: tl.constexpr, n_block_size: tl.constexpr,
    k_block_size: tl.constexpr):
    pid = tl.program_id(0)
    num_m_blocks = tl.cdiv(m_size, m_block_size)
    num_n_blocks = tl.cdiv(n_size, n_block_size)
    num_blocks = num_m_blocks * num_n_blocks
    batch = pid // num_blocks
    block = pid % num_blocks
    m_block = block // num_n_blocks
    n_block = block % num_n_blocks
    m_offset = m_block * m_block_size
    n_offset = n_block * n_block_size
    output = language.Linear.forward(input_ptr + batch *
        input_batch_stride, weight_ptr, bias_ptr, m_size, n_size, k_size,
        input_m_stride, input_k_stride, weight_n_stride, weight_k_stride,
        m_offset, n_offset, use_accelerator, m_block_size, n_block_size,
        k_block_size, dtype)
    output_block_ptr = tl.make_block_ptr(output_ptr + batch * m_size *
        n_size, shape=(m_size, n_size), strides=(n_size, 1), offsets=(
        m_offset, n_offset), block_shape=(m_block_size, n_block_size),
        order=(1, 0))
    tl.store(output_block_ptr, output, boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/toy_example/1_linear_trident_debug.py
171aad36-63cb-44be-831f-54745f85e0d3
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/hgrn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def chunk_hgrn_fwd_kernel_o(gc, o, s_b, s_t, s_d, T: tl.constexpr, D: tl.
    constexpr, BT: tl.constexpr, BD: tl.constexpr):
    i_d, i_b = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_b * s_b, (T, D), (s_t, s_d), (i_t *
            BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_b * s_b, (T, D), (s_t, s_d), (i_t *
            BT, i_d * BD), (BT, BD), (1, 0))
        b_h0 = tl.load(o + i_b * T * D + i_t * BT * D - D + o_d, mask=mask,
            other=0).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/chunk.py
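Note: an illustrative launch for chunk_hgrn_fwd_kernel_o; o must already hold per-chunk partial outputs, which this kernel stitches across chunk boundaries in place. Shapes and block sizes are assumptions.

import torch
import triton

B, T, D, BT, BD = 2, 256, 128, 64, 64
gc = torch.randn(B, T, D, device='cuda')   # cumulative log-gates
o = torch.randn(B, T, D, device='cuda')    # per-chunk outputs, updated in place
grid = (triton.cdiv(D, BD), B)
chunk_hgrn_fwd_kernel_o[grid](gc, o, o.stride(0), o.stride(1), o.stride(2),
    T=T, D=D, BT=BT, BD=BD)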
bff66182-3082-44d8-ab45-db96e371884d
activation.py
chengzeyi/stable-fast
src/sfast/triton/ops/activation.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@triton.jit
def relu(x):
    # elementwise maximum against zero; tl.maximum is the binary elementwise
    # op (tl.max is a reduction, so tl.max(x, 0.0) would be a bug here)
    return tl.maximum(x, 0.0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py
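Note: relu above is a device-side helper, not a launchable kernel; a sketch of calling it from an elementwise kernel (the wrapper kernel name and shapes are illustrative assumptions):

import triton
import triton.language as tl

@triton.jit
def apply_relu(X, Y, N, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < N
    x = tl.load(X + offs, mask=mask)
    tl.store(Y + offs, relu(x), mask=mask)   # reuse the jitted helper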
18871690-73de-4fa7-8cc1-99b5248781fc
mhmoe.py
dtadpole/triton-playground
mhmoe.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit
def d_leacky_relu(x):
    return tl.where(x >= 0, 1.0, 100.0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py
be4722dd-52dd-4663-8146-0f41c82e92f7
y_8.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_8.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit
def eighth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
    block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
    constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr):
    coord_stride = 3
    block_id = tl.program_id(0)
    coord_striding = tl.arange(0, block_size) * coord_stride
    coord_row_offset = coord_striding + block_size * coord_stride * block_id
    x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
        coord_numel)
    y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset +
        1 < coord_numel)
    z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset +
        2 < coord_numel)
    CONST000 = 1.12741169450483
    CONST003 = 4.12310562561766
    CONST004 = 4.50964677801932
    CONST006 = 6.76447016702898
    CONST007 = 1.69594242329302
    CONST008 = 1.88707052233084
    CONST010 = 2.58397773170915
    CONST011 = 13.136713523081
    CONST012 = 13.136713523081
    CONST014 = -489.184589393411
    CONST015 = 24.738633753706
    CONST017 = 24.738633753706
    CONST019 = 48.9184589393411
    CONST020 = 48.5105296237322
    CONST021 = 51.744564931981
    CONST024 = 65.6835676154051
    CONST025 = 67.8376969317208
    CONST029 = 97.0210592474644
    CONST030 = -6.78376969317208
    CONST031 = 103.489129863962
    CONST032 = -407.026181590325
    CONST033 = 108.231522672464
    CONST035 = 110.066532613517
    CONST036 = 110.066532613517
    CONST037 = -396.284809689477
    CONST040 = -361.756882439281
    CONST041 = -1.88707052233084
    CONST042 = 158.513923875791
    CONST045 = 180.87844121964
    CONST046 = 194.042118494929
    CONST047 = -12.2296147348353
    CONST048 = 203.513090795162
    CONST050 = 216.463045344927
    CONST051 = 217.054129463568
    CONST052 = 216.463045344927
    CONST053 = -6.78376969317208
    CONST054 = -271.350787726883
    CONST055 = 244.592294696706
    CONST056 = 244.592294696706
    CONST057 = -262.734270461621
    CONST058 = -258.722824659905
    CONST061 = -217.054129463568
    CONST062 = -210.187416369296
    CONST063 = -175.156180307747
    CONST064 = -162.81047263613
    CONST066 = -144.702752975712
    CONST067 = -129.877827206956
    CONST068 = -129.361412329953
    CONST070 = -108.231522672464
    CONST071 = -108.231522672464
    CONST072 = -87.5780901538735
    CONST073 = -3.23403530824881
    CONST074 = -72.3513764878561
    CONST075 = -70.0624721230988
    CONST076 = -65.6835676154052
    CONST077 = -61.1480736741764
    CONST078 = -61.1480736741764
    CONST079 = -57.7234787586472
    CONST080 = -57.7234787586472
    CONST081 = -51.744564931981
    CONST082 = -48.5105296237322
    CONST083 = -40.5868210021738
    CONST084 = -39.4101405692431
    CONST085 = -40.7026181590325
    CONST086 = -36.0771742241545
    CONST087 = -36.0771742241545
    CONST088 = -26.4189873126318
    CONST089 = -20.6718218536732
    CONST090 = -528.379746252636
    CONST091 = -16.9594242329302
    CONST092 = -13.136713523081
    CONST093 = -12.2296147348353
    CONST094 = -11.3224231339851
    CONST095 = -10.3359109268366
    CONST096 = -9.70210592474644
    CONST097 = -11.3224231339851
    CONST098 = -13.5289403340579
    CONST099 = -6.78376969317208
    CONST100 = -13.5289403340579
    CONST101 = -13.136713523081
    CONST102 = -3.23403530824881
    CONST103 = -1.61701765412441
    VAR06 = x * x * x * x
    VAR07 = x * x * x
    VAR08 = x * x
    VAR02 = VAR06 * VAR06
    VAR03 = VAR06 * VAR07
    VAR04 = VAR07 * VAR07
    VAR05 = VAR07 * VAR08
    VAR15 = y * y * y * y
    VAR16 = y * y * y
    VAR17 = y * y
    VAR11 = VAR15 * VAR16
    VAR12 = VAR15 * VAR16
    VAR13 = VAR16 * VAR16
    VAR14 = VAR16 * VAR17
    VAR24 = z * z * z * z
    VAR25 = z * z * z
    VAR26 = z * z
    VAR20 = VAR24 * VAR24
    VAR21 = VAR24 * VAR25
    VAR22 = VAR25 * VAR25
    VAR23 = VAR25 * VAR26
    Y00 = (-CONST066 * VAR05 * VAR25 + CONST066 * VAR07 * VAR23 +
        CONST089 * VAR03 * z - CONST089 * VAR21 * x)
    Y01 = y * (CONST040 * VAR07 * VAR24 + CONST051 * VAR05 * VAR26 -
        CONST074 * VAR22 * x + CONST095 * VAR03)
    Y02 = CONST097 * VAR03 * z + VAR05 * (CONST042 * VAR17 * z -
        CONST088 * VAR25) + VAR07 * (-CONST088 * VAR23 + CONST090 *
        VAR17 * VAR25) + x * (CONST042 * VAR17 * VAR23 + CONST094 * VAR21)
    Y03 = VAR16 * (CONST014 * VAR07 * VAR26 + CONST019 * VAR05 +
        CONST055 * VAR24 * x) + y * (CONST035 * VAR05 * VAR26 +
        CONST077 * VAR22 * x - CONST078 * VAR07 * VAR24 + CONST093 * VAR03)
    Y04 = CONST099 * VAR03 * z + VAR05 * (-CONST064 * VAR17 * z +
        CONST099 * VAR25) + VAR07 * (-CONST053 * VAR23 + CONST054 *
        VAR15 * z) + x * (-CONST053 * VAR21 - CONST054 * VAR15 * VAR25 +
        CONST064 * VAR17 * VAR23)
    Y05 = VAR14 * (-CONST062 * VAR26 * x + CONST075 * VAR07) + VAR16 * (
        CONST057 * VAR24 * x + CONST063 * VAR07 * VAR26 - CONST072 * VAR05
        ) + y * (CONST011 * VAR05 * VAR26 + CONST024 * VAR07 * VAR24 -
        CONST084 * VAR22 * x + CONST092 * VAR03)
    Y06 = CONST102 * VAR03 * z + VAR05 * (CONST029 * VAR17 * z +
        CONST096 * VAR25) + VAR07 * (CONST046 * VAR17 * VAR25 +
        CONST058 * VAR15 * z + CONST096 * VAR23) + x * (CONST029 * VAR17 *
        VAR23 + CONST031 * VAR13 * z + CONST058 * VAR15 * VAR25 +
        CONST102 * VAR21)
    Y07 = CONST098 * VAR03 * y + VAR05 * (CONST033 * VAR16 + CONST083 *
        VAR26 * y) + VAR07 * (CONST050 * VAR16 * VAR26 + CONST067 *
        VAR14 + CONST083 * VAR24 * y) + x * (CONST015 * VAR12 +
        CONST067 * VAR14 * VAR26 - CONST070 * VAR16 * VAR24 + CONST098 *
        VAR22 * y)
    Y08 = (CONST000 * VAR02 + CONST000 * VAR20 + CONST003 * VAR11 -
        CONST070 * VAR15 * VAR24 + CONST080 * VAR13 * VAR26 + CONST087 *
        VAR17 * VAR22 + VAR04 * (CONST004 * VAR26 + CONST086 * VAR17) +
        VAR06 * (CONST006 * VAR24 - CONST070 * VAR15 + CONST071 * VAR17 *
        VAR26) + VAR08 * (CONST004 * VAR22 + CONST050 * VAR15 * VAR26 +
        CONST070 * VAR17 * VAR24 + CONST079 * VAR13))
    Y09 = CONST098 * VAR21 * y + VAR23 * (CONST033 * VAR16 + CONST083 *
        VAR08 * y) + VAR25 * (CONST052 * VAR08 * VAR16 + CONST067 *
        VAR14 + CONST083 * VAR06 * y) + z * (CONST017 * VAR12 +
        CONST033 * VAR06 * VAR16 + CONST067 * VAR08 * VAR14 + CONST100 *
        VAR04 * y)
    Y10 = (CONST073 * VAR08 * VAR22 - CONST102 * VAR04 * VAR26 -
        CONST103 * VAR02 + CONST103 * VAR20 + VAR13 * (CONST021 * VAR26 +
        CONST081 * VAR08) + VAR15 * (-CONST068 * VAR06 + CONST068 * VAR24
        ) + VAR17 * (CONST020 * VAR08 * VAR24 + CONST020 * VAR22 +
        CONST082 * VAR04 + CONST082 * VAR06 * VAR26))
    Y11 = VAR14 * (CONST062 * VAR08 * z - CONST075 * VAR25) + VAR16 * (-
        CONST057 * VAR06 * z - CONST063 * VAR08 * VAR25 + CONST072 * VAR23
        ) + y * (CONST012 * VAR21 + CONST076 * VAR06 * VAR25 + CONST084 *
        VAR04 * z + CONST101 * VAR08 * VAR23)
    Y12 = (CONST007 * VAR02 + CONST007 * VAR20 + CONST030 * VAR04 *
        VAR26 + CONST053 * VAR08 * VAR22 + CONST091 * VAR06 * VAR24 +
        VAR15 * (CONST025 * VAR06 + CONST025 * VAR24 + CONST032 * VAR08 *
        VAR26) + VAR17 * (CONST048 * VAR06 * VAR26 + CONST048 * VAR08 *
        VAR24 + CONST085 * VAR04 + CONST085 * VAR22))
    Y13 = VAR16 * (CONST014 * VAR08 * VAR25 + CONST019 * VAR23 +
        CONST056 * VAR06 * z) + y * (CONST036 * VAR08 * VAR23 +
        CONST047 * VAR21 - CONST077 * VAR06 * VAR25 + CONST078 * VAR04 * z)
    Y14 = (CONST008 * VAR02 + CONST041 * VAR20 + CONST088 * VAR04 *
        VAR26 - CONST088 * VAR08 * VAR22 + VAR17 * (-CONST037 * VAR06 *
        VAR26 + CONST037 * VAR08 * VAR24 + CONST088 * VAR04 - CONST088 *
        VAR22))
    Y15 = y * (-CONST040 * VAR06 * VAR25 + CONST061 * VAR08 * VAR23 +
        CONST074 * VAR04 * z - CONST095 * VAR21)
    Y16 = (CONST010 * VAR02 + CONST010 * VAR20 + CONST045 * VAR06 *
        VAR24 + CONST074 * VAR04 * VAR26 + CONST074 * VAR08 * VAR22)
    output_striding = tl.arange(0, block_size) * output_stride
    output_row_offset = (output_striding + block_size * output_stride *
        block_id + col_offset)
    tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
        output_numel)
    tl.store(output_ptr + output_row_offset + 1, Y01, mask=
        output_row_offset + 1 < output_numel)
    tl.store(output_ptr + output_row_offset + 2, Y02, mask=
        output_row_offset + 2 < output_numel)
    tl.store(output_ptr + output_row_offset + 3, Y03, mask=
        output_row_offset + 3 < output_numel)
    tl.store(output_ptr + output_row_offset + 4, Y04, mask=
        output_row_offset + 4 < output_numel)
    tl.store(output_ptr + output_row_offset + 5, Y05, mask=
        output_row_offset + 5 < output_numel)
    tl.store(output_ptr + output_row_offset + 6, Y06, mask=
        output_row_offset + 6 < output_numel)
    tl.store(output_ptr + output_row_offset + 7, Y07, mask=
        output_row_offset + 7 < output_numel)
    tl.store(output_ptr + output_row_offset + 8, Y08, mask=
        output_row_offset + 8 < output_numel)
    tl.store(output_ptr + output_row_offset + 9, Y09, mask=
        output_row_offset + 9 < output_numel)
    tl.store(output_ptr + output_row_offset + 10, Y10, mask=
        output_row_offset + 10 < output_numel)
    tl.store(output_ptr + output_row_offset + 11, Y11, mask=
        output_row_offset + 11 < output_numel)
    tl.store(output_ptr + output_row_offset + 12, Y12, mask=
        output_row_offset + 12 < output_numel)
    tl.store(output_ptr + output_row_offset + 13, Y13, mask=
        output_row_offset + 13 < output_numel)
    tl.store(output_ptr + output_row_offset + 14, Y14, mask=
        output_row_offset + 14 < output_numel)
    tl.store(output_ptr + output_row_offset + 15, Y15, mask=
        output_row_offset + 15 < output_numel)
    tl.store(output_ptr + output_row_offset + 16, Y16, mask=
        output_row_offset + 16 < output_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_8.py
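Note: an illustrative launch for eighth_order_fwd; coordinates are packed (x, y, z) per point and each point yields 17 outputs (degree l = 8 has 2l + 1 = 17 components). Sizes are assumptions.

import torch
import triton

N, block_size, output_stride = 1024, 128, 17
coord = torch.randn(N * 3, device='cuda')           # packed xyz
output = torch.empty(N * output_stride, device='cuda')
grid = (triton.cdiv(N, block_size),)
eighth_order_fwd[grid](coord, output, block_size=block_size,
    coord_numel=coord.numel(), output_numel=output.numel(),
    col_offset=0, output_stride=output_stride)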
1af50369-d016-482c-9e4e-234c967c3ae6
dropout.py
daemyung/practice-triton
dropout.py
27f727726f1507c8380a1c11751d851c7c4a07ce
0
@staticmethod
@triton.jit
def backward(grad_input_ptr, grad_output_ptr, output_ptr, size, p,
    block_size: tl.constexpr):
    pid = tl.program_id(0)
    offset = pid * block_size
    grad_input_block_ptr = tl.make_block_ptr(grad_input_ptr, shape=(size,),
        strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
    grad_output_block_ptr = tl.make_block_ptr(grad_output_ptr, shape=(size,
        ), strides=(1,), offsets=(offset,), block_shape=(block_size,),
        order=(0,))
    output_block_ptr = tl.make_block_ptr(output_ptr, shape=(size,),
        strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,))
    grad_output = tl.load(grad_output_block_ptr, boundary_check=(0,))
    output = tl.load(output_block_ptr, boundary_check=(0,))
    condition = output > 0.0
    grad_input = tl.where(condition, grad_output * (1 / (1 - p)), 0.0)
    tl.store(grad_input_block_ptr, grad_input, boundary_check=(0,))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/dropout.py
b2c5e42d-4d18-4d41-be89-0859575d1a55
lightningAttention2.py
Computational-Machine-Intelligence/LeetDecoding
leetDecoding/methods/lightningAttention2.py
1b545c2f5bacc155255250d1f70ac9484744559a
0
@triton.jit
def _fwd_kernel_without_s(Q, K, V, Out, b: tl.constexpr, h: tl.constexpr,
    n: tl.constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK: tl.constexpr,
    NUM_BLOCK: tl.constexpr, BLOCK_MODEL: tl.constexpr):
    off_bh = tl.program_id(0)
    off_h = off_bh % h
    off_e = tl.program_id(1)
    qk_offset = off_bh * n * d
    v_offset = off_bh * n * e
    o_offset = off_bh * n * e
    e_offset = off_e * BLOCK_MODEL
    Q_block_ptr = Q + qk_offset + tl.arange(0, d)[None, :]
    K_trans_block_ptr = K + qk_offset + tl.arange(0, d)[:, None]
    V_block_ptr = V + v_offset + e_offset + tl.arange(0, BLOCK_MODEL)[None, :]
    O_block_ptr = Out + o_offset + e_offset + tl.arange(0, BLOCK_MODEL)[None, :]
    off_block = tl.arange(0, BLOCK)
    index = off_block[:, None] - off_block[None, :]
    diag_decay = tl.where(index >= 0, 1, 0)
    kv = tl.zeros([d, BLOCK_MODEL], dtype=tl.float32)
    for i in range(NUM_BLOCK):
        q = tl.load(Q_block_ptr + off_block[:, None] * d, mask=off_block[:,
            None] < n, other=0.0).to(tl.float32)
        k_trans = tl.load(K_trans_block_ptr + off_block[None, :] * d, mask=
            off_block[None, :] < n, other=0.0).to(tl.float32)
        v = tl.load(V_block_ptr + off_block[:, None] * e, mask=off_block[:,
            None] < n, other=0.0).to(tl.float32)
        qk = tl.dot(q, k_trans) * diag_decay
        o_intra = tl.dot(qk, v)
        o_inter = tl.dot(q, kv)
        o = o_intra + o_inter
        tl.store(O_block_ptr + off_block[:, None] * e, o.to(O_block_ptr.
            dtype.element_ty), mask=off_block[:, None] < n)
        kv = kv + tl.dot(k_trans, v)
        off_block += BLOCK
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/Computational-Machine-Intelligence/LeetDecoding/blob/1b545c2f5bacc155255250d1f70ac9484744559a/leetDecoding/methods/lightningAttention2.py
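Note: an illustrative launch of the lightning-attention forward above; head-flattened contiguous (b*h, n, d) / (b*h, n, e) tensors and the tile sizes are assumptions.

import torch
import triton

b, h, n, d, e = 2, 8, 512, 64, 64
BLOCK, BLOCK_MODEL = 64, 64
NUM_BLOCK = triton.cdiv(n, BLOCK)
q = torch.randn(b * h, n, d, device='cuda')
k = torch.randn(b * h, n, d, device='cuda')
v = torch.randn(b * h, n, e, device='cuda')
o = torch.empty(b * h, n, e, device='cuda')
grid = (b * h, triton.cdiv(e, BLOCK_MODEL))
_fwd_kernel_without_s[grid](q, k, v, o, b, h, n, d, e,
    BLOCK=BLOCK, NUM_BLOCK=NUM_BLOCK, BLOCK_MODEL=BLOCK_MODEL)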
832c1247-97a6-4951-b770-b50148fc3427
test_inductor.py
triton-lang/kernels
test/test_inductor.py
eeeebdd8be7d13629de22d600621e6234057eed3
0
@triton.jit
def triton_(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x3 = xindex
    x0 = xindex % 64
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp11 = tl.load(in_ptr2 + x0, xmask)
    tmp13 = tl.load(in_ptr3 + x0, xmask)
    _tmp17 = tl.zeros([XBLOCK, RBLOCK], tl.float32) + 0
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
            eviction_policy='evict_last', other=0)
        tmp2 = tmp0 - tmp1
        tmp4 = 1e-05
        tmp5 = tmp3 + tmp4
        tmp6 = tl.sqrt(tmp5)
        tmp7 = 1 / tmp6
        tmp8 = 1.0
        tmp9 = tmp7 * tmp8
        tmp10 = tmp2 * tmp9
        tmp12 = tmp10 * tmp11
        tmp14 = tmp12 + tmp13
        _tmp17 = tl.where(rmask & xmask, _tmp17 + tmp14, _tmp17)
        tl.store(in_out_ptr0 + (r2 + 4096 * x3 + tl.zeros([XBLOCK, RBLOCK],
            tl.int32)), tmp14, rmask & xmask)
    tmp17 = tl.sum(_tmp17, 1)[:, None]
    tmp18 = 4096.0
    tmp19 = tmp17 / tmp18
    tl.store(in_out_ptr1 + (x3 + tl.zeros([XBLOCK, 1], tl.int32)), tmp19,
        xmask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/test/test_inductor.py
a2322d06-016c-4be9-920f-2be617b54e4c
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
    num_warps in [1, 2, 4, 8]], key=['BT'])
@triton.jit
def chunk_local_cumsum_scalar_kernel(s, o, offsets, indices, T: tl.
    constexpr, H: tl.constexpr, BT: tl.constexpr, HEAD_FIRST: tl.constexpr,
    USE_OFFSETS: tl.constexpr):
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if USE_OFFSETS:
        i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
            i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
            i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    if HEAD_FIRST:
        p_s = tl.make_block_ptr(s + i_bh * T, (T,), (1,), (i_t * BT,),
            (BT,), (0,))
        p_o = tl.make_block_ptr(o + i_bh * T, (T,), (1,), (i_t * BT,),
            (BT,), (0,))
    else:
        p_s = tl.make_block_ptr(s + bos * H + i_h, (T,), (H,), (i_t * BT,),
            (BT,), (0,))
        p_o = tl.make_block_ptr(o + bos * H + i_h, (T,), (H,), (i_t * BT,),
            (BT,), (0,))
    b_s = tl.load(p_s, boundary_check=(0,)).to(tl.float32)
    b_o = tl.cumsum(b_s, axis=0)
    tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0,))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency", "Single Instance", "Latency Sensitive" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
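Note: an illustrative launch for chunk_local_cumsum_scalar_kernel in the HEAD_FIRST layout without offsets, so the heuristic sets USE_OFFSETS=False and autotune picks num_warps. Shapes and the chunk size are assumptions.

import torch
import triton

B, H, T, BT = 2, 4, 256, 64
s = torch.randn(B * H, T, device='cuda')
o = torch.empty_like(s)
grid = (triton.cdiv(T, BT), B * H)   # one program per (chunk, batch*head)
chunk_local_cumsum_scalar_kernel[grid](s, o, None, None, T=T, H=H, BT=BT,
    HEAD_FIRST=True)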