uuid: string (36 chars)
file_name: string (5-50 chars)
repo_name: string (110 distinct values)
file_path: string (7-112 chars)
commit_hash: string (110 distinct values)
starcount: int64 (0-0)
input: string (39-33.8k chars)
category: dict
licenses: sequence (1-2 entries)
github_url: string (94-193 chars)
c09cde34-9ffc-4939-be5b-e95b46999a0f
softmax_loop_along_reduce_axis_v2.py
iclementine/optimize_softmax
softmax_loop_along_reduce_axis_v2.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit
def softmax_kernel_loop_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr):
    pid_m = tl.program_id(0)
    m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.element_ty)
    for start_n in range(0, N, TILE_N):
        n_offsets = start_n + tl.arange(0, TILE_N)
        offset = pid_m * N + n_offsets
        input_ptrs = input_ptr + offset
        mask = n_offsets < N
        inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr.dtype.element_ty)
        m = tl.maximum(m, inp)
    m = tl.max(m, 0)
    z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty)
    for start_n in range(0, N, TILE_N):
        n_offsets = start_n + tl.arange(0, TILE_N)
        offset = pid_m * N + n_offsets
        input_ptrs = input_ptr + offset
        mask = n_offsets < N
        inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr.dtype.element_ty)
        e = tl.exp(inp - m)
        z += e
    z = tl.sum(z, 0)
    for start_n in range(0, N, TILE_N):
        n_offsets = start_n + tl.arange(0, TILE_N)
        offset = pid_m * N + n_offsets
        input_ptrs = input_ptr + offset
        mask = n_offsets < N
        inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr.dtype.element_ty)
        e = tl.exp(inp - m)
        out = e / z
        output_ptrs = output_ptr + offset
        tl.store(output_ptrs, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_loop_along_reduce_axis_v2.py
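A minimal host-side launch sketch for softmax_kernel_loop_v2 above, assuming a contiguous 2-D float32 tensor and a power-of-two TILE_N; the wrapper name and tile choice are illustrative assumptions, not taken from the source repository.

import torch
import triton

def softmax_loop_v2(x: torch.Tensor, tile_n: int = 1024) -> torch.Tensor:
    # One program per row; each program sweeps its row three times in TILE_N
    # chunks (running max, sum of exponentials, normalize), so any N fits.
    M, N = x.shape
    out = torch.empty_like(x)
    softmax_kernel_loop_v2[(M,)](out, x, M, N, TILE_N=tile_n)
    return out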
d41cc477-8ab0-4b21-a14c-3997eac771ce
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv6/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BT': BT, 'BK': BK}, num_warps= num_warps) for BT in [16, 32, 64] for BK in [32, 64] for num_warps in [ 1, 2, 4, 8]], key=['K']) @triton.jit def fused_recurrent_rwkv6_bwd_kernel_dw(q, k, dq, dk, dw, offsets, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, REVERSE: tl.constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr): i_k, i_nh = tl.program_id(0), tl.program_id(1) i_n, i_h = i_nh // H, i_nh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) else: bos, eos = i_n * T, i_n * T + T T = eos - bos NT = tl.cdiv(T, BT) o_i = tl.arange(0, BT) m_i = tl.where(o_i[:, None] >= o_i[None, :], 1.0, 0.0 ) if not REVERSE else tl.where(o_i[:, None] <= o_i[None, :], 1.0, 0.0) b_z = tl.zeros([BK], dtype=tl.float32) i_t = 0 if not REVERSE else NT - 1 for _ in range(NT): if HEAD_FIRST: p_q = tl.make_block_ptr(q + i_nh * T * K, (T, K), (K, 1), (i_t * BT + 1, i_k * BK), (BT, BK), (1, 0)) p_dq = tl.make_block_ptr(dq + i_nh * T * K, (T, K), (K, 1), ( i_t * BT + 1, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_nh * T * K, (T - 1, K), (K, 1), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_nh * T * K, (T - 1, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dw = tl.make_block_ptr(dw + i_nh * T * K, (T, K), (K, 1), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) else: p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + 1, i_k * BK), (BT, BK), (1, 0)) p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + 1, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T - 1, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T - 1, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dw = tl.make_block_ptr(dw + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)).to(tl.float32) b_dq = tl.load(p_dq, boundary_check=(0, 1)).to(tl.float32) b_k = tl.load(p_k, boundary_check=(0, 1)).to(tl.float32) b_dk = tl.load(p_dk, boundary_check=(0, 1)).to(tl.float32) b_dw = b_q * b_dq * scale - b_k * b_dk b_c = b_z[None, :] + tl.dot(m_i, b_dw, allow_tf32=False) tl.store(p_dw, b_c.to(p_dw.dtype.element_ty), boundary_check=(0, 1)) if i_t >= 0: b_z += tl.sum(b_dw, 0) i_t += 1 if not REVERSE else -1
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/fused_recurrent.py
ad644dd8-8dd8-4e51-b816-115a5a717ad7
hilbert.py
Kitsunetic/space-filling-pytorch
space_filling_pytorch/functional/hilbert.py
0de955ad1036973ee7506c5a0124c208acec722d
0
@triton.jit
def _calculate_hilbert_distance(fx, fy, fz, space_size):
    x = ((fx + 1) / 2 * space_size).to(tl.int64)
    y = ((fy + 1) / 2 * space_size).to(tl.int64)
    z = ((fz + 1) / 2 * space_size).to(tl.int64)
    x = tl.minimum(tl.maximum(x, 0), space_size - 1)
    y = tl.minimum(tl.maximum(y, 0), space_size - 1)
    z = tl.minimum(tl.maximum(z, 0), space_size - 1)
    for i in tl.static_range(15, 0, -1):
        q = 1 << i
        p = q - 1
        x ^= tl.where(x & q, p, 0)
        cond = y & q
        t = (x ^ y) & p
        x ^= tl.where(cond, p, t)
        y ^= tl.where(cond, 0, t)
        cond = z & q
        t = (x ^ z) & p
        x ^= tl.where(cond, p, t)
        z ^= tl.where(cond, 0, t)
    y ^= x
    z ^= y
    t = 0
    for i in tl.static_range(15, 0, -1):
        q = 1 << i
        t ^= tl.where(z & q, q - 1, 0)
    x ^= t
    y ^= t
    z ^= t
    ret = 0
    for i in tl.static_range(0, 16):
        q = 1 << i
        ret |= (x & q) << 2 * i + 2
        ret |= (y & q) << 2 * i + 1
        ret |= (z & q) << 2 * i + 0
    return ret
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/hilbert.py
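_calculate_hilbert_distance above is a @triton.jit helper rather than a launchable kernel, so it has to be inlined into a caller. A hypothetical caller sketch, assuming contiguous (N, 3) coordinates in [-1, 1] and an int64 output buffer; the kernel name, pointer layout, and block size are assumptions, not the repository's wrapper.

import triton
import triton.language as tl

@triton.jit
def hilbert_encode_kernel(xyz_ptr, code_ptr, N, space_size, BLOCK: tl.constexpr):
    pid = tl.program_id(0)
    offs = pid * BLOCK + tl.arange(0, BLOCK)
    mask = offs < N
    # Coordinates stored row-major as (N, 3): x, y, z per point.
    fx = tl.load(xyz_ptr + offs * 3 + 0, mask=mask)
    fy = tl.load(xyz_ptr + offs * 3 + 1, mask=mask)
    fz = tl.load(xyz_ptr + offs * 3 + 2, mask=mask)
    code = _calculate_hilbert_distance(fx, fy, fz, space_size)
    tl.store(code_ptr + offs, code, mask=mask)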
f6eb3989-cac5-479b-973d-97694fdb700e
mlstm_scan.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_scan.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit
def roll(y, dim=0):
    _, rh2, _ = tl.associative_scan((1 + 0 * y, 0.0 * y, y), dim, roll_op)
    return rh2
{ "Data Type": [], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py
28746acd-0067-40c2-9349-3abb547bd86d
chunk_h_split.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h_split.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None, 'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for num_warps in [2, 4, 8] for num_stages in [2, 3]], key=['BT', 'USE_G', 'USE_GK', 'USE_GV']) @triton.jit def chunk_bwd_kernel_dh_split(q, g, gk, gv, do, dht, dhs, dhr, dh0, offsets, split_indices, scale, T: tl.constexpr, S: tl.constexpr, HQ: tl. constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl. constexpr, BK: tl.constexpr, BV: tl.constexpr, NG: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr, USE_GV: tl.constexpr, USE_FINAL_STATE_GRADIENT: tl.constexpr, STORE_INITIAL_STATE_GRADIENT: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_k, i_v, i_sh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_ss, i_hq = i_sh // HQ, i_sh % HQ if USE_OFFSETS: i_n, i_s = tl.load(split_indices + i_ss * 2).to(tl.int32), tl.load( split_indices + i_ss * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NS = tl.cdiv(T, S) else: NS = tl.cdiv(T, S) i_n, i_s = i_ss // NS, i_ss % NS bos, eos = i_n * T, i_n * T + T i_nh = i_n * HQ + i_hq i_ng, i_h = i_nh // NG, i_hq // NG b_dh = tl.zeros([BK, BV], dtype=tl.float32) if i_s == NS - 1: if USE_FINAL_STATE_GRADIENT: p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), ( i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_dh += tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32) p_dhr = tl.make_block_ptr(dhr + i_sh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_dhr, b_dh.to(p_dhr.dtype.element_ty), boundary_check=(0, 1)) for i_t in range(tl.cdiv(min(i_s * S + S, T), BT) - 1, tl.cdiv(i_s * S, BT) - 1, -1): if HEAD_FIRST: p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) else: p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (K, T), (1, HQ * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), ( HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_q = (b_q * scale).to(b_q.dtype) b_do = tl.load(p_do, boundary_check=(0, 1)) last_idx = min(i_t * BT + BT, T) - 1 if USE_G: if HEAD_FIRST: p_g = g + i_ng * T + i_t * BT + tl.arange(0, BT) p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT) b_g_last = tl.load(g + i_ng * T + last_idx) else: p_g = g + (bos + i_t * BT + tl.arange(0, BT)) * H + i_h b_g_last = tl.load(g + (bos + last_idx) * H + i_h) b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0) b_q = (b_q * tl.exp(b_g)[None, :]).to(b_q.dtype) b_dh *= tl.exp(b_g_last) if USE_GK: if HEAD_FIRST: p_gk = tl.make_block_ptr(gk + i_ng * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = gk + (i_ng * T + last_idx ) * K + i_k * BK + tl.arange(0, BK) else: p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = gk + (bos + last_idx ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK) p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_q = (b_q * tl.exp(b_gk)).to(b_q.dtype) b_gk_last = 
tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) < K, other=0.0) b_dh *= tl.exp(b_gk_last)[:, None] if USE_GV: if HEAD_FIRST: p_gv = tl.make_block_ptr(gv + i_ng * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = gv + (i_ng * T + last_idx ) * V + i_v * BV + tl.arange(0, BV) else: p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = gv + (bos + last_idx ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV) p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV) b_gv = tl.load(p_gv, boundary_check=(0, 1)) b_do = (b_do * tl.exp(b_gv)).to(b_do.dtype) b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) < V, other=0.0) b_dh *= tl.exp(b_gv_last)[None, :] b_dh += tl.dot(b_q, b_do) if NS > 1: p_dhs = tl.make_block_ptr(dhs + i_sh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_dhs, b_dh.to(p_dhs.dtype.element_ty), boundary_check=(0, 1)) elif STORE_INITIAL_STATE_GRADIENT: p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_split.py
66cc7094-2f0e-4a81-9d3e-fc4e2f0954ad
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_fwd_kernel_K(q, k, z, h, o, A, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl.constexpr, K: tl. constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl. constexpr): i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_p = tl.maximum(i_t * BT - 1, 0) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_o = tl.zeros([BT, BV], dtype=tl.float32) b_A = tl.zeros([BT, BT], dtype=tl.float32) for i_k in range(tl.cdiv(K, BK)): p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), ( i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (K, V), ( s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_q = (b_q * scale).to(b_q.dtype) b_k = tl.load(p_k, boundary_check=(0, 1)) b_h = tl.load(p_h, boundary_check=(0, 1)) b_o += tl.dot(b_q, b_h, allow_tf32=False) b_A += tl.dot(b_q, b_k, allow_tf32=False) p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_z = tl.load(p_z, boundary_check=(0, 1)) p_zp = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), (i_p * V + i_v * BV,), (BV,), (0,)) b_zp = tl.load(p_zp, boundary_check=(0,)) b_o = b_o * tl.exp(b_zp[None, :] - b_z) tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) b_A = tl.where(m_s, b_A, 0.0) if i_v == 0: tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
036c35d5-8ac1-4fea-a594-dfc3a88a9bfa
rmsnorm.py
agostini01/rms-norm-exercise
optimized/rmsnorm.py
0884cc52a8cde60ff8af0fa58d5b5330ae5db87a
0
@triton.jit
def rms_norm(output_ptr, input_ptr, weights_ptr, stride, N, eps,
    DTYPE: tl.constexpr, BLOCK_SIZE: tl.constexpr):
    """
    RMS Norm Triton Kernel

    Params:
        - input_ptr (tensor): Pointer to Input
        - output_ptr (tensor): Pointer to Output
        - weights_ptr (tensor): Pointer to Scale applied to the normalized input
        - stride (int): Stride to be applied when accessing elements in the input and output tensors
        - N (int): Number of elements to be reduced == input_ptr.shape[-1]
        - eps (half/float): Epsilon value added to the variance to prevent division by zero
        - BLOCK_SIZE (constexpr): Size of the block for computation, provided as a compile-time constant

    Usage:
        _rms_norm[grid, block](x, y, self.w, input_stride, N, eps, BLOCK_SIZE)
    """
    row = tl.program_id(0)
    output_ptr += row * stride
    input_ptr += row * stride
    # Accumulate the sum of squares over the row in BLOCK_SIZE chunks.
    tmp = tl.zeros([BLOCK_SIZE], dtype=DTYPE)
    for offset in range(0, N, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        a = tl.load(input_ptr + cols, mask=mask, other=0.0).to(DTYPE)
        tmp += a * a
    rms = tl.sqrt(tl.sum(tmp) / N + eps)
    for offset in range(0, N, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        x = tl.load(input_ptr + cols, mask=mask, other=0.0, eviction_policy='evict_first').to(DTYPE)
        w = tl.load(weights_ptr + cols, mask=mask)
        x_hat = x / rms
        y = x_hat * w
        tl.store(output_ptr + cols, y, mask=mask)
{ "Data Type": [ "fp32", "fp16", "bf16" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/agostini01/rms-norm-exercise/blob/0884cc52a8cde60ff8af0fa58d5b5330ae5db87a/optimized/rmsnorm.py
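A host-side wrapper sketch for the rms_norm kernel above, assuming a contiguous (n_rows, N) input and a float32 accumulator; the wrapper name, the block-size cap, and the eps default are illustrative assumptions.

import torch
import triton
import triton.language as tl

def rms_norm_fwd(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # One program per row; BLOCK_SIZE must be a power of two for tl.arange.
    n_rows, N = x.shape
    y = torch.empty_like(x)
    BLOCK_SIZE = min(triton.next_power_of_2(N), 8192)
    rms_norm[(n_rows,)](y, x, weight, x.stride(0), N, eps,
                        DTYPE=tl.float32, BLOCK_SIZE=BLOCK_SIZE)
    return y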
55da2e67-0be0-4dfc-aa0d-22b2e71956a3
softmax.py
sustcsonglin/flash-linear-attention
fla/ops/utils/softmax.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[
    triton.Config({}, num_warps=1),
    triton.Config({}, num_warps=2),
    triton.Config({}, num_warps=4),
    triton.Config({}, num_warps=8),
    triton.Config({}, num_warps=16),
    triton.Config({}, num_warps=32),
], key=['D'])
@triton.jit
def softmax_fwd_kernel(x, p, D: tl.constexpr, B: tl.constexpr):
    i_n = tl.program_id(0)
    o_d = tl.arange(0, B)
    m_d = o_d < D
    b_x = tl.load(x + i_n * D + o_d, mask=m_d, other=-float('inf'))
    b_m = tl.max(b_x, 0)
    b_x = tl.exp(b_x - b_m)
    b_p = b_x / tl.sum(b_x, 0)
    tl.store(p + i_n * D + o_d, b_p.to(p.dtype.element_ty), mask=m_d)
{ "Data Type": [ "fp32", "fp16", "bf16" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/softmax.py
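A launch sketch for softmax_fwd_kernel above, assuming a contiguous (N, D) input; since the kernel is autotuned only over num_warps, the wrapper just supplies D and a power-of-two block width B. The wrapper itself is an assumption, not the library's host code.

import torch
import triton

def softmax_fwd(x: torch.Tensor) -> torch.Tensor:
    # One program per row; the whole row of width D must fit in one block of size B >= D.
    N, D = x.shape
    p = torch.empty_like(x)
    softmax_fwd_kernel[(N,)](x, p, D=D, B=triton.next_power_of_2(D))
    return p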
ff97fdfb-b204-4483-bf5b-9713cad36067
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/based/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def parallel_based_bwd_kernel(q, k, v, do, dz, dq, dk, dv, s_k_h, s_k_t,
    s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr,
    T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BTL: tl.constexpr,
    BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr):
    i_kv, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    NV = tl.cdiv(V, BV)
    i_k = i_kv // NV
    i_v = i_kv % NV
    i_h = i_bh % H
    _parallel_based_bwd_dq(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dq,
        s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale,
        BTL=BTL, BTS=BTS, BK=BK, BV=BV, K=K, V=V)
    tl.debug_barrier()
    _parallel_based_bwd_dkv(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dk,
        dv, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale,
        BTL, BTS, BK, BV, K, V)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/parallel.py
ea89f51b-43ee-4c4b-b14e-5c296ca54f47
cross_entropy_loss.py
tdrussell/qlora-pipe
kernels/cross_entropy_loss.py
6fb7c8eeae52a0e36c41f00628985f29d8330684
0
@triton.heuristics({'DO_LOGIT_SCALING': lambda args: args['DO_LOGIT_SCALING']})
@triton.jit
def _chunked_cross_entropy_forward(logits_ptr, logits_row_stride, loss_ptr,
    logsumexp_ptr, labels_ptr, VOCAB_SIZE: tl.constexpr, N_CHUNKS: tl.constexpr,
    BLOCK_SIZE: tl.constexpr, DO_LOGIT_SCALING: tl.constexpr, LOGIT_SCALE: tl.constexpr):
    """
    A 256K vocab divided into 4 chunks:

        |-65536-| |-65536-| |-65536-| |-65536-|

    If y == 0: CE_i = 0
    If y == 1: CE_i = logsumexp - x

    Notice we can compute a logsumexp per chunk and then combine them:

        logsumexp(x) = log[sum(exp(chunk_logsumexp))]
                     = log[exp(logsumexp(a)) + ... + exp(logsumexp(z))]
                     = log[exp(log[sum(exp(a))]) + ... + exp(log[sum(exp(z))])]
                     = log[sum(exp(a)) + ... + sum(exp(z))]

    This means we can perform a logsumexp for each chunk, then do a final
    logsumexp reduction over the chunk results, i.e. compute
    logsumexp(chunked_logsumexp) - x.
    """
    row_idx = tl.program_id(0)
    chunk_idx = tl.program_id(1)
    logits_ptr += row_idx * logits_row_stride.to(tl.int64)
    loss_ptr += row_idx
    logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx
    labels_ptr += row_idx
    col_offsets = chunk_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < VOCAB_SIZE
    label_idx = tl.load(labels_ptr).to(tl.int32)
    logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float('inf')).to(tl.float32)
    if DO_LOGIT_SCALING:
        logits = LOGIT_SCALE * logits
    c = tl.max(logits, 0)
    logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0))
    if chunk_idx == 0:
        if label_idx != -100:
            x = tl.load(logits_ptr + label_idx).to(tl.float32)
            if DO_LOGIT_SCALING:
                x = LOGIT_SCALE * x
            loss = -1.0 * x
        else:
            loss = 0.0
        tl.store(loss_ptr, loss)
    tl.store(logsumexp_ptr, logsumexp)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/tdrussell/qlora-pipe/blob/6fb7c8eeae52a0e36c41f00628985f29d8330684/kernels/cross_entropy_loss.py
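The chunked-logsumexp identity in the docstring above can be checked in a few lines of PyTorch: per-chunk logsumexps reduce to the full logsumexp with one more logsumexp. A small reference sketch under assumed shapes; it is not part of the kernel's host code.

import torch

logits = torch.randn(4, 262144)                 # (rows, 256K vocab)
chunks = logits.chunk(4, dim=-1)
chunk_lse = torch.stack([c.logsumexp(dim=-1) for c in chunks], dim=-1)  # (rows, 4)
full_lse = chunk_lse.logsumexp(dim=-1)          # final reduction over chunk results
assert torch.allclose(full_lse, logits.logsumexp(dim=-1), atol=1e-4)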
d6089944-ecae-4155-ae54-51eac263e597
inout_tensor_parallel.py
gmgu/study-triton
2_inout_tensor/inout_tensor_parallel.py
3a9a24fd3f1de3e7465535ffe72f6deac8a419bd
0
@triton.jit
def copy_kernel(in_ptr, out_ptr, n, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n
    x = tl.load(in_ptr + offsets, mask=mask)
    # Write the loaded block back to the output buffer.
    tl.store(out_ptr + offsets, x, mask=mask)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/2_inout_tensor/inout_tensor_parallel.py
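A complete launch example for copy_kernel above, assuming 1-D contiguous tensors; the wrapper name, grid computation, and BLOCK_SIZE are illustrative.

import torch
import triton

def triton_copy(src: torch.Tensor, block_size: int = 1024) -> torch.Tensor:
    # One program per BLOCK_SIZE chunk of the flat input.
    n = src.numel()
    dst = torch.empty_like(src)
    grid = (triton.cdiv(n, block_size),)
    copy_kernel[grid](src, dst, n, BLOCK_SIZE=block_size)
    return dst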
6f93083e-9dd4-4587-99ae-73b0d5ae4397
triton_fused_attention.py
pytorch-labs/tritonbench
tritonbench/kernels/triton_fused_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.jit def _attn_fwd_compute_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl. constexpr, STAGE: tl.constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr): start_m = pid off_z = off_hz // H off_h = off_hz % H qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64 ) * stride_qh K_block_ptr = None V_block_ptr = None Q_block_ptr = None O_block_ptr = None if not ENABLE_TMA: Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0)) v_order: tl.constexpr = (0, 1 ) if V.dtype.element_ty == tl.float8e5 else (1, 0) V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, HEAD_DIM), order=v_order) K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=( HEAD_DIM, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0 ), block_shape=(HEAD_DIM, BLOCK_N), order=(0, 1)) O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0)) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0 acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) qk_scale = sm_scale qk_scale *= 1.44269504 with tl.async_task([0]): if ENABLE_TMA: q = tl._experimental_descriptor_load(desc_q, [(qvk_offset // stride_qm + start_m * BLOCK_M).to(tl.int32), 0], [BLOCK_M, HEAD_DIM], Q.dtype.element_ty) else: q = tl.load(Q_block_ptr) if STAGE & 1: acc, l_i, m_i = _attn_fwd_inner_ws(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, desc_k, desc_v, Q, qvk_offset, stride_kn, stride_vn, stride_vk, start_m, qk_scale, BLOCK_M, HEAD_DIM, BLOCK_N, 4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5, ENABLE_TMA, LOOP_SCHEDULE) if STAGE & 2: acc, l_i, m_i = _attn_fwd_inner_ws(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, desc_k, desc_v, Q, qvk_offset, stride_kn, stride_vn, stride_vk, start_m, qk_scale, BLOCK_M, HEAD_DIM, BLOCK_N, 2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl. float8e5, ENABLE_TMA, LOOP_SCHEDULE) with tl.async_task([1, 2]): m_i += tl.math.log2(l_i) acc = acc / l_i[:, None] m_ptrs = M + off_hz * N_CTX + offs_m tl.store(m_ptrs, m_i) if ENABLE_TMA: tl._experimental_descriptor_store(desc_o, acc.to(Out.type. element_ty), [(qvk_offset // stride_om + start_m * BLOCK_M) .to(tl.int32), 0]) else: tl.store(O_block_ptr, acc.to(Out.type.element_ty))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py
899ada78-0cbe-42a7-835e-eb835b7dce73
kernel_benchmark.py
ruikangliu/FlatQuant
benchmarks/kernel_benchmark.py
9d3032065f1688cb3f71ebc8166df6d91440e871
0
@triton.jit
def quant_kernel(src_ptr, stride_srcb, stride_srcm, stride_srcn, dst_ptr,
    stride_dstb, stride_dstm, stride_dstn, output_scale, B,
    M: tl.constexpr, N: tl.constexpr, np2_M: tl.constexpr, np2_N: tl.constexpr):
    """quant fp16 tensor to int4"""
    batch_id = tl.program_id(axis=0) + tl.program_id(axis=1) * tl.num_programs(axis=0)
    index_rows = tl.arange(0, np2_M)
    index_cols = tl.arange(0, np2_N)
    src_ptrs = src_ptr + batch_id * stride_srcb.to(tl.int64) + index_rows[:, None] * stride_srcm + index_cols[None, :] * stride_srcn
    src_mask = (index_rows[:, None] < M) & (index_cols[None, :] < N)
    src = tl.load(src_ptrs, mask=src_mask, other=0.0)
    abs_src_val = tl.abs(src)
    max_src_val = tl.max(abs_src_val)
    scale = max_src_val / 7.0
    quant_val = libdevice.llrint(src / scale)
    quant_val = max(-8, min(quant_val, 7))
    quant_val = quant_val.reshape(np2_M, np2_N // 2, 2, can_reorder=False)
    quant_val_even, quant_val_odd = quant_val.split()
    quant_val_odd = quant_val_odd << 4
    res = tl.zeros((np2_M, np2_N // 2), dtype=tl.uint8)
    res = res | quant_val_odd & 240
    res = res | quant_val_even & 15
    offs_resm = tl.arange(0, np2_M)
    offs_resn = tl.arange(0, np2_N // 2)
    dst_ptrs = dst_ptr + stride_dstb.to(tl.int64) * batch_id + stride_dstm * offs_resm[:, None] + stride_dstn * offs_resn[None, :]
    res_mask = (offs_resm[:, None] < M) & (offs_resn[None, :] < N // 2)
    tl.store(dst_ptrs, res, mask=res_mask)
    tl.store(output_scale + batch_id, scale)
{ "Data Type": [ "fp16", "uint8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/ruikangliu/FlatQuant/blob/9d3032065f1688cb3f71ebc8166df6d91440e871/benchmarks/kernel_benchmark.py
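For reference, the packing in quant_kernel stores two int4 values per uint8: the even column in the low nibble and the odd column (shifted left by 4) in the high nibble. A hedged PyTorch sketch of the corresponding unpack/dequantize step, with names and shapes assumed rather than taken from the benchmark code.

import torch

def unpack_int4(packed: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # packed: (..., M, N // 2) uint8; scale: broadcastable per-batch scale.
    lo = (packed & 0x0F).to(torch.int8)   # even columns
    hi = (packed >> 4).to(torch.int8)     # odd columns
    # Sign-extend 4-bit two's-complement values from [0, 15] to [-8, 7].
    lo = torch.where(lo > 7, lo - 16, lo)
    hi = torch.where(hi > 7, hi - 16, hi)
    vals = torch.stack((lo, hi), dim=-1).flatten(-2)  # interleave back to (..., M, N)
    return vals.to(torch.float16) * scale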
e91bc5a6-77ff-44ee-8f09-c99d4cd87aa7
quant_triton.py
CompendiumLabs/ziggy
ziggy/backends/quant_triton.py
bd12fe50ca3475743f62ae26d4c184108e441e03
0
@triton.jit
def quantize_kernel(X, Y, N, K, K1, scale, zero_point, BITS: tl.constexpr,
    QFACT: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    BLOCK_SIZE_K1: tl.constexpr):
    dtype = X.dtype.element_ty
    scale_ty = tl.full((), scale, dtype=dtype)
    zero_point_ty = tl.full((), zero_point, dtype=dtype)
    QMASK = (1 << BITS) - 1
    QMASK_FLT = tl.full((), QMASK, dtype=dtype)
    pid_n = tl.program_id(0)
    pid_k = tl.program_id(1)
    bk = tl.arange(0, BLOCK_SIZE_K)
    bk1 = tl.arange(0, BLOCK_SIZE_K1)
    x_shift = BITS * (bk % QFACT)
    rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    rk = pid_k * BLOCK_SIZE_K + bk
    rk1 = pid_k * BLOCK_SIZE_K1 + bk1
    mask_x = rn[:, None] < N
    mask_y = rn[:, None] < N
    X1 = X + (rn[:, None] * K + rk[None, :])
    Y1 = Y + (rn[:, None] * K1 + rk1[None, :])
    x = tl.load(X1, mask=mask_x)
    xf = clamp(x / scale_ty + zero_point_ty, 0.0, QMASK_FLT)
    xi = tl.math.rint(xf).to(tl.uint8)
    xq = xi << x_shift
    mat = tl.reshape(xq, (BLOCK_SIZE_N, BLOCK_SIZE_K1, QFACT))
    out = tl.sum(mat, axis=2)
    tl.store(Y1, out, mask=mask_y)
{ "Data Type": [ "fp32", "fp16", "bf16", "uint8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/CompendiumLabs/ziggy/blob/bd12fe50ca3475743f62ae26d4c184108e441e03/ziggy/backends/quant_triton.py
55dad000-aa04-4d5a-bea5-663140e7161a
k_dropout.py
cpuhrsch/torchfused
torchfused/triton/k_dropout.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.autotune(configs=_k_configs, key=['N'])
@triton.jit
def k_dropout_fw(Y, X, BIAS, SEEDS, stride, N, p, **META):
    """
    Apply dropout on an input tensor
    Y : Output  (M, N)
    X : Input   (M, N)
    S : Seeds   (M,)
    p : dropout probability
    """
    BLOCK_SIZE = META['BLOCK_SIZE']
    row = tl.program_id(axis=0)
    col = tl.program_id(axis=1)
    offsets = row * stride + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) < N
    x_ptrs = X + offsets
    x = tl.load(x_ptrs, mask=mask)
    if META['USE_BIAS']:
        b_ptrs = BIAS + col * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        b = tl.load(b_ptrs, mask=mask)
        x += b
    if META['ACTIVATION']:
        x = META['ACTIVATION'](x)
    if p > 0.0:
        output = _drop_and_scale(SEEDS, row, p, offsets, x)
    else:
        output = x
    y_ptrs = Y + offsets
    tl.store(y_ptrs, output, mask=mask)
{ "Data Type": [], "Functionality": [ "Elementwise Operations", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_dropout.py
d89888a4-f831-415d-9ee8-3c77f4fc0d29
awq_triton.py
Charlie-XIAO/sparse-vllm
vllm/model_executor/layers/quantization/awq_triton.py
d228909a30b0c245c35417fb7d2acdf9a3690042
0
@triton.jit def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, group_size, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, SPLIT_K: tl.constexpr): pid = tl.program_id(axis=0) pid_z = tl.program_id(1) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) pid_m = pid // num_pid_n pid_n = pid % num_pid_n accumulator_dtype = c_ptr.type.element_ty accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype= accumulator_dtype) reverse_awq_order_tensor = ((tl.arange(0, 2) * 4)[None, :] + tl.arange( 0, 4)[:, None]).reshape(8) shifts = reverse_awq_order_tensor * 4 shifts = tl.broadcast_to(shifts[None, :], (BLOCK_SIZE_K * (BLOCK_SIZE_N // 8), 8)) shifts = tl.reshape(shifts, (BLOCK_SIZE_K, BLOCK_SIZE_N)) offsets_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) masks_am = offsets_am < M offsets_bn = pid_n * (BLOCK_SIZE_N // 8) + tl.arange(0, BLOCK_SIZE_N // 8) masks_bn = offsets_bn < N // 8 offsets_zn = pid_n * (BLOCK_SIZE_N // 8) + tl.arange(0, BLOCK_SIZE_N // 8) masks_zn = offsets_zn < N // 8 offsets_sn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) masks_sn = offsets_sn < N offsets_k = pid_z * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K) offsets_a = K * offsets_am[:, None] + offsets_k[None, :] offsets_b = N // 8 * offsets_k[:, None] + offsets_bn[None, :] a_ptrs = a_ptr + offsets_a b_ptrs = b_ptr + offsets_b for k in range(0, tl.cdiv(K, BLOCK_SIZE_K * SPLIT_K)): masks_k = offsets_k < K masks_a = masks_am[:, None] & masks_k[None, :] a = tl.load(a_ptrs, mask=masks_a) masks_b = masks_k[:, None] & masks_bn[None, :] b = tl.load(b_ptrs, mask=masks_b) b = tl.interleave(b, b) b = tl.interleave(b, b) b = tl.interleave(b, b) offsets_szk = (BLOCK_SIZE_K * SPLIT_K * k + pid_z * BLOCK_SIZE_K ) // group_size + tl.arange(0, 1) offsets_z = N // 8 * offsets_szk[:, None] + offsets_zn[None, :] masks_zk = offsets_szk < K // group_size masks_z = masks_zk[:, None] & masks_zn[None, :] zeros_ptrs = zeros_ptr + offsets_z zeros = tl.load(zeros_ptrs, mask=masks_z) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.broadcast_to(zeros, (BLOCK_SIZE_K, BLOCK_SIZE_N)) offsets_s = N * offsets_szk[:, None] + offsets_sn[None, :] masks_sk = offsets_szk < K // group_size masks_s = masks_sk[:, None] & masks_sn[None, :] scales_ptrs = scales_ptr + offsets_s scales = tl.load(scales_ptrs, mask=masks_s) scales = tl.broadcast_to(scales, (BLOCK_SIZE_K, BLOCK_SIZE_N)) b = b >> shifts & 15 zeros = zeros >> shifts & 15 b = (b - zeros) * scales b = b.to(c_ptr.type.element_ty) accumulator = tl.dot(a, b, accumulator, out_dtype=accumulator_dtype) offsets_k += BLOCK_SIZE_K * SPLIT_K a_ptrs += BLOCK_SIZE_K * SPLIT_K b_ptrs += BLOCK_SIZE_K * SPLIT_K * (N // 8) c = accumulator.to(c_ptr.type.element_ty) offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + pid_z * N * M + N * offs_cm[:, None] + offs_cn[None, :] c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N) tl.store(c_ptrs, c, mask=c_mask)
{ "Data Type": [ "uint8", "fp32" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/quantization/awq_triton.py
5b54c71f-c6fd-49ad-b31b-72956c7bdb18
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def fused_chunk_linear_attn_fwd_kernel(q, k, v, o, h0, ht, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B, H, T, K: tl.constexpr, V: tl. constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr, CHECK: tl.constexpr): i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_h = tl.zeros([BK, BV], dtype=tl.float32) p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (0, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k * BK, 0), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (0, i_v * BV), (BT, BV), (1, 0)) p_o = tl.make_block_ptr(o + (i_bh + i_k * B * H) * s_v_h, (T, V), ( s_v_t, s_v_d), (0, i_v * BV), (BT, BV), (1, 0)) if USE_INITIAL_STATE: p_h0 = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32) for i in range(0, tl.cdiv(T, BT)): b_q = tl.load(p_q, boundary_check=(0, 1)) b_q = (b_q * scale).to(b_q.dtype) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_s = tl.dot(b_q, b_k, allow_tf32=False) b_s = tl.where(m_s, b_s, 0) b_o = tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False) if CHECK and i == 0: b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False) b_h = b_h + tl.dot(b_k, b_v, allow_tf32=False) else: b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False) b_h = b_h + tl.dot(b_k, b_v, allow_tf32=False) tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) p_q = tl.advance(p_q, (BT, 0)) p_k = tl.advance(p_k, (0, BT)) p_v = tl.advance(p_v, (BT, 0)) p_o = tl.advance(p_o, (BT, 0)) if STORE_FINAL_STATE: p_ht = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/fused_chunk.py
75455e8e-2630-4564-b24a-6cc4c167f9ee
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_bwd_kernel_intra_KV(v, z, A, do, dv, s_v_h, s_v_t, s_v_d, T: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl.constexpr): i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_t, i_i = i_c // NC, i_c % NC p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0)) p_zn = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), ((i_t * BT + i_i * BC + BC - 1) * V + i_v * BV,), (BV,), (0,)) b_zn = tl.load(p_zn, boundary_check=(0,)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_dv = tl.zeros([BC, BV], dtype=tl.float32) for i_j in range(i_i + 1, NC): p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0)) p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_do = (b_do * tl.exp(b_zn[None, :] - b_z)).to(b_do.dtype) b_A = tl.load(p_A, boundary_check=(0, 1)) b_dv += tl.dot(b_A, b_do, allow_tf32=False) b_dv *= tl.exp(b_v - b_zn[None, :]) o_i = tl.arange(0, BC) for j in range(0, BC): p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (1,), ((i_t * BT + i_i * BC + j) * V + i_v * BV,), (BV,), (0,)) p_A = tl.make_block_ptr(A + i_bh * T * BT, (T * BT,), (1,), ((i_t * BT + i_i * BC + j) * BT + i_i * BC,), (BC,), (0,)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T * V,), (1,), ((i_t * BT + i_i * BC + j) * V + i_v * BV,), (BV,), (0,)) b_A = tl.load(p_A, boundary_check=(0,)) b_z = tl.load(p_z, boundary_check=(0,)) b_do = tl.load(p_do, boundary_check=(0,)) m_i = o_i[:, None] <= j b_dv += tl.where(m_i, tl.exp(b_v - b_z[None, :]) * b_A[:, None] * b_do[None, :], 0.0) p_dv = tl.make_block_ptr(dv + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0)) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Attention Mechanisms", "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
2cc1ebc8-1e04-49be-a864-a110ad100feb
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit def chunk_abc_bwd_kernel_dp(v, rv, cv, pv, do, dp, s_qk_h, s_qk_t, s_qk_d, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BV: tl.constexpr, BM: tl. constexpr, DV: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr): i_m, i_v, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) p_v = tl.make_block_ptr(v + i_bh * s_qk_h, (DV, T), (s_qk_d, s_qk_t), ( i_v * BV, 0), (BV, BT), (0, 1)) p_rv = tl.make_block_ptr(rv + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,), (i_m * BM,), (BM,), (0,)) p_cv = tl.make_block_ptr(cv + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (0, i_m * BM), (BT, BM), (1, 0)) p_pv = tl.make_block_ptr(pv + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (0, i_m * BM), (BT, BM), (1, 0)) p_do = tl.make_block_ptr(do + i_bh * s_qk_h, (T, DV), (s_qk_t, s_qk_d), (0, i_v * BV), (BT, BV), (1, 0)) p_dp = tl.make_block_ptr(dp + (i_v * n_bh + i_bh) * s_sk_h, (T, DM), ( s_sk_t, s_sk_m), (0, i_m * BM), (BT, BM), (1, 0)) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_hv = tl.zeros([BV, BM], dtype=tl.float32) for _ in range(NT): b_v = tl.load(p_v, boundary_check=(0, 1)) b_rv = tl.load(p_rv, boundary_check=(0,)) b_cv = tl.load(p_cv, boundary_check=(0, 1)) b_pv = tl.load(p_pv, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_inter = tl.dot(b_do, b_hv.to(b_do.dtype), allow_tf32=False) * b_rv[ None, :] b_intra = tl.dot(tl.where(m_s, tl.dot(b_do, b_v, allow_tf32=False), 0).to(b_v.dtype), b_cv, allow_tf32=False) b_dp = (b_inter + b_intra) * b_pv b_hv = b_hv * b_rv[None, :] + tl.dot(b_v, b_cv, allow_tf32=False) tl.store(p_dp, b_dp.to(p_dp.dtype.element_ty), boundary_check=(0, 1)) p_v = tl.advance(p_v, (0, BT)) p_rv = tl.advance(p_rv, (DM,)) p_cv = tl.advance(p_cv, (BT, 0)) p_pv = tl.advance(p_pv, (BT, 0)) p_do = tl.advance(p_do, (BT, 0)) p_dp = tl.advance(p_dp, (BT, 0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Backpropagation", "Elementwise Operations", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
46b97460-47fd-4e2b-8f02-0a3f885ed908
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def _multi_head_jagged_flash_attention_bwd_kernel(q_ptr, k_ptr, v_ptr, o_ptr, offset_ptr, dq_ptr, dk_ptr, dv_ptr, do_ptr, delta_ptr, lse_ptr, stride_qh, stride_qm, stride_qd, stride_kh, stride_kn, stride_kd, stride_vh, stride_vn, stride_vd, stride_oh, stride_om, stride_od, stride_lse_h, stride_delta_h, stride_dq_h, stride_dq_m, stride_dq_d, stride_dk_h, stride_dk_n, stride_dk_d, stride_dv_h, stride_dv_n, stride_dv_d, stride_do_h, stride_do_m, stride_do_d, num_heads: tl. constexpr, max_seq_len: tl.constexpr, D: tl.constexpr, allow_tf32: tl. constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_D: tl. constexpr): pid_bh = tl.program_id(axis=1) pid_batch = pid_bh // num_heads pid_head = pid_bh % num_heads begin = tl.load(offset_ptr + pid_batch) end = tl.load(offset_ptr + pid_batch + 1) seqlen = tl.minimum(end - begin, max_seq_len) if seqlen == 0: return pid_n = tl.program_id(axis=0) offs_d = tl.arange(0, BLOCK_D) offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) offs_m = tl.arange(0, BLOCK_M) q_ptrs = q_ptr + pid_head * stride_qh + begin * stride_qm + (offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qd) k_ptrs = k_ptr + pid_head * stride_kh + begin * stride_kn + (offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kd) v_ptrs = v_ptr + pid_head * stride_vh + begin * stride_vn + (offs_n[:, None] * stride_vn + offs_d[None, :] * stride_vd) do_ptrs = do_ptr + pid_head * stride_do_h + begin * stride_do_m + ( offs_m[:, None] * stride_do_m + offs_d[None, :] * stride_do_d) k = tl.load(k_ptrs, mask=(offs_d[None, :] < D) & (offs_n[:, None] < seqlen), other=0.0) v = tl.load(v_ptrs, mask=(offs_d[None, :] < D) & (offs_n[:, None] < seqlen), other=0.0) dv = tl.zeros([BLOCK_N, BLOCK_D], dtype=tl.float32) dk = tl.zeros([BLOCK_N, BLOCK_D], dtype=tl.float32) for begin_m in range(0, seqlen, BLOCK_M): offs_m_curr = begin_m + offs_m q = tl.load(q_ptrs, mask=(offs_d[None, :] < D) & (offs_m_curr[:, None] < seqlen), other=0.0) qk = tl.dot(q, tl.trans(k), allow_tf32=allow_tf32) mn_mask = (offs_m_curr[:, None] < seqlen) & (offs_n[None, :] < seqlen) lse_i = tl.load(lse_ptr + pid_head * stride_lse_h + begin + offs_m_curr, mask=offs_m_curr < seqlen, other=float('inf')) p = tl.exp(qk - lse_i[:, None]) p = tl.where(mn_mask, p, 0.0) p /= max_seq_len p = p.to(do_ptr.dtype.element_ty) do = tl.load(do_ptrs, mask=(offs_d[None, :] < D) & (offs_m_curr[:, None] < seqlen), other=0.0) dv += tl.dot(tl.trans(p), do, allow_tf32=allow_tf32) dp = tl.dot(do, tl.trans(v), allow_tf32=allow_tf32) Di = tl.load(delta_ptr + pid_head * stride_delta_h + begin + offs_m_curr, mask=offs_m_curr < seqlen) ds = p * (dp - Di[:, None] * max_seq_len) ds = ds.to(q_ptr.dtype.element_ty) dk += tl.dot(tl.trans(ds), q, allow_tf32=allow_tf32) q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_do_m dk_ptrs = dk_ptr + pid_head * stride_dk_h + begin * stride_dk_n + ( offs_n[:, None] * stride_dk_n + offs_d[None, :] * stride_dk_d) dv_ptrs = dv_ptr + pid_head * stride_dv_h + begin * stride_dv_n + ( offs_n[:, None] * stride_dv_n + offs_d[None, :] * stride_dv_d) tl.store(dk_ptrs, dk, mask=(offs_d[None, :] < D) & (offs_n[:, None] < seqlen)) tl.store(dv_ptrs, dv, mask=(offs_d[None, :] < D) & (offs_n[:, None] < seqlen)) start_m = tl.program_id(axis=0) * BLOCK_M offs_m_curr = start_m + tl.arange(0, BLOCK_M) dq_ptrs_curr = dq_ptr + pid_head * stride_dq_h + begin * stride_dq_m + ( offs_m_curr[:, None] * stride_dq_m + offs_d[None, :] * stride_dq_d) dq_curr = tl.zeros([BLOCK_M, BLOCK_D], dtype=tl.float32) q_ptrs_curr = q_ptr + pid_head * 
stride_qh + begin * stride_qm + ( offs_m_curr[:, None] * stride_qm + offs_d[None, :] * stride_qd) q_curr = tl.load(q_ptrs_curr, mask=(offs_d[None, :] < D) & (offs_m_curr [:, None] < seqlen)) lse_i_curr = tl.load(lse_ptr + pid_head * stride_lse_h + begin + offs_m_curr, mask=offs_m_curr < seqlen) do_ptrs_curr = do_ptr + pid_head * stride_do_h + begin * stride_do_m + ( offs_m_curr[:, None] * stride_do_m + offs_d[None, :] * stride_do_d) do_curr = tl.load(do_ptrs_curr, mask=(offs_d[None, :] < D) & ( offs_m_curr[:, None] < seqlen)) Di_curr = tl.load(delta_ptr + pid_head * stride_delta_h + begin + offs_m_curr, mask=offs_m_curr < seqlen) block_start = 0 while block_start < seqlen: offs_n_curr = block_start + tl.arange(0, BLOCK_N) k_ptrs_curr = k_ptr + pid_head * stride_kh + begin * stride_kn + ( offs_n_curr[:, None] * stride_kn + offs_d[None, :] * stride_kd) v_ptrs_curr = v_ptr + pid_head * stride_vh + begin * stride_vn + ( offs_n_curr[:, None] * stride_vn + offs_d[None, :] * stride_vd) k_curr = tl.load(k_ptrs_curr, mask=(offs_d[None, :] < D) & ( offs_n_curr[:, None] < seqlen)) v_curr = tl.load(v_ptrs_curr, mask=(offs_d[None, :] < D) & ( offs_n_curr[:, None] < seqlen)) qk_curr = tl.dot(q_curr, tl.trans(k_curr), allow_tf32=allow_tf32) mn_mask_curr = (offs_m_curr[:, None] < seqlen) & (offs_n_curr[None, :] < seqlen) p_curr = tl.exp(qk_curr - lse_i_curr[:, None]) p_curr = tl.where(mn_mask_curr, p_curr, 0.0) p_curr /= max_seq_len dp_curr = tl.dot(do_curr, tl.trans(v_curr), allow_tf32=allow_tf32) ds_curr = p_curr * (dp_curr - Di_curr[:, None] * max_seq_len) ds_curr = ds_curr.to(k_ptr.dtype.element_ty) dq_curr += tl.dot(ds_curr, k_curr, allow_tf32=allow_tf32) block_start += BLOCK_N tl.store(dq_ptrs_curr, dq_curr, mask=(offs_d[None, :] < D) & ( offs_m_curr[:, None] < seqlen))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Backpropagation", "Attention Mechanisms", "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
62678117-b269-40bc-af2e-e8df801e151a
addition.py
neuro-ml/kerops
kerops/kernels/addition.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit
def _AddStats_cl3d_backward_impl(Addgrad_ptr, Meangrad_ptr, Sqmeangrad_ptr,
    Sum_ptr, Outputgrad_ptr, numel, numel_no_channels, BLOCK_SIZE: tl.constexpr,
    num_channels: tl.constexpr, block_other: tl.constexpr):
    pid = tl.program_id(0)
    Addgrad_ptr += pid * BLOCK_SIZE
    Sum_ptr += pid * BLOCK_SIZE
    Outputgrad_ptr += pid * BLOCK_SIZE
    channels_offset = tl.arange(0, num_channels)
    other_offset = tl.arange(0, block_other)
    offset = channels_offset[None, :] + other_offset[:, None] * num_channels
    mask = (other_offset < numel_no_channels - pid * block_other)[:, None]
    sum = tl.load(Sum_ptr + offset, mask=mask, other=0.0)
    add_grad = tl.load(Addgrad_ptr + offset, mask=mask, other=0.0)
    mean_grad = tl.load(Meangrad_ptr + channels_offset[None, :])
    sqmean_grad = tl.load(Sqmeangrad_ptr + channels_offset[None, :])
    sqmean_grad_part = 2 * sum.to(tl.float32) * sqmean_grad / numel_no_channels
    mean_grad_part = mean_grad / numel_no_channels
    grad = add_grad + sqmean_grad_part + mean_grad_part
    grad = grad.to(tl.float16)
    tl.store(Outputgrad_ptr + offset, grad, mask=mask)
{ "Data Type": [ "fp16", "fp32" ], "Functionality": [ "Backpropagation", "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/addition.py
50f8d2cb-95bb-458d-a7f1-0ea3a9eb20e2
fused_cross_entropy.py
sustcsonglin/flash-linear-attention
fla/modules/fused_cross_entropy.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'HAS_SMOOTHING': lambda args: args['label_smoothing'] > 0.0}) @triton.jit def cross_entropy_fwd_kernel(loss_ptr, lse_ptr, z_loss_ptr, logits_ptr, labels_ptr, label_smoothing, logit_scale, lse_square_scale, ignore_index, total_classes, class_start_idx, n_cols, n_rows, logits_row_stride, BLOCK_SIZE: tl.constexpr, HAS_SMOOTHING: tl. constexpr, SPLIT: tl.constexpr): row_idx = tl.program_id(0) col_block_idx = tl.program_id(1) logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64) col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) label_idx = tl.load(labels_ptr + row_idx) logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols, other=-float('inf')) logits = logits.to(tl.float32) * logit_scale max_logits = tl.max(logits, 0) if HAS_SMOOTHING: sum_logits = tl.sum(tl.where(col_offsets < n_cols, logits, 0.0), 0) lse = tl.log(tl.sum(tl.exp(logits - max_logits), 0)) + max_logits tl.store(lse_ptr + col_block_idx * n_rows + row_idx, lse) if label_idx == ignore_index: loss = 0.0 z_loss = 0.0 else: label_idx -= class_start_idx if label_idx >= col_block_idx * BLOCK_SIZE and label_idx < min(n_cols, (col_block_idx + 1) * BLOCK_SIZE): logits_label = tl.load(logits_ptr + label_idx) * logit_scale if HAS_SMOOTHING: loss = (lse if not SPLIT else 0.0 ) - label_smoothing * sum_logits / total_classes - (1 - label_smoothing) * logits_label else: loss = (lse if not SPLIT else 0.0) - logits_label elif HAS_SMOOTHING: loss = label_smoothing * ((lse if not SPLIT else 0.0) - sum_logits / total_classes) else: loss = 0.0 if not SPLIT: z_loss = lse_square_scale * lse * lse loss += z_loss else: z_loss = 0.0 tl.store(loss_ptr + col_block_idx * n_rows + row_idx, loss) if not SPLIT: tl.store(z_loss_ptr + col_block_idx * n_rows + row_idx, z_loss)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_cross_entropy.py
908d9a97-c88c-4037-834b-8db2531abaa4
y_1.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_1.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit
def first_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
    block_size: tl.constexpr, coord_numel: tl.constexpr,
    output_numel: tl.constexpr, col_offset: tl.constexpr,
    output_stride: tl.constexpr):
    coord_stride = 3
    block_id = tl.program_id(0)
    coord_striding = tl.arange(0, block_size) * coord_stride
    coord_row_offset = coord_striding + block_size * coord_stride * block_id
    x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel)
    y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel)
    z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel)
    CONST_00 = tl.sqrt(3.0)
    Y10 = CONST_00 * x
    Y11 = CONST_00 * y
    Y12 = CONST_00 * z
    output_striding = tl.arange(0, block_size) * output_stride
    output_row_offset = output_striding + block_size * output_stride * block_id + col_offset
    tl.store(output_ptr + output_row_offset, Y10, mask=output_row_offset < output_numel)
    tl.store(output_ptr + output_row_offset + 1, Y11, mask=output_row_offset + 1 < output_numel)
    tl.store(output_ptr + output_row_offset + 2, Y12, mask=output_row_offset + 2 < output_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_1.py
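A launch sketch for first_order_fwd above, assuming contiguous (N, 3) coordinates and an output with output_stride elements per point; the wrapper name, grid computation, and block size are illustrative assumptions, not the library's host code.

import torch
import triton

def first_order_sph(coords: torch.Tensor, block_size: int = 256) -> torch.Tensor:
    # coords: (N, 3) contiguous; output holds the three l=1 harmonics per point.
    n_points = coords.shape[0]
    output_stride = 3
    out = torch.empty(n_points, output_stride, device=coords.device, dtype=coords.dtype)
    grid = (triton.cdiv(n_points, block_size),)
    first_order_fwd[grid](coords, out, block_size, coords.numel(), out.numel(),
                          col_offset=0, output_stride=output_stride)
    return out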
8f18db57-d2fd-4917-988c-68e1b2c06a4f
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit
def store_2d(vals, ptr, sz0: tl.constexpr, sz1: tl.constexpr, n0, n1, max0,
    max1, stride0=None, stride1=1):
    """Store 2d block into (n0,n1)th chunk of matrix (defined by ptr),
    where each chunk has size (sz0, sz1)"""
    stride0 = stride0 or sz1
    offs0 = get_1d_offest(sz0, n0)
    offs1 = get_1d_offest(sz1, n1)
    offs = get_2d_offset(offs0, offs1, stride0, stride1)
    mask = get_2d_mask(offs0, offs1, max0, max1)
    tl.store(ptr + offs, vals, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
df8f566b-5179-4d60-9c4b-96a961c77a70
softmax_split.py
iclementine/optimize_softmax
softmax_split.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit
def logsumexp_kernel(out_ptr, in_ptr, M, N, TILE_N: tl.constexpr):
    pid_n = tl.program_id(0)
    num_programs_n = tl.num_programs(0)
    pid_m = tl.program_id(1)
    n_offsets = pid_n * TILE_N + tl.arange(0, TILE_N)
    mask = n_offsets < N
    offset = pid_m * N + n_offsets
    inp = tl.load(in_ptr + offset, mask=mask, other=-float('inf')).to(out_ptr.dtype.element_ty)
    m = tl.max(inp, 0)
    e = tl.exp(inp - m)
    z = tl.sum(e, 0)
    logz = m + tl.log(z)
    output_ptrs = out_ptr + pid_m * num_programs_n + pid_n
    tl.store(output_ptrs, logz)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_split.py
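A launch sketch for logsumexp_kernel above, assuming a contiguous (M, N) input; it produces one partial logsumexp per (row, tile) pair, which the split-softmax host code would then reduce again. The wrapper and tile size are assumptions.

import torch
import triton

def partial_logsumexp(x: torch.Tensor, tile_n: int = 1024) -> torch.Tensor:
    # Output shape (M, num_tiles): one partial logsumexp per row tile.
    M, N = x.shape
    num_tiles = triton.cdiv(N, tile_n)
    out = torch.empty(M, num_tiles, device=x.device, dtype=x.dtype)
    logsumexp_kernel[(num_tiles, M)](out, x, M, N, TILE_N=tile_n)
    return out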
92000632-b05c-45c6-918d-cc07885d8e64
10-experimental-tma-store-matrix-multiplication.py
hgl71964/SIP
benchmarks/10-experimental-tma-store-matrix-multiplication.py
767ed720d4bd5cee21670b125b62c434258c532b
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages =7, num_warps=4)], key=['M', 'N', 'K']) @triton.jit def matmul_kernel(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % group_size_m pid_n = pid % num_pid_in_group // group_size_m block_offset_m = pid_m * BLOCK_SIZE_M block_offset_n = pid_n * BLOCK_SIZE_N a_tile_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=( stride_am, stride_ak), offsets=(block_offset_m, 0), block_shape=( BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0)) b_tile_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=( stride_bk, stride_bn), offsets=(0, block_offset_n), block_shape=( BLOCK_SIZE_K, BLOCK_SIZE_N), order=(0, 1)) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, K, BLOCK_SIZE_K): a = tl.load(a_tile_ptr) b = tl.load(b_tile_ptr) accumulator += tl.dot(a, b) a_tile_ptr = tl.advance(a_tile_ptr, [0, BLOCK_SIZE_K]) b_tile_ptr = tl.advance(b_tile_ptr, [BLOCK_SIZE_K, 0]) c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=( stride_cm, stride_cn), offsets=(block_offset_m, block_offset_n), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0)) tl.store(c_block_ptr, accumulator)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings", "Persistent Kernels" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/hgl71964/SIP/blob/767ed720d4bd5cee21670b125b62c434258c532b/benchmarks/10-experimental-tma-store-matrix-multiplication.py
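A launch sketch for the autotuned matmul_kernel above, assuming row-major float16 inputs whose dimensions are multiples of the block sizes (the kernel loads full tiles without boundary checks); the wrapper is illustrative, not the benchmark's driver code.

import torch
import triton

def matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # a: (M, K), b: (K, N); block sizes come from the autotune config, so the
    # grid is a lambda over the selected meta-parameters.
    M, K = a.shape
    K, N = b.shape
    c = torch.empty((M, N), device=a.device, dtype=torch.float32)
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE_M']) * triton.cdiv(N, meta['BLOCK_SIZE_N']),)
    matmul_kernel[grid](a, b, c, M, N, K,
                        a.stride(0), a.stride(1),
                        b.stride(0), b.stride(1),
                        c.stride(0), c.stride(1))
    return c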
b957f60e-0bd5-47bd-acd6-2d465af4466c
chunk_h_split.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h_split.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=['BT', 'USE_G', 'USE_GK', 'USE_GV']) @triton.jit def chunk_fwd_kernel_h_reduction(g, gk, gv, hs, hr, ht, offsets, split_offsets, T: tl.constexpr, S: tl.constexpr, H: tl.constexpr, K: tl .constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl .constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr, USE_GV: tl. constexpr, STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_n, i_h = i_nh // H, i_nh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NS = tl.cdiv(T, S) boh = tl.load(split_offsets + i_n).to(tl.int32) else: bos, eos = i_n * T, i_n * T + T NS = tl.cdiv(T, S) boh = i_n * NS b_h = tl.zeros([BK, BV], dtype=tl.float32) for i_s in range(1, NS): p_hs = tl.make_block_ptr(hs + ((boh + i_s - 1) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_hr = tl.make_block_ptr(hr + ((boh + i_s) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h += tl.load(p_hs, boundary_check=(0, 1)).to(tl.float32) tl.store(p_hr, b_h.to(p_hr.dtype.element_ty), boundary_check=(0, 1)) for i_t in range(tl.cdiv(i_s * S, BT), tl.cdiv(min(i_s * S + S, T), BT) ): last_idx = min(i_t * BT + BT, T) - 1 if USE_G: if HEAD_FIRST: b_g_last = tl.load(g + i_nh * T + last_idx) else: b_g_last = tl.load(g + bos * H + last_idx * H + i_h) b_h *= tl.exp(b_g_last) if USE_GK: if HEAD_FIRST: p_gk_last = (gk + i_nh * T * K + last_idx * K + i_k * BK + tl.arange(0, BK)) else: p_gk_last = gk + (bos + last_idx ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK) p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK ) b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) < K, other=0.0) b_h *= tl.exp(b_gk_last)[:, None] if USE_GV: if HEAD_FIRST: p_gv_last = (gv + i_nh * T * V + last_idx * V + i_v * BV + tl.arange(0, BV)) else: p_gv_last = gv + (bos + last_idx ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV) p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV ) b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) < V, other=0.0) b_h *= tl.exp(b_gv_last)[None, :] if NS > 1: if STORE_FINAL_STATE: p_hs = tl.make_block_ptr(hs + ((boh + NS - 1) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), ( i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h += tl.load(p_hs, boundary_check=(0, 1)).to(tl.float32) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1) )
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_split.py
19ce5a53-d804-465c-95c2-1902ab6d7de8
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv6/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BC']) @triton.jit def chunk_rwkv6_fwd_A_kernel_intra_sub_intra_merge(A, A2, offsets, indices, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, NK: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) all = T T = eos - bos else: bos, eos = i_b * T, i_b * T + T all = B * T if i_t * BT + i_c * BC >= T: return b_A = tl.zeros([BC, BC], dtype=tl.float32) for i_k in range(0, NK): if HEAD_FIRST: p_A = tl.make_block_ptr(A + (i_k * B * H + i_bh) * T * BC, (T, BC), (BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC), (1, 0)) else: p_A = tl.make_block_ptr(A + (i_k * all + bos) * H * BC + i_h * BC, (T, BC), (H * BC, 1), (i_t * BT + i_c * BC, 0), (BC, BC ), (1, 0)) b_A += tl.load(p_A, boundary_check=(0, 1)) if HEAD_FIRST: p_A2 = tl.make_block_ptr(A2 + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_c * BC, i_c * BC), (BC, BC), (1, 0)) else: p_A2 = tl.make_block_ptr(A2 + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT + i_c * BC, i_c * BC), (BC, BC), (1, 0)) tl.store(p_A2, b_A.to(A2.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py
cdc37f44-be66-44db-8bec-5e91db230ff7
_quantize.py
IBM/qattn
qattn/nn/functional/_quantize.py
07ceda0aceb9afd299d622325944c0c0471827fe
0
@triton.jit def dequantize(x: tl.tensor, scale: tl.tensor) ->tl.tensor: """Dequantize quantized tensor to floating point. Args: x (tl.tensor): quantized tensor. scale (tl.tensor): quantization scaling factor Returns: tl.tensor: Dequantized floating-point tensor. """ return (x * scale).to(tl.float32)
{ "Data Type": [ "fp32" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_quantize.py
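Since dequantize is a device-side helper rather than a launchable kernel, here is a hedged sketch of how it might be wrapped in an elementwise kernel with a single per-tensor scale; the wrapper kernel, function names, and block size below are illustrative and not part of qattn.

import torch
import triton
import triton.language as tl

@triton.jit
def dequantize_kernel(x_ptr, scale_ptr, out_ptr, n_elements, BLOCK: tl.constexpr):
    # Loads int8 values, applies the dequantize helper above, stores float32.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    x = tl.load(x_ptr + offs, mask=mask, other=0)
    scale = tl.load(scale_ptr)  # scalar per-tensor scale
    tl.store(out_ptr + offs, dequantize(x, scale), mask=mask)

def dequantize_tensor(x_int8: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x_int8, dtype=torch.float32)
    n = x_int8.numel()
    dequantize_kernel[(triton.cdiv(n, 1024),)](x_int8, scale, out, n, BLOCK=1024)
    return out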
fba78c1a-b444-4758-891c-009b933f193d
conv.py
chengzeyi/stable-fast
src/sfast/triton/ops/conv.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@conv_heuristics() @triton.jit def _kernel_delta_x_hwc(x, w, bias, y, stride_xn, stride_xc, stride_xh, stride_xw, stride_wn, stride_wc, stride_wh, stride_ww, stride_yn, stride_yc, stride_yh, stride_yw, delta_xh_ptr, delta_xw_ptr, delta_xc_ptr, BATCH, IN_C, IN_H, IN_W, KERNEL_N, KERNEL_H, KERNEL_W, OUT_H, OUT_W, stride_h, stride_w, padding_h, padding_w, dilation_h, dilation_w, output_padding_h, output_padding_w, groups, ACC_TYPE: tl. constexpr, CONV1X1_NHWC: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_H: tl.constexpr, WITH_BIAS: tl.constexpr): """ each program instance computes a [BLOCK_BATCH, BLOCK_N, BLOCK_H, BLOCK_W] block of y """ pid_nhw = tl.program_id(0) pid_k = tl.program_id(1) off_y_k = pid_k * BLOCK_N + tl.arange(0, BLOCK_N) off_y_nhw = pid_nhw * BLOCK_M + tl.arange(0, BLOCK_M) off_y_n = off_y_nhw // (OUT_H * OUT_W) off_y_hw = off_y_nhw % (OUT_H * OUT_W) off_y_h = off_y_hw // OUT_W + output_padding_h off_y_w = off_y_hw % OUT_W + output_padding_w off_x_n = off_y_n off_x_h = off_y_h * stride_h - padding_h off_x_w = off_y_w * stride_w - padding_w off_x_nhw = off_x_n * stride_xn + off_x_h * stride_xh + off_x_w * stride_xw off_x_crs = tl.arange(0, BLOCK_K) CRS = IN_C * KERNEL_H * KERNEL_W if not CONV1X1_NHWC: delta_xh_ptrs = delta_xh_ptr + off_x_crs delta_xw_ptrs = delta_xw_ptr + off_x_crs delta_xc_ptrs = delta_xc_ptr + off_x_crs delta_xh = tl.load(delta_xh_ptrs, mask=off_x_crs < CRS, other=0) delta_xw = tl.load(delta_xw_ptrs, mask=off_x_crs < CRS, other=0) delta_xc = tl.load(delta_xc_ptrs, mask=off_x_crs < CRS, other=0) off_x_crs_unpacked = (delta_xh * stride_xh + delta_xw * stride_xw + delta_xc * stride_xc) x_ptrs = x + off_x_nhw[:, None] + off_x_crs_unpacked[None, :] else: x_ptrs = x + off_x_nhw[:, None] + off_x_crs[None, :] delta_xh = 0 delta_xw = 0 mask_x = (off_x_n < BATCH)[:, None] & (off_x_crs < CRS)[None, :] & ( off_x_h[:, None] + delta_xh[None, :] >= 0) & (off_x_h[:, None] + delta_xh[None, :] < IN_H) & (off_x_w[:, None] + delta_xw[None, :] >= 0 ) & (off_x_w[:, None] + delta_xw[None, :] < IN_W) off_w_crs = tl.arange(0, BLOCK_K) off_w_k = off_y_k w_ptrs = w + off_w_crs[:, None] + off_w_k[None, :] * stride_wn mask_w = (off_x_crs < CRS)[:, None] & (off_w_k < KERNEL_N)[None, :] matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0) matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0) acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) for crs in range(0, CRS, BLOCK_K): acc += tl.dot(matrix_x, matrix_w, out_dtype=ACC_TYPE) w_ptrs += BLOCK_K off_x_crs = crs + BLOCK_K + tl.arange(0, BLOCK_K) if not CONV1X1_NHWC: delta_xh_ptrs += BLOCK_K delta_xw_ptrs += BLOCK_K delta_xc_ptrs += BLOCK_K delta_xh = tl.load(delta_xh_ptrs, mask=off_x_crs < CRS, other=0) delta_xw = tl.load(delta_xw_ptrs, mask=off_x_crs < CRS, other=0) delta_xc = tl.load(delta_xc_ptrs, mask=off_x_crs < CRS, other=0) off_x_crs_unpacked = (delta_xh * stride_xh + delta_xw * stride_xw + delta_xc * stride_xc) x_ptrs = x + off_x_nhw[:, None] + off_x_crs_unpacked[None, :] else: x_ptrs += BLOCK_K mask_x = (off_x_n < BATCH)[:, None] & (off_x_crs < CRS)[None, :] & ( off_x_h[:, None] + delta_xh[None, :] >= 0) & (off_x_h[:, None] + delta_xh[None, :] < IN_H) & (off_x_w[:, None] + delta_xw[None, :] >= 0) & (off_x_w[:, None] + delta_xw[None, :] < IN_W) mask_w = (off_x_crs < CRS)[:, None] & (off_w_k < KERNEL_N)[None, :] matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0) matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0) if WITH_BIAS: acc += tl.load(bias + off_y_k)[None, :] acc = 
acc.to(y.dtype.element_ty) off_y_k = pid_k * BLOCK_N + tl.arange(0, BLOCK_N) off_y_nhw = pid_nhw * BLOCK_M + tl.arange(0, BLOCK_M) off_y_n = off_y_nhw // (OUT_H * OUT_W) off_y_hw = off_y_nhw % (OUT_H * OUT_W) off_y_h = off_y_hw // OUT_W + output_padding_h off_y_w = off_y_hw % OUT_W + output_padding_w y_ptrs = y + off_y_n[:, None] * stride_yn + off_y_h[:, None ] * stride_yh + off_y_w[:, None] * stride_yw + off_y_k[None, : ] * stride_yc mask_y = (off_y_n < BATCH)[:, None] & (off_y_h < OUT_H + output_padding_h)[ :, None] & (off_y_w < OUT_W + output_padding_w)[:, None] & (off_y_k < KERNEL_N)[None, :] tl.store(y_ptrs, acc, mask=mask_y) return
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/conv.py
c0a72086-6095-4821-ae15-cdbb362b3308
cluster_test.py
jax-ml/jax-triton
tests/cluster_test.py
859cc392bec876d132bd0790ea6c00b6c246dd2b
0
@triton.jit def dummy_kernel(x_ptr, o_ptr): offs = tl.program_id(axis=0) * 4 + tl.arange(0, 4) tl.store(o_ptr + offs, tl.load(x_ptr + offs))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/tests/cluster_test.py
99f349d5-d5b7-4d07-b2cd-6f21651e8219
flash_attention.py
falkaer/multi-scale-music
seq/flash_attention.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def _fwd_kernel(Q, K, V, S, Out, sm_scale, TMP, L, M, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, stride_oz, stride_oh, stride_om, stride_ok, stride_tz, stride_th, stride_tm, stride_lz, stride_lh, stride_lm, stride_mz, stride_mh, stride_mm, M_Q, N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl. constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, CAUSAL: tl. constexpr, USE_ALIBI: tl.constexpr): start_m = tl.program_id(0) * BLOCK_M off_h = tl.program_id(1) off_z = tl.program_id(2) offs_m = start_m + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = off_z * stride_qz + off_h * stride_qh + offs_m[:, None ] * stride_qm + offs_d[None, :] * stride_qk off_k = off_z * stride_kz + off_h * stride_kh + offs_n[:, None ] * stride_kn + offs_d[None, :] * stride_kk off_v = off_z * stride_vz + off_h * stride_vh + offs_n[:, None ] * stride_vn + offs_d[None, :] * stride_vk q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v t_ptrs = TMP + off_z * stride_tz + off_h * stride_th + offs_m * stride_tm m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) if EVEN_M: q = tl.load(q_ptrs) else: q = tl.load(q_ptrs, mask=offs_m[:, None] < M_Q, other=0) q = q.to(tl.float16) if USE_ALIBI: slope = tl.load(S + off_h) if CAUSAL & EVEN_M & EVEN_N: bound = start_m + BLOCK_M else: bound = N_CTX for start_n in range(0, bound, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) if EVEN_N: k = tl.load(k_ptrs) else: k = tl.load(k_ptrs, mask=start_n + offs_n[:, None] < N_CTX, other=0 ) k = k.to(tl.float16) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k, trans_b=True) qk *= sm_scale if USE_ALIBI & CAUSAL: qk += causal_alibi_mask(slope, offs_m, start_n + offs_n, M_Q, N_CTX, EVEN_M, EVEN_N) elif USE_ALIBI: qk += symmetric_alibi_mask(slope, offs_m, start_n + offs_n, M_Q, N_CTX, EVEN_M, EVEN_N) elif CAUSAL: qk += causal_mask(offs_m, start_n + offs_n, M_Q, N_CTX, EVEN_M, EVEN_N) else: qk += bounds_mask(offs_m, start_n + offs_n, M_Q, N_CTX, EVEN_M, EVEN_N) m_ij = tl.maximum(tl.max(qk, axis=1), -10000) p = tl.exp(qk - m_ij[:, None]) l_ij = tl.sum(p, axis=1) m_i_new = tl.maximum(m_i, m_ij) alpha = tl.exp(m_i - m_i_new) beta = tl.exp(m_ij - m_i_new) l_i_new = alpha * l_i + beta * l_ij p_scale = beta / l_i_new p = p * p_scale[:, None] acc_scale = l_i / l_i_new * alpha tl.store(t_ptrs, acc_scale) acc_scale = tl.load(t_ptrs) acc = acc * acc_scale[:, None] if EVEN_N: v = tl.load(v_ptrs) else: v = tl.load(v_ptrs, mask=start_n + offs_n[:, None] < N_CTX, other=0 ) v = v.to(tl.float16) p = p.to(tl.float16) acc += tl.dot(p, v) l_i = l_i_new m_i = m_i_new v_ptrs += BLOCK_N * stride_vn k_ptrs += BLOCK_N * stride_kn offs_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) l_ptrs = L + off_z * stride_lz + off_h * stride_lh + offs_m * stride_lm m_ptrs = M + off_z * stride_mz + off_h * stride_mh + offs_m * stride_mm if EVEN_M: tl.store(l_ptrs, l_i) tl.store(m_ptrs, m_i) else: tl.store(l_ptrs, l_i, mask=offs_m < M_Q) tl.store(m_ptrs, m_i, mask=offs_m < M_Q) offs_d = tl.arange(0, BLOCK_DMODEL) off_o = off_z * stride_oz + off_h * stride_oh + offs_m[:, None ] * stride_om + offs_d[None, :] * stride_ok out_ptrs = Out + off_o if EVEN_M: tl.store(out_ptrs, acc) else: tl.store(out_ptrs, acc, mask=offs_m[:, None] < M_Q)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py
e528bce6-ce8b-4667-9ce7-53bb2e0fbc58
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit def chunk_abc_bwd_kernel_dk(q, k, rk, ck, ds, dk, dsk, s_qk_h, s_qk_t, s_qk_d, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BK: tl.constexpr, BM: tl.constexpr, DK: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr): i_k, i_m, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) p_q = tl.make_block_ptr(q + i_bh * s_qk_h, (T, DK), (s_qk_t, s_qk_d), ( (NT - 1) * BT, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_qk_h, (DK, T), (s_qk_d, s_qk_t), ( i_k * BK, (NT - 1) * BT), (BK, BT), (0, 1)) p_rk = tl.make_block_ptr(rk + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,), (i_m * BM,), (BM,), (0,)) p_ck = tl.make_block_ptr(ck + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), ((NT - 1) * BT, i_m * BM), (BT, BM), (1, 0)) p_ds = tl.make_block_ptr(ds + i_bh * s_sk_h, (DM, T), (s_sk_m, s_sk_t), (i_m * BM, (NT - 1) * BT), (BM, BT), (0, 1)) p_dk = tl.make_block_ptr(dk + (i_m * n_bh + i_bh) * s_qk_h, (T, DK), ( s_qk_t, s_qk_d), ((NT - 1) * BT, i_k * BK), (BT, BK), (1, 0)) p_dsk = tl.make_block_ptr(dsk + (i_k * n_bh + i_bh) * s_sk_h, (T, DM), (s_sk_t, s_sk_m), ((NT - 1) * BT, i_m * BM), (BT, BM), (1, 0)) o_i = tl.arange(0, BT) m_s, m_t = o_i[:, None] <= o_i[None, :], o_i[:, None] >= o_i[None, :] b_dhk = tl.zeros([BM, BK], dtype=tl.float32) for i in range(NT): p_rk = tl.make_block_ptr(rk + i_bh * s_sk_t * NT, (NT * DM,), ( s_sk_m,), ((NT - i) % NT * DM + i_m * BM,), (BM,), (0,)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_rk = tl.load(p_rk, boundary_check=(0,)) b_ck = tl.load(p_ck, boundary_check=(0, 1)) b_ds = tl.load(p_ds, boundary_check=(0, 1)) b_inter = tl.dot((b_ck * b_rk[None, :]).to(b_q.dtype), b_dhk.to(b_q .dtype), allow_tf32=False) b_intra = tl.dot(tl.where(m_s, tl.dot(b_ck, b_ds, allow_tf32=False), 0.0).to(b_q.dtype), b_q, allow_tf32=False) b_dk = b_inter + b_intra b_inter = tl.dot(b_dhk.to(b_k.dtype), b_k, allow_tf32=False) * b_rk[ :, None] b_intra = tl.dot(b_ds, tl.where(m_t, tl.dot(b_q, b_k, allow_tf32= False), 0.0).to(b_q.dtype), allow_tf32=False) b_dsk = b_ck * tl.trans(b_inter + b_intra) b_dhk = b_dhk * b_rk[:, None] + tl.dot(b_ds, b_q, allow_tf32=False) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dsk, b_dsk.to(p_dsk.dtype.element_ty), boundary_check=(0, 1) ) p_q = tl.advance(p_q, (-BT, 0)) p_k = tl.advance(p_k, (0, -BT)) p_ck = tl.advance(p_ck, (-BT, 0)) p_ds = tl.advance(p_ds, (0, -BT)) p_dk = tl.advance(p_dk, (-BT, 0)) p_dsk = tl.advance(p_dsk, (-BT, 0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
47a0737b-519d-4b88-a3af-0251a50b9828
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def fused_recurrent_linear_attn_bwd_kernel(q, k, v, do, dq, dk, dv, h0, s_k_h, s_v_h, scale, B, H, T, K: tl.constexpr, V: tl.constexpr, BK: tl. constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr): i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) p_do = do + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) p_dq = dq + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK) mask_bk = i_k * BK + tl.arange(0, BK) < K mask_bv = i_v * BV + tl.arange(0, BV) < V b_h = tl.zeros([BK, BV], dtype=tl.float32) if USE_INITIAL_STATE: mask_kv = mask_bk[:, None] & mask_bv[None, :] p_h0 = h0 + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[:, None] ) * V + (i_v * BV + tl.arange(0, BV)[None, :]) b_h += tl.load(p_h0, mask=mask_kv, other=0).to(tl.float32) for _ in range(0, T): b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32) b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask_bv, other=0).to(tl.float32) b_h += b_k[:, None] * b_v[None, :] _d_q = b_h * b_do[None, :] d_q = tl.sum(_d_q, axis=1) * scale tl.store(p_dq, d_q.to(p_dq.dtype.element_ty), mask=mask_bk) p_k += K p_do += V p_v += V p_dq += K tl.debug_barrier() p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K p_do = do + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V p_dk = dk + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K p_dv = dv + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V d_h = tl.zeros([BK, BV], dtype=tl.float32) for _ in range(T): b_do = tl.load(p_do, mask=mask_bv, other=0).to(tl.float32) b_q = tl.load(p_q, mask=mask_bk, other=0).to(tl.float32) * scale b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32) b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32) d_h += b_q[:, None] * b_do[None, :] d_k = tl.sum(d_h * b_v[None, :], axis=1) d_v = tl.sum(d_h * b_k[:, None], axis=0) tl.store(p_dk, d_k.to(p_dk.dtype.element_ty), mask=mask_bk) tl.store(p_dv, d_v.to(p_dv.dtype.element_ty), mask=mask_bv) p_do -= V p_q -= K p_k -= K p_v -= V p_dk -= K p_dv -= V
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Recurrent Neural Networks", "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/fused_recurrent.py
47931159-10d0-421b-8cec-3607da49ce15
scratch.py
falkaer/multi-scale-music
seq/scratch.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def apply_dropout(x, offsets, p, seed, mask_val=0.0): scale = 1 / (1 - p) rand = tl.rand(seed, offsets) return tl.where(rand > p, x * scale, mask_val)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/scratch.py
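apply_dropout is likewise a device-side helper; a minimal sketch of a flat dropout kernel built around it (the wrapper names and block size are assumptions, not from the repo). The element offsets double as the counter for the tl.rand stream, so the same seed reproduces the same mask.

import torch
import triton
import triton.language as tl

@triton.jit
def dropout_kernel(x_ptr, out_ptr, n_elements, p, seed, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    x = tl.load(x_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, apply_dropout(x, offs, p, seed), mask=mask)

def dropout(x: torch.Tensor, p: float = 0.1, seed: int = 0) -> torch.Tensor:
    out = torch.empty_like(x)
    n = x.numel()
    dropout_kernel[(triton.cdiv(n, 1024),)](x, out, n, p, seed, BLOCK=1024)
    return out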
3dcdbcd6-e7fc-47f6-a45e-152876ac2a6c
gemm_postop_gelu_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({ 'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32 ), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages =2, num_warps=32)], key=['M', 'N', 'K']) @triton.jit def matmul_kernel_with_block_pointers(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr, stride_cn: tl.constexpr, BLOCK_SIZE_M: tl. constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % group_size_m pid_n = pid % num_pid_in_group // group_size_m a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=( stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0)) b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=( stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0)) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for _ in range(0, K, BLOCK_SIZE_K): a = tl.load(a_block_ptr, boundary_check=(0, 1)) b = tl.load(b_block_ptr, boundary_check=(0, 1)) accumulator += tl.dot(a, b) a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K)) b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0)) c = gelu(accumulator) c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=( stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0)) tl.store(c_block_ptr, c, boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Activation Functions" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
c2013b8d-c916-45ca-9e2e-44a7016851d9
swiglu.py
ardywibowo/triton-mode
kernels/swiglu.py
5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1
0
@triton.jit def triton_swiglu_forward(input_a_ptr, input_b_ptr, output_ptr, row_stride, num_columns: tl.constexpr, BLOCK_SIZE: tl.constexpr): prog_id = tl.program_id(0).to(tl.int64) input_a_ptr += prog_id * row_stride input_b_ptr += prog_id * row_stride output_ptr += prog_id * row_stride column_offsets = tl.arange(0, BLOCK_SIZE) active_mask = column_offsets < num_columns input_a_row = tl.load(input_a_ptr + column_offsets, mask=active_mask, other=0).to(tl.float32) input_b_row = tl.load(input_b_ptr + column_offsets, mask=active_mask, other=0) result_row = silu(input_a_row) * input_b_row tl.store(output_ptr + column_offsets, result_row, mask=active_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/swiglu.py
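A possible host-side launcher for triton_swiglu_forward, assuming the repo's silu helper is in scope, contiguous 2D inputs, and one program per row; the wrapper name and shape handling are illustrative rather than the repo's actual API.

import torch
import triton

def swiglu_forward(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    assert a.shape == b.shape and a.is_contiguous() and b.is_contiguous()
    num_rows, num_cols = a.shape
    out = torch.empty_like(a)
    block_size = triton.next_power_of_2(num_cols)  # must cover the whole row
    triton_swiglu_forward[(num_rows,)](a, b, out, a.stride(0),
                                       num_columns=num_cols, BLOCK_SIZE=block_size)
    return out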
852c0812-545a-4d54-a311-09ef7b9a1c60
gemm_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [1, 2, 3]] + [triton. Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2, 3, 4]] + [triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2]] + [triton.Config({ 'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages=s, num_warps=32) for s in [2, 3]], key=['M', 'N', 'K']) @triton.jit def matmul_kernel_with_block_pointers(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr, stride_cn: tl.constexpr, BLOCK_SIZE_M: tl. constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % group_size_m pid_n = pid % num_pid_in_group // group_size_m a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=( stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0)) b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=( stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0)) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for _ in range(0, K, BLOCK_SIZE_K): a = tl.load(a_block_ptr, boundary_check=(0, 1)) b = tl.load(b_block_ptr, boundary_check=(0, 1)) accumulator += tl.dot(a, b) a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K)) b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0)) c = accumulator.to(tl.float32) c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=( stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0)) tl.store(c_block_ptr, c, boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_benchmark.py
8fca3bd0-a9e4-43f9-9d3d-89cfd7925602
bnrelu.py
neuro-ml/kerops
kerops/kernels/bnrelu.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit def _ApplyBNReLU_cl3d_impl(X_ptr, Out_ptr, Weight_ptr, Bias_ptr, numel_no_channels, BLOCK_SIZE: tl.constexpr, num_channels: tl.constexpr, block_other: tl.constexpr): pid = tl.program_id(0) X_ptr += pid * BLOCK_SIZE Out_ptr += pid * BLOCK_SIZE channels_offset = tl.arange(0, num_channels) other_offset = tl.arange(0, block_other) offset = channels_offset[None, :] + other_offset[:, None] * num_channels mask = (other_offset < numel_no_channels - pid * block_other)[:, None] x = tl.load(X_ptr + offset, mask=mask, other=0).to(tl.float32) weight = tl.load(Weight_ptr + channels_offset[None, :]) bias = tl.load(Bias_ptr + channels_offset[None, :]) output = x * weight + bias output = tl.maximum(output, 0.0) tl.store(Out_ptr + offset, output, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/bnrelu.py
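A hedged launcher sketch for _ApplyBNReLU_cl3d_impl, assuming a contiguous channels-last tensor whose channel count is a power of two (required by tl.arange) and a per-program tile of block_other spatial positions; the wrapper below is illustrative, not the kerops API.

import torch
import triton

def apply_bn_relu(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor,
                  block_other: int = 64) -> torch.Tensor:
    num_channels = x.shape[-1]
    numel_no_channels = x.numel() // num_channels
    out = torch.empty_like(x)
    grid = (triton.cdiv(numel_no_channels, block_other),)
    _ApplyBNReLU_cl3d_impl[grid](x, out, weight, bias, numel_no_channels,
                                 BLOCK_SIZE=num_channels * block_other,
                                 num_channels=num_channels,
                                 block_other=block_other)
    return out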
817c1276-c556-4c1d-b4c5-f46a380bd438
test_autodiff.py
srush/triton-autodiff
tests/test_autodiff.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit def tr2(X, dX, dY): r = tl.arange(0, 16) r2 = tl.arange(0, 16)[:, None] x = tl.load(X + r) dy = tl.load(dY + 16 * r2 + r) tl.static_print('shape', dy.shape) dx = dcomp2dx(x, dy) tl.static_print('shape', dx.shape) tl.store(dX + r, dx)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py
0390ce58-3047-487a-b922-61daab851d88
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/launch_latency/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.jit def nop_kernel(): pass
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/launch_latency/kernels.py
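The empty kernel is useful for isolating launch overhead; a one-line timing sketch (an assumption about usage, not tritonbench's own harness):

from triton.testing import do_bench

ms = do_bench(lambda: nop_kernel[(1,)]())  # approximates pure Python/driver launch latency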
a2951d7f-12bc-4c56-a281-b5e60b0cf38c
3_mat_mul.py
DataLama/triton-tutorials
tutorials/basic/3_mat_mul.py
95fb36429bdae3333cfcde76b18a00781ba5953e
0
@triton.jit def matmul_kernel(x_ptr, y_ptr, z_ptr, m_size, k_size, n_size, m_block_size: tl.constexpr, k_block_size: tl.constexpr, n_block_size: tl.constexpr): pid = tl.program_id(0) num_n_blocks = tl.cdiv(n_size, n_block_size) m_block = pid // num_n_blocks n_block = pid % num_n_blocks m_offsets = tl.arange(0, m_block_size) + m_block * m_block_size n_offsets = tl.arange(0, n_block_size) + n_block * n_block_size k_offsets = tl.arange(0, k_block_size) x_ptrs = x_ptr + m_offsets[:, None] * k_size + k_offsets[None, :] y_ptrs = y_ptr + k_offsets[:, None] * n_size + n_offsets[None, :] z_ptrs = z_ptr + m_offsets[:, None] * n_size + n_offsets[None, :] z = tl.zeros((m_block_size, n_block_size), dtype=tl.float32) for _ in range(0, k_size, k_block_size): x_sub = tl.load(x_ptrs) y_sub = tl.load(y_ptrs) z += tl.dot(x_sub, y_sub, allow_tf32=False) x_ptrs += k_block_size y_ptrs += k_block_size * n_size tl.store(z_ptrs, z)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/DataLama/triton-tutorials/blob/95fb36429bdae3333cfcde76b18a00781ba5953e/tutorials/basic/3_mat_mul.py
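A host-side sketch for the tutorial matmul_kernel above; since the kernel loads without boundary masks, the sketch assumes M, K, N are exact multiples of the chosen block sizes (the defaults here are illustrative).

import torch
import triton

def matmul(x: torch.Tensor, y: torch.Tensor, bm: int = 64, bk: int = 32, bn: int = 64) -> torch.Tensor:
    M, K = x.shape
    K2, N = y.shape
    assert K == K2 and M % bm == 0 and K % bk == 0 and N % bn == 0
    z = torch.empty((M, N), device=x.device, dtype=torch.float32)
    grid = (triton.cdiv(M, bm) * triton.cdiv(N, bn),)
    matmul_kernel[grid](x, y, z, M, K, N,
                        m_block_size=bm, k_block_size=bk, n_block_size=bn)
    return z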
d5aed4cd-281b-47ba-b953-7ad565ff7b41
test_inductor.py
triton-lang/kernels
test/test_inductor.py
eeeebdd8be7d13629de22d600621e6234057eed3
0
@triton.jit def triton_(in_ptr0, out_ptr0, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x5 = xindex tmp0 = -1 + x1 tmp1 = -1 + x0 tmp2 = 2 + x1 tmp3 = 2 + x0 tmp4 = 0 tmp5 = tl.where(tmp0 != tmp0, tmp0, tl.where(tmp0 > tmp4, tmp0, tmp4)) tmp6 = tl.where(tmp1 != tmp1, tmp1, tl.where(tmp1 > tmp4, tmp1, tmp4)) tmp7 = 8 tmp8 = tl.where(tmp2 != tmp2, tmp2, tl.where(tmp2 < tmp7, tmp2, tmp7)) tmp9 = tl.where(tmp3 != tmp3, tmp3, tl.where(tmp3 < tmp7, tmp3, tmp7)) tmp10 = tmp5 + tmp4 tmp11 = tmp6 + tmp4 tmp12 = 1 tmp13 = tmp8 - tmp12 tmp14 = tl.where(tmp10 != tmp10, tmp10, tl.where(tmp10 < tmp13, tmp10, tmp13)) tmp15 = tmp9 - tmp12 tmp16 = tl.where(tmp11 != tmp11, tmp11, tl.where(tmp11 < tmp15, tmp11, tmp15)) tmp17 = tl.load(in_ptr0 + (tmp16 + 8 * tmp14 + 64 * x2), None).to(tl. float32) tmp18 = tmp17 / 9 tmp19 = tmp10 < tmp8 tmp20 = tmp11 < tmp9 tmp21 = tmp19 & tmp20 tmp22 = 0.0 tmp23 = tl.where(tmp21, tmp18, tmp22) tmp24 = tmp6 + tmp12 tmp25 = tl.where(tmp24 != tmp24, tmp24, tl.where(tmp24 < tmp15, tmp24, tmp15)) tmp26 = tl.load(in_ptr0 + (tmp25 + 8 * tmp14 + 64 * x2), None).to(tl. float32) tmp27 = tmp26 / 9 tmp28 = tmp24 < tmp9 tmp29 = tmp19 & tmp28 tmp30 = tmp23 + tmp27 tmp31 = tl.where(tmp29, tmp30, tmp23) tmp32 = 2 tmp33 = tmp6 + tmp32 tmp34 = tl.where(tmp33 != tmp33, tmp33, tl.where(tmp33 < tmp15, tmp33, tmp15)) tmp35 = tl.load(in_ptr0 + (tmp34 + 8 * tmp14 + 64 * x2), None).to(tl. float32) tmp36 = tmp35 / 9 tmp37 = tmp33 < tmp9 tmp38 = tmp19 & tmp37 tmp39 = tmp31 + tmp36 tmp40 = tl.where(tmp38, tmp39, tmp31) tmp41 = tmp5 + tmp12 tmp42 = tl.where(tmp41 != tmp41, tmp41, tl.where(tmp41 < tmp13, tmp41, tmp13)) tmp43 = tl.load(in_ptr0 + (tmp16 + 8 * tmp42 + 64 * x2), None).to(tl. float32) tmp44 = tmp43 / 9 tmp45 = tmp41 < tmp8 tmp46 = tmp45 & tmp20 tmp47 = tmp40 + tmp44 tmp48 = tl.where(tmp46, tmp47, tmp40) tmp49 = tl.load(in_ptr0 + (tmp25 + 8 * tmp42 + 64 * x2), None).to(tl. float32) tmp50 = tmp49 / 9 tmp51 = tmp45 & tmp28 tmp52 = tmp48 + tmp50 tmp53 = tl.where(tmp51, tmp52, tmp48) tmp54 = tl.load(in_ptr0 + (tmp34 + 8 * tmp42 + 64 * x2), None).to(tl. float32) tmp55 = tmp54 / 9 tmp56 = tmp45 & tmp37 tmp57 = tmp53 + tmp55 tmp58 = tl.where(tmp56, tmp57, tmp53) tmp59 = tmp5 + tmp32 tmp60 = tl.where(tmp59 != tmp59, tmp59, tl.where(tmp59 < tmp13, tmp59, tmp13)) tmp61 = tl.load(in_ptr0 + (tmp16 + 8 * tmp60 + 64 * x2), None).to(tl. float32) tmp62 = tmp61 / 9 tmp63 = tmp59 < tmp8 tmp64 = tmp63 & tmp20 tmp65 = tmp58 + tmp62 tmp66 = tl.where(tmp64, tmp65, tmp58) tmp67 = tl.load(in_ptr0 + (tmp25 + 8 * tmp60 + 64 * x2), None).to(tl. float32) tmp68 = tmp67 / 9 tmp69 = tmp63 & tmp28 tmp70 = tmp66 + tmp68 tmp71 = tl.where(tmp69, tmp70, tmp66) tmp72 = tl.load(in_ptr0 + (tmp34 + 8 * tmp60 + 64 * x2), None).to(tl. float32) tmp73 = tmp72 / 9 tmp74 = tmp63 & tmp37 tmp75 = tmp71 + tmp73 tmp76 = tl.where(tmp74, tmp75, tmp71) tl.store(out_ptr0 + (x5 + tl.zeros([XBLOCK], tl.int32)), tmp76, None)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/test/test_inductor.py
e039d6a9-bf1b-4d2b-a12a-b4c00fb3c499
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit def chunk_abc_bwd_kernel_dv(do, v, rv, cv, p, dv, dsv, s_qk_h, s_qk_t, s_qk_d, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BV: tl.constexpr, BM: tl.constexpr, DV: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr): i_v, i_m, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) p_do = tl.make_block_ptr(do + i_bh * s_qk_h, (T, DV), (s_qk_t, s_qk_d), ((NT - 1) * BT, i_v * BV), (BT, BV), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * s_qk_h, (DV, T), (s_qk_d, s_qk_t), ( i_v * BV, (NT - 1) * BT), (BV, BT), (0, 1)) p_rv = tl.make_block_ptr(rv + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,), (i_m * BM,), (BM,), (0,)) p_cv = tl.make_block_ptr(cv + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), ((NT - 1) * BT, i_m * BM), (BT, BM), (1, 0)) p_p = tl.make_block_ptr(p + i_bh * s_sk_h, (DM, T), (s_sk_m, s_sk_t), ( i_m * BM, (NT - 1) * BT), (BM, BT), (0, 1)) p_dv = tl.make_block_ptr(dv + (i_m * n_bh + i_bh) * s_qk_h, (T, DV), ( s_qk_t, s_qk_d), ((NT - 1) * BT, i_v * BV), (BT, BV), (1, 0)) p_dsv = tl.make_block_ptr(dsv + (i_v * n_bh + i_bh) * s_sk_h, (T, DM), (s_sk_t, s_sk_m), ((NT - 1) * BT, i_m * BM), (BT, BM), (1, 0)) o_i = tl.arange(0, BT) m_s, m_t = o_i[:, None] <= o_i[None, :], o_i[:, None] >= o_i[None, :] b_dhv = tl.zeros([BM, BV], dtype=tl.float32) for i in range(NT): p_rv = tl.make_block_ptr(rv + i_bh * s_sk_t * NT, (NT * DM,), ( s_sk_m,), ((NT - i) % NT * DM + i_m * BM,), (BM,), (0,)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_rv = tl.load(p_rv, boundary_check=(0,)) b_cv = tl.load(p_cv, boundary_check=(0, 1)) b_p = tl.load(p_p, boundary_check=(0, 1)) b_inter = tl.dot((b_cv * b_rv[None, :]).to(b_do.dtype), b_dhv.to( b_do.dtype), allow_tf32=False) b_intra = tl.dot(tl.where(m_s, tl.dot(b_cv, b_p, allow_tf32=False), 0.0).to(b_do.dtype), b_do, allow_tf32=False) b_dv = b_inter + b_intra b_inter = tl.dot(b_dhv.to(b_v.dtype), b_v, allow_tf32=False) * b_rv[ :, None] b_intra = tl.dot(b_p, tl.where(m_t, tl.dot(b_do, b_v, allow_tf32= False), 0.0).to(b_do.dtype), allow_tf32=False) b_dsv = b_cv * tl.trans(b_inter + b_intra) b_dhv = b_dhv * b_rv[:, None] + tl.dot(b_p, b_do, allow_tf32=False) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dsv, b_dsv.to(p_dsv.dtype.element_ty), boundary_check=(0, 1) ) p_do = tl.advance(p_do, (-BT, 0)) p_v = tl.advance(p_v, (0, -BT)) p_cv = tl.advance(p_cv, (-BT, 0)) p_p = tl.advance(p_p, (0, -BT)) p_dv = tl.advance(p_dv, (-BT, 0)) p_dsv = tl.advance(p_dsv, (-BT, 0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
b6e5ba5c-cdbe-470c-b934-3a35c1c5bedd
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BS': BS}, num_warps=num_warps) for BS in [16, 32, 64] for num_warps in [2, 4, 8]], key=['S', 'BT']) @triton.jit def chunk_local_reversed_cumsum_vector_kernel(s, o, offsets, indices, T: tl .constexpr, H: tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl. constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr): i_s, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos else: bos, eos = i_b * T, i_b * T + T o_i = tl.arange(0, BT) m_s = tl.where(o_i[:, None] <= o_i[None, :], 1.0, 0.0) if HEAD_FIRST: p_s = tl.make_block_ptr(s + i_bh * T * S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * T * S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) else: p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H * S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) p_o = tl.make_block_ptr(o + (bos * H + i_h) * S, (T, S), (H * S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32) b_o = tl.dot(m_s, b_s, allow_tf32=False) tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
7f6f8b3f-9d2a-4ae0-a2ad-35655192d89f
_flash_attention.py
IBM/qattn
qattn/nn/functional/_flash_attention.py
07ceda0aceb9afd299d622325944c0c0471827fe
0
@triton.autotune(configs=_get_configs(), key=['N_CTX', 'H', 'Z']) @triton.heuristics({'EVEN_CTX': lambda args: args['N_CTX'] % args['BLOCK_M' ] == 0}) @triton.jit def _fwd_kernel(Q, K, V, sm_scale, qkv_scale_ptr, out_scale_ptr, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, EVEN_CTX: tl. constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_z = off_hz // H off_h = off_hz % H qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64 ) * stride_qh Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=( BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0 ), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) qkv_scale = tl.load(qkv_scale_ptr) qk_scale = qkv_scale * qkv_scale * sm_scale * 1.44269504 if EVEN_CTX: q = tl.load(Q_block_ptr) else: q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero') for start_n in range(0, N_CTX, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) if EVEN_CTX: k = tl.load(K_block_ptr) else: k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero' ) qk = tl.dot(q, k, allow_tf32=False, out_dtype=tl.int32) qk_fp32 = qk * qk_scale m_ij = tl.maximum(m_i, tl.max(qk_fp32, 1)) p = tl.math.exp2(qk_fp32 - m_ij[:, None]) alpha = tl.math.exp2(m_i - m_ij) m_i = m_ij if EVEN_CTX: v = tl.load(V_block_ptr) else: v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero' ) v = (v * qkv_scale).to(tl.bfloat16) acc *= alpha[:, None] acc += tl.dot(p.to(tl.bfloat16), v, allow_tf32=True) l_i = l_i * alpha + tl.sum(p, 1) K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) out_scale = tl.load(out_scale_ptr) acc = tl.math.llrint(acc / (l_i[:, None] * out_scale)).to(tl.int8) O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) if EVEN_CTX: tl.store(O_block_ptr, acc) else: tl.store(O_block_ptr, acc, boundary_check=(0,))
{ "Data Type": [ "bf16", "int8" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_flash_attention.py
e981bb06-78d6-48a1-8fb8-10d5a6f3bfc8
mhmoe.py
dtadpole/triton-playground
mhmoe.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit def d_sigmoid(o): return o * (1 - o)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Backpropagation" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py
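d_sigmoid takes the sigmoid output o = sigmoid(x) and returns the derivative sigmoid'(x) = o * (1 - o); a small host-side sanity check against autograd (an illustration, not part of the repo):

import torch

x = torch.randn(8, dtype=torch.float64, requires_grad=True)
o = torch.sigmoid(x)
o.sum().backward()
assert torch.allclose(x.grad, (o * (1 - o)).detach())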
f79fde84-d03b-40dd-bc48-598749dc2167
layer_norm_dquant.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/layer_norm_dquant.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _layer_norm_dquant_kernel(X, Y, W, B, out, scale, stride, N, eps, BLOCK_SIZE: tl.constexpr): row = tl.program_id(0) Y += row * stride X += row * stride out += row * stride _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) a = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) _mean += a mean = tl.sum(_mean, axis=0) / N _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x - mean, 0.0) _var += x * x var = tl.sum(_var, axis=0) / N rstd = 1 / tl.sqrt(var + eps) _max_x = 0.0 for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N w = tl.load(W + cols, mask=mask) b = tl.load(B + cols, mask=mask) x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32) _norm = (x - mean) * rstd * w + b tl.store(out + cols, _norm, mask=mask) _max_x = tl.maximum(_max_x, tl.max(tl.abs(_norm), axis=0)) scale_x = _max_x / 127.0 tl.store(scale + row, scale_x) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N _norm = tl.load(out + cols, mask=mask, other=0.0) _norm = _norm / scale_x + 0.5 tl.store(Y + cols, _norm.to(tl.int8), mask=mask)
{ "Data Type": [ "int8", "fp32" ], "Functionality": [ "Normalization", "Quantization" ], "Memory Access Pattern": [ "Coalesced", "Register Intensive" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/layer_norm_dquant.py
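A possible launcher for _layer_norm_dquant_kernel, assuming contiguous 2D input with one program per row; it returns the int8 rows, the normalized rows, and the per-row scales. The wrapper signature and default block size are assumptions, not the FLASHNN API.

import torch
import triton

def layer_norm_dquant(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor,
                      eps: float = 1e-5, block_size: int = 1024):
    M, N = x.shape
    y = torch.empty((M, N), device=x.device, dtype=torch.int8)       # quantized rows
    out = torch.empty_like(x)                                        # normalized rows
    scale = torch.empty((M,), device=x.device, dtype=torch.float32)  # per-row scales
    _layer_norm_dquant_kernel[(M,)](x, y, weight, bias, out, scale,
                                    x.stride(0), N, eps, BLOCK_SIZE=block_size)
    return y, out, scale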
4e233564-be5f-4852-b99c-34dbe18c056e
sparse_copy.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/sparse_copy.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def copy_dense_to_sparse_kernel(input_ptr, output_ptr, scores_ptr, sparse_rows_ptr, num_columns: tl.constexpr, num_experts_per_token: tl. constexpr, block_size: tl.constexpr): dense_row = tl.program_id(0) offsets = tl.arange(0, block_size) + block_size * tl.program_id(1) mask = None if num_columns % block_size == 0 else offsets < num_columns out = tl.load(input_ptr + dense_row * num_columns + offsets, mask=mask) for top_index in range(num_experts_per_token): sparse_row = tl.load(sparse_rows_ptr + dense_row * num_experts_per_token + top_index) out_scaled = out if scores_ptr is None else out * tl.load( scores_ptr + dense_row * num_experts_per_token + top_index).to(tl .float32) tl.store(output_ptr + sparse_row * num_columns + offsets, out_scaled, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Top-K Selection" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_copy.py
82c88b83-f264-4ebd-b42a-842e936a1e5b
activation.py
chengzeyi/stable-fast
src/sfast/triton/ops/activation.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@triton.jit def silu(x): return x * tl.sigmoid(x.to(tl.float32)).to(x.dtype)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py
0ea9b7a0-8530-4964-b178-926fbf55411b
triton_kernels.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/triton_kernels.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def _triton_fourth_order_bwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl. tensor, g_x_ptr: tl.tensor, g_y_ptr: tl.tensor, g_z_ptr: tl.tensor, g_1_0_ptr: tl.tensor, g_1_1_ptr: tl.tensor, g_1_2_ptr: tl.tensor, g_2_0_ptr: tl.tensor, g_2_1_ptr: tl.tensor, g_2_2_ptr: tl.tensor, g_2_3_ptr: tl.tensor, g_2_4_ptr: tl.tensor, g_3_0_ptr: tl.tensor, g_3_1_ptr: tl.tensor, g_3_2_ptr: tl.tensor, g_3_3_ptr: tl.tensor, g_3_4_ptr: tl.tensor, g_3_5_ptr: tl.tensor, g_3_6_ptr: tl.tensor, g_4_0_ptr: tl.tensor, g_4_1_ptr: tl.tensor, g_4_2_ptr: tl.tensor, g_4_3_ptr: tl.tensor, g_4_4_ptr: tl.tensor, g_4_5_ptr: tl.tensor, g_4_6_ptr: tl.tensor, g_4_7_ptr: tl.tensor, g_4_8_ptr: tl.tensor, BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr): sqrt_3 = 3 ** 0.5 sqrt_5 = 5 ** 0.5 sqrt_15 = 15 ** 0.5 block_id = tl.program_id(0) offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id x_row_start = x_ptr + offset y_row_start = y_ptr + offset z_row_start = z_ptr + offset x = tl.load(x_row_start, mask=offset < vector_length) y = tl.load(y_row_start, mask=offset < vector_length) z = tl.load(z_row_start, mask=offset < vector_length) g_1_0 = tl.load(g_1_0_ptr + offset, mask=offset < vector_length) g_1_1 = tl.load(g_1_1_ptr + offset, mask=offset < vector_length) g_1_2 = tl.load(g_1_2_ptr + offset, mask=offset < vector_length) g_x = sqrt_3 * g_1_0 g_y = sqrt_3 * g_1_1 g_z = sqrt_3 * g_1_2 g_2_0 = tl.load(g_2_0_ptr + offset, mask=offset < vector_length) g_2_1 = tl.load(g_2_1_ptr + offset, mask=offset < vector_length) g_2_2 = tl.load(g_2_2_ptr + offset, mask=offset < vector_length) g_2_3 = tl.load(g_2_3_ptr + offset, mask=offset < vector_length) g_2_4 = tl.load(g_2_4_ptr + offset, mask=offset < vector_length) g_x += sqrt_15 * z * g_2_0 g_z += sqrt_15 * x * g_2_0 g_x += sqrt_15 * y * g_2_1 g_y += sqrt_15 * x * g_2_1 g_y += sqrt_15 * z * g_2_2 g_z += sqrt_15 * y * g_2_2 g_x += -1.0 * sqrt_5 * x * g_2_3 g_y += 2.0 * sqrt_5 * y * g_2_3 g_z += -1.0 * sqrt_5 * z * g_2_3 g_x += -1.0 * sqrt_15 * x * g_2_4 g_z += sqrt_15 * z * g_2_4 g_3_0 = tl.load(g_3_0_ptr + offset, mask=offset < vector_length) g_3_1 = tl.load(g_3_1_ptr + offset, mask=offset < vector_length) g_3_2 = tl.load(g_3_2_ptr + offset, mask=offset < vector_length) g_3_3 = tl.load(g_3_3_ptr + offset, mask=offset < vector_length) g_3_4 = tl.load(g_3_4_ptr + offset, mask=offset < vector_length) g_3_5 = tl.load(g_3_5_ptr + offset, mask=offset < vector_length) g_3_6 = tl.load(g_3_6_ptr + offset, mask=offset < vector_length) sq_x = x * x sq_y = y * y sq_z = z * z cu_z = sq_z * z cu_x = sq_x * x cu_y = sq_y * y g_x += sqrt_15 * g_3_0 * (-1.62018517460196 * sq_x + 1.08012344973464 * sq_z + 0.540061724867322 * sq_z) g_x += 2.64575131106459 * sqrt_15 * g_3_1 * y * z g_x -= g_3_2 * (4.8605555238059 * sq_x - 6.48074069840786 * sq_y + 1.62018517460197 * sq_z) g_x -= 7.93725393319377 * g_3_3 * x * y g_x -= 3.24037034920393 * g_3_4 * x * z g_x -= 2.64575131106459 * sqrt_15 * g_3_5 * x * y g_x -= sqrt_15 * g_3_6 * z * (1.08012344973464 * x + 2.16024689946929 * x) g_y += 2.64575131106459 * sqrt_15 * g_3_1 * x * z g_y += 12.9614813968157 * g_3_2 * x * y g_y -= g_3_3 * (3.96862696659689 * sq_x - 7.93725393319377 * sq_y + 3.96862696659689 * sq_z) g_y += 12.9614813968157 * g_3_4 * y * z g_y -= 1.3228756555323 * sqrt_15 * g_3_5 * (sq_x - sq_z) g_z += sqrt_15 * g_3_0 * x * (1.08012344973464 * z + 2.16024689946929 * z) g_z += 2.64575131106459 * sqrt_15 * g_3_1 * x * y g_z -= 3.24037034920393 * g_3_2 * x * z g_z -= 7.93725393319377 * g_3_3 * y * z g_z -= g_3_4 * (1.62018517460197 * sq_x 
- 6.48074069840786 * sq_y + 4.8605555238059 * sq_z) g_z += 2.64575131106459 * sqrt_15 * g_3_5 * y * z g_z -= sqrt_15 * g_3_6 * (1.08012344973464 * sq_x + 0.540061724867322 * sq_x - 1.62018517460196 * sq_z) g_4_0 = tl.load(g_4_0_ptr + offset, mask=offset < vector_length) g_4_1 = tl.load(g_4_1_ptr + offset, mask=offset < vector_length) g_4_2 = tl.load(g_4_2_ptr + offset, mask=offset < vector_length) g_4_3 = tl.load(g_4_3_ptr + offset, mask=offset < vector_length) g_4_4 = tl.load(g_4_4_ptr + offset, mask=offset < vector_length) g_4_5 = tl.load(g_4_5_ptr + offset, mask=offset < vector_length) g_4_6 = tl.load(g_4_6_ptr + offset, mask=offset < vector_length) g_4_7 = tl.load(g_4_7_ptr + offset, mask=offset < vector_length) g_4_8 = tl.load(g_4_8_ptr + offset, mask=offset < vector_length) g_x -= sqrt_15 * g_4_0 * (3.43693177121688 * sq_x * z + 3.43693177121688 * sq_x * z - 1.14564392373896 * cu_z - 1.14564392373896 * cu_z) g_x += sqrt_15 * g_4_1 * y * (-4.8605555238059 * sq_x + 3.24037034920393 * sq_z + 1.62018517460197 * sq_z) g_x -= g_4_2 * (0.649519052838329 * sqrt_15 * sq_x * z + 7.54672942406179 * sq_x * z - 2.59807621135332 * sqrt_15 * sq_y * z - 10.0623058987491 * sq_y * z + 0.21650635094611 * sqrt_15 * cu_z + 2.51557647468726 * cu_z) g_x -= g_4_3 * y * (0.918558653543692 * sqrt_15 * sq_x + 16.0090306546024 * sq_x - 9.48683298050514 * sq_y + 0.918558653543692 * sqrt_15 * sq_z + 5.33634355153414 * sq_z + 0.459279326771846 * sqrt_15 * (sq_x - sq_z)) g_x += g_4_4 * (-9.0 * x * sq_y + 2.25 * x * sq_z - 9.0 * x * sq_y + 2.25 * x * sq_z + 4.5 * cu_x) g_x -= g_4_5 * y * z * (-0.918558653543692 * sqrt_15 * x + 10.6726871030683 * x + 1.83711730708738 * sqrt_15 * x) g_x -= g_4_6 * (2.59807621135332 * sqrt_15 * x * sq_y - 0.21650635094611 * sqrt_15 * x * sq_z + 2.51557647468726 * x * sq_z + 10.0623058987491 * x * sq_y - 2.51557647468726 * x * sq_z + 0.21650635094611 * sqrt_15 * x * sq_z - 5.03115294937453 * cu_x - 0.433012701892219 * sqrt_15 * cu_x) g_x -= sqrt_15 * g_4_7 * y * z * (3.24037034920393 * x + 6.48074069840786 * x) g_x -= sqrt_15 * g_4_8 * (1.14564392373896 * x * sq_z + 4.58257569495584 * x * sq_z + 1.14564392373896 * x * sq_z - 2.29128784747792 * cu_x) g_y += sqrt_15 * g_4_1 * x * (-1.62018517460197 * sq_x + 3.24037034920393 * sq_z + 1.62018517460197 * sq_z) g_y += g_4_2 * x * z * (5.19615242270663 * sqrt_15 * y + 20.1246117974981 * y) g_y -= g_4_3 * x * (5.33634355153414 * sq_x - 28.4604989415154 * sq_y + 0.918558653543692 * sqrt_15 * sq_z + 5.33634355153414 * sq_z + 0.459279326771846 * sqrt_15 * (sq_x - sq_z)) g_y -= g_4_4 * (9.0 * sq_x * y + 9.0 * sq_x * y + 9.0 * y * sq_z + 9.0 * y * sq_z - 12.0 * cu_y) g_y -= g_4_5 * z * (0.918558653543692 * sqrt_15 * sq_x + 5.33634355153414 * sq_x - 28.4604989415154 * sq_y + 5.33634355153414 * sq_z - 0.459279326771846 * sqrt_15 * (sq_x - sq_z)) g_y -= g_4_6 * (10.0623058987491 * sq_x * y + 2.59807621135332 * sqrt_15 * y * (sq_x - sq_z) - 10.0623058987491 * y * sq_z) g_y -= sqrt_15 * g_4_7 * z * (3.24037034920393 * sq_x + 1.62018517460197 * sq_x - 1.62018517460197 * sq_z) g_z -= sqrt_15 * g_4_0 * (1.14564392373896 * cu_x - 3.43693177121688 * x * sq_z - 3.43693177121688 * x * sq_z + 1.14564392373896 * cu_x) g_z += sqrt_15 * g_4_1 * x * y * (3.24037034920393 * z + 6.48074069840786 * z) g_z -= g_4_2 * (0.21650635094611 * sqrt_15 * cu_x - 2.59807621135332 * sqrt_15 * x * sq_y - 10.0623058987491 * x * sq_y + 0.649519052838329 * sqrt_15 * x * sq_z + 7.54672942406179 * x * sq_z + 2.51557647468726 * cu_x) g_z -= g_4_3 * x * y * (-0.918558653543692 * sqrt_15 * 
z + 10.6726871030683 * z + 1.83711730708738 * sqrt_15 * z) g_z += g_4_4 * (2.25 * sq_x * z + 2.25 * sq_x * z - 9.0 * sq_y * z - 9.0 * sq_y * z + 4.5 * cu_z) g_z -= g_4_5 * y * (0.918558653543692 * sqrt_15 * sq_x + 5.33634355153414 * sq_x - 9.48683298050514 * sq_y + 0.918558653543692 * sqrt_15 * sq_z + 16.0090306546024 * sq_z - 0.459279326771846 * sqrt_15 * (sq_x - sq_z)) g_z += g_4_6 * (-0.21650635094611 * sqrt_15 * sq_x * z + 2.51557647468726 * sq_x * z - 2.51557647468726 * sq_x * z + 0.21650635094611 * sqrt_15 * sq_x * z + 2.59807621135332 * sqrt_15 * sq_y * z + 10.0623058987491 * sq_y * z - 5.03115294937453 * cu_z - 0.433012701892219 * sqrt_15 * cu_z) g_z -= sqrt_15 * g_4_7 * y * (3.24037034920393 * sq_x + 1.62018517460197 * sq_x - 4.8605555238059 * sq_z) g_z -= sqrt_15 * g_4_8 * (1.14564392373896 * sq_x * z + 4.58257569495584 * sq_x * z + 1.14564392373896 * sq_x * z - 2.29128784747792 * cu_z) tl.store(g_x_ptr + offset, g_x, mask=offset < vector_length) tl.store(g_y_ptr + offset, g_y, mask=offset < vector_length) tl.store(g_z_ptr + offset, g_z, mask=offset < vector_length)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Register Intensive" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py
77276a0a-ca29-4f47-ba40-e00bfa0d002c
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/sum/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': b_n, 'BLOCK_SIZE_K': b_k}, num_warps=w) for b_n, b_k, w in itertools.product ([(4 ** n) for n in range(7)], [(4 ** n) for n in range(4)], [2, 4, 8]) ], key=['N']) @triton.jit def triton_sum_kernel_2D_result_dim_1_buffer_then_sum(input_ptr, output_ptr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, BLOCK_SIZE_N: tl. constexpr, BLOCK_SIZE_K: tl.constexpr): pid = tl.program_id(axis=0) pid_m = pid // tl.cdiv(K, BLOCK_SIZE_K) pid_k = pid % tl.cdiv(K, BLOCK_SIZE_K) buffer = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_K), dtype=tl.float32) block_start_k = pid_k * BLOCK_SIZE_K offsets_k = block_start_k + tl.arange(0, BLOCK_SIZE_K) mask_k = offsets_k < K for block_start_n in range(0, N, BLOCK_SIZE_N): offsets_n = block_start_n + tl.arange(0, BLOCK_SIZE_N) mask_n = offsets_n < N idxs_base = offsets_n[:, None] * K + offsets_k idxs = idxs_base + pid_m * N * K mask = mask_n[:, None] & mask_k input = tl.load(input_ptr + idxs, mask=mask, other=0) buffer += input output = tl.sum(buffer, axis=0) output_offsets = pid_m * K + offsets_k tl.store(output_ptr + output_offsets, output, mask=mask_k)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py
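A minimal host-side launch sketch for the dim-1 sum kernel above, assuming a contiguous (M, N, K) float32 input; the wrapper name sum_dim1 and the sanity check are illustrative and not part of the tritonbench source, and the kernel (plus the itertools import its autotuner relies on) is assumed to be in scope.

```python
import torch
import triton

def sum_dim1(x: torch.Tensor) -> torch.Tensor:
    # Sum a contiguous (M, N, K) tensor over dim 1, producing (M, K).
    M, N, K = x.shape
    out = torch.empty((M, K), device=x.device, dtype=torch.float32)
    # One program per (row m, block of K); BLOCK_SIZE_K is picked by the autotuner.
    grid = lambda meta: (M * triton.cdiv(K, meta["BLOCK_SIZE_K"]),)
    triton_sum_kernel_2D_result_dim_1_buffer_then_sum[grid](x, out, M, N, K)
    return out

x = torch.randn(8, 128, 64, device="cuda")
torch.testing.assert_close(sum_dim1(x), x.sum(dim=1), rtol=1e-3, atol=1e-3)
```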
819bf337-44ca-4907-932b-9e1c4bbf4365
pointwise.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/pointwise.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def triton_fill_kernel(input_ptr, value: tl.constexpr, numel: tl.constexpr, dtype: tl.constexpr, block_size: tl.constexpr): block_start = tl.program_id(axis=0).to(tl.int64) * block_size offsets = block_start + tl.arange(0, block_size) mask = offsets < numel tl.store(input_ptr + offsets, tl.full((block_size,), value, dtype), mask=mask)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py
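A hypothetical wrapper for the fill kernel above, restricted to float32 tensors for brevity; the element dtype is passed to the kernel as a tl constexpr, and the wrapper name and block size are illustrative rather than taken from Fast-LLM.

```python
import torch
import triton
import triton.language as tl

def fill_(x: torch.Tensor, value: float, block_size: int = 1024) -> torch.Tensor:
    # In-place fill of a contiguous float32 tensor.
    numel = x.numel()
    grid = (triton.cdiv(numel, block_size),)
    triton_fill_kernel[grid](x, value, numel, tl.float32, block_size)
    return x

x = torch.empty(10_000, device="cuda")
fill_(x, 3.0)
assert bool((x == 3.0).all())
```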
903ee2c1-1f8e-43cb-94cc-41866b2e116f
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def calc_p_loss(input, target, size, p_loss: tl.constexpr, reduction: tl. constexpr): """ Measures the L1 or squared L2 norm of the difference between the input and target (i.e., mean absolute error or mean squared error). Args: input: Input. The input must be of shape [BLOCK_SIZE]. target: Target. The target must be of shape [BLOCK_SIZE]. size: Number of elements in the input and target. This value is used only if reduction is 'mean'. p_loss: p-norm used to compute the error. Options are 1 for MAE and 2 for MSE. reduction: Reduction strategy for the output. Options are 'none' for no reduction, 'mean' for averaging the error across all entries, and 'sum' for summing the error across all entries. Returns: Error. """ input = input.to(tl.float32) target = target.to(tl.float32) diff = input - target if p_loss == 1: error = tl.abs(diff) elif p_loss == 2: error = diff * diff if reduction == 'none': output = error elif reduction == 'mean': output = tl.sum(error) / size elif reduction == 'sum': output = tl.sum(error) return output
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
cc350db1-2b6c-4dc4-9911-76e59d19de8e
quant_per_block.py
rodjjo/editorium
editorium/app/server/pipelines/cogvideo/sageattention/quant_per_block.py
7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694
0
@triton.jit def q_kernel_per_block_int8(X, X_int8, BLK: tl.constexpr, Scale, L, C: tl. constexpr, scale_stride): off_b = tl.program_id(1) off_blk = tl.program_id(0) x_offset = off_b * L * C offs_m = off_blk * BLK + tl.arange(0, BLK) offs_k = tl.arange(0, C) x_ptrs = X + x_offset + offs_m[:, None] * C + offs_k[None, :] x_int8_ptrs = X_int8 + x_offset + offs_m[:, None] * C + offs_k[None, :] scale_ptrs = Scale + off_b * scale_stride + off_blk x = tl.load(x_ptrs, mask=offs_m[:, None] < L) x *= C ** -0.5 * 1.44269504 scale = tl.max(tl.abs(x)) / 127.0 x_int8 = x / scale x_int8 += 0.5 * tl.where(x_int8 >= 0, 1, -1) x_int8 = x_int8.to(tl.int8) tl.store(x_int8_ptrs, x_int8, mask=offs_m[:, None] < L) tl.store(scale_ptrs, scale)
{ "Data Type": [ "int8", "fp32" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/quant_per_block.py
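A hypothetical launcher for the per-block INT8 quantization kernel above, assuming a contiguous (B, L, C) query tensor whose head dimension C is a power of two; note the kernel also folds the 1/sqrt(C) * log2(e) attention scale into the values before quantizing. Names and the default block size are illustrative.

```python
import torch
import triton

def quant_q_per_block_int8(x: torch.Tensor, blk: int = 128):
    # x: (B, L, C) contiguous; returns int8 values plus one fp32 scale per block.
    B, L, C = x.shape
    n_blk = triton.cdiv(L, blk)
    x_int8 = torch.empty_like(x, dtype=torch.int8)
    scale = torch.empty((B, n_blk), device=x.device, dtype=torch.float32)
    grid = (n_blk, B)  # program_id(0) -> block along L, program_id(1) -> batch
    q_kernel_per_block_int8[grid](x, x_int8, blk, scale, L, C, scale.stride(0))
    return x_int8, scale
```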
f225f117-ee5a-49a5-a8cd-9527e6e0e161
triton_mars_adamw.py
lessw2020/MARS-AdamW-PyTorch
triton_mars_adamw.py
c312b763d079f38291492bc911e8ea8aa1967433
0
@triton.jit def mars_adamw_kernel(param_ptr, grad_ptr, exp_avg_ptr, exp_avg_sq_ptr, prev_grad_ptr, lr, beta1, beta2, eps, weight_decay, gamma, max_grad_norm, step, bias_correction1, bias_correction2, n_elements, BLOCK_SIZE: tl.constexpr): pid = tl.program_id(0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements param = tl.load(param_ptr + offsets, mask=mask) grad = tl.load(grad_ptr + offsets, mask=mask) exp_avg = tl.load(exp_avg_ptr + offsets, mask=mask) exp_avg_sq = tl.load(exp_avg_sq_ptr + offsets, mask=mask) prev_grad = tl.load(prev_grad_ptr + offsets, mask=mask) grad_diff = grad - prev_grad correction = gamma * beta1 / (1 - beta1) * grad_diff c_t = grad + correction c_t_norm = tl.sqrt(tl.sum(c_t * c_t)) scale = tl.where(c_t_norm > max_grad_norm, max_grad_norm / c_t_norm, 1.0) c_t = c_t * scale exp_avg = beta1 * exp_avg + (1 - beta1) * c_t exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * (c_t * c_t) tl.store(prev_grad_ptr + offsets, grad, mask=mask) step_size = lr / bias_correction1 denom = tl.sqrt(exp_avg_sq) / tl.sqrt(bias_correction2) + eps update = exp_avg / denom param = param - step_size * (update + weight_decay * param) tl.store(param_ptr + offsets, param, mask=mask) tl.store(exp_avg_ptr + offsets, exp_avg, mask=mask) tl.store(exp_avg_sq_ptr + offsets, exp_avg_sq, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/lessw2020/MARS-AdamW-PyTorch/blob/c312b763d079f38291492bc911e8ea8aa1967433/triton_mars_adamw.py
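A sketch of a single optimizer step using the kernel above; the hyperparameter defaults are illustrative, the bias corrections are computed on the host, and (as the code shows) the gradient-norm clipping is applied per BLOCK_SIZE chunk rather than over the whole tensor.

```python
import triton

def mars_adamw_step(p, g, exp_avg, exp_avg_sq, prev_g, step, lr=1e-3, beta1=0.9,
                    beta2=0.999, eps=1e-8, weight_decay=0.01, gamma=0.025,
                    max_grad_norm=1.0, block_size=1024):
    # All tensor arguments are flat, same-shaped CUDA tensors.
    n = p.numel()
    bc1 = 1.0 - beta1 ** step  # standard Adam bias corrections
    bc2 = 1.0 - beta2 ** step
    grid = (triton.cdiv(n, block_size),)
    mars_adamw_kernel[grid](p, g, exp_avg, exp_avg_sq, prev_g, lr, beta1, beta2,
                            eps, weight_decay, gamma, max_grad_norm, step,
                            bc1, bc2, n, BLOCK_SIZE=block_size)
```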
4282b81a-47ee-476a-b8f3-60d4b209555f
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_bwd_kernel_K(q, k, v, z, h, A, do, dh, dq, dk, dv, dA, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl. constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl. constexpr, BV: tl.constexpr): i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_p = tl.maximum(i_t * BT - 1, 0) n_bh = tl.num_programs(2) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_A = tl.make_block_ptr(A + (i_k * n_bh + i_bh) * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_A = tl.dot((b_q * scale).to(b_q.dtype), tl.trans(b_k), allow_tf32=False) b_A = tl.where(m_s, b_A, 0.0) tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1)) b_dq = tl.zeros([BT, BK], dtype=tl.float32) b_dk = tl.zeros([BT, BK], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_z = tl.make_block_ptr(z + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_zp = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), (i_p * V + i_v * BV,), (BV,), (0,)) p_zc = tl.make_block_ptr(z + i_bh * s_v_h, (T * V,), (s_v_d,), (( i_t * BT + BT - 1) * V + i_v * BV,), (BV,), (0,)) p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * K * V, (V, K), ( s_h_d, s_h_t), (i_v * BV, i_k * BK), (BV, BK), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dh = tl.make_block_ptr(dh + i_bh * s_h_h + i_t * K * V, (K, V), ( s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_k * n_bh + i_bh) * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_zp = tl.load(p_zp, boundary_check=(0,)) b_zc = tl.load(p_zc, boundary_check=(0,)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_v = tl.exp(b_v - b_zc[None, :]).to(b_v.dtype) b_z = tl.load(p_z, boundary_check=(0, 1)) b_z = tl.exp(b_zp[None, :] - b_z) b_h = tl.load(p_h, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_do = (b_do * b_z * scale).to(b_do.dtype) b_dh = tl.load(p_dh, boundary_check=(0, 1)) b_dq += tl.dot(b_do, b_h, allow_tf32=False) b_dk += tl.dot(b_v, tl.trans(b_dh), allow_tf32=False) b_dv = b_v * tl.dot(b_k, b_dh, allow_tf32=False) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dq += tl.dot(b_dA, b_k, allow_tf32=False) b_dk += tl.dot(tl.trans(b_dA).to(b_k.dtype), b_q, allow_tf32=False) p_dq = tl.make_block_ptr(dq + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings", "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
adfad45d-a068-444b-b194-56d3022e2f4a
scratch.py
falkaer/multi-scale-music
seq/scratch.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def _dropout(X, O, stride_x1, stride_x2, stride_o1, stride_o2, dropout_prob, dropout_seed, M, N, BLOCK: tl.constexpr): offs_m = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK) offs_n = tl.program_id(1) * BLOCK + tl.arange(0, BLOCK) X = X + offs_m[:, None] * stride_x1 + offs_n[None, :] * stride_x2 x = tl.load(X, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N)) offsets = offs_m[:, None] * M + offs_n[None, :] x = apply_dropout(x, offsets, dropout_prob, dropout_seed) O = O + offs_m[:, None] * stride_o1 + offs_n[None, :] * stride_o2 tl.store(O, x, mask=(offs_m[:, None] < M) & (offs_n[None, :] < N))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/scratch.py
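A possible launcher for the 2-D dropout kernel above; it assumes the apply_dropout helper referenced inside the kernel is defined in the same module (it is not shown in this record), so this is only a sketch of the grid and stride plumbing.

```python
import torch
import triton

def dropout2d(x: torch.Tensor, p: float, seed: int, block: int = 64):
    M, N = x.shape
    out = torch.empty_like(x)
    grid = (triton.cdiv(M, block), triton.cdiv(N, block))
    _dropout[grid](x, out, x.stride(0), x.stride(1), out.stride(0), out.stride(1),
                   p, seed, M, N, BLOCK=block)
    return out
```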
dbf37971-a262-44ef-989d-77aeac137b61
utils.py
huyz2023/2by4-pretrain
sparse/utils.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _sparse24(x0, x1, x2, x3): a1, a2, a3, a4, a5, a6 = tl.abs(x0) > tl.abs(x1), tl.abs(x0) > tl.abs(x2 ), tl.abs(x0) > tl.abs(x3), tl.abs(x1) > tl.abs(x2), tl.abs(x1 ) > tl.abs(x3), tl.abs(x2) > tl.abs(x3) m0, m1, m2, m3 = (a2 & a3 | a1 & a2 | a1 & a3, ~a1 & a5 | a4 & a5 | ~a1 & a4, ~a2 & ~a4 | ~a2 & a6 | ~a4 & a6, ~a3 & ~a5 | ~a3 & ~a6 | ~a5 & ~a6) return x0, x1, x2, x3, m0, m1, m2, m3
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/utils.py
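For reference, the boolean masks above pick, within each group of four values, the two with the largest magnitude (2:4 semi-structured sparsity). A PyTorch equivalent, up to tie-breaking, might look like this (illustrative only):

```python
import torch

def sparse24_reference(x0, x1, x2, x3):
    # Keep-masks for the top-2 magnitudes of every (x0, x1, x2, x3) group.
    vals = torch.stack([x0, x1, x2, x3], dim=-1)
    keep = torch.zeros_like(vals, dtype=torch.bool)
    keep.scatter_(-1, vals.abs().topk(2, dim=-1).indices, True)
    m0, m1, m2, m3 = keep.unbind(dim=-1)
    return x0, x1, x2, x3, m0, m1, m2, m3
```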
52dd01fd-ccd7-4456-9d0e-9fcf7c68fc38
GELUglu.py
huyz2023/2by4-pretrain
sparse/GELUglu.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _gelu_glu_fwd_kernel_(output_ptr, input_ptr, output_row_stride, input_row_stride, output_col_stride, input_col_stride, n_rows, n_cols, BLOCK_SIZE: tl.constexpr): col_idx = tl.program_id(0) row_idx = tl.arange(0, BLOCK_SIZE) x = tl.load(input_ptr + row_idx * input_row_stride + col_idx * input_col_stride, mask=tl.arange(0, BLOCK_SIZE) < n_rows, other=- float('inf')) gate = tl.load(input_ptr + row_idx * input_row_stride + (col_idx + n_cols // 2) * input_col_stride, mask=tl.arange(0, BLOCK_SIZE) < n_rows, other=-float('inf')) gate_cube = gate * gate * gate beta = 0.7978845608028654 kappa = 0.044715 inner = beta * (gate + kappa * gate_cube) inner_tanh = tanh(inner) gate_gelu = 0.5 * gate * (inner_tanh + 1) gelu_glu = gate_gelu * x tl.store(output_ptr + row_idx * output_row_stride + col_idx * output_col_stride, gelu_glu, mask=tl.arange(0, BLOCK_SIZE) < n_rows)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py
38d42e7c-347d-4ccf-b10b-1860c22d5877
chunk_h_parallel.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h_parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32, 64, 128] for num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=[ 'BT', 'USE_G', 'USE_GK', 'USE_GV']) @triton.jit def chunk_fwd_kernel_h_parallel(k, v, h, g, gk, gv, h0, ht, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl. constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr, USE_GV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) NV = tl.cdiv(V, BV) i_k, i_v = i_kv // NV, i_kv % NV i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_tg = i_t i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NT = tl.cdiv(T, BT) else: bos, eos = i_b * T, i_b * T + T NT = tl.cdiv(T, BT) i_n, i_tg = i_b, i_b * NT + i_t i_nh = i_n * H + i_h if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (V, 1 ), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) if i_t == 0: if USE_INITIAL_STATE: p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), ( i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32) else: b_h = tl.zeros([BK, BV], dtype=tl.float32) tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) last_idx = min(i_t * BT + BT, T) - 1 if USE_G: if HEAD_FIRST: b_g_last = tl.load(g + i_bh * T + last_idx) p_g = g + i_bh * T + i_t * BT + tl.arange(0, BT) p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT) else: b_g_last = tl.load(g + bos * H + last_idx * H + i_h) p_g = g + bos * H + (i_t * BT + tl.arange(0, BT)) * H + i_h b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0) b_v = (b_v * tl.exp(b_g_last - b_g)[:, None]).to(b_v.dtype) if USE_GK: if HEAD_FIRST: p_gk = tl.make_block_ptr(gk + i_bh * T * K, (K, T), (1, K), ( i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = (gk + i_bh * T * K + last_idx * K + i_k * BK + tl. 
arange(0, BK)) else: p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = gk + (bos + last_idx ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK) p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK) b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) < K, other=0.0) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_k = (b_k * tl.exp(b_gk_last[:, None] - b_gk)).to(b_k.dtype) if USE_GV: if HEAD_FIRST: p_gv = tl.make_block_ptr(gv + i_bh * T * V, (T, V), (V, 1), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = (gv + i_bh * T * V + last_idx * V + i_v * BV + tl. arange(0, BV)) else: p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = gv + (bos + last_idx ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV) p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV) b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) < V, other=0.0) b_gv = tl.load(p_gv, boundary_check=(0, 1)) b_v = (b_v * tl.exp(b_gv_last[None, :] - b_gv)).to(b_v.dtype) b_h = tl.dot(b_k, b_v) if i_t < NT - 1: if HEAD_FIRST: p_h = tl.make_block_ptr(h + (i_bh * NT + i_t + 1) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_h = tl.make_block_ptr(h + ((i_tg + 1) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1)) elif STORE_FINAL_STATE: p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups", "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_parallel.py
fa2c1086-b9be-4a8b-ae3e-2b5cda3eb8d6
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def prepare_qg_kg(q, k, g, qg, kg, s_k_h, scale, K: tl.constexpr, BT: tl. constexpr, BK: tl.constexpr): i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) p_q = q + i_bh * s_k_h + i_c * BT * K + i_k * BK + tl.arange(0, BK) p_g = g + i_bh * s_k_h + i_c * BT * K + i_k * BK + tl.arange(0, BK) p_k = k + i_bh * s_k_h + i_c * BT * K + i_k * BK + tl.arange(0, BK) p_qg = qg + i_bh * s_k_h + i_c * BT * K + i_k * BK + tl.arange(0, BK) p_kg = kg + i_bh * s_k_h + i_c * BT * K + i_k * BK + tl.arange(0, BK) mask = i_k * BK + tl.arange(0, BK) < K last_decay = tl.load(g + i_bh * s_k_h + (i_c * BT + BT - 1) * K + i_k * BK + tl.arange(0, BK)) for i in range(BT): b_q = tl.load(p_q, mask=mask, other=0) b_k = tl.load(p_k, mask=mask, other=0) _g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_q *= tl.exp(_g) * scale b_k *= tl.exp(last_decay - _g) tl.store(p_kg, b_k.to(p_kg.dtype.element_ty), mask=mask) tl.store(p_qg, b_q.to(p_qg.dtype.element_ty), mask=mask) p_q += K p_g += K p_k += K p_kg += K p_qg += K
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py
c79e3bed-77f3-4997-8cb9-825857d15dba
blocksparse_attention_kernel.py
Charlie-XIAO/sparse-vllm
vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py
d228909a30b0c245c35417fb7d2acdf9a3690042
0
@triton.heuristics({'M_LT_N': lambda kwargs: kwargs['BLOCK_M'] < kwargs[ 'BLOCK_N']}) @triton.jit def _fwd_kernel_batch_inference(Q, K, V, Out, sm_scale, q_batch_starts, q_batch_ends, k_batch_starts, k_batch_ends, q_batch_ids, q_start_sids, stride_qb, stride_qt, stride_qh, stride_qd, stride_kb, stride_kt, stride_kh, stride_kd, stride_vb, stride_vt, stride_vh, stride_vd, stride_ob, stride_ot, stride_oh, stride_od, layout_crow_ptr, layout_col_ptr, layout_crow_stride_h, layout_crow_stride_m, layout_col_stride_h, layout_col_stride_m, q_k_ratio, HAS_BATCH_DIM: tl. constexpr, D_HEAD: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl. constexpr, BLOCK_D: tl.constexpr, BLOCK_M_LOADING: tl.constexpr, EVEN_D: tl.constexpr, M_LT_N: tl.constexpr): """ NOTATION: pid: position id sid: storage id sbid: storage block id pbid: position block id offs_m, offs_n: storage offsets of m-dim(q, row) and n-dim(k, col) TODO(linxihui): Optimize grouped-attn """ off_zm = tl.program_id(0) off_h = tl.program_id(1) off_h_for_kv = off_h // q_k_ratio if HAS_BATCH_DIM: off_z = tl.program_id(2) Q += off_z * stride_qb K += off_z * stride_kb V += off_z * stride_vb Out += off_z * stride_ob start_m = off_zm q_start_sid = start_m * BLOCK_M else: off_z = tl.load(q_batch_ids + off_zm).to(tl.int32) q_start_sid = tl.load(q_start_sids + off_zm) start_m = q_start_sid // BLOCK_M offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M_LOADING) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_D) q_cu_start = tl.load(q_batch_starts + off_z).to(tl.int32) q_seqlen = tl.load(q_batch_ends + off_z).to(tl.int32) - q_cu_start k_cu_start = tl.load(k_batch_starts + off_z).to(tl.int32) k_seqlen = tl.load(k_batch_ends + off_z).to(tl.int32) - k_cu_start past_len = k_seqlen - q_seqlen Q += q_cu_start * stride_qt + off_h * stride_qh K += k_cu_start * stride_kt + off_h_for_kv * stride_kh V += k_cu_start * stride_vt + off_h_for_kv * stride_vh Out += q_cu_start * stride_ot + off_h * stride_oh q_pbid = (past_len + q_start_sid) // BLOCK_M if EVEN_D: q = tl.load(Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=offs_m[:, None] < q_seqlen) else: q = tl.load(Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=(offs_m[:, None] < q_seqlen) & (offs_d[None, :] < D_HEAD), other=0) sparse_crow_ptr = (layout_crow_ptr + off_h * layout_crow_stride_h + q_pbid * layout_crow_stride_m) k_block_start = tl.load(sparse_crow_ptr).to(tl.int32) k_block_end = tl.load(sparse_crow_ptr + 1).to(tl.int32) m_i = tl.zeros([BLOCK_M_LOADING], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M_LOADING], dtype=tl.float32) acc = tl.zeros([BLOCK_M_LOADING, BLOCK_D], dtype=tl.float32) k_ptrs = K + offs_n[None, :] * stride_kt + offs_d[:, None] * stride_kd v_ptrs = V + offs_n[:, None] * stride_vt + offs_d[None, :] * stride_vd sm_scale *= 1.44269504 for k_block_col_idx in range(k_block_start, k_block_end - 1): acc, l_i, m_i = _fwd_kernel_inner(acc, l_i, m_i, q, Q, k_block_col_idx, layout_col_ptr, layout_col_stride_h, layout_col_stride_m, k_ptrs, v_ptrs, off_h, offs_m, offs_n, offs_d, stride_kt, stride_vt, sm_scale, k_seqlen, past_len, False, BLOCK_M_LOADING, BLOCK_N, D_HEAD, EVEN_D, M_LT_N) acc, l_i, m_i = _fwd_kernel_inner(acc, l_i, m_i, q, Q, k_block_end - 1, layout_col_ptr, layout_col_stride_h, layout_col_stride_m, k_ptrs, v_ptrs, off_h, offs_m, offs_n, offs_d, stride_kt, stride_vt, sm_scale, k_seqlen, past_len, True, BLOCK_M_LOADING, BLOCK_N, D_HEAD, EVEN_D, M_LT_N) m_i += tl.math.log2(l_i) acc = acc / l_i[:, None] if EVEN_D: tl.store(Out + offs_m[:, 
None] * stride_ot + offs_d[None, :] * stride_od, acc, mask=offs_m[:, None] < q_seqlen) else: tl.store(Out + offs_m[:, None] * stride_ot + offs_d[None, :] * stride_od, acc, mask=(offs_m[:, None] < q_seqlen) & (offs_d[ None, :] < D_HEAD))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py
c300b4e5-1dbd-46a8-8b9f-f7553220381b
activations.py
sustcsonglin/flash-linear-attention
fla/modules/activations.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['D']) @triton.jit def logsigmoid_bwd_kernel(x, dx, dy, temperature, T: tl.constexpr, D: tl. constexpr, B: tl.constexpr): i = tl.program_id(0) o_i = i * B + tl.arange(0, B) m_i = o_i < T b_x = tl.load(x + o_i, mask=m_i, other=0.0).to(tl.float32) b_dy = tl.load(dy + o_i, mask=m_i, other=0.0).to(tl.float32) b_dx = b_dy * (1.0 - tl.sigmoid(b_x)) / temperature tl.store(dx + o_i, b_dx.to(dx.dtype.element_ty), mask=m_i)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/activations.py
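A minimal backward launcher for the log-sigmoid gradient kernel above, treating x and dy as flat tensors of T elements; the wrapper name and block size B are illustrative (B must be a power of two for tl.arange).

```python
import torch
import triton

def logsigmoid_bwd(x: torch.Tensor, dy: torch.Tensor, temperature: float = 1.0,
                   b: int = 2048) -> torch.Tensor:
    T, D = x.numel(), x.shape[-1]
    dx = torch.empty_like(x)
    grid = (triton.cdiv(T, b),)
    logsigmoid_bwd_kernel[grid](x, dx, dy, temperature, T=T, D=D, B=b)
    return dx
```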
b43d8c40-6f78-4266-94b9-fa7ef9d6e79b
fused_attention.py
jax-ml/jax-triton
examples/fused_attention.py
859cc392bec876d132bd0790ea6c00b6c246dd2b
0
@triton.jit def fused_attention_kernel(Q, K, V, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, L, M, Out, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr): start_m = tl.program_id(0) off_hz = tl.program_id(1) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, : ] * stride_qk off_k = off_hz * stride_qh + offs_n[None, :] * stride_kn + offs_d[:, None ] * stride_kk off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, : ] * stride_qk q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_prev = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) q = tl.load(q_ptrs) for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N): k = tl.load(k_ptrs) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k) m_curr = tl.maximum(tl.max(qk, 1), m_prev) l_prev *= tl.exp(m_prev - m_curr) p = tl.exp(qk - m_curr[:, None]) l_curr = tl.sum(p, 1) + l_prev l_rcp = 1.0 / l_curr p *= l_rcp acc *= (l_prev * l_rcp)[:, None] p = p.to(tl.float16) v = tl.load(v_ptrs) acc += tl.dot(p, v) l_prev = l_curr m_prev = m_curr k_ptrs += BLOCK_N * stride_kn v_ptrs += BLOCK_N * stride_vk start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_prev) tl.store(m_ptrs, m_prev) offs_n = tl.arange(0, BLOCK_DMODEL) off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, : ] * stride_on out_ptrs = Out + off_o tl.store(out_ptrs, acc)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/examples/fused_attention.py
4cfcaf22-7a0b-419b-aee9-80c0e072e341
p_loss_kernels.py
BobMcDear/attorch
attorch/p_loss_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=element_wise_kernel_configs(), key=['size']) @triton.jit def p_loss_forward_kernel(input_pointer, target_pointer, output_pointer, size, p_loss: tl.constexpr, reduction: tl.constexpr, BLOCK_SIZE: tl. constexpr): """ Measures the L1 or squared L2 norm of the difference between the input and target (i.e., mean absolute error or mean squared error). Args: input_pointer: Pointer to the input. The input must be of shape [size]. target_pointer: Pointer to the target. The target must be of shape [size]. output_pointer: Pointer to a container the error is written to. The container must be of shape [size] if reduction is 'none', and otherwise of shape [size/BLOCK_SIZE]. size: Number of elements in the input and target. p_loss: p-norm used to compute the error. Options are 1 for MAE and 2 for MSE. reduction: Reduction strategy for the output. Options are 'none' for no reduction, 'mean' for averaging the error across all entries, and 'sum' for summing the error across all entries. If a reduction method is specified, the reduced result of each program is written to a separate index in the output container, which should later be summed. BLOCK_SIZE: Block size. """ pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size input = tl.load(input_pointer + offset, mask=mask).to(tl.float32) target = tl.load(target_pointer + offset, mask=mask).to(tl.float32) diff = input - target if p_loss == 1: error = tl.abs(diff) elif p_loss == 2: error = diff * diff if reduction == 'none': tl.store(output_pointer + offset, error, mask=mask) elif reduction == 'mean': tl.store(output_pointer + pid, tl.sum(error) / size) elif reduction == 'sum': tl.store(output_pointer + pid, tl.sum(error))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/p_loss_kernels.py
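As a plain PyTorch reference for the semantics documented above (illustrative only): with reduction 'mean' or 'sum' the Triton kernel writes one partial result per program, and the caller still has to sum those partials to obtain the value this reference returns.

```python
import torch

def p_loss_reference(inp: torch.Tensor, tgt: torch.Tensor, p_loss: int = 1,
                     reduction: str = "mean") -> torch.Tensor:
    error = (inp - tgt).abs() if p_loss == 1 else (inp - tgt) ** 2
    if reduction == "none":
        return error
    return error.mean() if reduction == "mean" else error.sum()
```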
06715f51-47ba-4d5e-b5b1-762ebe00b772
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_bwd_kernel_rcum_inter(s, z, ss, doo, s_s_h, s_s_t, s_s_d, T: tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr, NT: tl.constexpr): i_m, i_bh = tl.program_id(0), tl.program_id(1) b_sp = tl.zeros([BS], dtype=tl.float32) b_zp = tl.full([BS], float('inf'), dtype=tl.float32) for i_t in range(NT - 1, -1, -1): p_s = tl.make_block_ptr(s + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), ( i_t * BT, i_m * BS), (BT, BS), (1, 0)) p_z = tl.make_block_ptr(z + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), ( i_t * BT, i_m * BS), (BT, BS), (1, 0)) p_zc = tl.make_block_ptr(z + i_bh * s_s_h, (T * S,), (s_s_d,), (i_t * BT * S + i_m * BS,), (BS,), (0,)) p_ss = tl.make_block_ptr(ss + i_bh * s_s_h, (T, S), (s_s_t, s_s_d), (i_t * BT, i_m * BS), (BT, BS), (1, 0)) p_doo = tl.make_block_ptr(doo + i_bh * s_s_h, (T, S), (s_s_t, s_s_d ), (i_t * BT, i_m * BS), (BT, BS), (1, 0)) b_zc = tl.load(p_zc, boundary_check=(0,)) b_s = tl.load(p_s, boundary_check=(0, 1)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_ss = tl.load(p_ss, boundary_check=(0, 1)) b_doo = tl.exp(b_s - b_zp[None, :]) * b_sp[None, :] tl.store(p_doo, b_doo.to(p_doo.dtype.element_ty), boundary_check=(0, 1) ) b_sp = b_sp * tl.exp(b_zc - b_zp) + tl.sum(b_ss * tl.exp(b_zc[None, :] - b_z), 0) b_zp = b_zc
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
a7157c38-ed86-4b92-a574-24221d10c58b
RzLinearBackward.py
apd10/RzLinear
python/rz_linear/impl/RzLinearBackward.py
eb56657b2de0a97f398f88af421b0fbcbc5469c9
0
@triton.jit def rz_linear_backward_input_grad_core(a_ptr, b_ptr, c_ptr, init_factor, M, N, K, H, stride_am, stride_an, stride_cm, stride_ck, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int, allow_tf32: tl. constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr): """Kernel for computing the matmul C = (A x B^T) A has shape (M, N), B has shape H->(K, N) and C has shape (M, K) """ pid = tl.program_id(axis=0) num_pid_k = tl.cdiv(K, BLOCK_SIZE_K) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) pid_m = pid // num_pid_k pid_k = pid % num_pid_k offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_an = tl.arange(0, BLOCK_SIZE_N) a_ptrs = a_ptr + offs_am[:, None] * stride_am + offs_an[None, : ] * stride_an b_offset = b_ptr + tl.arange(0, BLOCK_SIZE_N)[:, None] + tl.arange(0, BLOCK_SIZE_K)[None, :] * BLOCK_SIZE_N b_ptrs = b_offset + ((pid_k * R3 + 0 * R2 + R1) % R0 * R0 + (pid_k * R7 + 0 * R5 + R4) % R0) % (H - BLOCK_SIZE_K * BLOCK_SIZE_N) offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K) a_zero = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) b_zero = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_K), dtype=tl.float32) c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32) for n in range(0, tl.cdiv(N, BLOCK_SIZE_N)): offs_n = n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) a_mask = (offs_cm[:, None] < M) & (offs_n[None, :] < N) b_mask = (offs_n[:, None] < N) & (offs_ck[None, :] < K) a = tl.load(a_ptrs, mask=a_mask, other=a_zero) b = tl.load(b_ptrs, mask=b_mask, other=b_zero) c += tl.dot(a, b, allow_tf32=allow_tf32) a_ptrs += BLOCK_SIZE_N * stride_an b_ptrs = b_offset + ((pid_k * R3 + (n + 1) * R2 + R1) % R0 * R0 + ( pid_k * R7 + (n + 1) * R5 + R4) % R0) % (H - BLOCK_SIZE_K * BLOCK_SIZE_N) offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K) offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_ck * offs_ck[None, : ] c_mask = (offs_cm[:, None] < M) & (offs_ck[None, :] < K) tl.store(c_ptrs, c * init_factor, mask=c_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Coalesced", "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py
05d08524-53cf-4f3e-9cb8-78a7df7efc34
y_6.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_6.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def sixth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_striding = tl.arange(0, block_size) * coord_stride coord_row_offset = coord_striding + block_size * coord_stride * block_id x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel) z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel) output_striding = tl.arange(0, block_size) * output_stride output_row_offset = (output_striding + block_size * output_stride * block_id + col_offset) g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset < output_numel) g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask= output_row_offset + 1 < output_numel) g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask= output_row_offset + 2 < output_numel) g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask= output_row_offset + 3 < output_numel) g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask= output_row_offset + 4 < output_numel) g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask= output_row_offset + 5 < output_numel) g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask= output_row_offset + 6 < output_numel) g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask= output_row_offset + 7 < output_numel) g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask= output_row_offset + 8 < output_numel) g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask= output_row_offset + 9 < output_numel) g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask= output_row_offset + 10 < output_numel) g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask= output_row_offset + 11 < output_numel) g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask= output_row_offset + 12 < output_numel) CONST000 = 2.0 CONST002 = 4.0 CONST003 = 3.0 CONST004 = 6.53117523880657 CONST006 = 8.94318001328386 CONST007 = 8.38944649544891 CONST008 = 10.3266947761614 CONST009 = 9.79676285820985 CONST013 = 16.3279380970164 CONST014 = 17.8863600265677 CONST015 = 16.5227116418583 CONST016 = 20.6533895523229 CONST017 = 20.2812259244849 CONST018 = 21.6333076527839 CONST020 = 17.8863600265677 CONST022 = 29.3902885746295 CONST024 = 35.7727200531355 CONST026 = 40.5624518489699 CONST028 = 41.9472324772445 CONST029 = 48.9838142910493 CONST030 = 51.6334738808072 CONST035 = 71.5454401062709 CONST037 = 81.1249036979398 CONST039 = 82.6135582092915 CONST040 = -3.26558761940328 CONST042 = 117.561154298518 CONST046 = 208.99760764181 CONST048 = -251.683394863467 CONST049 = -214.636320318813 CONST050 = -214.636320318813 CONST051 = 16.5227116418583 CONST052 = -167.788929908978 CONST053 = -156.748205731358 CONST054 = -145.309475774982 CONST055 = -123.920337313937 CONST056 = -117.561154298518 CONST057 = 3.26558761940328 CONST058 = -108.16653826392 CONST059 = -107.318160159406 CONST060 = -104.498803820905 CONST061 = -104.498803820905 CONST062 = -83.8944649544891 CONST063 = -82.6135582092915 CONST064 = -78.3741028656788 CONST065 = -72.6547378874909 CONST066 = -71.5454401062709 CONST067 = -58.7805771492591 CONST068 = -54.0832691319598 CONST069 = -52.2494019104525 CONST070 = -52.2494019104525 CONST071 = -48.9838142910492 CONST072 = -41.3067791046458 CONST073 = -39.1870514328394 CONST074 = 
-35.7727200531355 CONST075 = -29.3902885746295 CONST076 = -27.0416345659799 CONST077 = -26.1247009552263 CONST078 = -26.1247009552263 CONST079 = -19.5935257164197 CONST080 = -14.5309475774982 CONST081 = -13.52081728299 CONST082 = -10.7318160159406 CONST083 = -9.79676285820985 CONST084 = -7.15454401062709 CONST085 = -6.76040864149498 CONST086 = -3.38020432074749 CONST087 = -1.63279380970164 VAR07 = x * x * x VAR08 = x * x VAR05 = VAR07 * VAR08 VAR06 = VAR08 * VAR08 VAR16 = y * y * y VAR17 = y * y VAR14 = VAR16 * VAR17 VAR15 = VAR17 * VAR17 VAR25 = z * z * z VAR26 = z * z VAR23 = VAR25 * VAR26 VAR24 = VAR26 * VAR26 g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask= coord_row_offset + 1 < coord_numel) g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask= coord_row_offset + 2 < coord_numel) g_x += g_0 * (CONST054 * VAR08 * VAR25 - CONST065 * VAR06 * z - CONST080 * VAR23) + g_1 * y * (CONST028 * VAR06 + CONST028 * VAR24 + CONST048 * VAR08 * VAR26) + g_10 * (CONST000 * x * (CONST006 * VAR24 + CONST059 * VAR17 * VAR26) + CONST002 * VAR07 * (CONST006 * VAR26 + CONST014 * VAR17) + CONST082 * VAR05) + g_11 * y * (- CONST052 * VAR07 * z + CONST052 * VAR25 * x) + g_12 * (-CONST054 * VAR07 * VAR26 + CONST065 * VAR24 * x + CONST080 * VAR05) + g_2 * (- CONST074 * VAR06 * z + CONST084 * VAR23 + VAR17 * (CONST049 * VAR08 * z - CONST066 * VAR25)) + g_3 * (VAR16 * (CONST064 * VAR08 - CONST064 * VAR26) + y * (CONST029 * VAR06 + CONST067 * VAR08 * VAR26 + CONST075 * VAR24)) + g_4 * (CONST003 * VAR08 * (CONST004 * VAR25 + CONST069 * VAR17 * z) + CONST013 * VAR06 * z - CONST040 * VAR23 - CONST070 * VAR15 * z + CONST070 * VAR17 * VAR25) + g_5 * ( CONST003 * VAR08 * (CONST016 * VAR26 * y + CONST072 * VAR16) + CONST008 * VAR24 * y + CONST015 * VAR14 + CONST030 * VAR06 * y + CONST072 * VAR16 * VAR26) + g_6 * (CONST000 * x * (CONST026 * VAR17 * VAR26 + CONST076 * VAR15 + CONST086 * VAR24) + CONST002 * VAR07 * ( CONST017 * VAR17 + CONST086 * VAR26) + CONST085 * VAR05) + g_7 * (- CONST072 * VAR25 * x * y + z * (CONST063 * VAR16 * x - CONST072 * VAR07 * y)) + g_8 * (CONST000 * x * (CONST077 * VAR15 - CONST087 * VAR24) + CONST002 * VAR07 * (-CONST077 * VAR17 + CONST087 * VAR26) + CONST083 * VAR05) + g_9 * (CONST053 * VAR16 * x * z + y * (CONST042 * VAR07 * z - CONST073 * VAR25 * x)) g_y += CONST000 * g_2 * y * (CONST066 * VAR07 * z - CONST066 * VAR25 * x ) + g_1 * (CONST007 * VAR05 + CONST028 * VAR24 * x + CONST062 * VAR07 * VAR26) + g_10 * (CONST024 * VAR06 * y + CONST050 * VAR08 * VAR26 * y - CONST074 * VAR24 * y) + g_11 * (CONST007 * VAR23 + CONST028 * VAR06 * z + CONST062 * VAR08 * VAR25) + g_3 * (CONST003 * VAR17 * (-CONST064 * VAR26 * x + CONST078 * VAR07) + CONST009 * VAR05 + CONST075 * VAR24 * x + CONST079 * VAR07 * VAR26) + g_4 * ( CONST061 * VAR07 * y * z + x * (CONST046 * VAR16 * z + CONST060 * VAR25 * y)) + g_5 * (CONST008 * VAR05 + VAR07 * (CONST016 * VAR26 + CONST055 * VAR17) + x * (CONST008 * VAR24 + CONST055 * VAR17 * VAR26 - CONST063 * VAR15)) + g_6 * (CONST018 * VAR14 + CONST026 * VAR06 * y + CONST026 * VAR24 * y + CONST058 * VAR16 * VAR26 + VAR08 * (CONST037 * VAR26 * y + CONST058 * VAR16)) + g_7 * (CONST008 * VAR23 + VAR25 * (CONST016 * VAR08 + CONST055 * VAR17) + z * ( CONST008 * VAR06 + CONST039 * VAR15 + CONST055 * VAR08 * VAR17) ) + g_8 * (CONST060 * VAR08 * VAR16 - CONST060 * VAR16 * VAR26 + CONST069 * VAR24 * y - CONST070 * VAR06 * y) + g_9 * (CONST003 * VAR17 * (CONST064 * VAR08 * z - CONST077 * 
VAR25) + CONST022 * VAR06 * z - CONST079 * VAR08 * VAR25 + CONST083 * VAR23) g_z += g_0 * (CONST054 * VAR07 * VAR26 - CONST065 * VAR24 * x - CONST080 * VAR05) + g_1 * y * (CONST052 * VAR07 * z - CONST052 * VAR25 * x) + g_10 * (CONST020 * VAR06 * z + CONST035 * VAR17 * VAR25 + CONST082 * VAR23 + VAR08 * (CONST050 * VAR17 * z - CONST074 * VAR25)) + g_11 * y * (CONST028 * VAR06 + CONST028 * VAR24 + CONST048 * VAR08 * VAR26) + g_12 * (CONST054 * VAR08 * VAR25 - CONST065 * VAR06 * z - CONST080 * VAR23) + g_2 * (CONST074 * VAR24 * x - CONST084 * VAR05 + VAR17 * (-CONST049 * VAR26 * x + CONST066 * VAR07)) + g_3 * (-CONST053 * VAR16 * x * z + y * (CONST056 * VAR25 * x + CONST073 * VAR07 * z)) + g_4 * (CONST057 * VAR05 + VAR07 * ( CONST069 * VAR17 - CONST079 * VAR26) + x * (CONST013 * VAR24 + CONST053 * VAR17 * VAR26 - CONST070 * VAR15)) + g_5 * (-CONST072 * VAR07 * y * z + x * (CONST063 * VAR16 * z - CONST072 * VAR25 * y) ) + g_6 * (CONST037 * VAR17 * VAR25 + CONST068 * VAR15 * z + CONST085 * VAR06 * z + CONST085 * VAR23 + VAR08 * (CONST037 * VAR17 * z + CONST081 * VAR25)) + g_7 * (CONST003 * VAR26 * (CONST016 * VAR08 * y + CONST072 * VAR16) + CONST008 * VAR06 * y + CONST030 * VAR24 * y + CONST051 * VAR14 + CONST072 * VAR08 * VAR16) + g_8 * ( CONST004 * VAR08 * VAR25 + CONST040 * VAR06 * z + CONST061 * VAR17 * VAR25 - CONST070 * VAR15 * z - CONST083 * VAR23) + g_9 * (VAR16 * ( CONST064 * VAR08 - CONST064 * VAR26) + y * (CONST022 * VAR06 - CONST067 * VAR08 * VAR26 + CONST071 * VAR24)) tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask= coord_row_offset + 1 < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask= coord_row_offset + 2 < coord_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_6.py
a0f70947-0554-494b-86bd-c3544da457a5
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv6/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BK', 'NC', 'BT']) @triton.jit def chunk_rwkv6_bwd_kernel_intra(q, k, gi, ge, dA, dq, dk, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC: tl.constexpr, USE_OFFSETS: tl. constexpr, HEAD_FIRST: tl.constexpr): i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H i_t, i_i = i_c // NC, i_c % NC if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) else: bos, eos = i_b * T, i_b * T + T T = eos - bos if i_t * BT + i_i * BC >= T: return o_k = i_k * BK + tl.arange(0, BK) m_k = o_k < K if HEAD_FIRST: p_ge = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: p_ge = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) b_ge = tl.load(p_ge, boundary_check=(0, 1)) b_dq = tl.zeros([BC, BK], dtype=tl.float32) if i_i > 0: if HEAD_FIRST: p_gn = tl.max_contiguous(tl.multiple_of(gi + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) else: p_gn = tl.max_contiguous(tl.multiple_of(gi + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) b_gn = tl.load(p_gn, mask=m_k, other=0) for i_j in range(0, i_i): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(gi + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(gi + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_kg = b_k * tl.exp(b_gn[None, :] - b_gk) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dq += tl.dot(b_dA, b_kg) b_dq *= tl.exp(b_ge - b_gn[None, :]) o_i = tl.arange(0, BC) m_dA = i_t * BT + i_i * BC + tl.arange(0, BC) < T if HEAD_FIRST: o_dA = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC) ) * BT + i_i * BC p_kj = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_gkj = tl.max_contiguous(tl.multiple_of(gi + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: o_dA = bos * H * BT + (i_t * BT + i_i * BC + tl.arange(0, BC) ) * H * BT + i_h * BT + i_i * BC p_kj = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_gkj = tl.max_contiguous(tl.multiple_of(gi + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) for j in range(0, min(BC, T - i_t * BT - i_i 
* BC)): b_dA = tl.load(dA + o_dA + j, mask=m_dA, other=0) b_kj = tl.load(p_kj, mask=m_k, other=0).to(tl.float32) b_gkj = tl.load(p_gkj, mask=m_k, other=0).to(tl.float32) m_i = o_i[:, None] > j b_dq += tl.where(m_i, b_dA[:, None] * b_kj[None, :] * tl.exp(b_ge - b_gkj[None, :]), 0.0) p_kj += K if HEAD_FIRST else H * K p_gkj += K if HEAD_FIRST else H * K tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.debug_barrier() if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(gi + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(gi + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_dk = tl.zeros([BC, BK], dtype=tl.float32) NC = min(NC, tl.cdiv(T - i_t * BT, BC)) if i_i < NC - 1: if HEAD_FIRST: p_gn = tl.max_contiguous(tl.multiple_of(gi + i_bh * T * K + ( i_t * BT + i_i * BC + BC - 1) * K + o_k, BK), BK) else: p_gn = tl.max_contiguous(tl.multiple_of(gi + bos * H * K + (i_t * BT + i_i * BC + BC - 1) * H * K + i_h * K + o_k, BK), BK) b_gn = tl.load(p_gn, mask=m_k, other=0) for i_j in range(i_i + 1, NC): if HEAD_FIRST: p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_ge = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (BT, T), (1, BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1)) else: p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_ge = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + (bos * H + i_h) * BT, (BT, T), (1, H * BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_ge = tl.load(p_ge, boundary_check=(0, 1)) b_qg = b_q * tl.exp(b_ge - b_gn[None, :]) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dk += tl.dot(b_dA, b_qg) b_dk *= tl.exp(b_gn[None, :] - b_gk) if HEAD_FIRST: o_dA = i_bh * T * BT + (i_t * BT + i_i * BC ) * BT + i_i * BC + tl.arange(0, BC) p_qj = tl.max_contiguous(tl.multiple_of(q + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_gqj = tl.max_contiguous(tl.multiple_of(ge + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: o_dA = bos * H * BT + (i_t * BT + i_i * BC ) * H * BT + i_h * BT + i_i * BC + tl.arange(0, BC) p_qj = tl.max_contiguous(tl.multiple_of(q + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_gqj = tl.max_contiguous(tl.multiple_of(ge + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) for j in range(0, min(BC, T - i_t * BT - i_i * BC)): b_dA = tl.load(dA + o_dA + j * (1 if HEAD_FIRST else H) * BT) b_qj = tl.load(p_qj, mask=m_k, other=0).to(tl.float32) b_gqj = tl.load(p_gqj, mask=m_k, other=0).to(tl.float32) m_i = o_i[:, None] < j b_dk += tl.where(m_i, b_dA[:, None] * b_qj[None, :] * 
tl.exp(b_gqj[ None, :] - b_gk), 0.0) p_qj += K if HEAD_FIRST else H * K p_gqj += K if HEAD_FIRST else H * K tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py
7dce9668-e8af-4269-bead-36df38944bcf
test_trampolines.py
makslevental/triton-pp
tests/test_trampolines.py
e2b3e2a35d96007fa1ae129432cf8e99f44588a1
0
@triton.jit def kernel_0123(): c64 = arith.constant(64) v0 = tl.get_program_id(axis='x') air.channel('bob')
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/makslevental/triton-pp/blob/e2b3e2a35d96007fa1ae129432cf8e99f44588a1/tests/test_trampolines.py
4c1cbb13-c951-420c-9482-b1b3dfa04e72
flash_attn_v2.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/flash_attn_v2.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _triton_attn_fwd(Q, K, V, sm_scale, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_km, stride_kk, stride_vz, stride_vh, stride_vm, stride_vk, stride_oz, stride_oh, stride_om, stride_ok, Z, H, N_CTX, POWER_OF_2_N_CTX: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl. constexpr, STAGE: tl.constexpr, GROUPS: tl.constexpr, ORDER_12: tl. constexpr): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_z = off_hz // H off_h = off_hz % H off_g = tl.program_id(2) q_offset = off_z.to(tl.int64) * stride_qz + (off_h * GROUPS + off_g).to(tl .int64) * stride_qh k_offset = off_z.to(tl.int64) * stride_kz + off_h.to(tl.int64) * stride_kh v_offset = off_z.to(tl.int64) * stride_vz + off_h.to(tl.int64) * stride_vh o_offset = off_z.to(tl.int64) * stride_oz + (off_h * GROUPS + off_g).to(tl .int64) * stride_oh Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) V_block_ptr = tl.make_block_ptr(base=V + v_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_vm, stride_vk), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K + k_offset, shape=(BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_km), offsets=(0, 0), block_shape =(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_ok), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0 acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) qk_scale = sm_scale qk_scale *= 1.44269504 q = tl.load(Q_block_ptr, boundary_check=(0, 1)) if ORDER_12: if STAGE & 1: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N, 4 - STAGE, offs_m, offs_n, N_CTX) if STAGE & 2: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N, 2, offs_m, offs_n, N_CTX) else: if STAGE & 2: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N, 2, offs_m, offs_n, N_CTX) if STAGE & 1: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, BLOCK_DMODEL, BLOCK_N, 4 - STAGE, offs_m, offs_n, N_CTX) acc = acc / l_i[:, None] tl.store(O_block_ptr, acc.to(Out.type.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/flash_attn_v2.py
5d6abe36-b7ab-46ee-8bc8-8b128a0ced17
mhmoe_bwd.py
dtadpole/triton-playground
mhmoe_bwd.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit def d_leacky_relu_inv_backward(x): return tl.where(x >= 0, 1.0, 0.01)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe_bwd.py
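The kernel above is just the closed-form derivative of a leaky ReLU with slope 0.01 on the negative side. A minimal PyTorch cross-check of that formula, sketched here with an assumed test tensor (not part of the repo):

import torch

def d_leaky_relu_ref(x: torch.Tensor, negative_slope: float = 0.01) -> torch.Tensor:
    # derivative of leaky_relu: 1 where x >= 0, negative_slope elsewhere
    return torch.where(x >= 0, torch.ones_like(x), torch.full_like(x, negative_slope))

x = torch.randn(1024)
x_req = x.clone().requires_grad_(True)
torch.nn.functional.leaky_relu(x_req, negative_slope=0.01).sum().backward()
# autograd agrees with the closed form away from x == 0
assert torch.allclose(x_req.grad[x != 0], d_leaky_relu_ref(x)[x != 0])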
ded8c553-2ca2-4e7d-9912-b022a9f954cd
seqlen_utils.py
Kitsunetic/kitsu
kitsu/nn/seqlen_utils.py
826967a493c89753ac2cf1e28b52b79998fc9076
0
@triton.jit def code_to_seqlen_kernel(code_ptr, seqlen_ptr, B, N, BLK: tl.constexpr): pid = tl.program_id(0) out = tl.zeros((1,), dtype=tl.int32) for nidx in range(tl.cdiv(N, BLK)): offs_n = nidx * BLK + tl.arange(0, BLK) mask_n = offs_n < N code = tl.load(code_ptr + offs_n, mask=mask_n, other=32767 << 48) bidx = (code >> 48 & 32767).to(tl.int32) x = tl.min((bidx == pid).to(tl.int32) * (offs_n - 65535), axis=0) out = tl.minimum(out, x) out = tl.where(out == 0, -1, out + 65535) tl.store(seqlen_ptr + pid + tl.arange(0, 1), out) tl.store(seqlen_ptr + B, N, mask=pid == B - 1)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py
6138d269-eae2-4f97-9029-ca31d8e08751
triton_gather_gemv.py
pytorch-labs/tritonbench
tritonbench/operators/gather_gemv/triton_gather_gemv.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'XBLOCK': 1, 'RBLOCK': 2048}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 64, 'RBLOCK': 8}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 64, 'RBLOCK': 4}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 8, 'RBLOCK': 512}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 8, 'RBLOCK': 256}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 64, 'RBLOCK': 64}, num_stages=1, num_warps=8)], key=['xnumel', 'rnumel']) @triton.jit def triton_red_fused_mv_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xoffset = tl.program_id(0).to(tl.int64) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None].to(tl.int64) xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :].to(tl.int64) x0 = xindex tmp0 = tl.load(in_ptr0 + x0 // rnumel, None, eviction_policy='evict_last') _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp7 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last').to(tl .float32) tmp1 = tmp0 + 8 tmp2 = tmp0 < 0 tmp3 = tl.where(tmp2, tmp1, tmp0) tmp4 = tl.load(in_ptr1 + (r1 + rnumel * (x0 % rnumel) + rnumel * rnumel * tmp3), None, eviction_policy='evict_first') tmp5 = tmp4.to(tl.float32) tmp6 = tmp5.to(tl.float32) tmp8 = tmp7.to(tl.float32) tmp9 = tmp6 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tmp12 tmp11 = tl.sum(_tmp11, 1)[:, None] tmp13 = tmp11.to(tl.float32) tl.store(out_ptr1 + x0, tmp13, None)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/gather_gemv/triton_gather_gemv.py
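The inductor-generated reduction above implements a gather-then-GEMV: each output row selects one of eight stacked square matrices via in_ptr0 (with a +8 wrap for negative indices) and dots one of its rows with the vector in in_ptr2. A rough eager-mode equivalent, with the shapes inferred from the index arithmetic and therefore an assumption:

import torch

def gather_gemv_ref(W: torch.Tensor, idx: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # W: [8, S, S] stacked square matrices, idx: [B] gather indices (may be negative),
    # v: [S]; returns the flattened [B * S] outputs, mirroring out_ptr1[x0]
    idx = torch.where(idx < 0, idx + W.shape[0], idx).long()
    return (W[idx].float() @ v.float()).reshape(-1)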
98971ea6-db2e-4e5f-a174-4421a69d2430
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/retention/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def parallel_retention_bwd_kernel_dkv(i_bh, i_t, i_k, i_v, i_h, q, k, v, do, dk, dv, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl .constexpr, V: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr, BK: tl .constexpr, BV: tl.constexpr): b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0)) d_b = tl.math.exp2(b_b * BS) p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_dk = tl.zeros([BT, BK], dtype=tl.float32) b_v = tl.load(p_v, boundary_check=(0, 1)) b_dv = tl.zeros([BT, BV], dtype=tl.float32) NTS = tl.cdiv(T, BS) d_h = tl.math.exp2((BT - tl.arange(0, BT)) * b_b) b_kd = (b_k * d_h[:, None]).to(b_k.dtype) d_q = tl.math.exp2(tl.arange(0, BS) * b_b) for i in range(NTS * BS - BS, (i_t + 1) * BT - BS, -BS): p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i, i_k * BK), (BS, BK), (1, 0)) p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i, i_v * BV), (BS, BV), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_do = (b_do * d_q[:, None]).to(b_do.dtype) b_dk *= d_b b_dv *= d_b b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False) b_s = tl.dot(b_kd, tl.trans(b_q), allow_tf32=False) b_dk += tl.dot(b_ds.to(b_q.dtype), b_q, allow_tf32=False) b_dv += tl.dot(b_s.to(b_do.dtype), b_do, allow_tf32=False) b_dk *= d_h[:, None] * scale b_dv *= scale tl.debug_barrier() o_q, o_k = tl.arange(0, BS), tl.arange(0, BT) for i in range(i_t * BT, (i_t + 1) * BT, BS): p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i, i_k * BK), (BS, BK), (1, 0)) p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i, i_v * BV), (BS, BV), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) m_s = o_k[:, None] <= o_q[None, :] d_s = tl.where(m_s, tl.math.exp2((-o_k[:, None] + o_q[None, :]) * b_b.to(tl.float32)), 0) * scale b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False) * d_s b_s = tl.dot(b_k, tl.trans(b_q), allow_tf32=False) * d_s b_dk += tl.dot(b_ds.to(b_q.dtype), b_q, allow_tf32=False) b_dv += tl.dot(b_s.to(b_q.dtype), b_do, allow_tf32=False) o_q += BS p_dk = tl.make_block_ptr(dk + (i_v * B * H + i_bh) * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_k * B * H + i_bh) * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/parallel.py
1b335466-e870-434b-8c7d-160154dc930f
k_layer_norm.py
cpuhrsch/torchfused
torchfused/triton/k_layer_norm.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.jit def _layer_norm_bwd_dx_fused(DX, DY, DW, DB, Y, W, B, V, Lock, stride, N, **META): GROUP_SIZE_M = META['GROUP_SIZE_M'] BLOCK_SIZE_N = META['BLOCK_SIZE_N'] row = tl.program_id(0) cols = tl.arange(0, BLOCK_SIZE_N) y_ptrs = Y + row * stride + cols dy_ptrs = DY + row * stride + cols w_ptrs = W + cols b_ptrs = B + cols lock_id = row % GROUP_SIZE_M Lock += lock_id Count = Lock + GROUP_SIZE_M y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32) dy = tl.load(dy_ptrs, mask=cols < N, other=0).to(tl.float32) w = tl.load(w_ptrs, mask=cols < N, other=0).to(tl.float32) b = tl.load(b_ptrs, mask=cols < N, other=0).to(tl.float32) rstd = tl.load(V + row) xhat = (y - b) / w wdy = w * dy xhat = tl.where(cols < N, xhat, 0.0) wdy = tl.where(cols < N, wdy, 0.0) mean1 = tl.sum(xhat * wdy, axis=0) / N mean2 = tl.sum(wdy, axis=0) / N dx = (wdy - (xhat * mean1 + mean2)) * rstd _store(dx, DX, stride, N, META) partial_dw = (dy * xhat).to(w.dtype) partial_db = dy.to(w.dtype) while tl.atomic_cas(Lock, 0, 1) == 1: pass count = tl.load(Count) dw_ptrs = DW + lock_id * N + cols db_ptrs = DB + lock_id * N + cols if count == 0: tl.atomic_xchg(Count, 1) else: partial_dw += tl.load(dw_ptrs, mask=cols < N, other=0.0) partial_db += tl.load(db_ptrs, mask=cols < N, other=0.0) tl.store(dw_ptrs, partial_dw, mask=cols < N) tl.store(db_ptrs, partial_db, mask=cols < N) tl.atomic_xchg(Lock, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py
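The fused backward above spin-locks on one of GROUP_SIZE_M staging rows and accumulates partial weight/bias gradients into it; a second reduction over the group dimension is still needed to obtain dW and dB. A host-side finalization sketch, assuming the staging buffers are laid out as [GROUP_SIZE_M, N]:

import torch

def finalize_dw_db(partial_dw: torch.Tensor, partial_db: torch.Tensor):
    # partial_dw / partial_db: [GROUP_SIZE_M, N] buffers filled by the kernel above
    return partial_dw.sum(dim=0), partial_db.sum(dim=0)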
e5d45e57-5c7b-4d2f-bf3e-65b3b35a34ee
softmax_loop_along_reduce_axis_v1.py
iclementine/optimize_softmax
softmax_loop_along_reduce_axis_v1.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel_loop_v1(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr): pid_m = tl.program_id(0) m = tl.full((), value=-float('inf'), dtype=output_ptr.dtype.element_ty) for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) m = tl.maximum(m, tl.max(inp, 0)) z = tl.full((), value=0, dtype=output_ptr.dtype.element_ty) for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) e = tl.exp(inp - m) z += tl.sum(e) for start_n in range(0, N, TILE_N): n_offsets = start_n + tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .dtype.element_ty) e = tl.exp(inp - m) out = e / z output_ptrs = output_ptr + offset tl.store(output_ptrs, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_loop_along_reduce_axis_v1.py
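A minimal host-side launch for softmax_kernel_loop_v1: one program per row, each looping over the reduction axis in TILE_N chunks. The wrapper below is an illustrative sketch (tile size, shapes, and the CUDA device are assumptions), with the kernel taken to be in scope:

import torch

def softmax_rows(x: torch.Tensor, tile_n: int = 1024) -> torch.Tensor:
    M, N = x.shape
    out = torch.empty_like(x)
    # grid = (M,): pid_m indexes the row, the kernel loops over N internally
    softmax_kernel_loop_v1[(M,)](out, x, M, N, TILE_N=tile_n)
    return out

x = torch.randn(8, 4000, device='cuda')
torch.testing.assert_close(softmax_rows(x), torch.softmax(x, dim=-1), atol=1e-5, rtol=1e-5)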
5de2aa3e-085e-4400-b082-14c3014eba37
softplus.py
shawntan/stickbreaking-attention
stickbreaking_attention/sb_varlen/softplus.py
8dd32ad5e58f0ee0232fd4782dc53d354ff8d283
0
@triton.jit def softplus(x, is_compiling: tl.constexpr=False): if is_compiling: tl.static_print('Using triton softplus.') out = tl.where(x < 15.0, tl.math.log2(1 + tl.math.exp2(x)), x) return out else: out = tl.inline_asm_elementwise(asm=asm_str, constraints= constraints_str, pack=NUM_REG, args=[x], dtype=tl.float32, is_pure=True) return out
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Non-Tiled" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/softplus.py
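The base-2 softplus above returns x unchanged once x >= 15 because the exact value log2(1 + 2^x) = x + log2(1 + 2^-x) differs from x by only about 4e-5 at the threshold, and that correction halves with every further unit of x. A small NumPy sketch of the same formula (the threshold check below is illustrative, not from the repo):

import numpy as np

def softplus2_ref(x: np.ndarray) -> np.ndarray:
    # log2(1 + 2**x) with the same large-x shortcut as the Triton kernel
    return np.where(x < 15.0, np.log2(1.0 + np.exp2(x)), x)

# size of the neglected correction at the cutoff: ~4.4e-5
print(np.log2(1.0 + np.exp2(15.0)) - 15.0)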
a68d784e-b08f-44c8-afff-6f1cd84ee225
gemm_streamk_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.jit def mac_loop(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl. constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr, stride_cn: tl.constexpr, iters_per_tile, start_iter, end_iter, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): tile_id = start_iter // iters_per_tile remain_iters = start_iter % iters_per_tile if GROUP_SIZE_M > 0: pid_m, pid_n = swizzle_tile(tile_id, M, N, K, BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE_M) else: pid_m, pid_n = linear_tile(tile_id, M, N, K, BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE_M) a_ptr += BLOCK_SIZE_K * stride_ak * remain_iters a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=( stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0)) b_ptr += BLOCK_SIZE_K * stride_bk * remain_iters b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=( stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0)) acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for _ in range(start_iter, end_iter): a = tl.load(a_block_ptr, boundary_check=(0, 1)) b = tl.load(b_block_ptr, boundary_check=(0, 1)) acc += tl.dot(a, b) a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K)) b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0)) if remain_iters == 0 and end_iter % iters_per_tile == 0: c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=( stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order= (1, 0)) tl.store(c_block_ptr, acc, boundary_check=(0, 1)) else: rm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptr_ = c_ptr + rm[:, None] * stride_cm + rn[None, :] * stride_cn mask = (rm < M)[:, None] & (rn < N)[None, :] tl.atomic_add(c_ptr_, acc, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
a730fe03-8fd6-4e0a-9e86-85418a5bb767
memory.py
USC-NSL/DisagMoE
disagmoe/ops/memory.py
6e86ce027a9622109ce81e691af1a48c1d5dbaf2
0
@triton.jit def _permute_tokens_kernel(out_ptr, in_ptr, mapping, hidden_size, BLOCK_SIZE: tl.constexpr): token_id = tl.program_id(axis=0) block_id = tl.program_id(axis=1) target_pos = tl.load(mapping + token_id) src_start = token_id * hidden_size + block_id * BLOCK_SIZE src_offsets = src_start + tl.arange(0, BLOCK_SIZE) src_data = tl.load(in_ptr + src_offsets) target_start = target_pos * hidden_size + block_id * BLOCK_SIZE target_offsets = target_start + tl.arange(0, BLOCK_SIZE) tl.store(out_ptr + target_offsets, src_data)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Transposed Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/USC-NSL/DisagMoE/blob/6e86ce027a9622109ce81e691af1a48c1d5dbaf2/disagmoe/ops/memory.py
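_permute_tokens_kernel scatters token i of the input to row mapping[i] of the output, one (token, hidden-block) pair per program. A launch sketch under assumed shapes; note the kernel stores without a mask, so hidden_size must be a multiple of BLOCK_SIZE:

import torch

def permute_tokens(x: torch.Tensor, mapping: torch.Tensor, block_size: int = 256) -> torch.Tensor:
    num_tokens, hidden_size = x.shape
    assert hidden_size % block_size == 0  # kernel has no tail masking
    out = torch.empty_like(x)
    grid = (num_tokens, hidden_size // block_size)
    _permute_tokens_kernel[grid](out, x, mapping, hidden_size, BLOCK_SIZE=block_size)
    return out

x = torch.randn(16, 1024, device='cuda', dtype=torch.float16)
mapping = torch.randperm(16, device='cuda', dtype=torch.int32)
ref = torch.empty_like(x).index_copy_(0, mapping.long(), x)  # out[mapping[i]] = x[i]
torch.testing.assert_close(permute_tokens(x, mapping), ref)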
dec34255-4bf5-4a35-bd29-31122d76da6e
quantization.py
neuro-ml/kerops
kerops/kernels/quantization.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit def _DequantUint8Window_impl(input_ptr, output_ptr, numel, window, BLOCK_SIZE: tl.constexpr): tid = tl.program_id(0) input_ptr += tid * BLOCK_SIZE output_ptr += tid * BLOCK_SIZE offset = tl.arange(0, BLOCK_SIZE) mask = offset < numel - tid * BLOCK_SIZE input = tl.load(input_ptr + offset, mask=mask).to(tl.float32) input = input * (2 * window / 255) - window tl.store(output_ptr + offset, input, mask=mask)
{ "Data Type": [ "uint8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/quantization.py
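The dequantization maps uint8 codes back to [-window, window] via code * (2 * window / 255) - window. A host-side launch plus eager reference, sketched with an assumed window of 2.5 and the kernel taken to be importable:

import torch
import triton

def dequant_uint8_window(q: torch.Tensor, window: float, block_size: int = 1024) -> torch.Tensor:
    out = torch.empty(q.shape, dtype=torch.float32, device=q.device)
    numel = q.numel()
    grid = (triton.cdiv(numel, block_size),)
    _DequantUint8Window_impl[grid](q, out, numel, window, BLOCK_SIZE=block_size)
    return out

q = torch.randint(0, 256, (5000,), dtype=torch.uint8, device='cuda')
ref = q.float() * (2 * 2.5 / 255) - 2.5
torch.testing.assert_close(dequant_uint8_window(q, 2.5), ref)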
6a43bf82-bfad-4982-8ad6-8c9316f953e6
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def cross_entropy_loss(input, pred): """ Measures the per-row cross entropy loss given input and predicted logits corresponding to target class. Args: input: Input. The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2]. pred: Predicted logits corresponding to target class. The predictions must be of shape [BLOCK_SIZE1]. Returns: Loss. """ input = input.to(tl.float32) pred = pred.to(tl.float32) mx = tl.max(input, axis=1) input -= mx[:, None] loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx return loss
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
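The helper is the standard log-sum-exp form of cross entropy: loss = logsumexp(input, axis=1) - pred, with the row max subtracted only for numerical stability. An eager PyTorch equivalent for reference (shapes below are illustrative):

import torch

def cross_entropy_loss_ref(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # logits: [B, C]; target: [B] class indices; pred in the kernel is logits[b, target[b]]
    pred = logits.gather(1, target[:, None]).squeeze(1)
    return torch.logsumexp(logits.float(), dim=1) - pred.float()

logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))
torch.testing.assert_close(cross_entropy_loss_ref(logits, target),
                           torch.nn.functional.cross_entropy(logits, target, reduction='none'))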
115476f9-cb8c-4ec2-895c-340862360239
fp8_gemm.py
pytorch/FBGEMM
fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.autotune(configs=MATMUL_CONFIGS, key=['m_key', 'n_key', 'k_key'], prune_configs_by={'early_config_prune': prune_configs_block, 'perf_model': estimate_matmul_time, 'top_k': 10}) @triton.heuristics({'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0}) @triton.jit def _kernel_matmul_fp8_block_fastacc(A, B, C, M, N, K, m_key, n_key, k_key, A_scale, B_scale, scale_block_m: tl.constexpr, scale_block_n: tl. constexpr, scale_block_k: tl.constexpr, stride_am, stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, stride_scale_am, stride_scale_ak, stride_scale_bn, stride_scale_bk, dot_out_dtype: tl.constexpr, allow_tf32: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, AB_DTYPE: tl.constexpr) ->None: """Matmul kernel of [M, K] @ [N, K] with block-wise scales Performs swizzled matmul in [BLOCK_M, BLOCK_K] with [BLOCK_K, BLOCK_N] tiles and A and B scaled by a scaling factor per [scale_block_m, scale_block_k] and [scale_block_n, scale_block_k] tiles respectively. Todo: * Support scale_block_{mnk} < BLOCK{MNK} for each dim. Args: A (TensorWrapper): [M, K] input tensor. B (TensorWrapper): [N, K] input tensor. C (TensorWrapper): [M, N] output tensor. M (int): M dimension of input tensor. N (int): N dimension of input tensor. K (int): K dimension of input tensor. m_key (int): Autotuning key for M dimension of input tensor. n_key (int): Autotuning key for N dimension of input tensor. k_key (int): Autotuning key for K dimension of input tensor. A_scale (TensorWrapper): [cdiv(M, scale_block_m), cdiv(K, scale_block_k)] reciprocal scale tensor per block. A * A_scale = original A B_scale (TensorWrapper): [cdiv(N, scale_block_n), cdiv(K, scale_block_k)] reciprocal scale tensor per block. B * B_scale = original B scale_block_m (int): Block size for M dimension of A_scale. scale_block_n (int): Block size for N dimension of B_scale. scale_block_k (int): Block size for K dimension of A_scale and B_scale. stride_am (int): Stride of M dimension of A. stride_ak (int): Stride of K dimension of A. stride_bn (int): Stride of N dimension of B. stride_bk (int): Stride of K dimension of B. stride_cm (int): Stride of M dimension of C. stride_cn (int): Stride of N dimension of C. stride_scale_am (int): Stride of M dimension of A_scale. stride_scale_ak (int): Stride of K dimension of A_scale. stride_scale_bn (int): Stride of N dimension of B_scale. stride_scale_bk (int): Stride of K dimension of B_scale. dot_out_dtype (torch.dtype): Output type of tensor core. allow_tf32 (bool): Whether to use TF32 for tensor core. fp8_fast_accum (bool): Whether to use fast accumulation for tensor core. BLOCK_M (int): Block size for M dimension. BLOCK_N (int): Block size for N dimension. BLOCK_K (int): Block size for K dimension. GROUP_M (int): Number of groups for M dimension swizzle. SPLIT_K (int): Number of SM's to launch per row. EVEN_K (bool): Whether K is evenly divisible by BLOCK_K * SPLIT_K. AB_DTYPE (bool): Wether to cast A and B to C.dtype before tensor core. 
""" assert BLOCK_M < scale_block_m assert BLOCK_N < scale_block_n assert BLOCK_K < scale_block_k pid = tl.program_id(0) pid_z = tl.program_id(1) grid_m = tl.cdiv(M, BLOCK_M) grid_n = tl.cdiv(N, BLOCK_N) width = GROUP_M * grid_n group_id = pid // width group_size = min(grid_m - group_id * GROUP_M, GROUP_M) pid_m = group_id * GROUP_M + pid % group_size pid_n = pid % width // group_size rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype) _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty) scale_m = pid_m * BLOCK_M // scale_block_m scale_n = pid_n * BLOCK_N // scale_block_n k_multiple = scale_block_k // BLOCK_K for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): k_remaining = K - k * (BLOCK_K * SPLIT_K) if EVEN_K: a = tl.load(A) b = tl.load(B) else: a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0) b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0) if AB_DTYPE: a = a.to(C.dtype.element_ty) b = b.to(C.dtype.element_ty) acc = tl.dot(a, b, acc, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) A += BLOCK_K * SPLIT_K * stride_ak B += BLOCK_K * SPLIT_K * stride_bk pid_k = k * SPLIT_K + pid_z if (pid_k + 1) % k_multiple == 0 or k_remaining < BLOCK_K * SPLIT_K: scale_k = pid_k // k_multiple scale_k_next = scale_k + 1 a_scale = tl.load(A_scale + scale_m * stride_scale_am + scale_k * stride_scale_ak) b_scale = tl.load(B_scale + scale_n * stride_scale_bn + scale_k * stride_scale_bk) scale = a_scale * b_scale if k + 1 == tl.cdiv(K, BLOCK_K * SPLIT_K): scale_next_inv_scale = scale else: a_scale_next = tl.load(A_scale + scale_m * stride_scale_am + scale_k_next * stride_scale_ak) b_scale_next = tl.load(B_scale + scale_n * stride_scale_bn + scale_k_next * stride_scale_bk) scale_next = a_scale_next * b_scale_next scale_next_inv_scale = scale / scale_next acc *= scale_next_inv_scale rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) acc = acc.to(C.dtype.element_ty) c = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) mask = (rm < M)[:, None] & (rn < N)[None, :] if SPLIT_K == 1: tl.store(c, acc, mask=mask) else: tl.atomic_add(c, acc, mask=mask)
{ "Data Type": [], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
5514dae5-9a0b-4b86-9296-8b765d29d9a5
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def update_welford(input, prev_count, prev_mean, prev_var, curr_count, mask: tl.constexpr): """ Updates count, mean, and variance (M2) statistics for Welford's algorithm. Args: input: Input used to update statistics. The input must be of the same shape as the mask. prev_count: Previous count statistic to update. prev_mean: Previous mean statistic to update. prev_var: Previous variance (M2) statistic to update. curr_count: Count of elements in current input. mask: Mask indicating which elements should be included in the calculations. The mask must be of the same shape as the input. Returns: Updated count, mean, and variance (M2) statistics """ input = input.to(tl.float32) count = prev_count + curr_count mean = (tl.sum(input) - curr_count * prev_mean) / count deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.0) var = prev_var + tl.sum(deltas) return count, mean, var
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
094af577-4f53-4a31-868b-1e8681830008
outer_softmax.py
iclementine/optimize_softmax
outer_softmax.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel(output_ptr, input_ptr, M, N, K, TILE_N: tl.constexpr, TILE_K: tl.constexpr): pid_k = tl.program_id(0) pid_m = tl.program_id(1) k_offsets = pid_k * TILE_K + tl.arange(0, TILE_K) n_offsets = tl.arange(0, TILE_N) offset = pid_m * N * K + n_offsets[:, None] * K + k_offsets mask = (n_offsets[:, None] < N) & (k_offsets < K) input_ptrs = input_ptr + offset inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr .type.element_ty) m = tl.max(inp, 0) e = tl.exp(inp - m[None, :]) z = tl.sum(e, 0) out = e / z output_ptrs = output_ptr + offset tl.store(output_ptrs, out, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/outer_softmax.py
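This variant normalizes over the middle axis of an [M, N, K] tensor, loading the whole reduction axis in one tile (TILE_N must be a power of two >= N) and splitting K across programs. A launch sketch under those assumptions, with the kernel taken to be in scope:

import torch
import triton

def softmax_mid_axis(x: torch.Tensor, tile_k: int = 32) -> torch.Tensor:
    M, N, K = x.shape
    out = torch.empty_like(x)
    tile_n = triton.next_power_of_2(N)   # whole reduction axis in one tile
    grid = (triton.cdiv(K, tile_k), M)   # (pid_k, pid_m)
    softmax_kernel[grid](out, x, M, N, K, TILE_N=tile_n, TILE_K=tile_k)
    return out

x = torch.randn(4, 37, 64, device='cuda')
torch.testing.assert_close(softmax_mid_axis(x), torch.softmax(x, dim=1), atol=1e-5, rtol=1e-5)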
0a7754cd-aa42-48c9-a0f9-486516dcd1a4
group_norm.py
chengzeyi/stable-fast
src/sfast/triton/ops/group_norm.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@eval( """triton.heuristics({ 'ROW_SIZE': lambda kwargs: triton.next_power_of_2(kwargs['C'] // kwargs['groups']), 'BLOCK_SIZE': lambda kwargs: max( 1, min(triton.next_power_of_2(kwargs['cluster_size']), 4096 // (triton.next_power_of_2(kwargs['C'] // kwargs['groups'])) )), })""" ) @eval( """triton.heuristics({ 'num_warps': lambda kwargs: max(1, min(16, kwargs['ROW_SIZE'] * kwargs['BLOCK_SIZE'] // 128)), 'C_G': lambda kwargs: kwargs['C'] // kwargs['groups'], })""" ) @triton.jit def group_norm_4d_channels_last_forward_collect_stats_kernel_stage_1(input_ptr, N, C, HxW, groups, cluster_size, cluster_num, cluster_mean_ptr, cluster_m2_ptr, cluster_weight_ptr, C_G, ROW_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr): group = tl.program_id(0) cluster = tl.program_id(1) pid_batch = tl.program_id(2) offset = pid_batch * C * HxW + group * C_G X = input_ptr + offset _mean = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32) _m2 = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32) _weight = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32) row = tl.arange(0, ROW_SIZE) start = cluster * cluster_size end = start + cluster_size end = min(end, HxW) for off in range(start, end, BLOCK_SIZE): r = off + tl.arange(0, BLOCK_SIZE) m2_ = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32) mask = (r < end)[:, None] & (row[None, :] < C_G) weight_ = mask.to(tl.float32) x = tl.load(X + (r * C)[:, None] + row[None, :], mask=mask).to(tl. float32) _mean, _m2, _weight = welford_combine(_mean, _m2, _weight, x, m2_, weight_) _mean = tl.view(_mean, (BLOCK_SIZE * ROW_SIZE,)) _m2 = tl.view(_m2, (BLOCK_SIZE * ROW_SIZE,)) _weight = tl.view(_weight, (BLOCK_SIZE * ROW_SIZE,)) mean, m2, weight = tl.reduce((_mean, _m2, _weight), 0, welford_combine) offset = pid_batch * groups * cluster_num + group * cluster_num + cluster tl.store(cluster_mean_ptr + offset, mean) tl.store(cluster_m2_ptr + offset, m2) tl.store(cluster_weight_ptr + offset, weight)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/group_norm.py
fd13e143-89d0-45f8-b215-8f6705c789eb
blocksparse_matmul.py
kimiasa/Experiments
src/models/attention/blocksparse_matmul.py
c4e73bfefd8290695ec52b6386b6b81838ca94a1
0
@triton.jit def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc, stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta): TM = meta['TM'] TN = meta['TN'] TK = meta['TK'] TZ = meta['TZ'] BLOCK = meta['BLOCK'] pid0 = tl.program_id(0) pid1 = tl.program_id(1) pidz = tl.program_id(2) if meta['SDD']: pid1 = pid1 + SDD_off_width blockidm = tl.arange(0, TM) // BLOCK blockidn = tl.arange(0, TN) // BLOCK offlutm = blockidm * (TN // BLOCK) * 4 offlutn = blockidn * 4 header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4 z = tl.load(header + 0) i = tl.load(header + 1 + offlutm) j = tl.load(header + 2 + offlutn) AS1 = SDD_K // TZ lockid = tl.where(TZ > 1, 1, 0) offka = pid0 * AS1 offkb = pid0 * AS1 offmc = 0 offnc = 0 offpa = 0 offpb = 0 maxid = TZ offhc = 0 offha = z offhb = z ram = i * BLOCK + tl.arange(0, TM) % BLOCK rbn = j * BLOCK + tl.arange(0, TN) % BLOCK else: header = lut + pid0 * 6 offset = tl.load(header + 0) AS1 = tl.load(header + 1) column = tl.load(header + 2) depth = tl.load(header + 3) lockid = tl.load(header + 4) maxid = tl.load(header + 5) pinc = lut + offset offhc = depth if meta['DSD']: offnc = pid1 * TN offmc = column * TM offpc = 0 offnb = pid1 * TN offkb = tl.load(pinc) offkb = tl.multiple_of(offkb, 8) offpb = 0 offma = 0 offka = 0 offpa = tl.load(pinc + 1) offpa = tl.multiple_of(offpa, 8) offpa = offpa * BLOCK * BLOCK offha = 0 offhb = depth else: offmc = pid1 * TM offnc = column * TN offpc = 0 offma = pid1 * TM offka = tl.load(pinc) offka = tl.multiple_of(offka, 8) offpa = 0 offnb = 0 offkb = 0 offpb = tl.load(pinc + 1) offpb = tl.multiple_of(offpb, 8) offpb = offpb * BLOCK * BLOCK offha = depth offhb = 0 ram = offma + tl.arange(0, TM) rbn = offnb + tl.arange(0, TN) rka = offka + tl.arange(0, TK) rkb = offkb + tl.arange(0, TK) pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None ] * stride_ma + rka[None, :] * stride_ka pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, : ] * stride_nb + rkb[:, None] * stride_kb if meta['DDS']: checkam = ram[:, None] < DS0 else: checkam = AS1 > 0 if meta['DSD']: checkbn = rbn[None, :] < DS0 else: checkbn = AS1 > 0 a = tl.load(pa, mask=checkam, other=0.0) b = tl.load(pb, mask=checkbn, other=0.0) acc = tl.zeros((TM, TN), dtype=tl.float32) for k in range(AS1, 0, -TK): acc += tl.dot(a, b) if meta['SDD']: inc_a = TK * stride_ka inc_b = TK * stride_kb else: pinc += 2 if meta['DSD']: inc_b = tl.load(pinc) inc_a = tl.load(pinc + 1) inc_b = tl.multiple_of(inc_b, 8) inc_a = tl.multiple_of(inc_a, 8) inc_b = inc_b * stride_kb if meta['DDS']: inc_a = tl.load(pinc) inc_b = tl.load(pinc + 1) inc_a = tl.multiple_of(inc_a, 8) inc_b = tl.multiple_of(inc_b, 8) inc_a = inc_a * stride_ka pa += inc_a pb += inc_b checkak = k > TK checkbk = k > TK checka = checkam & checkak checkb = checkbn & checkbk a = tl.load(pa, mask=checka) b = tl.load(pb, mask=checkb) c = acc.to(C.dtype.element_ty) if meta['SDD']: checkc = True rr_blockidm = tl.arange(0, TM) // BLOCK rr_blockidn = tl.arange(0, TN) // BLOCK rr_offlutm = rr_blockidm * (TN // BLOCK) * 4 rr_offlutn = rr_blockidn * 4 off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :] bkid = tl.load(header + off_bkid) offpc = bkid * BLOCK * BLOCK rcm = tl.arange(0, TM) % BLOCK rcn = tl.arange(0, TN) % BLOCK else: rcm = offmc + tl.arange(0, TM) rcn = offnc + tl.arange(0, TN) if meta['DSD']: checkc = rcn[None, :] < DS0 if meta['DDS']: checkc = rcm[:, None] < DS0 pc = C + offpc + offhc * stride_hc + 
pidz * stride_zc + rcm[:, None ] * stride_mc + rcn[None, :] * stride_nc if lockid == 0: tl.store(pc, c, mask=checkc) else: plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1 ) + tl.program_id(1) * nlocks + lockid - 1 pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks while tl.atomic_cas(plock, 0, 1) == 1: pass count = tl.load(pcount) if count == 0: tl.store(pc, c, mask=checkc) else: d = tl.load(pc, mask=checkc) tl.store(pc, d + c, mask=checkc) tl.atomic_xchg(pcount, (count + 1) % maxid) tl.atomic_xchg(plock, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/models/attention/blocksparse_matmul.py
9822ff51-7759-462c-94c3-f99295b342b9
seqlen_utils.py
Kitsunetic/kitsu
kitsu/nn/seqlen_utils.py
826967a493c89753ac2cf1e28b52b79998fc9076
0
@triton.jit def padding_index_kernel(seqlen_ptr, new_seqlen_ptr, new_max_seqlen, idx_ptr, window_size, BLK_N: tl.constexpr): pid_b = tl.program_id(0) i1 = tl.load(seqlen_ptr + pid_b).to(tl.int32) j1 = tl.load(seqlen_ptr + pid_b + 1).to(tl.int32) i2 = tl.load(new_seqlen_ptr + pid_b).to(tl.int32) j2 = tl.load(new_seqlen_ptr + pid_b + 1).to(tl.int32) for pid_n in range(tl.cdiv(new_max_seqlen, BLK_N)): offs_idx = pid_n * BLK_N + tl.arange(0, BLK_N) mask_idx = offs_idx < j2 - i2 idx_ptrs = idx_ptr + i2 + offs_idx idx = i1 + offs_idx.to(tl.int32) tmp = clamp(idx - window_size, i1, j1 - 1) idx_out = tl.where(idx < j1, idx, tmp) tl.store(idx_ptrs, idx_out, mask=mask_idx)
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/seqlen_utils.py
176b1708-5add-44dc-a5d9-163681f5d85f
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def _multi_head_jagged_flash_attention_bwd_preprocess_kernel(o_ptr, o_offset_ptr, do_ptr, delta_ptr, stride_oh, stride_om, stride_od, stride_delta_h, num_heads: tl.constexpr, max_seq_len: tl.constexpr, D: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_D: tl.constexpr): pid_m = tl.program_id(axis=0) pid_bh = tl.program_id(axis=1) pid_batch = pid_bh // num_heads pid_head = pid_bh % num_heads begin_o = tl.load(o_offset_ptr + pid_batch) end_o = tl.load(o_offset_ptr + pid_batch + 1) M = end_o - begin_o M = tl.minimum(M, max_seq_len) if M == 0: return offs_om = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_od = tl.arange(0, BLOCK_D) o_offsets = offs_om[:, None] * stride_om + offs_od[None, : ] * stride_od + pid_head * stride_oh + begin_o * stride_om o_ptrs = o_ptr + o_offsets do_ptrs = do_ptr + o_offsets o_mask = (offs_om[:, None] < M) & (offs_od[None, :] < D) o = tl.load(o_ptrs, mask=o_mask) do = tl.load(do_ptrs, mask=o_mask) delta = tl.sum(o * do, axis=1) tl.store(delta_ptr + pid_head * stride_delta_h + begin_o + offs_om, delta, mask=offs_om < M)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
8f0a1ee8-5be7-4e8c-824f-b08939f27687
y_8.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_8.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def eighth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_striding = tl.arange(0, block_size) * coord_stride coord_row_offset = coord_striding + block_size * coord_stride * block_id x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel) z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel) output_striding = tl.arange(0, block_size) * output_stride output_row_offset = (output_striding + block_size * output_stride * block_id + col_offset) g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset < output_numel) g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask= output_row_offset + 1 < output_numel) g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask= output_row_offset + 2 < output_numel) g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask= output_row_offset + 3 < output_numel) g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask= output_row_offset + 4 < output_numel) g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask= output_row_offset + 5 < output_numel) g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask= output_row_offset + 6 < output_numel) g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask= output_row_offset + 7 < output_numel) g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask= output_row_offset + 8 < output_numel) g_9 = tl.load(sph_grad_ptr + output_row_offset + 9, mask= output_row_offset + 9 < output_numel) g_10 = tl.load(sph_grad_ptr + output_row_offset + 10, mask= output_row_offset + 10 < output_numel) g_11 = tl.load(sph_grad_ptr + output_row_offset + 11, mask= output_row_offset + 11 < output_numel) g_12 = tl.load(sph_grad_ptr + output_row_offset + 12, mask= output_row_offset + 12 < output_numel) g_13 = tl.load(sph_grad_ptr + output_row_offset + 13, mask= output_row_offset + 13 < output_numel) g_14 = tl.load(sph_grad_ptr + output_row_offset + 14, mask= output_row_offset + 14 < output_numel) g_15 = tl.load(sph_grad_ptr + output_row_offset + 15, mask= output_row_offset + 15 < output_numel) g_16 = tl.load(sph_grad_ptr + output_row_offset + 16, mask= output_row_offset + 16 < output_numel) CONST000 = 2.0 CONST001 = 3.0 CONST002 = 4.50964677801932 CONST004 = 5.0 CONST005 = 6.78376969317208 CONST006 = 4.0 CONST007 = 9.01929355603863 CONST008 = 6.76447016702898 CONST009 = 6.0 CONST011 = 13.5675393863442 CONST012 = 15.0965641786467 CONST013 = 13.136713523081 CONST015 = 13.136713523081 CONST017 = 19.4042118494929 CONST019 = -489.184589393411 CONST020 = 24.738633753706 CONST023 = 26.2734270461621 CONST024 = 27.0578806681159 CONST025 = 24.738633753706 CONST026 = 32.9848450049413 CONST027 = 33.9188484658604 CONST028 = 550.332663067587 CONST030 = -978.369178786822 CONST031 = 48.5105296237322 CONST033 = 51.744564931981 CONST035 = 48.9184589393411 CONST041 = 65.6835676154051 CONST043 = -1467.55376818023 CONST045 = -12.2296147348353 CONST047 = 582.126355484786 CONST048 = -437.890450769368 CONST049 = -434.108258927137 CONST050 = -434.108258927137 CONST052 = -432.926090689854 CONST054 = -1447.02752975712 CONST055 = 91.9569946615672 CONST056 = -420.374832738593 CONST057 = 6.46807061649763 CONST058 = 97.0210592474644 CONST061 = 103.489129863962 CONST062 = 
-407.026181590325 CONST063 = 108.231522672464 CONST065 = 110.066532613517 CONST066 = 110.066532613517 CONST067 = 620.934779183772 CONST068 = -396.284809689477 CONST070 = 132.094936563159 CONST071 = 434.108258927137 CONST073 = 649.389136034781 CONST076 = -366.888442045058 CONST077 = -366.888442045058 CONST078 = -361.756882439281 CONST080 = -6.78376969317208 CONST082 = -350.312360615494 CONST083 = -346.340872551883 CONST084 = -346.340872551883 CONST085 = 173.170436275942 CONST086 = 173.170436275942 CONST088 = 183.444221022529 CONST089 = 183.444221022529 CONST090 = -325.62094527226 CONST091 = -13.5289403340579 CONST092 = -13.5675393863442 CONST093 = 194.042118494929 CONST095 = 197.050702846215 CONST096 = -11.3224231339851 CONST097 = 203.513090795162 CONST098 = -814.05236318065 CONST102 = -814.05236318065 CONST104 = 217.054129463568 CONST105 = 216.463045344927 CONST106 = 220.133065227035 CONST107 = -291.063177742393 CONST108 = 220.133065227035 CONST109 = -792.569619378954 CONST111 = -271.350787726883 CONST112 = 244.592294696705 CONST113 = 244.592294696706 CONST114 = 244.592294696706 CONST115 = -776.168473979715 CONST116 = -262.734270461621 CONST117 = -259.755654413913 CONST118 = -258.722824659905 CONST120 = 262.734270461621 CONST121 = -244.215708954195 CONST122 = 271.350787726883 CONST124 = -236.460843415458 CONST127 = -217.054129463568 CONST128 = -216.463045344927 CONST129 = -216.463045344927 CONST130 = -216.463045344927 CONST131 = -723.513764878561 CONST133 = -210.187416369296 CONST134 = -210.187416369296 CONST135 = 814.05236318065 CONST136 = -197.050702846215 CONST137 = 317.027847751582 CONST138 = -194.042118494929 CONST139 = -13.136713523081 CONST140 = 324.694568017391 CONST142 = 324.694568017391 CONST143 = -175.156180307747 CONST146 = -162.81047263613 CONST147 = -162.347284008695 CONST148 = 865.852181379709 CONST149 = -158.513923875791 CONST151 = -144.702752975712 CONST152 = -649.389136034782 CONST153 = -129.877827206956 CONST154 = -129.361412329953 CONST155 = 388.084236989858 CONST157 = -115.446957517294 CONST158 = -108.231522672464 CONST159 = -108.231522672464 CONST160 = 407.026181590325 CONST161 = -103.489129863962 CONST162 = -97.0210592474644 CONST163 = -94.7025823384056 CONST165 = -91.9569946615672 CONST167 = -87.5780901538735 CONST168 = -85.6073031438469 CONST169 = -85.6073031438469 CONST170 = -81.1736420043477 CONST171 = 432.926090689854 CONST172 = -79.2569619378954 CONST173 = -81.1736420043477 CONST177 = -79.2569619378954 CONST178 = -72.3513764878561 CONST179 = -72.1543484483091 CONST180 = -70.0624721230988 CONST181 = -72.1543484483091 CONST182 = -67.8376969317208 CONST183 = -65.6835676154052 CONST184 = -61.1480736741764 CONST185 = -1085.27064731784 CONST186 = -61.1480736741764 CONST187 = -1085.40315090753 CONST188 = -57.7234787586472 CONST189 = -12.9361412329953 CONST190 = -1085.27064731784 CONST191 = -52.8379746252636 CONST192 = -51.744564931981 CONST193 = -1585.13923875791 CONST194 = -48.5105296237322 CONST195 = -47.4863878522046 CONST197 = 978.369178786822 CONST198 = -517.44564931981 CONST199 = -40.7026181590325 CONST200 = -40.5868210021738 CONST201 = -39.4101405692431 CONST202 = -40.7026181590325 CONST203 = -36.0771742241545 CONST204 = -1056.75949250527 CONST205 = -29.1063177742393 CONST206 = 485.105296237322 CONST207 = -26.2734270461621 CONST208 = -26.4189873126318 CONST209 = -1050.93708184648 CONST210 = -22.6382471577417 CONST211 = -20.6718218536732 CONST212 = -19.4042118494929 CONST213 = -20.3513090795162 CONST214 = -528.379746252636 CONST215 = -15.0965641786467 CONST216 = 
-13.5675393863442 CONST217 = -525.468540923241 CONST218 = -11.3224231339851 CONST219 = -13.5289403340579 CONST220 = -9.70210592474644 CONST221 = -10.3359109268366 CONST222 = -6.46807061649763 CONST223 = -13.136713523081 CONST224 = -12.2296147348353 CONST225 = -3.23403530824881 CONST226 = -1034.89129863962 VAR06 = x * x * x * x VAR07 = x * x * x VAR08 = x * x VAR03 = VAR06 * VAR07 VAR04 = VAR07 * VAR07 VAR05 = VAR07 * VAR08 VAR15 = y * y * y * y VAR16 = y * y * y VAR17 = y * y VAR12 = VAR15 * VAR16 VAR13 = VAR16 * VAR16 VAR14 = VAR16 * VAR17 VAR24 = z * z * z * z VAR25 = z * z * z VAR26 = z * z VAR21 = VAR24 * VAR25 VAR22 = VAR25 * VAR25 VAR23 = VAR25 * VAR26 g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask= coord_row_offset + 1 < coord_numel) g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask= coord_row_offset + 2 < coord_numel) g_x += g_0 * (CONST049 * VAR08 * VAR23 - CONST131 * VAR06 * VAR25 + CONST151 * VAR04 * z - CONST211 * VAR21) + g_1 * y * (CONST178 * VAR04 - CONST178 * VAR22 + CONST185 * VAR08 * VAR24 - CONST190 * VAR06 * VAR26) + g_10 * (CONST017 * VAR05 * VAR26 + CONST161 * VAR13 * x - CONST189 * VAR03 - CONST198 * VAR07 * VAR15 + CONST222 * VAR22 * x + VAR17 * (CONST058 * VAR24 * x + CONST107 * VAR05 + CONST138 * VAR07 * VAR26)) + g_11 * (CONST056 * VAR14 * x * z + VAR16 * (-CONST082 * VAR25 * x - CONST209 * VAR07 * z) + y * ( CONST116 * VAR07 * VAR25 + CONST124 * VAR05 * z + CONST207 * VAR23 * x) ) + g_12 * (CONST011 * VAR03 + CONST182 * VAR07 * VAR24 + CONST199 * VAR05 * VAR26 + CONST216 * VAR22 * x + VAR15 * (CONST098 * VAR26 * x + CONST122 * VAR07) + VAR17 * (-CONST102 * VAR07 * VAR26 + CONST121 * VAR05 + CONST160 * VAR24 * x)) + g_13 * (VAR16 * (- CONST030 * VAR07 * z + CONST030 * VAR25 * x) + y * (CONST076 * VAR05 * z + CONST106 * VAR23 * x + CONST112 * VAR07 * VAR25) ) + g_14 * (CONST012 * VAR03 + CONST149 * VAR05 * VAR26 - CONST191 * VAR22 * x + VAR17 * (CONST109 * VAR24 * x + CONST149 * VAR05 - CONST193 * VAR07 * VAR26)) + g_15 * y * (CONST050 * VAR05 * z + CONST050 * VAR23 * x - CONST054 * VAR07 * VAR25) + g_16 * (CONST050 * VAR05 * VAR26 - CONST131 * VAR07 * VAR24 + CONST151 * VAR22 * x - CONST211 * VAR03) + g_2 * (CONST001 * VAR08 * (-CONST208 * VAR23 + CONST214 * VAR17 * VAR25) + CONST004 * VAR06 * (-CONST149 * VAR17 * z - CONST208 * VAR25) - CONST149 * VAR17 * VAR23 + CONST172 * VAR04 * z + CONST218 * VAR21) + g_3 * (VAR16 * (CONST043 * VAR08 * VAR26 + CONST113 * VAR06 + CONST114 * VAR24) + y * (CONST028 * VAR06 * VAR26 + CONST088 * VAR08 * VAR24 + CONST168 * VAR04 + CONST184 * VAR22) ) + g_4 * (CONST001 * VAR08 * (CONST005 * VAR23 + CONST111 * VAR15 * z) + CONST004 * VAR06 * (CONST080 * VAR25 - CONST146 * VAR17 * z) + CONST005 * VAR21 - CONST111 * VAR15 * VAR25 + CONST146 * VAR17 * VAR23 + CONST195 * VAR04 * z) + g_5 * (VAR14 * (CONST133 * VAR08 - CONST134 * VAR26) + VAR16 * (-CONST048 * VAR06 + CONST116 * VAR24 + CONST217 * VAR08 * VAR26) + y * (CONST041 * VAR06 * VAR26 + CONST095 * VAR08 * VAR24 + CONST165 * VAR04 - CONST201 * VAR22) ) + g_6 * (CONST001 * VAR08 * (CONST093 * VAR17 * VAR25 + CONST118 * VAR15 * z + CONST220 * VAR23) + CONST004 * VAR06 * (-CONST162 * VAR17 * z + CONST220 * VAR25) + CONST118 * VAR15 * VAR25 - CONST161 * VAR13 * z - CONST162 * VAR17 * VAR23 + CONST210 * VAR04 * z + CONST225 * VAR21) + g_7 * (CONST001 * VAR08 * (-CONST128 * VAR16 * VAR26 + CONST153 * VAR14 + CONST200 * VAR24 * y) + CONST004 * VAR06 * (CONST063 * VAR16 + CONST200 * 
VAR26 * y) + CONST020 * VAR12 + CONST153 * VAR14 * VAR26 - CONST158 * VAR16 * VAR24 + CONST163 * VAR04 * y + CONST219 * VAR22 * y) + g_8 * (CONST000 * x * (CONST002 * VAR22 - CONST128 * VAR15 * VAR26 + CONST158 * VAR17 * VAR24 + CONST188 * VAR13) + CONST006 * VAR07 * (CONST008 * VAR24 - CONST158 * VAR15 + CONST159 * VAR17 * VAR26) + CONST007 * VAR03 + CONST009 * VAR05 * (CONST002 * VAR26 + CONST203 * VAR17)) + g_9 * (CONST173 * VAR23 * x * y + VAR25 * (CONST147 * VAR07 * y + CONST171 * VAR16 * x) + z * (CONST117 * VAR14 * x + CONST170 * VAR05 * y + CONST171 * VAR07 * VAR16)) g_y += CONST000 * g_14 * y * (-CONST068 * VAR06 * VAR26 + CONST068 * VAR08 * VAR24 + CONST208 * VAR04 - CONST208 * VAR22) + g_1 * ( CONST078 * VAR07 * VAR24 + CONST104 * VAR05 * VAR26 - CONST178 * VAR22 * x + CONST221 * VAR03) + g_10 * (CONST000 * y * (CONST031 * VAR08 * VAR24 + CONST031 * VAR22 + CONST194 * VAR04 + CONST194 * VAR06 * VAR26) + CONST006 * VAR16 * (-CONST154 * VAR06 + CONST154 * VAR24) + CONST009 * VAR14 * (CONST033 * VAR26 + CONST192 * VAR08) ) + g_11 * (CONST001 * VAR17 * (-CONST116 * VAR06 * z - CONST143 * VAR08 * VAR25 + CONST167 * VAR23) + CONST004 * VAR15 * (CONST134 * VAR08 * z - CONST180 * VAR25) + CONST013 * VAR21 + CONST183 * VAR06 * VAR25 + CONST201 * VAR04 * z + CONST223 * VAR08 * VAR23) + g_12 * ( CONST000 * y * (CONST097 * VAR06 * VAR26 + CONST097 * VAR08 * VAR24 + CONST199 * VAR04 + CONST199 * VAR22) + CONST006 * VAR16 * (CONST062 * VAR08 * VAR26 - CONST182 * VAR06 - CONST182 * VAR24)) + g_13 * ( CONST001 * VAR17 * (CONST019 * VAR08 * VAR25 + CONST035 * VAR23 + CONST113 * VAR06 * z) + CONST065 * VAR08 * VAR23 - CONST184 * VAR06 * VAR25 + CONST186 * VAR04 * z + CONST224 * VAR21) + g_15 * (- CONST078 * VAR06 * VAR25 + CONST127 * VAR08 * VAR23 + CONST178 * VAR04 * z - CONST221 * VAR21) + g_2 * (CONST137 * VAR05 * y * z + CONST137 * VAR23 * x * y + CONST204 * VAR07 * VAR25 * y) + g_3 * ( CONST001 * VAR17 * (CONST019 * VAR07 * VAR26 + CONST035 * VAR05 + CONST114 * VAR24 * x) + CONST045 * VAR03 + CONST066 * VAR05 * VAR26 + CONST184 * VAR22 * x - CONST186 * VAR07 * VAR24) + g_4 * (-CONST090 * VAR05 * y * z + CONST187 * VAR07 * VAR16 * z + x * (CONST090 * VAR23 * y - CONST187 * VAR16 * VAR25)) + g_5 * (CONST001 * VAR17 * (CONST116 * VAR24 * x + CONST143 * VAR07 * VAR26 - CONST167 * VAR05 ) + CONST004 * VAR15 * (-CONST134 * VAR26 * x + CONST180 * VAR07) + CONST015 * VAR05 * VAR26 + CONST041 * VAR07 * VAR24 + CONST139 * VAR03 - CONST201 * VAR22 * x) + g_6 * (-CONST138 * VAR05 * y * z + VAR07 * (CONST155 * VAR25 * y + CONST226 * VAR16 * z) + x * ( CONST067 * VAR14 * z - CONST138 * VAR23 * y + CONST226 * VAR16 * VAR25) ) + g_7 * (CONST219 * VAR03 + VAR05 * (CONST142 * VAR17 + CONST200 * VAR26) + VAR07 * (CONST152 * VAR15 - CONST152 * VAR17 * VAR26 + CONST200 * VAR24) + x * (CONST085 * VAR13 + CONST140 * VAR17 * VAR24 + CONST152 * VAR15 * VAR26 + CONST219 * VAR22)) + g_8 * ( CONST026 * VAR12 - CONST052 * VAR16 * VAR24 + CONST084 * VAR14 * VAR26 + CONST179 * VAR04 * y + CONST181 * VAR22 * y + VAR06 * (- CONST052 * VAR16 + CONST129 * VAR26 * y) + VAR08 * (CONST083 * VAR14 + CONST128 * VAR24 * y + CONST148 * VAR16 * VAR26)) + g_9 * ( CONST219 * VAR21 + VAR23 * (CONST142 * VAR17 + CONST200 * VAR08) + VAR25 * (CONST073 * VAR08 * VAR17 + CONST152 * VAR15 + CONST200 * VAR06) + z * (CONST086 * VAR13 + CONST091 * VAR04 + CONST142 * VAR06 * VAR17 + CONST152 * VAR08 * VAR15)) g_z += g_0 * (-CONST049 * VAR05 * VAR26 + CONST131 * VAR07 * VAR24 - CONST151 * VAR22 * x + CONST211 * VAR03) + g_1 * y * (-CONST050 * 
VAR23 * x + CONST054 * VAR07 * VAR25 + CONST071 * VAR05 * z) + g_10 * ( CONST057 * VAR04 * z + CONST061 * VAR13 * z + CONST189 * VAR21 + CONST198 * VAR15 * VAR25 + CONST212 * VAR08 * VAR23 + VAR17 * ( CONST093 * VAR08 * VAR25 - CONST107 * VAR23 + CONST162 * VAR06 * z) ) + g_11 * (VAR14 * (-CONST133 * VAR26 + CONST134 * VAR08) + VAR16 * (CONST048 * VAR24 - CONST116 * VAR06 - CONST217 * VAR08 * VAR26) + y * (CONST055 * VAR22 + CONST136 * VAR06 * VAR26 + CONST183 * VAR08 * VAR24 + CONST201 * VAR04)) + g_12 * (CONST011 * VAR21 + CONST092 * VAR04 * z + CONST182 * VAR06 * VAR25 + CONST202 * VAR08 * VAR23 + VAR15 * (CONST098 * VAR08 * z + CONST122 * VAR25) + VAR17 * (- CONST102 * VAR08 * VAR25 + CONST121 * VAR23 + CONST160 * VAR06 * z) ) + g_13 * (VAR16 * (CONST043 * VAR08 * VAR26 + CONST113 * VAR06 + CONST113 * VAR24) + y * (CONST028 * VAR08 * VAR24 + CONST089 * VAR06 * VAR26 + CONST169 * VAR22 + CONST186 * VAR04)) + g_14 * (- CONST149 * VAR08 * VAR23 + CONST191 * VAR04 * z + CONST215 * VAR21 + VAR17 * (-CONST109 * VAR06 * z - CONST149 * VAR23 + CONST193 * VAR08 * VAR25)) + g_15 * y * (CONST178 * VAR04 - CONST178 * VAR22 - CONST185 * VAR06 * VAR26 + CONST190 * VAR08 * VAR24) + g_16 * ( CONST050 * VAR08 * VAR23 - CONST131 * VAR06 * VAR25 + CONST151 * VAR04 * z - CONST211 * VAR21) + g_2 * (CONST096 * VAR03 + VAR05 * ( -CONST149 * VAR17 - CONST177 * VAR26) + VAR07 * (CONST070 * VAR24 + CONST193 * VAR17 * VAR26) + x * (-CONST109 * VAR17 * VAR24 + CONST177 * VAR22)) + g_3 * (VAR16 * (CONST030 * VAR07 * z + CONST197 * VAR25 * x) + y * (CONST077 * VAR23 * x + CONST108 * VAR05 * z + CONST114 * VAR07 * VAR25)) + g_4 * (CONST080 * VAR03 + VAR05 * (-CONST146 * VAR17 + CONST213 * VAR26) + VAR07 * (CONST027 * VAR24 + CONST111 * VAR15) + x * (CONST102 * VAR17 * VAR24 + CONST135 * VAR15 * VAR26 - CONST195 * VAR22)) + g_5 * (-CONST056 * VAR14 * x * z + VAR16 * (CONST082 * VAR07 * z + CONST209 * VAR25 * x) + y * (CONST023 * VAR05 * z + CONST120 * VAR07 * VAR25 - CONST124 * VAR23 * x)) + g_6 * (CONST225 * VAR03 + VAR05 * (- CONST162 * VAR17 + CONST205 * VAR26) + VAR07 * (CONST047 * VAR17 * VAR26 + CONST118 * VAR15 + CONST194 * VAR24) + x * (CONST115 * VAR15 * VAR26 - CONST161 * VAR13 + CONST206 * VAR17 * VAR24 + CONST210 * VAR22)) + g_7 * (CONST173 * VAR05 * y * z + VAR07 * (- CONST052 * VAR16 * z + CONST147 * VAR25 * y) + x * (-CONST052 * VAR16 * VAR25 + CONST117 * VAR14 * z + CONST173 * VAR23 * y)) + g_8 * ( CONST007 * VAR04 * z + CONST007 * VAR21 - CONST052 * VAR15 * VAR25 + CONST130 * VAR17 * VAR23 + CONST157 * VAR13 * z + VAR06 * (CONST024 * VAR25 + CONST129 * VAR17 * z) + VAR08 * (CONST024 * VAR23 - CONST052 * VAR15 * z + CONST052 * VAR17 * VAR25)) + g_9 * (CONST001 * VAR26 * (CONST105 * VAR08 * VAR16 + CONST153 * VAR14 + CONST200 * VAR06 * y) + CONST004 * VAR24 * (CONST063 * VAR16 + CONST200 * VAR08 * y) + CONST025 * VAR12 + CONST063 * VAR06 * VAR16 + CONST091 * VAR04 * y + CONST153 * VAR08 * VAR14 + CONST163 * VAR22 * y) tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask= coord_row_offset + 1 < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask= coord_row_offset + 2 < coord_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_8.py
e6ddf2c9-7f59-4c42-b78f-bc7715065c3e
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BT': BT}, num_warps=num_warps) for BT in [16, 32, 64] for num_warps in [2, 4, 8]], key=['S']) @triton.jit def chunk_global_reversed_cumsum_vector_kernel(s, z, offsets, T: tl. constexpr, H: tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl. constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr): i_s, i_bh = tl.program_id(0), tl.program_id(1) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_b).to(tl.int32), tl.load(offsets + i_b + 1).to(tl.int32) else: bos, eos = i_b * T, i_b * T + T T = eos - bos o_i = tl.arange(0, BT) m_s = tl.where(o_i[:, None] <= o_i[None, :], 1.0, 0.0) b_z = tl.zeros([BS], dtype=tl.float32) for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): if HEAD_FIRST: p_s = tl.make_block_ptr(s + i_bh * T * S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) p_z = tl.make_block_ptr(z + i_bh * T * S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) else: p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H * S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) p_z = tl.make_block_ptr(z + (bos * H + i_h) * S, (T, S), (H * S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0)) b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32) b_c = b_z[None, :] + tl.dot(m_s, b_s, allow_tf32=False) tl.store(p_z, b_c.to(p_z.dtype.element_ty), boundary_check=(0, 1)) if i_t >= 0: b_z += tl.sum(b_s, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
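The kernel computes a chunked reversed (suffix) cumulative sum over time: within each BT block it applies an upper-triangular matmul, and it carries the running suffix total b_z backwards across blocks. An eager reference for the HEAD_FIRST, no-offsets case (a sketch; the [B*H, T, S] layout is read off the pointer arithmetic):

import torch

def reversed_cumsum_ref(s: torch.Tensor) -> torch.Tensor:
    # s: [B*H, T, S] in the HEAD_FIRST layout; z[:, t] = s[:, t:].sum(dim=1)
    return s.flip(dims=(1,)).cumsum(dim=1).flip(dims=(1,))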
f0ab72b8-9381-4758-a492-d2577626d98d
flash_triton.py
MayDomine/Burst-Attention
burst_attn/flash_triton.py
b088c554072935074ea9c643de5ee363be5ab1f6
0
@triton.jit def _bwd_preprocess_do_o_dot(Out, DO, Delta, stride_ob, stride_oh, stride_om, stride_dob, stride_doh, stride_dom, nheads, seqlen_q, seqlen_q_rounded, headdim, BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl. constexpr): start_m = tl.program_id(0) off_hb = tl.program_id(1) off_b = off_hb // nheads off_h = off_hb % nheads offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_d = tl.arange(0, BLOCK_HEADDIM) o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32) do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :], mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32) delta = tl.sum(o * do, axis=1) tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py
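The preprocess step only needs delta[b, h, m] = sum_d O[b, m, h, d] * dO[b, m, h, d], which the main backward kernel reuses inside the softmax gradient. An eager equivalent, with the [batch, seqlen, heads, headdim] layout inferred from the stride arguments and therefore an assumption:

import torch

def bwd_preprocess_delta_ref(out: torch.Tensor, dout: torch.Tensor) -> torch.Tensor:
    # out, dout: [batch, seqlen_q, nheads, headdim] -> delta: [batch, nheads, seqlen_q]
    return (out.float() * dout.float()).sum(dim=-1).transpose(1, 2)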
278158ef-9d13-42d9-9403-ad874e38674a
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit
def store_full_2d(vals, ptr, sz0: tl.constexpr, sz1: tl.constexpr,
        stride0=None, stride1=1):
    """Store 2d block into matrix (defined by ptr)"""
    stride0 = stride0 or sz1
    offs = get_2d_offset(tl.arange(0, sz0), tl.arange(0, sz1), stride0, stride1)
    mask = get_2d_mask(tl.arange(0, sz0), tl.arange(0, sz1), sz0, sz1)
    tl.store(ptr + offs, vals, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
b0e2d0fb-4469-4b96-82eb-ab68f3187e7b
triton_kernel.py
yann-Choho/projet_PPML
notebooks/triton_kernel.py
9274e0561443b01f029ee6e0737f922f71d2da39
0
@triton.autotune(configs=get_autotune_config(), key=['M', 'N', 'K'])
@triton.jit
def ff_llama(a_ptr, w1_ptr, w3_ptr, out_ptr, M, N, K, stride_am, stride_ak,
        stride_w1k, stride_w1n, stride_w3k, stride_w3n, stride_outm,
        stride_outn, USE_FP8: tl.constexpr, EPS: tl.constexpr,
        BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
        BLOCK_SIZE_K: tl.constexpr):
    """
    w1 and w3 are weights (linear layers)
    F.silu(w1(x)) * w3(x)
    """
    pid = tl.program_id(axis=0)
    pid_m = pid // tl.cdiv(N, BLOCK_SIZE_N)
    pid_n = pid % tl.cdiv(N, BLOCK_SIZE_N)
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    w1_ptrs = w1_ptr + (offs_k[:, None] * stride_w1k + offs_bn[None, :] * stride_w1n)
    w3_ptrs = w3_ptr + (offs_k[:, None] * stride_w3k + offs_bn[None, :] * stride_w3n)
    acc1 = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    acc2 = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for _ in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        a = tl.load(a_ptrs)
        b = tl.load(w1_ptrs)
        if USE_FP8:
            b = b.to(tl.float8e5, bitcast=True)
            b = b.to(tl.float32)
            b = b.to(tl.float16)
        acc1 += tl.dot(a, b)
        c = tl.load(w3_ptrs)
        if USE_FP8:
            c = c.to(tl.float8e5, bitcast=True)
            c = c.to(tl.float32)
            c = c.to(tl.float16)
        acc2 += tl.dot(a, c)
        a_ptrs += BLOCK_SIZE_K * stride_ak
        w1_ptrs += BLOCK_SIZE_K * stride_w1k
        w3_ptrs += BLOCK_SIZE_K * stride_w3k
    acc1 = acc1
    acc2 = acc2
    accumulator = acc1 * tl.sigmoid(acc1) * acc2
    offs_outm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_outn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    out_ptrs = out_ptr + (stride_outm * offs_outm[:, None] + stride_outn *
        offs_outn[None, :])
    out_mask = (offs_outm[:, None] < M) & (offs_outn[None, :] < N)
    tl.store(out_ptrs, accumulator, mask=out_mask)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Activation Functions", "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/yann-Choho/projet_PPML/blob/9274e0561443b01f029ee6e0737f922f71d2da39/notebooks/triton_kernel.py
ea665a0c-9ad0-4547-bcd9-8f5d72e5f94b
mlstm_matmul.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_matmul.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit
def mlstm_matmul_kernel(Q, K, V, F, I, M, B, H, NH: tl.constexpr,
        S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr):
    bh_id = tl.program_id(0)
    sb_id = tl.program_id(1)
    batch_id = bh_id // NH
    head_id = bh_id % NH
    batch_offset_q = batch_id * NH * S * D + head_id * S * D
    batch_offset_f = batch_id * NH * S + head_id * S
    offset_q = tl.arange(0, SB) + sb_id * SB
    offset_k = tl.arange(0, SB) + sb_id * SB
    d_range = tl.arange(0, D)
    q_range = batch_offset_q + offset_q[:, None] * D + d_range[None, :]
    q_mask = (offset_q[:, None] < S) & (d_range[None, :] < D)
    q = tl.load(Q + q_range, q_mask)
    f = tl.load(F + batch_offset_f + offset_q, offset_q < S)
    f = tl.cumsum(tl.log(tl.sigmoid(f)))
    c_acc = tl.zeros((SB, D), dtype=tl.float32)
    b_acc = tl.zeros((SB,), dtype=tl.float32)
    m_acc = tl.zeros((SB,), dtype=tl.float32) - float('inf')
    for j in range(sb_id, -1, -1):
        kv_range = batch_offset_q + offset_k[:, None] * D + d_range[None, :]
        kv_mask = (offset_k[:, None] < S) & (d_range[None, :] < D)
        k = tl.load(K + kv_range, kv_mask) / tl.sqrt(tl.full((1,), D, dtype=tl.float32))
        v = tl.load(V + kv_range, kv_mask)
        f_next = tl.load(F + batch_offset_f + offset_k, offset_k < S)
        i = tl.load(I + batch_offset_f + offset_k, offset_k < S)
        f_next = tl.log(tl.sigmoid(f_next))
        if j == sb_id:
            f_next = tl.cumsum(f_next)
            d = f[:, None] - f_next[None, :] + i[None, :]
            mask = offset_q[:, None] >= offset_k[None, :]
            d = tl.where(mask, d, -float('inf'))
        else:
            f += tl.sum(f_next)
            f_next = tl.cumsum(f_next)
            d = f[:, None] - f_next[None, :] + i[None, :]
        m = tl.maximum(tl.max(d, 1), m_acc)
        d = tl.exp(d - m[:, None])
        c = matrix_mult(q, tl.trans(k), SB) * d
        b_acc = b_acc * tl.exp(m_acc - m) + tl.sum(c, 1)
        c = matrix_mult(c, v, SB)
        c_acc = c_acc * tl.exp(m_acc - m)[:, None] + c
        m_acc = m
        offset_k -= SB
    n = tl.maximum(tl.abs(b_acc), tl.exp(-m_acc)) + 1e-06
    h = c_acc / n[:, None]
    tl.store(H + q_range, h, q_mask)
    tl.store(B + batch_offset_f + offset_q, b_acc, offset_q < S)
    tl.store(M + batch_offset_f + offset_q, m_acc, offset_q < S)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py
27fc8e86-3d25-4ff7-b9cb-308bd627ca58
sequential_rnn_scan.py
TushaarGVS/linear-rnn
linear_rnn/triton/sequential_rnn_scan.py
48320589b73154484be7d09a144923a2b9e56b85
0
@triton.jit
def _sequential_rnn_scan_fwd_kernel(x_ptr, a_ptr, h0_ptr, out_ptr,
        stride_x_batch, stride_x_len, stride_x_dim, stride_a_batch,
        stride_a_len, stride_a_dim, stride_h0_batch, stride_h0_dim,
        stride_out_batch, stride_out_dim, seq_len: tl.constexpr,
        BLOCK_SIZE: tl.constexpr):
    pid_batch = tl.program_id(0)
    pid_dim = tl.program_id(1)
    x_ptr += pid_batch * stride_x_batch
    a_ptr += pid_batch * stride_a_batch
    ht_ptr = h0_ptr + pid_batch * stride_h0_batch
    out_ptr += pid_batch * stride_out_batch
    offsets = pid_dim * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x_ptrs = x_ptr + offsets * stride_x_dim
    a_ptrs = a_ptr + offsets * stride_a_dim
    ht_ptrs = ht_ptr + offsets * stride_h0_dim
    out_ptrs = out_ptr + offsets * stride_out_dim
    h_t = tl.load(ht_ptrs).to(tl.float32)
    for t in range(seq_len):
        x_t = tl.load(x_ptrs).to(tl.float32)
        a_t = tl.load(a_ptrs).to(tl.float32)
        h_t = a_t * h_t + x_t
        if t < seq_len - 1:
            x_ptrs += stride_x_len
            a_ptrs += stride_a_len
    tl.store(out_ptrs, h_t.to(out_ptr.dtype.element_ty))
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/sequential_rnn_scan.py
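Usage note: the kernel above runs the linear recurrence h_t = a_t * h_{t-1} + x_t sequentially over the time axis and stores only the final state. Below is a minimal launch sketch with assumed shapes: (batch, seq_len, dim) inputs and a dim that is a multiple of BLOCK_SIZE, since the loads are unmasked.

import torch
import triton

batch, seq_len, dim = 4, 128, 1024
BLOCK_SIZE = 256                                      # power of two; dim must be divisible by it (unmasked loads)
x = torch.randn(batch, seq_len, dim, device='cuda')
a = torch.rand(batch, seq_len, dim, device='cuda')    # per-step decay
h0 = torch.zeros(batch, dim, device='cuda')
out = torch.empty_like(h0)
grid = (batch, triton.cdiv(dim, BLOCK_SIZE))
_sequential_rnn_scan_fwd_kernel[grid](
    x, a, h0, out,
    x.stride(0), x.stride(1), x.stride(2),
    a.stride(0), a.stride(1), a.stride(2),
    h0.stride(0), h0.stride(1),
    out.stride(0), out.stride(1),
    seq_len=seq_len, BLOCK_SIZE=BLOCK_SIZE,
)
# out[b] holds h_T, the hidden state after consuming the whole sequence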
70061971-e534-482c-8c08-07c373e9ef4d
mse.py
l1351868270/implicit_gemm.triton
triton_kernel/mse.py
64eb8548ccf4576883c928f6315be8b24680a455
0
@triton.jit
def _ld_mse_fwd_kernel(loss_ptr, input_ptr, target_ptr, loss_row_stride,
        input_row_stride, target_row_stride, n_rows, n_cols,
        BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    col_offsets = tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < n_cols
    input_ptrs = input_ptr + pid * input_row_stride + col_offsets
    target_ptrs = target_ptr + pid * target_row_stride + col_offsets
    input = tl.load(input_ptrs, mask=mask, other=0.0)
    target = tl.load(target_ptrs, mask=mask, other=0.0)
    loss = tl.sum((input - target) * (input - target)) / n_cols
    loss_ptrs = loss_ptr + pid
    tl.store(loss_ptrs, loss, mask=pid < n_rows)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_kernel/mse.py
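Usage note: the kernel above assigns one program per row and reduces (input - target)^2 across the columns, so it produces a per-row mean-squared error rather than a single scalar. A minimal launch sketch follows; the shapes and names are illustrative assumptions.

import torch
import triton

n_rows, n_cols = 512, 1000
BLOCK_SIZE = triton.next_power_of_2(n_cols)      # one block must span a full row
x = torch.randn(n_rows, n_cols, device='cuda')
y = torch.randn(n_rows, n_cols, device='cuda')
loss = torch.empty(n_rows, device='cuda', dtype=torch.float32)
_ld_mse_fwd_kernel[(n_rows,)](
    loss, x, y,
    loss.stride(0), x.stride(0), y.stride(0),
    n_rows, n_cols, BLOCK_SIZE=BLOCK_SIZE,
)
# loss approximates ((x - y) ** 2).mean(dim=1); call loss.mean() for the usual scalar MSE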
e6c654b9-0a74-43ae-8353-03aef9101762
snake.py
falkaer/multi-scale-music
snake.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.autotune(configs=[triton.Config({}, num_warps=4), triton.Config({},
    num_warps=8), triton.Config({}, num_warps=16)], key=['C'])
@triton.jit
def _snake_fwd_triton(X, OUT, ALPHA, CR, X_stride1, X_stride2, X_stride3,
        OUT_stride1, OUT_stride2, OUT_stride3, A_stride, C_stride, C, N,
        CORR: tl.constexpr, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    batch_idx = pid // C
    channel_idx = pid % C
    block_start = tl.program_id(1) * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    X = X + batch_idx * X_stride1 + channel_idx * X_stride2
    x = tl.load(X + offsets * X_stride3, mask=offsets < N)
    alpha = tl.load(ALPHA + channel_idx * A_stride)
    sinax = tl.sin((alpha * x).to(tl.float32)).to(x.type)
    out = x + sinax * sinax / alpha
    if CORR:
        cr = tl.load(CR + channel_idx * C_stride)
        out = out / cr
    OUT = OUT + batch_idx * OUT_stride1 + channel_idx * OUT_stride2
    tl.store(OUT + offsets * OUT_stride3, out, mask=offsets < N)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/snake.py
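Usage note: the kernel above evaluates the Snake activation x + sin^2(alpha * x) / alpha with a per-channel alpha, optionally dividing by a per-channel correction factor when CORR is set. Below is a minimal launch sketch over an assumed (batch, channels, length) layout; CORR is left off here, so the CR tensor is passed but never read.

import torch
import triton

B, C, N = 2, 64, 16384
BLOCK_SIZE = 1024
x = torch.randn(B, C, N, device='cuda')
alpha = torch.rand(C, device='cuda') + 0.5        # per-channel frequency, kept positive
cr = torch.ones(C, device='cuda')                 # placeholder; only loaded when CORR=True
out = torch.empty_like(x)
grid = (B * C, triton.cdiv(N, BLOCK_SIZE))
_snake_fwd_triton[grid](
    x, out, alpha, cr,
    x.stride(0), x.stride(1), x.stride(2),
    out.stride(0), out.stride(1), out.stride(2),
    alpha.stride(0), cr.stride(0), C, N,
    CORR=False, BLOCK_SIZE=BLOCK_SIZE,
)
# out approximates x + torch.sin(alpha[None, :, None] * x) ** 2 / alpha[None, :, None]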
b05a8075-2ff7-49c6-891a-44aefc01ecb3
softmax.py
sustcsonglin/flash-linear-attention
fla/ops/utils/softmax.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
    num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
    num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
    num_warps=32)], key=['D'])
@triton.jit
def softmax_bwd_kernel(p, dp, ds, D: tl.constexpr, B: tl.constexpr):
    i_n = tl.program_id(0)
    o_d = tl.arange(0, B)
    m_d = o_d < D
    b_p = tl.load(p + i_n * D + o_d, mask=m_d, other=0.0)
    b_dp = tl.load(dp + i_n * D + o_d, mask=m_d, other=0.0)
    b_pp = tl.sum(b_p * b_dp, 0)
    b_ds = b_p * b_dp - b_p * b_pp
    tl.store(ds + i_n * D + o_d, b_ds.to(ds.dtype.element_ty), mask=m_d)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/softmax.py
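Usage note: the kernel above computes the softmax backward pass ds = p * dp - p * sum(p * dp), one row per program. A minimal launch sketch follows; the sizes and tensor names are illustrative, and B must be a power of two covering D.

import torch
import triton

N, D = 4096, 1000                      # rows, feature dimension
B = triton.next_power_of_2(D)
p = torch.softmax(torch.randn(N, D, device='cuda'), dim=-1)
dp = torch.randn(N, D, device='cuda')
ds = torch.empty_like(p)
softmax_bwd_kernel[(N,)](p, dp, ds, D=D, B=B)
# ds approximates p * (dp - (p * dp).sum(-1, keepdim=True))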