uuid | file_name | repo_name | file_path | commit_hash | starcount | input | category | licenses | github_url |
---|---|---|---|---|---|---|---|---|---|
84f5d3ce-86d9-4bd2-8886-537018fb3ecc | linear.py | neuro-ml/kerops | kerops/kernels/linear.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _ReLULinearAdd(input_ptr, weight_ptr, add_ptr, output_ptr,
numel_no_channels, in_channels: tl.constexpr, out_channels: tl.
constexpr, D_block: tl.constexpr, _ILP: tl.constexpr):
pid = tl.program_id(0)
input_ptr += pid * _ILP * in_channels * D_block
add_ptr += pid * _ILP * out_channels * D_block
output_ptr += pid * _ILP * out_channels * D_block
in_channels_offset = tl.arange(0, in_channels)
out_channels_offset = tl.arange(0, out_channels)
d_offset = tl.arange(0, D_block)
in_offset = d_offset[:, None] * in_channels + in_channels_offset[None, :]
out_offset = d_offset[:, None] * out_channels + out_channels_offset[None, :
]
weight_offset = in_channels_offset[:, None
] * out_channels + out_channels_offset[None, :]
weight = tl.load(weight_ptr + weight_offset)
for i in tl.static_range(0, _ILP):
mask = d_offset[:, None] < numel_no_channels - (pid * _ILP + i
) * D_block
x = tl.load(input_ptr + in_offset, mask=mask, other=0)
add = tl.load(add_ptr + out_offset, mask=mask, other=0)
x = tl.maximum(x, 0.0).to(tl.float16)
output = tl.dot(x, weight, out_dtype=tl.float32, allow_tf32=True).to(tl
.float16) + add
tl.store(output_ptr + out_offset, output, mask=mask)
input_ptr += in_channels * D_block
output_ptr += out_channels * D_block
add_ptr += out_channels * D_block
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Activation Functions",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/linear.py |
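
A minimal host-side launch sketch for the `_ReLULinearAdd` kernel above, assuming channels-last fp16 tensors; the wrapper name `relu_linear_add` and the `D_block`/`_ILP` defaults are illustrative assumptions, not the kerops API. Since the kernel uses `tl.arange(0, in_channels)` and `tl.arange(0, out_channels)`, both channel counts are assumed to be powers of two.

```python
import torch
import triton

def relu_linear_add(x, weight, add, D_block=32, _ILP=4):
    # x:      (numel_no_channels, in_channels)  fp16, channels-last
    # weight: (in_channels, out_channels)       fp16
    # add:    (numel_no_channels, out_channels) fp16
    numel_no_channels, in_channels = x.shape
    out_channels = weight.shape[1]
    out = torch.empty_like(add)
    # one program handles _ILP * D_block rows of the flattened input
    grid = (triton.cdiv(numel_no_channels, _ILP * D_block),)
    _ReLULinearAdd[grid](
        x, weight, add, out, numel_no_channels,
        in_channels=in_channels, out_channels=out_channels,
        D_block=D_block, _ILP=_ILP,
    )
    return out
```
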
84cd1cd2-9462-473a-bb44-5b276d47af20 | bgmv_shrink.py | IBM/vllm | vllm/lora/ops/bgmv_shrink.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _bgmv_shrink_kernel(input_ptr, lora_ptr, out_ptr, N, K, lora_indices,
scaling, xm_stride, xk_stride, l0_stride, lora_k_stride, lora_n_stride,
cm_stride, cn_stride, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
SPLIT_K: tl.constexpr):
"""
GroupGEMV; additionally, introducing SPLIT-K can improve performance for
large hidden sizes.
"""
pid_sk = tl.program_id(axis=0)
cur_batch = tl.program_id(axis=1)
lora_index = tl.load(lora_indices + cur_batch)
if lora_index == -1:
return
offset_n = tl.arange(0, BLOCK_N)
offset_k = tl.arange(0, BLOCK_K) + pid_sk * BLOCK_K
a_ptr = input_ptr + cur_batch * xm_stride
b_ptr = lora_ptr + l0_stride * lora_index
accumulator = tl.zeros((BLOCK_N,), dtype=tl.float32)
for k in range(0, K, BLOCK_K * SPLIT_K):
current_k = k + offset_k
current_k_c = tl.max_contiguous(current_k, BLOCK_K)
tiled_a = tl.load(a_ptr + current_k_c, mask=current_k < K, other=0.0)
b_ptr_mask = (offset_n[:, None] < N) & (current_k[None, :] < K)
tiled_b = tl.load(b_ptr + offset_n[:, None] * lora_k_stride +
current_k[None, :] * lora_n_stride, mask=b_ptr_mask, other=0.0)
accumulator += tl.sum(tiled_a * tiled_b, 1)
accumulator *= scaling
offset_cn = tl.arange(0, BLOCK_N)
c_ptr = out_ptr + cur_batch * cm_stride + offset_cn * cn_stride
c_mask = offset_cn < N
if SPLIT_K == 1:
tl.store(c_ptr, accumulator, mask=c_mask)
else:
tl.atomic_add(c_ptr, accumulator, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/bgmv_shrink.py |
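
A hedged PyTorch reference for what `_bgmv_shrink_kernel` computes, useful for testing; the tensor layouts below (`x: (B, K)`, `lora: (num_loras, N, K)`, `indices: (B,)` with `-1` meaning "no LoRA") are read off the pointer arithmetic and are assumptions, not the vLLM API. Note that when `SPLIT_K > 1` the kernel combines partial sums with `tl.atomic_add`, so the real output buffer must be zero-initialized before launch.

```python
import torch

def bgmv_shrink_reference(x, lora, indices, scaling):
    B, K = x.shape
    N = lora.shape[1]
    out = torch.zeros(B, N, dtype=torch.float32, device=x.device)
    for b in range(B):
        idx = int(indices[b])
        if idx == -1:
            continue  # no LoRA adapter assigned to this token
        out[b] = scaling * (lora[idx].float() @ x[b].float())
    return out
```
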
7aa394e6-e22f-4bc1-adb7-e9d132c66ff6 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_gated_delta_rule_fwd_kernel_prepare_dv(q, k, g, do, dv, offsets,
indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
b_A = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_A += tl.dot(b_k, b_q, allow_tf32=False)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,
), (0,))
else:
p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,),
(BT,), (0,))
b_g = tl.load(p_g, boundary_check=(0,))
b_A = b_A * tl.exp(b_g[None, :] - b_g[:, None]) * scale
b_A = tl.where(tl.arange(0, BT)[:, None] <= tl.arange(0, BT)[None, :],
b_A, 0).to(do.dtype.element_ty)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
else:
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dv = tl.dot(b_A, b_do, allow_tf32=False)
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py |
9945be86-5f3d-4188-944a-1ea2180faf6f | RzLinearForward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearForward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64,
'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64,
'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 16,
'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32,
'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4)],
key=['M', 'N', 'K'])
@triton.jit
def rz_linear_forward_kernel_tf32(a_ptr, b_ptr, c_ptr, init_factor, M, N, K,
H, stride_am, stride_ak, stride_cm, stride_cn, R7: int, R6: int, R5:
int, R4: int, R3: int, R2: int, R1: int, R0: int, BLOCK_SIZE_M: tl.
constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE: tl.constexpr):
rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=stride_am,
stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn,
allow_tf32=True, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1,
R0=R0, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearForward.py |
7c4f95a3-8744-4afb-a957-f1b3a27eedcc | gemm_streamk_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32)], key=['M', 'N', 'K'])
@triton.jit
def full_tiles(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl
.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk:
tl.constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr,
stride_cn: tl.constexpr, streamk_tiles, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr):
tile_id = tl.program_id(axis=0) + streamk_tiles
if GROUP_SIZE_M > 0:
pid_m, pid_n = swizzle_tile(tile_id, M, N, K, BLOCK_SIZE_M,
BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE_M)
else:
pid_m, pid_n = linear_tile(tile_id, M, N, K, BLOCK_SIZE_M,
BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE_M)
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(
stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(
stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
b = tl.load(b_block_ptr, boundary_check=(0, 1))
acc += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=(
stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n *
BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
tl.store(c_block_ptr, acc, boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py |
f6db9b7b-2ee0-44bc-9725-7ba9d1cba3c0 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BK', 'BT'])
@triton.jit
def chunk_gla_fwd_A_kernel_intra_sub_intra(q, k, g, A, offsets, indices,
scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.
constexpr, BC: tl.constexpr, BK: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_i, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
i_j = i_i
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if i_t * BT + i_i * BC >= T:
return
o_i = tl.arange(0, BC)
o_k = tl.arange(0, BK)
m_k = o_k < K
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
if HEAD_FIRST:
o_A = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC)
) * BT + i_j * BC
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT +
i_i * BC, 0), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT +
i_i * BC, 0), (BC, BK), (1, 0))
p_k = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
else:
o_A = (bos + i_t * BT + i_i * BC + tl.arange(0, BC)
) * H * BT + i_h * BT + i_j * BC
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
p_k = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0, 1))
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
b_k = tl.load(p_k, mask=m_k, other=0).to(tl.float32)
b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32)
b_A = tl.sum(b_q * b_k[None, :] * tl.exp(b_g - b_gk[None, :]), 1)
b_A = tl.where(o_i >= j, b_A * scale, 0.0)
tl.store(A + o_A + j, b_A, mask=m_A)
p_k += K if HEAD_FIRST else H * K
p_gk += K if HEAD_FIRST else H * K
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
3db707e0-ddf4-4545-8ffd-260cfb5291c6 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N':
32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=2, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_N':
32}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_N':
32}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64, 'BLOCK_SIZE_N':
32}, num_stages=2, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def rz_linear_backward_input_grad_kernel_tf32(a_ptr, b_ptr, c_ptr,
init_factor, M, N, K, H, stride_am, stride_an, stride_cm, stride_ck, R7:
int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr, GROUP_SIZE: tl.constexpr):
rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=
c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=
stride_am, stride_an=stride_an, stride_cm=stride_cm, stride_ck=
stride_ck, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
allow_tf32=True, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=
BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
c3518964-a1ec-4489-8219-cf05cf366207 | fused_softmax.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/fused_softmax.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'threads_per_warp': 32}, num_warps
=32), triton.Config({'threads_per_warp': 32}, num_warps=16), triton.
Config({'threads_per_warp': 32}, num_warps=8), triton.Config({
'threads_per_warp': 32}, num_warps=4), triton.Config({
'threads_per_warp': 16}, num_warps=64), triton.Config({
'threads_per_warp': 16}, num_warps=32), triton.Config({
'threads_per_warp': 16}, num_warps=16), triton.Config({
'threads_per_warp': 16}, num_warps=8), triton.Config({
'threads_per_warp': 16}, num_warps=4)], key=['BLOCK_SIZE_X',
'BLOCK_SIZE_Y'])
@triton.jit
def softmax_kernel(output_ptr, input_ptr, input_row_stride,
output_row_stride, n_cols, BLOCK_SIZE_X: tl.constexpr, BLOCK_SIZE_Y: tl
.constexpr):
row_idx = tl.program_id(0) * BLOCK_SIZE_Y
row_start_ptr = input_ptr + row_idx * input_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE_X)
row_offsets = tl.arange(0, BLOCK_SIZE_Y)
offsets = col_offsets[None, :] + row_offsets[:, None] * input_row_stride
input_ptrs = row_start_ptr + offsets
mask = col_offsets[None, :] < n_cols
row = tl.load(input_ptrs, mask=mask, other=-float('inf'))
row_minus_max = row - tl.max(row, axis=1)[:, None]
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=1)[:, None]
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + offsets
tl.store(output_ptrs, softmax_output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/fused_softmax.py |
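
A minimal launch sketch for the fused softmax kernel above, assuming a 2D row-major input; the wrapper name and the `BLOCK_SIZE_Y` default are illustrative. `BLOCK_SIZE_X` must be a power of two that covers every column, and the row count is assumed to be a multiple of `BLOCK_SIZE_Y` because the kernel masks columns but not rows.

```python
import torch
import triton

def softmax(x, BLOCK_SIZE_Y=4):
    n_rows, n_cols = x.shape
    y = torch.empty_like(x)
    BLOCK_SIZE_X = triton.next_power_of_2(n_cols)
    # each program normalizes BLOCK_SIZE_Y consecutive rows
    grid = (triton.cdiv(n_rows, BLOCK_SIZE_Y),)
    softmax_kernel[grid](y, x, x.stride(0), y.stride(0), n_cols,
                         BLOCK_SIZE_X=BLOCK_SIZE_X, BLOCK_SIZE_Y=BLOCK_SIZE_Y)
    return y
```
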
faeab38a-3414-4adf-b42a-0d11426d5131 | test_triton.py | apd10/RzLinear | python/tests/test_triton.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def triton_tn_kernel(a_ptr, b_ptr, c_ptr, M, N, K, stride_am, stride_ak,
stride_bm, stride_bn, stride_ck, stride_cn, allow_tf32: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr):
"""Kernel for computing the matmul C = A^T x B.
A has shape (M, K), B has shape (M, N) and C has shape (K, N)
"""
pid = tl.program_id(axis=0)
num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_k = pid // num_pid_n
pid_n = pid % num_pid_n
offs_ak = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
offs_am = tl.arange(0, BLOCK_SIZE_M)
a_ptrs = a_ptr + offs_ak[:, None] * stride_am + offs_am[None, :
] * stride_ak
offs_bm = tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
b_ptrs = b_ptr + offs_bm[:, None] * stride_bm + offs_bn[None, :
] * stride_bn
c = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, M // BLOCK_SIZE_M):
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_M * stride_ak
b_ptrs += BLOCK_SIZE_M * stride_bm
offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_ck * offs_ck[:, None] + stride_cn * offs_cn[None, :
]
c_mask = (offs_ck[:, None] < K) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/tests/test_triton.py |
d0b8c6a2-7f55-44e6-9e98-4a7950d8a027 | k_layer_norm.py | cpuhrsch/torchfused | torchfused/triton/k_layer_norm.py | 6c40ed160dcecbe7825f268f7c86bccd359e0ebf | 0 | @triton.jit
def _store(y, Y, stride, N, META):
row = tl.program_id(0)
cols = tl.arange(0, META['BLOCK_SIZE_N'])
y_ptrs = Y + row * stride + cols
tl.store(y_ptrs, y, mask=cols < N)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency"
]
} | [
"BSD"
] | https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py |
3a7b1bd1-f3ca-43bf-a7b4-0c9a9351f847 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_self_substraction_jagged_out_kernel(a_ptr, b_ptr, a_offsets_ptr,
b_offsets_ptr, max_seq_len, BLOCK_SIZE: tl.constexpr):
pid_batch = tl.program_id(0)
pid_index = tl.program_id(1)
a_offset = tl.load(a_offsets_ptr + pid_batch)
a_length = tl.load(a_offsets_ptr + pid_batch + 1) - a_offset
a_length = tl.minimum(a_length, max_seq_len + 1)
if a_length <= 1:
return
N = a_length - 1
if pid_index >= N:
return
a_cur = tl.load(a_ptr + a_offset + pid_index)
offs = tl.arange(0, BLOCK_SIZE)
mask = offs < N
a_row = tl.load(a_ptr + a_offset + offs + 1, mask=mask)
b = a_cur - a_row
b_offset = tl.load(b_offsets_ptr + pid_batch)
tl.store(b_ptr + b_offset + pid_index * N + offs, b, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
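
A hedged PyTorch reference for the jagged self-subtraction kernel above: for each batch with (length-capped) sequence `a[off:off+L]`, it emits the `N x N` matrix of differences `a[i] - a[j+1]` with `N = L - 1`. The reference simply concatenates the per-batch results in order, which is what the caller-provided `b_offsets` are assumed to encode.

```python
import torch

def jagged_self_subtraction_reference(a, a_offsets, max_seq_len):
    parts = []
    for i in range(len(a_offsets) - 1):
        start, end = int(a_offsets[i]), int(a_offsets[i + 1])
        seq = a[start:end][: max_seq_len + 1]
        if seq.numel() <= 1:
            continue
        # (N, N) pairwise differences: row i is a[i] - a[1:], with N = len(seq) - 1
        diff = seq[:-1, None] - seq[None, 1:]
        parts.append(diff.reshape(-1))
    return torch.cat(parts) if parts else a.new_empty(0)
```
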
1785486b-2352-41b5-af96-46f69ff6c60e | mamba_ssm.py | Charlie-XIAO/sparse-vllm | vllm/model_executor/layers/mamba/ops/mamba_ssm.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.jit
def softplus(dt):
dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt)
return dt
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/mamba/ops/mamba_ssm.py |
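
For reference, the threshold above is the standard numerically stable softplus: for `dt > 20`, `log(1 + exp(dt))` equals `dt` to within float precision, so the input is passed through unchanged (which also avoids overflow for very large inputs). A plain PyTorch sketch of the same function:

```python
import torch

def softplus_reference(dt: torch.Tensor) -> torch.Tensor:
    # same cutoff as the Triton kernel above
    return torch.where(dt <= 20.0, torch.log1p(torch.exp(dt)), dt)
```
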
dcc18d2e-fcbe-411e-948a-c0bd4f7e40c3 | softmax_online_v2_spec.py | iclementine/optimize_softmax | softmax_online_v2_spec.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr
):
pid_m = tl.program_id(0)
m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.
element_ty)
z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty)
prev_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, prev_multiple, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs).to(output_ptr.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
for start_n in range(prev_multiple, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
final_m = tl.max(m, 0)
z = tl.sum(tl.exp(m - final_m) * z)
m = final_m
prev_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, prev_multiple, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs).to(output_ptr.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out)
for start_n in range(prev_multiple, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_spec.py |
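
This kernel calls a helper `prev_multiple_of` that is not shown in this row. Below is a minimal sketch of a compatible definition, assuming it returns the largest multiple of `TILE_N` not exceeding `N`; the loops then split each row into an unmasked full-tile region followed by a masked tail.

```python
import triton
import triton.language as tl

@triton.jit
def prev_multiple_of(a, b):
    # largest multiple of b that is <= a, e.g. prev_multiple_of(10, 4) == 8
    return a // b * b
```
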
4d7c81d2-85d8-4a4f-9e51-fda6146986f7 | lightning_attn2.py | OpenNLPLab/lightning-attention | lightning_attn/ops/triton/lightning_attn2.py | d7439519541e966084eeaaf3ffd63eecc216f414 | 0 | @triton.jit
def _bwd_inter_kernel(Q, K, V, S, DO, DQ, DK, DV, b: tl.constexpr, h: tl.
constexpr, n: tl.constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK: tl
.constexpr, NUM_BLOCK: tl.constexpr, CBLOCK: tl.constexpr, NUM_CBLOCK:
tl.constexpr):
off_bh = tl.program_id(0)
off_h = off_bh % h
qk_offset = off_bh * n * d
v_offset = off_bh * n * e
o_offset = off_bh * n * e
S_block_ptr = S + off_h
DQ_block_ptr = DQ + qk_offset + tl.arange(0, CBLOCK)[:, None
] * d + tl.arange(0, d)[None, :]
K_block_ptr = K + qk_offset + tl.arange(0, CBLOCK)[:, None
] * d + tl.arange(0, d)[None, :]
V_trans_block_ptr = V + v_offset + tl.arange(0, CBLOCK)[None, :
] * e + tl.arange(0, e)[:, None]
DO_block_ptr = DO + o_offset + tl.arange(0, CBLOCK)[:, None
] * e + tl.arange(0, e)[None, :]
off_block1 = tl.arange(0, CBLOCK)
off_block2 = tl.arange(0, CBLOCK)
c_array = tl.arange(0, CBLOCK)
s = tl.load(S_block_ptr)
block_decay = tl.exp(-s.to(tl.float32) * BLOCK)
kv_trans = tl.zeros([e, d], dtype=tl.float32)
for i in range(NUM_BLOCK):
for j in range(NUM_CBLOCK):
if i > 0:
q_decay = tl.exp(-s.to(tl.float32) * (j * CBLOCK + c_array[
:, None]))
do = tl.load(DO_block_ptr, mask=off_block1[:, None] < n,
other=0.0).to(tl.float32)
dq_inter = tl.dot(do, kv_trans) * q_decay
dq = dq_inter + tl.load(DQ_block_ptr, mask=off_block1[:,
None] < n, other=0.0)
tl.store(DQ_block_ptr, dq.to(DQ_block_ptr.dtype.element_ty),
mask=off_block1[:, None] < n)
DQ_block_ptr += CBLOCK * d
DO_block_ptr += CBLOCK * e
off_block1 += CBLOCK
kv_trans_current = tl.zeros([e, d], dtype=tl.float32)
for j in range(NUM_CBLOCK):
v_trans = tl.load(V_trans_block_ptr, mask=off_block2[None, :] <
n, other=0.0).to(tl.float32)
k = tl.load(K_block_ptr, mask=off_block2[:, None] < n, other=0.0
).to(tl.float32)
k_decay = tl.exp(-s.to(tl.float32) * (BLOCK - (j * CBLOCK +
c_array[:, None])))
kv_trans_current += tl.dot(v_trans, k * k_decay)
K_block_ptr += CBLOCK * d
V_trans_block_ptr += CBLOCK * e
off_block2 += CBLOCK
kv_trans = block_decay * kv_trans + kv_trans_current
m = NUM_BLOCK * BLOCK
off_block1 = m + tl.arange(0, CBLOCK)
off_block2 = m + tl.arange(0, CBLOCK)
Q_trans_block_ptr = Q + qk_offset + m * d + tl.arange(0, CBLOCK)[None, :
] * d + tl.arange(0, d)[:, None]
K_block_ptr = K + qk_offset + m * d + tl.arange(0, CBLOCK)[:, None
] * d + tl.arange(0, d)[None, :]
V_trans_block_ptr = V + v_offset + m * e + tl.arange(0, CBLOCK)[None, :
] * e + tl.arange(0, e)[:, None]
DK_trans_block_ptr = DK + qk_offset + m * d + tl.arange(0, CBLOCK)[None, :
] * d + tl.arange(0, d)[:, None]
DV_block_ptr = DV + v_offset + m * e + tl.arange(0, CBLOCK)[:, None
] * e + tl.arange(0, e)[None, :]
DO_block_ptr = DO + o_offset + m * e + tl.arange(0, CBLOCK)[:, None
] * e + tl.arange(0, e)[None, :]
dkv = tl.zeros([d, e], dtype=tl.float32)
for i in range(NUM_BLOCK - 1, -1, -1):
for j in range(NUM_CBLOCK - 1, -1, -1):
K_block_ptr -= CBLOCK * d
V_trans_block_ptr -= CBLOCK * e
DK_trans_block_ptr -= CBLOCK * d
DV_block_ptr -= CBLOCK * e
off_block1 -= CBLOCK
if i < NUM_BLOCK - 1:
k = tl.load(K_block_ptr, mask=off_block1[:, None] < n,
other=0.0).to(tl.float32)
v_trans = tl.load(V_trans_block_ptr, mask=off_block1[None,
:] < n, other=0.0).to(tl.float32)
k_decay_trans = tl.exp(-s.to(tl.float32) * (BLOCK - (j *
CBLOCK + c_array[None, :])))
k_decay = tl.exp(-s.to(tl.float32) * (BLOCK - (j * CBLOCK +
c_array[:, None])))
dk_inter_trans = tl.dot(dkv, v_trans) * k_decay_trans
dv_inter = tl.dot(k, dkv) * k_decay
dk_trans = dk_inter_trans + tl.load(DK_trans_block_ptr,
mask=off_block1[None, :] < n, other=0.0)
dv = dv_inter + tl.load(DV_block_ptr, mask=off_block1[:,
None] < n, other=0.0)
tl.store(DK_trans_block_ptr, dk_trans.to(DK_trans_block_ptr
.dtype.element_ty), mask=off_block1[None, :] < n)
tl.store(DV_block_ptr, dv.to(DV_block_ptr.dtype.element_ty),
mask=off_block1[:, None] < n)
dkv_current = tl.zeros([d, e], dtype=tl.float32)
for j in range(NUM_CBLOCK - 1, -1, -1):
DO_block_ptr -= CBLOCK * e
Q_trans_block_ptr -= CBLOCK * d
off_block2 -= CBLOCK
do = tl.load(DO_block_ptr, mask=off_block2[:, None] < n, other=0.0
).to(tl.float32)
q_trans = tl.load(Q_trans_block_ptr, mask=off_block2[None, :] <
n, other=0.0).to(tl.float32)
q_decay_trans = tl.exp(-s.to(tl.float32) * (j * CBLOCK +
c_array[None, :]))
dkv_current += tl.dot(q_trans * q_decay_trans, do)
dkv = block_decay * dkv + dkv_current
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/OpenNLPLab/lightning-attention/blob/d7439519541e966084eeaaf3ffd63eecc216f414/lightning_attn/ops/triton/lightning_attn2.py |
a120d48c-11cf-4fd5-a8e7-b007acd4cd2e | softmax_kernels.py | BobMcDear/attorch | attorch/softmax_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
def softmax_forward_kernel(input_pointer, output_pointer, batch_dim,
feat_dim, input_batch_stride, input_feat_stride, output_batch_stride,
output_feat_stride, log: tl.constexpr, BLOCK_SIZE_BATCH: tl.constexpr,
BLOCK_SIZE_FEAT: tl.constexpr):
"""
Normalizes the input using softmax.
Args:
input_pointer: Pointer to the input to normalize.
The input must be of shape [batch_dim, feat_dim].
output_pointer: Pointer to a container the result is written to.
The container must be of shape [batch_dim, feat_dim].
batch_dim: Batch dimension.
feat_dim: Dimensionality of the features.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
output_batch_stride: Stride necessary to jump one element along the
output container's batch dimension.
output_feat_stride: Stride necessary to jump one element along the
output container's feature dimension.
log: Flag for indicating if the log of softmax should be taken.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_FEAT: Block size across the feature dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
batch_mask = batch_offset < batch_dim
feat_mask = feat_offset < feat_dim
input_pointer += input_batch_stride * batch_offset[:, None
] + input_feat_stride * feat_offset[None, :]
output_pointer += output_batch_stride * batch_offset[:, None
] + output_feat_stride * feat_offset[None, :]
input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[
None, :], other=-float('inf')).to(tl.float32)
input -= tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
tl.store(output_pointer, output, mask=batch_mask[:, None] & feat_mask[
None, :])
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/softmax_kernels.py |
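
A minimal launch sketch for the kernel above, assuming a contiguous 2D input; the wrapper name is illustrative and not the attorch API. `BLOCK_SIZE_BATCH` and `BLOCK_SIZE_FEAT` are supplied by the `@triton.heuristics` decorator, so the caller only sizes the grid from the chosen meta-parameters.

```python
import torch
import triton

def softmax_forward(x, log=False):
    batch_dim, feat_dim = x.shape
    y = torch.empty_like(x)
    # block sizes are filled in by the heuristics; derive the grid from them
    grid = lambda meta: (triton.cdiv(batch_dim, meta['BLOCK_SIZE_BATCH']),)
    softmax_forward_kernel[grid](x, y, batch_dim, feat_dim,
                                 x.stride(0), x.stride(1),
                                 y.stride(0), y.stride(1), log=log)
    return y
```
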
5d6d3565-d6b7-4e2c-9a44-573d03809ba0 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4, 8]], key=['BT'])
@triton.jit
def chunk_gsa_bwd_k_kernel_dA(v, g, do, dA, indices, offsets, scale, B: tl.
constexpr, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl
.constexpr, NG: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl
.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
i_t, i_i, i_j = i_c // (NC * NC), i_c % (NC * NC) // NC, i_c % (NC * NC
) % NC
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
all = B * T
o_v = i_v * BV + tl.arange(0, BV)
m_v = o_v < V
if i_t * BT + i_i * BC > T:
return
if HEAD_FIRST:
p_dA = tl.make_block_ptr(dA + (i_v * B * H + i_bh) * T * BT, (T, BT
), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
else:
p_dA = tl.make_block_ptr(dA + ((i_v * all + bos) * HQ + i_hq) * BT,
(T, BT), (HQ * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC
), (1, 0))
b_dA = tl.zeros([BC, BC], dtype=tl.float32)
if i_i > i_j:
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bg * T * V, (V, T), (1, V), (i_v *
BV, i_t * BT + i_j * BC), (BV, BC), (0, 1))
p_gv = tl.make_block_ptr(g + i_bg * T * V, (V, T), (1, V), (i_v *
BV, i_t * BT + i_j * BC), (BV, BC), (0, 1))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (i_t *
BT + i_i * BC) * V + o_v, BV), BV)
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (V, T), (1, H *
V), (i_v * BV, i_t * BT + i_j * BC), (BV, BC), (0, 1))
p_gv = tl.make_block_ptr(g + (bos * H + i_h) * V, (V, T), (1, H *
V), (i_v * BV, i_t * BT + i_j * BC), (BV, BC), (0, 1))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT +
i_i * BC) * H * V + i_h * V + o_v, BV), BV)
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
b_gn = tl.load(p_gn, mask=m_v, other=0.0)
b_g = tl.load(p_g, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_g - b_gn[None, :]) * scale).to(b_do.dtype)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_vg = (b_v * tl.exp(b_gn[:, None] - b_gv)).to(b_v.dtype)
b_dA = tl.dot(b_do, b_vg)
elif i_i == i_j:
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.max_contiguous(tl.multiple_of(v + i_bg * T * V + (i_t *
BT + i_j * BC) * V + o_v, BV), BV)
p_gv = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (i_t *
BT + i_j * BC) * V + o_v, BV), BV)
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_v = tl.max_contiguous(tl.multiple_of(v + (bos + i_t * BT +
i_j * BC) * H * V + i_h * V + o_v, BV), BV)
p_gv = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT +
i_j * BC) * H * V + i_h * V + o_v, BV), BV)
b_g = tl.load(p_g, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)) * scale
m_v = o_v < V
o_i = tl.arange(0, BC)
m_dA = o_i[:, None] >= o_i[None, :]
for j in range(0, min(BC, T - i_t * BT - i_j * BC)):
b_v = tl.load(p_v, mask=m_v, other=0).to(tl.float32)
b_gv = tl.load(p_gv, mask=m_v, other=0).to(tl.float32)
b_dAj = tl.sum(b_do * b_v[None, :] * tl.exp(b_g - b_gv[None, :]), 1
)
b_dA = tl.where((o_i == j)[None, :], b_dAj[:, None], b_dA)
p_v += (1 if HEAD_FIRST else H) * V
p_gv += (1 if HEAD_FIRST else H) * V
b_dA = tl.where(m_dA, b_dA, 0.0)
tl.store(p_dA, b_dA.to(dA.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
735b3e92-e000-4d97-b785-9e46514b0726 | dropout_rng.py | ROCm/aotriton | tritonsrc/dropout_rng.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def debug_fill_dropout_rng_tensor(R, stride_rz, stride_rh, stride_rm,
stride_rn, seqlen_q, seqlen_k, philox_seed_ptr, philox_offset_base_ptr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
philox_seed = tl.load(philox_seed_ptr)
philox_offset_base = tl.load(philox_offset_base_ptr)
debug_fill_dropout_rng(R, stride_rz, stride_rh, stride_rm, stride_rn,
seqlen_q, seqlen_k, philox_seed, philox_offset_base, BLOCK_M, BLOCK_N)
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/dropout_rng.py |
0f4487e2-762b-4302-8603-dbcd1043dab6 | dequant_kernel.py | drisspg/transformer_nuggets | transformer_nuggets/quant/dequant_kernel.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def dequantize_scalers(quantized_scalers_ptr, quantization_factor_ptr,
scaler_mean_ptr, block_size, scaler_block_size):
"""Dequantizes the quantized scalers to bfloat16
Args:
quantized_scalers_ptr: Pointer to the quantized scalers
quantization_factor_ptr: Pointer to the quantization factor
scaler_mean_ptr: Pointer to the scaler mean
block_size: Size of the block
scaler_block_size: Size of the scaler block
"""
block_idx = tl.program_id(0)
quantization_factor_idx = block_idx // scaler_block_size
scaler_quantization_factor = tl.load(quantization_factor_ptr +
quantization_factor_idx)
block_scaler = tl.load(quantized_scalers_ptr + block_idx)
scaler_mean = tl.load(scaler_mean_ptr)
dequantized_block_scaler = (block_scaler / scaler_quantization_factor).to(
tl.bfloat16)
dequantized_block_scaler = dequantized_block_scaler + scaler_mean
return dequantized_block_scaler
| {
"Data Type": [
"bf16",
"int8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/quant/dequant_kernel.py |
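
A hedged PyTorch reference for the per-block scaler dequantization above; the layout assumptions (one quantization factor per `scaler_block_size` block scalers, a single global `scaler_mean`) are read off the pointer arithmetic, and the function and argument names below are illustrative.

```python
import torch

def dequantize_scalers_reference(quantized_scalers, quantization_factors,
                                 scaler_mean, scaler_block_size):
    # factor i covers block scalers [i * scaler_block_size, (i + 1) * scaler_block_size)
    factors = quantization_factors.repeat_interleave(scaler_block_size)
    factors = factors[: quantized_scalers.numel()]
    out = (quantized_scalers.float() / factors.float()).to(torch.bfloat16)
    return out + scaler_mean.to(torch.bfloat16)
```
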
f6685121-0e40-477c-b66b-4993da0134fc | chunk_h_parallel.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h_parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32,
64, 128] for num_warps in [2, 4, 8, 16] for num_stages in [2, 3]], key=
['BT', 'USE_G', 'USE_GK', 'USE_GV'])
@triton.jit
def chunk_bwd_kernel_dh_reduction(g, gk, gv, dh, doq0, dh0, offsets,
chunk_offsets, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV:
tl.constexpr, NG: tl.constexpr, USE_G: tl.constexpr, USE_GK: tl.
constexpr, USE_GV: tl.constexpr, STORE_INITIAL_STATE_GRADIENT: tl.
constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_nh // NG
i_n, i_hq = i_nh // HQ, i_nh % HQ
i_h = i_hq // NG
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
for i_t in range(NT - 1, -1, -1):
if HEAD_FIRST:
p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dh, boundary_check=(0, 1)).to(tl.float32)
if i_t < NT - 1:
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(
0, 1))
last_idx = min(i_t * BT + BT, T) - 1
if USE_G:
if HEAD_FIRST:
b_g_last = tl.load(g + i_bg * T + last_idx)
else:
b_g_last = tl.load(g + (bos + last_idx) * H + i_h)
b_dh *= tl.exp(b_g_last)
if USE_GK:
if HEAD_FIRST:
p_gk_last = gk + (i_bg * T + last_idx
) * K + i_k * BK + tl.arange(0, BK)
else:
p_gk_last = gk + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) <
K, other=0.0)
b_dh *= tl.exp(b_gk_last)[:, None]
if USE_GV:
if HEAD_FIRST:
p_gv_last = gv + (i_bg * T + last_idx
) * V + i_v * BV + tl.arange(0, BV)
else:
p_gv_last = gv + (bos + last_idx
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV)
b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) <
V, other=0.0)
b_dh *= tl.exp(b_gv_last)[None, :]
if STORE_INITIAL_STATE_GRADIENT:
p_doq0 = tl.make_block_ptr(doq0 + i_nh * K * V, (K, V), (V, 1), (
i_k * BK, i_v * BV), (BK, BV), (1, 0))
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_doq0, boundary_check=(0, 1)).to(tl.float32)
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_parallel.py |
06154394-82a9-4d57-b6ed-f27fc9bbaca5 | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def _bwd_preprocess(Out, DO, NDO, L, Delta, M_Q, stride_oz, stride_oh,
stride_om, stride_od, stride_doz, stride_doh, stride_dom, stride_dod,
stride_ndoz, stride_ndoh, stride_ndom, stride_ndod, stride_lz,
stride_lh, stride_lm, stride_dz, stride_dh, stride_dm, BLOCK_DMODEL: tl
.constexpr, BLOCK_M: tl.constexpr, EVEN_M: tl.constexpr):
off_h = tl.program_id(1)
off_z = tl.program_id(2)
offs_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_DMODEL)
Out = Out + off_z * stride_oz + off_h * stride_oh + offs_m[:, None
] * stride_om + offs_d[None, :] * stride_od
DO = DO + off_z * stride_doz + off_h * stride_doh + offs_m[:, None
] * stride_dom + offs_d[None, :] * stride_dod
NDO = NDO + off_z * stride_ndoz + off_h * stride_ndoh + offs_m[:, None
] * stride_ndom + offs_d[None, :] * stride_ndod
L = L + off_z * stride_lz + off_h * stride_lh + offs_m * stride_lm
Delta = Delta + off_z * stride_dz + off_h * stride_dh + offs_m * stride_dm
if EVEN_M:
o = tl.load(Out).to(tl.float32)
do = tl.load(DO).to(tl.float32)
denom = tl.load(L).to(tl.float32)
else:
o = tl.load(Out, mask=offs_m[:, None] < M_Q).to(tl.float32)
do = tl.load(DO, mask=offs_m[:, None] < M_Q).to(tl.float32)
denom = tl.load(L, mask=offs_m < M_Q).to(tl.float32)
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
if EVEN_M:
tl.store(NDO, do)
tl.store(Delta, delta)
else:
tl.store(NDO, do, mask=offs_m[:, None] < M_Q)
tl.store(Delta, delta, mask=offs_m < M_Q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
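
A hedged PyTorch reference for the preprocessing step above, ignoring the batch/head striding: it rescales `dO` by the stored softmax normalizers `L` and computes the per-row term `Delta = sum(O * dO / L)` consumed by the rest of the attention backward pass.

```python
import torch

def bwd_preprocess_reference(out, do, denom):
    # out, do: (..., M, D); denom: (..., M)
    ndo = do / denom[..., None]
    delta = (out * ndo).sum(dim=-1)
    return ndo, delta
```
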
e5746bd4-5ff0-429c-abaa-ebb35c0d4af0 | fused_norm_gate.py | sustcsonglin/flash-linear-attention | fla/modules/fused_norm_gate.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_RESIDUAL', 'STORE_RESIDUAL_OUT',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_fwd_kernel(X, O, Y, W, B, RESIDUAL, RESIDUAL_OUT, Mean, Rstd,
stride_x_row, stride_y_row, stride_res_row, stride_res_out_row, N, eps,
IS_RMS_NORM: tl.constexpr, BLOCK_N: tl.constexpr, HAS_RESIDUAL: tl.
constexpr, STORE_RESIDUAL_OUT: tl.constexpr, HAS_WEIGHT: tl.constexpr,
HAS_BIAS: tl.constexpr):
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_y_row
O += row * stride_x_row
if HAS_RESIDUAL:
RESIDUAL += row * stride_res_row
if STORE_RESIDUAL_OUT:
RESIDUAL_OUT += row * stride_res_out_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
if HAS_RESIDUAL:
residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl
.float32)
x += residual
if STORE_RESIDUAL_OUT:
tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
if not IS_RMS_NORM:
mean = tl.sum(x, axis=0) / N
tl.store(Mean + row, mean)
xbar = tl.where(cols < N, x - mean, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
else:
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
tl.store(Rstd + row, rstd)
mask = cols < N
if HAS_WEIGHT:
w = tl.load(W + cols, mask=mask).to(tl.float32)
if HAS_BIAS:
b = tl.load(B + cols, mask=mask).to(tl.float32)
x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
y = x_hat * w if HAS_WEIGHT else x_hat
if HAS_BIAS:
y = y + b
o = tl.load(O + cols, mask=cols < N, other=0.0).to(tl.float32)
y = y * o * tl.sigmoid(o)
tl.store(Y + cols, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_norm_gate.py |
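
A hedged PyTorch reference for the forward pass above, restricted to the plain layer-norm path with weight and bias and no residual: the normalized activation is gated by `o * sigmoid(o)`, a SiLU-style gate. The RMS-norm and residual branches of the kernel are omitted from this sketch.

```python
import torch
import torch.nn.functional as F

def layer_norm_swish_gate_reference(x, o, weight, bias, eps=1e-5):
    x_hat = F.layer_norm(x.float(), x.shape[-1:], weight.float(),
                         bias.float(), eps)
    return x_hat * o.float() * torch.sigmoid(o.float())
```
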
1867f9a5-2505-4dea-85b4-7eec68d369de | nll_loss_kernels.py | BobMcDear/attorch | attorch/nll_loss_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=warps_kernel_configs(), key=['batch_dim',
'spatial_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
'BLOCK_SIZE_SPATIAL': lambda args: next_power_of_2(args['spatial_dim'])})
@triton.jit
def nll_loss_backward_kernel(output_grad_pointer, target_pointer,
weight_pointer, sum_weights_pointer, input_grad_pointer, batch_dim,
spatial_dim, output_grad_batch_stride, output_grad_feat_stride,
target_batch_stride, target_spatial_stride, input_grad_batch_stride,
input_grad_feat_stride, input_grad_spatial_stride, reduction: tl.
constexpr, weighted: tl.constexpr, BLOCK_SIZE_BATCH: tl.constexpr,
BLOCK_SIZE_SPATIAL: tl.constexpr):
"""
Calculates the input gradient of negative log likelihood loss.
Args:
output_grad_pointer: Pointer to the loss's output gradients.
The output gradients must be of shape [batch_dim, spatial_dim]
if reduction is 'none', and otherwise [batch_dim/BLOCK_SIZE_BATCH].
target_pointer: Pointer to the target.
The target must be of shape [batch_dim, spatial_dim].
weight_pointer: Pointer to an optional class weight vector.
The class weight vector, if provided, must be of shape [feat_dim].
sum_weights_pointer: Pointer to the sum of the class weights if the classes were weighed.
The sum of weights must be a scalar.
input_grad_pointer: Pointer to a container the input's gradients are written to.
The container must be of shape [batch_dim, feat_dim, spatial_dim] and zeroed.
batch_dim: Batch dimension.
spatial_dim: Spatial dimension.
output_grad_batch_stride: Stride necessary to jump one element along the
output gradients' batch dimension.
output_grad_feat_stride: Stride necessary to jump one element along the
output gradients' feature dimension.
target_batch_stride: Stride necessary to jump one element along the
target's batch dimension.
target_spatial_stride: Stride necessary to jump one element along the
target's spatial dimension.
input_grad_batch_stride: Stride necessary to jump one element along the
input gradient container's batch dimension.
input_grad_feat_stride: Stride necessary to jump one element along the
input gradient container's feature dimension.
input_grad_spatial_stride: Stride necessary to jump one element along the
input gradient container's spatial dimension.
reduction: Reduction strategy for the output whose gradient is calculated.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
weighted: Flag for weighing each class.
BLOCK_SIZE_BATCH: Block size across the batch dimension.
BLOCK_SIZE_SPATIAL: Block size across the spatial dimension.
"""
batch_pid = tl.program_id(axis=0)
batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH
)
spatial_offset = tl.arange(0, BLOCK_SIZE_SPATIAL)
batch_mask = batch_offset < batch_dim
spatial_mask = spatial_offset < spatial_dim
output_grad_mask = None
if reduction == 'none':
output_grad_pointer += output_grad_batch_stride * batch_offset[:, None
] + output_grad_feat_stride * spatial_offset[None, :]
output_grad_mask = batch_mask[:, None] & spatial_mask[None, :]
output_grad = tl.load(output_grad_pointer, mask=output_grad_mask).to(tl
.float32)
input_grad = -output_grad
target_pointer += target_batch_stride * batch_offset[:, None
] + target_spatial_stride * spatial_offset[None, :]
target = tl.load(target_pointer, mask=batch_mask[:, None] &
spatial_mask[None, :])
if weighted:
weight = tl.load(weight_pointer + target, mask=batch_mask[:, None] &
spatial_mask[None, :]).to(tl.float32)
input_grad *= weight
if reduction == 'mean':
input_grad /= tl.load(sum_weights_pointer)
elif reduction == 'mean':
input_grad /= batch_dim * spatial_dim
input_grad_pointer += (input_grad_feat_stride * target +
input_grad_batch_stride * batch_offset[:, None] +
input_grad_spatial_stride * spatial_offset[None, :])
tl.store(input_grad_pointer, input_grad, mask=batch_mask[:, None] &
spatial_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/nll_loss_kernels.py |
13add3b4-2ac9-4b8e-9860-79b4012d9a64 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def rz_linear_backward_weight_grad_core(a_ptr, b_ptr, c_ptr, init_factor, M,
N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7: int, R6: int,
R5: int, R4: int, R3: int, R2: int, R1: int, R0: int, allow_tf32: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr):
"""Kernel for computing the matmul C = A^T x B.
A has shape (M, K), B has shape (M, N) and C has shape (K, N)
"""
pid = tl.program_id(axis=0)
num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE * num_pid_n
group_id = pid // num_pid_in_group
first_pid_k = group_id * GROUP_SIZE
group_size_k = min(num_pid_k - first_pid_k, GROUP_SIZE)
pid_k = first_pid_k + pid % group_size_k
pid_n = pid % num_pid_in_group // group_size_k
offs_ak = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
offs_am = tl.arange(0, BLOCK_SIZE_M)
a_ptrs = a_ptr + offs_ak[:, None] * stride_am + offs_am[None, :
] * stride_ak
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_bm = tl.arange(0, BLOCK_SIZE_M)
b_ptrs = b_ptr + offs_bm[:, None] * stride_bm + offs_bn[None, :
] * stride_bn
offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
a_zero = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_M), dtype=tl.float32)
b_zero = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
c = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
for m in range(0, tl.cdiv(M, BLOCK_SIZE_M)):
offs_m = m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
a_mask = (offs_ck[:, None] < K) & (offs_m[None, :] < M)
b_mask = (offs_m[:, None] < M) & (offs_cn[None, :] < N)
a = tl.load(a_ptrs, mask=a_mask, other=a_zero)
b = tl.load(b_ptrs, mask=b_mask, other=b_zero)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_M * stride_ak
b_ptrs += BLOCK_SIZE_M * stride_bm
c_offset = c_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
c_ptrs = c_offset + ((pid_k * R3 + pid_n * R2 + R1) % R0 * R0 + (pid_k *
R7 + pid_n * R5 + R4) % R0) % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
tl.atomic_add(c_ptrs, c * init_factor)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
74718d3f-c518-4407-8ec5-e202d737b762 | chunk_fuse.py | elephantmipt/rebased_minimal | flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py | e7b945509972fab9f9c1c7be431abf7d6bf62c95 | 0 | @triton.jit
def chunk_abc_fwd_kernel_s(q, k, s, rk, ck, pk, s_qk_h, s_qk_t, s_qk_d,
s_sk_h, s_sk_t, s_sk_m, T, scale, BT: tl.constexpr, BK: tl.constexpr,
BM: tl.constexpr, DK: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr):
i_m, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
n_bh = tl.num_programs(2)
p_q = tl.make_block_ptr(q + i_bh * s_qk_h, (T, DK), (s_qk_t, s_qk_d), (
0, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_qk_h, (DK, T), (s_qk_d, s_qk_t), (
i_k * BK, 0), (BK, BT), (0, 1))
p_s = tl.make_block_ptr(s + (i_k * n_bh + i_bh) * s_sk_h, (T, DM), (
s_sk_t, s_sk_m), (0, i_m * BM), (BT, BM), (1, 0))
p_rk = tl.make_block_ptr(rk + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,),
(i_m * BM,), (BM,), (0,))
p_ck = tl.make_block_ptr(ck + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
(0, i_m * BM), (BT, BM), (1, 0))
p_pk = tl.make_block_ptr(pk + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m),
(0, i_m * BM), (BT, BM), (1, 0))
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_hk = tl.zeros([BK, BM], dtype=tl.float32)
for _ in range(NT):
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_rk = tl.load(p_rk, boundary_check=(0,))
b_ck = tl.load(p_ck, boundary_check=(0, 1))
b_pk = tl.load(p_pk, boundary_check=(0, 1))
b_inter = tl.dot(b_q, b_hk.to(b_q.dtype), allow_tf32=False) * b_rk[
None, :]
b_intra = tl.dot(tl.where(m_s, tl.dot(b_q, b_k, allow_tf32=False),
0).to(b_q.dtype), b_ck, allow_tf32=False)
b_s = (b_inter + b_intra) * b_pk
b_hk = b_hk * b_rk[None, :] + tl.dot(b_k, b_ck, allow_tf32=False)
tl.store(p_s, b_s.to(p_s.dtype.element_ty), boundary_check=(0, 1))
p_q = tl.advance(p_q, (BT, 0))
p_k = tl.advance(p_k, (0, BT))
p_s = tl.advance(p_s, (BT, 0))
p_rk = tl.advance(p_rk, (DM,))
p_ck = tl.advance(p_ck, (BT, 0))
p_pk = tl.advance(p_pk, (BT, 0))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py |
0d55d92f-0a9b-4b89-9f91-5898bd40e024 | geglu.py | Kitsunetic/kitsu | kitsu/nn/geglu.py | 826967a493c89753ac2cf1e28b52b79998fc9076 | 0 | @triton.jit
def geglu_forward_kernel(x_ptr, y_ptr, N, C, C2, BLK_C: tl.constexpr, BLK_N:
tl.constexpr):
pid_n = tl.program_id(0)
pid_c = tl.program_id(1)
offs_n = pid_n * BLK_N + tl.arange(0, BLK_N)
offs_c = pid_c * BLK_C + tl.arange(0, BLK_C)
mask_n = offs_n < N
mask_c = offs_c < C2
mask = mask_n[:, None] & mask_c[None, :]
x_ptrs = x_ptr + offs_n[:, None] * C + offs_c[None, :]
x1 = tl.load(x_ptrs, mask=mask)
x2 = tl.load(x_ptrs + C2, mask=mask)
y = x1 * gelu_forward(x2)
y_ptrs = y_ptr + offs_n[:, None] * C2 + offs_c[None, :]
tl.store(y_ptrs, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/geglu.py |
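A PyTorch reference of the same GEGLU split, as a minimal sketch assuming the repo's gelu_forward helper is the tanh-approximate GELU and that C is even.
import torch
import torch.nn.functional as F

def geglu_ref(x):
    # x: (N, C); the kernel gates x1 = x[:, :C//2] with gelu(x2) where x2 = x[:, C//2:]
    x1, x2 = x.chunk(2, dim=-1)
    return x1 * F.gelu(x2, approximate='tanh')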
eca0e629-398b-4f13-a441-1f45f6e88d23 | stats.py | neuro-ml/kerops | kerops/kernels/stats.py | 735336775e825d5cb06b8850d25423661b12d1ac | 0 | @triton.jit
def _Stats_cl3d_backward_impl(X_ptr, Meangrad_ptr, Sqmeangrad_ptr,
Outputgrad_ptr, numel_no_channels, num_channels: tl.constexpr,
block_other: tl.constexpr):
pid = tl.program_id(0)
X_ptr += pid * num_channels * block_other
Outputgrad_ptr += pid * num_channels * block_other
channels_offset = tl.arange(0, num_channels)
other_offset = tl.arange(0, block_other)
offset = other_offset[:, None] * num_channels + channels_offset[None, :]
mask = other_offset[:, None] < numel_no_channels - pid * block_other
x = tl.load(X_ptr + offset, mask=mask, other=0.0).to(tl.float32)
mean_grad = tl.load(Meangrad_ptr + channels_offset)
sqmean_grad = tl.load(Sqmeangrad_ptr + channels_offset)
grad = (2 * x * sqmean_grad / numel_no_channels + mean_grad /
numel_no_channels)
tl.store(Outputgrad_ptr + offset, grad, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Normalization"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/stats.py |
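The arithmetic of this backward pass as a one-line PyTorch reference (a sketch with the same broadcasting as the kernel):
def stats_cl3d_backward_ref(x, mean_grad, sqmean_grad, numel_no_channels):
    # x: (numel_no_channels, num_channels); mean_grad, sqmean_grad: (num_channels,)
    return (2.0 * x * sqmean_grad + mean_grad) / numel_no_channels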
10275dc6-1d3c-4562-9324-771303bd1166 | sb_varlen_bwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_bwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.autotune(configs=get_configs(), key=['token_size', 'head_size'],
reset_to_zero=['DK_ptr', 'DV_ptr'])
@triton.jit
def _backward(DO_ptr, stride_doh: tl.constexpr, stride_dom, stride_dod: tl.
constexpr, DR_ptr, stride_drh, stride_drm, A_ptr, stride_ah, stride_am,
Q_ptr, stride_qh: tl.constexpr, stride_qm, stride_qd: tl.constexpr,
K_ptr, stride_kh: tl.constexpr, stride_kn, stride_kd: tl.constexpr,
V_ptr, stride_vh: tl.constexpr, stride_vn, stride_vd: tl.constexpr,
DQ_ptr, stride_dqh: tl.constexpr, stride_dqm, stride_dqd: tl.constexpr,
DK_ptr, stride_dkh: tl.constexpr, stride_dkn, stride_dkd: tl.constexpr,
DV_ptr, stride_dvh: tl.constexpr, stride_dvn, stride_dvd: tl.constexpr,
KV_Lock_ptr, KV_Count_ptr, stride_kvs, stride_kvh, CSL_ptr, logit_scale,
batch_size, token_size, head_size: tl.constexpr, num_heads: tl.
constexpr, BLOCK_D: tl.constexpr, BLOCK_CSL: tl.constexpr, NO_D_MASK:
tl.constexpr, NO_M_MASK: tl.constexpr, NO_N_MASK: tl.constexpr,
ALLOW_TF32: tl.constexpr, inv_log2: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr, acc_dtype: tl.constexpr=tl.float32,
attend_current: tl.constexpr=False):
tl.static_assert(BLOCK_M % BLOCK_N == 0)
seq_id = tl.program_id(0)
fhead_id = tl.program_id(1)
seq_alloc_prog_id = tl.program_id(2)
num_seq_alloc_progs = tl.num_programs(2)
if seq_id == 0:
seq_start_offset = 0
else:
seq_start_offset = tl.load(CSL_ptr + seq_id - 1).to(tl.int32)
seq_end_offset = tl.load(CSL_ptr + seq_id).to(tl.int32)
seq_length = seq_end_offset - seq_start_offset
num_seq_blocks = tl.cdiv(seq_length, BLOCK_M)
seq_a_block_id = num_seq_blocks - seq_alloc_prog_id - 1
seq_b_block_id = seq_alloc_prog_id - (num_seq_alloc_progs - num_seq_blocks)
if seq_a_block_id >= 0 or seq_b_block_id >= 0:
qk_scale = inv_log2 * logit_scale
M_range = tl.arange(0, BLOCK_M)
N_range = tl.arange(0, BLOCK_N)
D_range = tl.arange(0, BLOCK_D)
D_mask = D_range < head_size
cm = tl.where(N_range[:, None] >= N_range[None, :], 1.0, 0.0).to(Q_ptr
.type.element_ty)
if seq_a_block_id >= 0:
head_id = fhead_id * 2
DO_head_seq_ptr = (DO_ptr + stride_doh * head_id + stride_dom *
seq_start_offset)
DR_head_seq_ptr = (DR_ptr + stride_drh * head_id + stride_drm *
seq_start_offset)
A_head_seq_ptr = (A_ptr + stride_ah * head_id + stride_am *
seq_start_offset)
Q_head_seq_ptr = (Q_ptr + stride_qh * head_id + stride_qm *
seq_start_offset)
K_head_seq_ptr = (K_ptr + stride_kh * head_id + stride_kn *
seq_start_offset)
V_head_seq_ptr = (V_ptr + stride_vh * head_id + stride_vn *
seq_start_offset)
DQ_head_seq_ptr = (DQ_ptr + stride_dqh * head_id + stride_dqm *
seq_start_offset)
DK_head_seq_ptr = (DK_ptr + stride_dkh * head_id + stride_dkn *
seq_start_offset)
DV_head_seq_ptr = (DV_ptr + stride_dvh * head_id + stride_dvn *
seq_start_offset)
KV_Lock_head_seq_ptr = (KV_Lock_ptr + stride_kvs * seq_id +
stride_kvh * head_id)
KV_Count_head_seq_ptr = (KV_Count_ptr + stride_kvs * seq_id +
stride_kvh * head_id)
_backward_one_row(seq_a_block_id, seq_length, qk_scale, M_range,
N_range, D_range, D_mask, cm, DO_head_seq_ptr, stride_dom,
stride_dod, DR_head_seq_ptr, stride_drm, A_head_seq_ptr,
stride_am, Q_head_seq_ptr, stride_qm, stride_qd,
K_head_seq_ptr, stride_kn, stride_kd, V_head_seq_ptr,
stride_vn, stride_vd, DQ_head_seq_ptr, stride_dqm,
stride_dqd, DK_head_seq_ptr, stride_dkn, stride_dkd,
DV_head_seq_ptr, stride_dvn, stride_dvd,
KV_Lock_head_seq_ptr, KV_Count_head_seq_ptr, logit_scale,
BLOCK_D, NO_D_MASK, NO_M_MASK, ALLOW_TF32, BLOCK_M, BLOCK_N,
acc_dtype, attend_current=attend_current)
if seq_b_block_id >= 0 and fhead_id * 2 + 1 < num_heads:
head_id = fhead_id * 2 + 1
DO_head_seq_ptr = (DO_ptr + stride_doh * head_id + stride_dom *
seq_start_offset)
DR_head_seq_ptr = (DR_ptr + stride_drh * head_id + stride_drm *
seq_start_offset)
A_head_seq_ptr = (A_ptr + stride_ah * head_id + stride_am *
seq_start_offset)
Q_head_seq_ptr = (Q_ptr + stride_qh * head_id + stride_qm *
seq_start_offset)
K_head_seq_ptr = (K_ptr + stride_kh * head_id + stride_kn *
seq_start_offset)
V_head_seq_ptr = (V_ptr + stride_vh * head_id + stride_vn *
seq_start_offset)
DQ_head_seq_ptr = (DQ_ptr + stride_dqh * head_id + stride_dqm *
seq_start_offset)
DK_head_seq_ptr = (DK_ptr + stride_dkh * head_id + stride_dkn *
seq_start_offset)
DV_head_seq_ptr = (DV_ptr + stride_dvh * head_id + stride_dvn *
seq_start_offset)
KV_Lock_head_seq_ptr = (KV_Lock_ptr + stride_kvs * seq_id +
stride_kvh * head_id)
KV_Count_head_seq_ptr = (KV_Count_ptr + stride_kvs * seq_id +
stride_kvh * head_id)
_backward_one_row(seq_b_block_id, seq_length, qk_scale, M_range,
N_range, D_range, D_mask, cm, DO_head_seq_ptr, stride_dom,
stride_dod, DR_head_seq_ptr, stride_drm, A_head_seq_ptr,
stride_am, Q_head_seq_ptr, stride_qm, stride_qd,
K_head_seq_ptr, stride_kn, stride_kd, V_head_seq_ptr,
stride_vn, stride_vd, DQ_head_seq_ptr, stride_dqm,
stride_dqd, DK_head_seq_ptr, stride_dkn, stride_dkd,
DV_head_seq_ptr, stride_dvn, stride_dvd,
KV_Lock_head_seq_ptr, KV_Count_head_seq_ptr, logit_scale,
BLOCK_D, NO_D_MASK, NO_M_MASK, ALLOW_TF32, BLOCK_M, BLOCK_N,
acc_dtype, attend_current=attend_current)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_bwd.py |
07efee37-5fc4-487f-907f-99cc5df92ca4 | chunk_fuse.py | elephantmipt/rebased_minimal | flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py | e7b945509972fab9f9c1c7be431abf7d6bf62c95 | 0 | @triton.jit
def chunk_abc_bwd_kernel_rcum(s, r, c, o, s_sk_h, s_sk_t, s_sk_m, T, BT: tl
.constexpr, BM: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr):
i_m, i_bh = tl.program_id(0), tl.program_id(1)
p_s = tl.make_block_ptr(s + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (
(NT - 1) * BT, i_m * BM), (BT, BM), (1, 0))
p_c = tl.make_block_ptr(c + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (
(NT - 1) * BT, i_m * BM), (BT, BM), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (
(NT - 1) * BT, i_m * BM), (BT, BM), (1, 0))
o_i = tl.arange(0, BT)
m_t = tl.where(o_i[:, None] <= o_i[None, :], 1.0, 0.0)
b_z = tl.zeros([BM], dtype=tl.float32)
for i in range(NT):
p_r = tl.make_block_ptr(r + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m
,), ((NT - i) % NT * DM + i_m * BM,), (BM,), (0,))
b_s = tl.load(p_s, boundary_check=(0, 1))
b_r = tl.load(p_r, boundary_check=(0,))
b_c = tl.load(p_c, boundary_check=(0, 1))
b_o = tl.load(p_o, boundary_check=(0, 1))
b_z = b_z * b_r
b_o -= b_c * (b_z[None, :] + tl.dot(m_t.to(b_s.dtype), b_s,
allow_tf32=False))
b_z += tl.sum(b_s, 0)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
p_s = tl.advance(p_s, (-BT, 0))
p_c = tl.advance(p_c, (-BT, 0))
p_o = tl.advance(p_o, (-BT, 0))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py |
67e4c93c-b727-4fc8-a953-27e3c96d1539 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4)], key=['BT', 'K', 'V'])
@triton.jit
def chunk_transform_qk_fwd_kernel(q, k, v, beta, o, A, q_new, k_new,
A_local, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, T: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr, BT: tl.constexpr, OUTPUT_ATTENTIONS: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t *
BT, 0), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t *
BT, 0), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, 0), (BT, BV), (1, 0))
b_q = (tl.load(p_q, boundary_check=(0, 1)) * scale).to(p_q.dtype.element_ty
)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
p_T = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT,
0), (BT, BT), (1, 0))
b_T = tl.load(p_T, boundary_check=(0, 1))
o_i = tl.arange(0, BT)
m_t = o_i[:, None] >= o_i[None, :]
b_qk = tl.where(m_t, tl.dot(b_q, tl.trans(b_k), allow_tf32=False), 0).to(
b_q.dtype)
m_t = o_i[:, None] > o_i[None, :]
b_kk = tl.where(m_t, tl.dot(b_k, tl.trans(b_k), allow_tf32=False), 0).to(
b_k.dtype)
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,), (
BT,), (0,))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype)
b_qkT = tl.dot(b_qk, b_T, allow_tf32=False).to(b_k.dtype)
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(A_local + i_bh * T * BT, (T, BT), (BT, 1),
(i_t * BT, 0), (BT, BT), (1, 0))
tl.store(p_a, b_qkT.to(p_a.dtype.element_ty), boundary_check=(0, 1))
b_kkT = tl.dot(b_kk, b_T, allow_tf32=False).to(b_k.dtype)
p_o = tl.make_block_ptr(o + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t *
BT, 0), (BT, BV), (1, 0))
tl.store(p_o, tl.dot(b_qkT, b_v).to(p_o.dtype.element_ty),
boundary_check=(0, 1))
p_q_new = tl.make_block_ptr(q_new + i_bh * s_k_h, (T, K), (s_k_t, s_k_d
), (i_t * BT, 0), (BT, BK), (1, 0))
tl.store(p_q_new, (b_q - tl.dot(b_qkT, b_k_beta, allow_tf32=False)).to(
p_q_new.dtype.element_ty), boundary_check=(0, 1))
p_k_new = tl.make_block_ptr(k_new + i_bh * s_k_h, (T, K), (s_k_t, s_k_d
), (i_t * BT, 0), (BT, BK), (1, 0))
tl.store(p_k_new, (b_k - tl.dot(tl.trans(b_kkT), b_k_beta, allow_tf32=
False)).to(p_k_new.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/parallel.py |
f9c0d792-543f-40b9-b98d-def82c9bbbb9 | sb_varlen_fwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_fwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.autotune(configs=get_configs(), key=['head_size'])
@triton.jit
def _forward(Q_ptr, stride_qh: tl.constexpr, stride_qm, stride_qd: tl.
constexpr, K_ptr, stride_kh: tl.constexpr, stride_kn, stride_kd: tl.
constexpr, V_ptr, stride_vh: tl.constexpr, stride_vn, stride_vd: tl.
constexpr, O_ptr, stride_oh: tl.constexpr, stride_om, stride_od: tl.
constexpr, R_ptr, stride_rh, stride_rm: tl.constexpr, A_ptr, stride_ah,
stride_am: tl.constexpr, W_ptr, stride_wh, stride_wm, stride_wn,
CSL_ptr, logit_scale: tl.constexpr, batch_size, token_size, head_size:
tl.constexpr, num_heads: tl.constexpr, BLOCK_D: tl.constexpr, NO_D_MASK:
tl.constexpr, NO_M_MASK: tl.constexpr, NO_N_MASK: tl.constexpr,
ALLOW_TF32: tl.constexpr, inv_log2: tl.constexpr, BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr, no_grad: tl.constexpr=False, acc_dtype: tl.
constexpr=tl.float32, return_attention: tl.constexpr=False, use_cumsum:
tl.constexpr=False, attend_current: tl.constexpr=False):
tl.static_assert(BLOCK_M % BLOCK_N == 0)
seq_id = tl.program_id(0)
fhead_id = tl.program_id(1)
seq_alloc_prog_id = tl.program_id(2)
num_seq_alloc_progs = tl.num_programs(2)
if seq_id == 0:
seq_start_offset = 0
else:
seq_start_offset = tl.load(CSL_ptr + seq_id - 1).to(tl.int32)
seq_end_offset = tl.load(CSL_ptr + seq_id).to(tl.int32)
seq_length = seq_end_offset - seq_start_offset
num_seq_blocks = tl.cdiv(seq_length, BLOCK_M)
seq_a_block_id = num_seq_blocks - seq_alloc_prog_id - 1
seq_b_block_id = seq_alloc_prog_id - (num_seq_alloc_progs - num_seq_blocks)
if seq_a_block_id >= 0 or seq_b_block_id >= 0:
qk_scale = inv_log2 * logit_scale
M_range = tl.arange(0, BLOCK_M)
N_range = tl.arange(0, BLOCK_N)
D_range = tl.arange(0, BLOCK_D)
D_mask = D_range < head_size
if not use_cumsum:
cm = tl.where(N_range[:, None] >= N_range[None, :], 1.0, 0.0).to(
Q_ptr.type.element_ty)
else:
cm = None
if seq_a_block_id >= 0:
head_id = fhead_id * 2
Q_head_seq_ptr = (Q_ptr + stride_qh * head_id + stride_qm *
seq_start_offset)
K_head_seq_ptr = (K_ptr + stride_kh * head_id + stride_kn *
seq_start_offset)
V_head_seq_ptr = (V_ptr + stride_vh * head_id + stride_vn *
seq_start_offset)
O_head_seq_ptr = (O_ptr + stride_oh * head_id + stride_om *
seq_start_offset)
R_head_seq_ptr = (R_ptr + stride_rh * head_id + stride_rm *
seq_start_offset)
A_head_seq_ptr = (A_ptr + stride_ah * head_id + stride_am *
seq_start_offset)
W_head_seq_ptr = (W_ptr + stride_wh * head_id + stride_am *
seq_start_offset)
_forward_one_row(seq_a_block_id, seq_length, qk_scale, M_range,
N_range, D_range, D_mask, cm, Q_head_seq_ptr, stride_qm,
stride_qd, K_head_seq_ptr, stride_kn, stride_kd,
V_head_seq_ptr, stride_vn, stride_vd, O_head_seq_ptr,
stride_om, stride_od, R_head_seq_ptr, stride_rm,
A_head_seq_ptr, stride_am, W_head_seq_ptr, stride_wm,
stride_wn, BLOCK_D, NO_D_MASK, NO_M_MASK, NO_N_MASK,
ALLOW_TF32, BLOCK_M, BLOCK_N, no_grad, acc_dtype,
return_attention, use_cumsum=use_cumsum, attend_current=
attend_current)
if seq_b_block_id >= 0 and fhead_id * 2 + 1 < num_heads:
head_id = fhead_id * 2 + 1
Q_head_seq_ptr = (Q_ptr + stride_qh * head_id + stride_qm *
seq_start_offset)
K_head_seq_ptr = (K_ptr + stride_kh * head_id + stride_kn *
seq_start_offset)
V_head_seq_ptr = (V_ptr + stride_vh * head_id + stride_vn *
seq_start_offset)
O_head_seq_ptr = (O_ptr + stride_oh * head_id + stride_om *
seq_start_offset)
R_head_seq_ptr = (R_ptr + stride_rh * head_id + stride_rm *
seq_start_offset)
A_head_seq_ptr = (A_ptr + stride_ah * head_id + stride_am *
seq_start_offset)
W_head_seq_ptr = (W_ptr + stride_wh * head_id + stride_am *
seq_start_offset)
_forward_one_row(seq_b_block_id, seq_length, qk_scale, M_range,
N_range, D_range, D_mask, cm, Q_head_seq_ptr, stride_qm,
stride_qd, K_head_seq_ptr, stride_kn, stride_kd,
V_head_seq_ptr, stride_vn, stride_vd, O_head_seq_ptr,
stride_om, stride_od, R_head_seq_ptr, stride_rm,
A_head_seq_ptr, stride_am, W_head_seq_ptr, stride_wm,
stride_wn, BLOCK_D, NO_D_MASK, NO_M_MASK, NO_N_MASK,
ALLOW_TF32, BLOCK_M, BLOCK_N, no_grad, acc_dtype,
return_attention, use_cumsum=use_cumsum, attend_current=
attend_current)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py |
fdbc848d-92d3-499e-a813-fa9e22d5993a | l2norm.py | sustcsonglin/flash-linear-attention | fla/modules/l2norm.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16, 32]], key=['N'])
@triton.jit
def l2norm_bwd_kernel(X, DY, DX, stride_x_row, N, eps, BLOCK_N: tl.constexpr):
row = tl.program_id(0)
X += row * stride_x_row
DX += row * stride_x_row
DY += row * stride_x_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
var = tl.sum(x * x)
rstd = 1 / tl.sqrt(var + eps)
mask = cols < N
dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32)
dy = tl.where(cols < N, dy, 0.0)
dx = dy * rstd - tl.sum(dy * x) * (1 / (var + eps)) * rstd * x
tl.store(DX + cols, dx, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/l2norm.py |
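A hypothetical host-side launcher, not the library's own wrapper, illustrating the grid and BLOCK_N choice: one program per row, the whole row in a single block, num_warps picked by the autotuner.
import torch
import triton

def l2norm_bwd(x, dy, eps=1e-6):
    x, dy = x.contiguous(), dy.contiguous()
    M, N = x.shape
    dx = torch.empty_like(x)
    BLOCK_N = triton.next_power_of_2(N)  # whole row fits in one block
    l2norm_bwd_kernel[(M,)](x, dy, dx, x.stride(0), N, eps, BLOCK_N=BLOCK_N)
    return dx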
e3c53215-9ddd-4e38-942f-2dfc120fb36c | shape.py | 2niuhe/triton_utils | src/triton_utils/shape.py | 6184906ac3b86dac3ccbfac128ec393ccecde5df | 0 | @triton.jit
def load_full_1d(ptr, sz: tl.constexpr, stride=1):
"""Load 1d block [0,...,sz-1]"""
offs = get_1d_offest(sz)
mask = get_1d_mask(offs, sz)
return tl.load(ptr + offs, mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py |
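get_1d_offest and get_1d_mask are helpers defined elsewhere in this repo; a plausible minimal definition (an assumption, not the repo's verbatim code) is:
import triton
import triton.language as tl

@triton.jit
def get_1d_offest(sz):
    # offsets 0..sz-1 for a 1d block
    return tl.arange(0, sz)

@triton.jit
def get_1d_mask(offs, sz):
    # mask out-of-range lanes
    return offs < sz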
78bedff7-31b2-401f-b494-a038e6470b98 | glu_kernels.py | BobMcDear/attorch | attorch/glu_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=element_wise_kernel_configs(), key=['size'])
@triton.jit
def glu_forward_kernel(input1_pointer, input2_pointer, output_pointer, size,
param, act_func: tl.constexpr, BLOCK_SIZE: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1_pointer: Pointer to the first half of the input to gate.
The first half must be contiguous and contain size elements.
input2_pointer: Pointer to the second half of the input to gate.
The second half must be contiguous and contain size elements.
output_pointer: Pointer to a container the result is written to.
The container must be contiguous and contain size elements.
size: Number of elements in each half of the input.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardswish', 'selu', 'mish', and 'leaky_relu'.
BLOCK_SIZE: Block size.
"""
pid = tl.program_id(axis=0)
offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offset < size
input1 = tl.load(input1_pointer + offset, mask=mask)
input2 = tl.load(input2_pointer + offset, mask=mask)
output = input1 * apply_act_func(input2, None, None, None, param,
act_func, False)
tl.store(output_pointer + offset, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/glu_kernels.py |
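For the default act_func='sigmoid' this reduces to the standard GLU; a minimal PyTorch sketch (other act_func options only swap the gate activation):
import torch

def glu_ref(input1, input2):
    # input1 gated by sigmoid(input2), elementwise
    return input1 * torch.sigmoid(input2)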
87d0765e-cecf-486a-95fb-7d23c0b6a3f0 | bucketed_argmax.py | graphcore-research/pytorch-approx-topk | approx_topk/experimental/bucketed_argmax.py | 339eea971f17bf810e2eec746a06b9c93dc4cce0 | 0 | @triton.jit
def _topk_triton_kernel__parallel_bk(xs_ptr, values_out_ptr,
indices_out_ptr, b: int, k: int, n: int, n_chunk: int, xs_stride: int,
BLOCK_SIZE: tl.constexpr, PAD_VALUE: tl.constexpr, INTERLEAVED: tl.
constexpr):
pidx = tl.program_id(axis=0).to(tl.int64)
bk_idx = BLOCK_SIZE * pidx + tl.arange(0, BLOCK_SIZE)
b_idx, k_idx = bk_idx // k, bk_idx % k
xs_ptr += b_idx * xs_stride
if INTERLEAVED:
k_stride, i_stride = 1, k
else:
k_stride, i_stride = n_chunk, 1
mask = (b_idx < b) & (k_idx * k_stride < n)
max_value = tl.load(xs_ptr + k_idx * k_stride, mask=mask, other=PAD_VALUE)
max_i = tl.zeros((BLOCK_SIZE,), tl.int64)
for i in tl.range(1, n_chunk):
mask = (b_idx < b) & (k_idx * k_stride + i * i_stride < n)
block = tl.load(xs_ptr + k_idx * k_stride + i * i_stride, mask=mask,
other=PAD_VALUE)
mask &= max_value < block
max_value = tl.where(mask, block, max_value)
max_i = tl.where(mask, i, max_i)
max_index = k_idx * k_stride + max_i * i_stride
tl.store(values_out_ptr + b_idx * k + k_idx, max_value, mask=b_idx < b)
tl.store(indices_out_ptr + b_idx * k + k_idx, max_index, mask=b_idx < b)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Top-K Selection"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/graphcore-research/pytorch-approx-topk/blob/339eea971f17bf810e2eec746a06b9c93dc4cce0/approx_topk/experimental/bucketed_argmax.py |
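A PyTorch reference of the non-interleaved bucketing this kernel parallelizes, as a sketch assuming PAD_VALUE is -inf: each row is split into k contiguous buckets of n_chunk elements and the max of every bucket is returned as an approximate top-k.
import torch

def bucketed_top1_ref(xs, k):
    b, n = xs.shape
    n_chunk = -(-n // k)  # ceil(n / k)
    pad = xs.new_full((b, k * n_chunk - n), float('-inf'))
    buckets = torch.cat([xs, pad], dim=1).view(b, k, n_chunk)
    values, within = buckets.max(dim=2)
    indices = within + torch.arange(k, device=xs.device) * n_chunk
    return values, indices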
c66dd256-37a5-4e14-9622-5ef0661c9e4c | decay.py | huyz2023/2by4-pretrain | sparse/decay.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def masked_add_kernel(grad_ptr, p_ptr, p_mask_ptr, n_elements, alpha,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
p_mask = tl.load(p_mask_ptr + offsets, mask=mask).to(tl.int1)
mask = mask & ~p_mask
p = tl.load(p_ptr + offsets, mask=mask)
grad = tl.load(grad_ptr + offsets, mask=mask)
grad += p * alpha
tl.store(grad_ptr + offsets, grad, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/decay.py |
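A hypothetical launcher sketch showing how the grid is sized; grad is updated in place wherever p_mask is 0.
import triton

def masked_add_(grad, p, p_mask, alpha, BLOCK_SIZE=1024):
    n_elements = grad.numel()
    grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
    masked_add_kernel[grid](grad, p, p_mask, n_elements, alpha, BLOCK_SIZE=BLOCK_SIZE)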
64df009f-59cf-495e-b5c2-0456d955bdc3 | linear.py | ai-compiler-study/triton-kernels | triton_kernels/kernels/linear.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def gelu(x):
c = 0.7978845608028654
x_cubed = x * x * x
tanh_arg = c * (x + 0.044715 * x_cubed)
tanh_result = tanh(tanh_arg)
return 0.5 * x * (1 + tanh_result)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/linear.py |
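The same tanh-approximate GELU as a scalar Python reference; the kernel's constant 0.7978845608028654 is sqrt(2/pi).
import math

def gelu_ref(x):
    c = math.sqrt(2.0 / math.pi)  # 0.7978845608028654
    return 0.5 * x * (1.0 + math.tanh(c * (x + 0.044715 * x ** 3)))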
e786250d-5d3e-44b4-8ec6-304092edb0a2 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/linear_attn/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_linear_attn_bwd_kernel(q, k, v, do, dq, dk, dv, h0, s_k_h,
s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B, H, T, K: tl.constexpr, V:
tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, CHECK: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(h0 + i_bh * K * V, (V, K), (1, V), (i_v *
BV, i_k * BK), (BV, BK), (0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
for i in range(0, tl.cdiv(T, BT)):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
i * BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (
i_v * BV, i * BT), (BV, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(i * BT, i_v * BV), (BT, BV), (1, 0))
p_dq = tl.make_block_ptr(dq + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (i * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_ds = tl.dot(b_do, b_v, allow_tf32=False)
b_ds = tl.where(m_s, b_ds, 0)
b_dq = tl.dot(b_ds.to(b_k.dtype), b_k, allow_tf32=False)
if CHECK and i == 0:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_v, b_k, allow_tf32=False)
else:
b_dq += tl.dot(b_do, b_h.to(b_do.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_v, b_k, allow_tf32=False)
b_dq *= scale
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
b_h = None
tl.debug_barrier()
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
m_s = o_i[:, None] <= o_i[None, :]
for i in range(1, tl.cdiv(T, BT) + 1):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, T - i * BT), (BK, BT), (0, 1))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (
T - i * BT, i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (
T - i * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d),
(T - i * BT, i_v * BV), (BT, BV), (1, 0))
p_dk = tl.make_block_ptr(dk + (i_bh + i_v * B * H) * s_k_h, (T, K),
(s_k_t, s_k_d), (T - i * BT, i_k * BK), (BT, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + i_k * B * H) * s_v_h, (T, V),
(s_v_t, s_v_d), (T - i * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_s = tl.dot(b_k, b_q, allow_tf32=False)
b_s = tl.where(m_s, b_s, 0).to(b_q.dtype)
b_ds = tl.dot(b_v, tl.trans(b_do), allow_tf32=False)
b_ds = tl.where(m_s, b_ds, 0).to(b_q.dtype)
b_dk = tl.dot(b_ds, tl.trans(b_q), allow_tf32=False)
b_dv = tl.dot(b_s, b_do, allow_tf32=False)
if CHECK and i == 1:
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False)
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False)
b_dh += tl.dot(b_q, b_do, allow_tf32=False)
else:
b_dk += tl.dot(b_v, tl.trans(b_dh).to(b_v.dtype), allow_tf32=False)
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False)
b_dh += tl.dot(b_q, b_do, allow_tf32=False)
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"bf16",
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/fused_chunk.py |
36a84d74-811a-47c3-b0f6-b6e9716f4768 | partition_k.py | pytorch-labs/tritonbench | tritonbench/operators/gemm/partition_k.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.jit
def _reduce(c_ptr, c_buf_ptr, M, N, stride_cm, stride_cn, stride_cb_m,
stride_cb_n, stride_cb_k, PK: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr):
pid = tl.program_id(0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_m = pid // num_pid_m
pid_n = pid % num_pid_n
offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, PK)
c_buf_ptrs = c_buf_ptr + (offs_m[:, None, None] * stride_cb_m + offs_n[
None, :, None] * stride_cb_n + offs_k[None, None, :] * stride_cb_k)
c_buf = tl.load(c_buf_ptrs)
reduced_k = tl.sum(c_buf, axis=2)
c_ptrs = c_ptr + (offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
)
tl.store(c_ptrs, reduced_k)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/gemm/partition_k.py |
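What this kernel reduces, as a PyTorch one-liner sketch: collapse the split-K partial-product buffer over its last axis.
def reduce_ref(c_buf):
    # c_buf: (M, N, PK) partial results from the split-K GEMM; returns (M, N)
    return c_buf.sum(dim=2)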
d0677865-01e5-4cad-8f2a-27ff2419f1c7 | _semi_structured_conversions.py | huyz2023/2by4-pretrain | sparse/_semi_structured_conversions.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _MVUE24_approx(x0, x1, x2, x3, random0, random1):
eps = 1.19209e-07
a0 = tl.abs(x0) + eps
a1 = tl.abs(x1) + eps
a2 = tl.abs(x2) + eps
a3 = tl.abs(x3) + eps
sum = a0 + a1 + a2 + a3
t0 = a0 / sum
t1 = a1 / sum
t2 = a2 / sum
t3 = a3 / sum
s0 = sum - a0
s1 = sum - a1
s2 = sum - a2
s3 = sum - a3
k0 = t0 / s0
k1 = t1 / s1
k2 = t2 / s2
k3 = t3 / s3
k = k0 + k1 + k2 + k3
p0 = t0 + a0 * (k - k0)
p1 = t1 + a1 * (k - k1)
p2 = t2 + a2 * (k - k2)
p3 = t3 + a3 * (k - k3)
m0 = random0 <= t0
m1 = (random0 <= t0 + t1) & ~m0
m2 = (random0 <= t0 + t1 + t2) & ~m1 & ~m0
m3 = ~m2 & ~m1 & ~m0
d_a0 = ~m0 * a0
d_a1 = ~m1 * a1
d_a2 = ~m2 * a2
d_a3 = ~m3 * a3
d_sum = d_a0 + d_a1 + d_a2 + d_a3
t = random1 * d_sum
d_m0 = t <= d_a0
d_m1 = (t <= d_a0 + d_a1) & ~d_m0
d_m2 = (t <= d_a0 + d_a1 + d_a2) & ~d_m1 & ~d_m0
d_m3 = ~d_m2 & ~d_m1 & ~d_m0
m0, m1, m2, m3 = m0 | d_m0, m1 | d_m1, m2 | d_m2, m3 | d_m3
a0 = x0 / p0
a1 = x1 / p1
a2 = x2 / p2
a3 = x3 / p3
return a0, a1, a2, a3, m0, m1, m2, m3
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/_semi_structured_conversions.py |
ccc604f8-18e9-45f5-b245-d3cc28061db6 | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8]], key=['BT', 'BK', 'BV'])
@triton.jit
def fwd_recompute_w_u_kernel(k, v, beta, w, u, A, offsets, indices, T: tl.
constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.
constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BT,), (0,))
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BT, BT), (1, 0))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BT,), (0,))
p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BT, BT), (1, 0))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_A = tl.load(p_A, boundary_check=(0, 1))
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_u = tl.make_block_ptr(u + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_vb = (b_v * b_beta[:, None]).to(b_v.dtype)
b_u = tl.dot(b_A.to(b_vb.dtype), b_vb, allow_tf32=False)
tl.store(p_u, b_u.to(p_u.dtype.element_ty), boundary_check=(0, 1))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_w = tl.make_block_ptr(w + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_w = tl.dot(b_A.to(b_kb.dtype), b_kb, allow_tf32=False)
tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"bf16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
326e20e4-4217-4f1d-919d-85d0480d4692 | test_triton.py | pytorch/xla | test/test_triton.py | 40efdb7b6571ce92797b5ba42619b79c1b147b3e | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m,
qk_scale, BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.
constexpr, STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.
constexpr, N_CTX: tl.constexpr, fp8_v: tl.constexpr):
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk = qk * qk_scale + tl.where(mask, 0, -1000000.0)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
acc = acc * alpha[:, None]
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch/xla/blob/40efdb7b6571ce92797b5ba42619b79c1b147b3e/test/test_triton.py |
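A PyTorch sketch of the online-softmax update each BLOCK_N iteration performs, omitting the causal mask and the base-2 trick (qk_scale folds in 1/log(2) so exp2 replaces exp in the kernel).
import torch

def online_softmax_step(acc, l_i, m_i, scores, v):
    # scores: (BLOCK_M, BLOCK_N), already scaled; v: (BLOCK_N, HEAD_DIM)
    m_new = torch.maximum(m_i, scores.max(dim=1).values)   # running row maximum
    p = torch.exp(scores - m_new[:, None])                 # shifted exponentials
    alpha = torch.exp(m_i - m_new)                         # rescale old state
    l_new = l_i * alpha + p.sum(dim=1)                     # running normalizer
    acc_new = acc * alpha[:, None] + p @ v                 # running weighted values
    return acc_new, l_new, m_new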
49f3c6b9-1c5a-473c-9559-2dfc1c27c665 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_gla_fwd_kernel(q, k, v, g, o, h0, ht, s_k_h, s_k_t, s_k_d,
s_v_h, s_v_t, s_v_d, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr,
K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr,
BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE:
tl.constexpr, CHECK: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (0,
i_k * BK), (BT, BK), (1, 0))
p_db = g + i_bh * s_k_h + (BT - 1) * s_k_t + i_k * BK + tl.arange(0, BK)
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k *
BK, 0), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (0,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (i_bh + i_k * B * H) * s_v_h, (T, V), (
s_v_t, s_v_d), (0, i_v * BV), (BT, BV), (1, 0))
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h += tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
mask = i_k * BK + tl.arange(0, BK) < K
for i in range(0, tl.cdiv(T, BT)):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
d_b = tl.load(p_db, mask=mask, other=0).to(tl.float32)
if CHECK and i == 0:
b_o = tl.dot(b_q.to(b_v.dtype), b_h.to(b_v.dtype), allow_tf32=False
)
b_h = b_h * tl.exp(d_b)[:, None] + tl.dot(b_k.to(b_v.dtype),
b_v, allow_tf32=False)
else:
b_o = tl.dot(b_q.to(b_v.dtype), b_h.to(b_v.dtype), allow_tf32=False
)
b_h = b_h * tl.exp(d_b)[:, None] + tl.dot(b_k.to(b_v.dtype),
b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
p_q = tl.advance(p_q, (BT, 0))
p_k = tl.advance(p_k, (0, BT))
p_v = tl.advance(p_v, (BT, 0))
p_o = tl.advance(p_o, (BT, 0))
p_db += BT * K
if STORE_FINAL_STATE:
p_final = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_final, b_h.to(p_final.dtype.element_ty), boundary_check=
(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py |
9e707ccc-8655-4571-8d88-7225cd973c4c | triton_kernel.py | yann-Choho/projet_PPML | notebooks/triton_kernel.py | 9274e0561443b01f029ee6e0737f922f71d2da39 | 0 | @triton.autotune(configs=get_autotune_config(), key=['M', 'N', 'K'])
@triton.jit
def ff_llama_with_rmsnorm(a_ptr, w1_ptr, w3_ptr, out_ptr, rms_w_ptr, M, N,
K, stride_am, stride_ak, stride_w1k, stride_w1n, stride_w3k, stride_w3n,
stride_outm, stride_outn, stride_rms_w, USE_FP8: tl.constexpr, EPS: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr):
"""
A Triton kernel for performing feed-forward operations in a LLaMA model.
    This kernel fuses RMS normalization into the feed-forward transformation
    F.silu(w1(x_norm)) * w3(x_norm), where w1 and w3 are linear-layer weights
    and x_norm is the RMS-normalized input.
Args:
a_ptr: Pointer to the input tensor.
w1_ptr: Pointer to the first weight tensor.
w3_ptr: Pointer to the third weight tensor.
out_ptr: Pointer to the output tensor.
rms_w_ptr: Pointer to the RMS normalization weights.
M: Number of rows in the input tensor.
N: Number of columns in the weight tensors.
K: Number of columns in the input tensor.
stride_am: Stride of the input tensor in the first dimension.
stride_ak: Stride of the input tensor in the second dimension.
stride_w1k: Stride of the first weight tensor in the first dimension.
stride_w1n: Stride of the first weight tensor in the second dimension.
stride_w3k: Stride of the third weight tensor in the first dimension.
stride_w3n: Stride of the third weight tensor in the second dimension.
stride_outm: Stride of the output tensor in the first dimension.
stride_outn: Stride of the output tensor in the second dimension.
stride_rms_w: Stride of the RMS normalization weights.
USE_FP8: Constant specifying whether to use FP8 precision.
EPS: Constant epsilon value for numerical stability in RMS normalization.
BLOCK_SIZE_M: Constant block size in the M dimension.
BLOCK_SIZE_N: Constant block size in the N dimension.
BLOCK_SIZE_K: Constant block size in the K dimension.
"""
pid = tl.program_id(axis=0)
pid_m = pid // tl.cdiv(N, BLOCK_SIZE_N)
pid_n = pid % tl.cdiv(N, BLOCK_SIZE_N)
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
w1_ptrs = w1_ptr + (offs_k[:, None] * stride_w1k + offs_bn[None, :] *
stride_w1n)
w3_ptrs = w3_ptr + (offs_k[:, None] * stride_w3k + offs_bn[None, :] *
stride_w3n)
acc1 = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
acc2 = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
rms_w_ptrs = rms_w_ptr + tl.arange(0, BLOCK_SIZE_K)[None, :] * stride_rms_w
a_sum = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
for _ in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs)
a_sum += tl.math.pow(a.to(tl.float32), 2)
rms_w = tl.load(rms_w_ptrs)
if USE_FP8:
rms_w = rms_w.to(tl.float8e5, bitcast=True)
rms_w = rms_w.to(tl.float16)
a = a * rms_w
b = tl.load(w1_ptrs)
if USE_FP8:
b = b.to(tl.float8e5, bitcast=True)
b = b.to(tl.float32)
b = b.to(tl.float16)
acc1 += tl.dot(a, b)
c = tl.load(w3_ptrs)
if USE_FP8:
c = c.to(tl.float8e5, bitcast=True)
c = c.to(tl.float32)
c = c.to(tl.float16)
acc2 += tl.dot(a, c)
a_ptrs += BLOCK_SIZE_K * stride_ak
w1_ptrs += BLOCK_SIZE_K * stride_w1k
w3_ptrs += BLOCK_SIZE_K * stride_w3k
rms_w_ptrs += BLOCK_SIZE_K * stride_rms_w
a_mean = tl.sum(a_sum, axis=1) / K + EPS
a_norm = tl.math.rsqrt(a_mean)
acc1 = acc1 * a_norm[:, None]
acc2 = acc2 * a_norm[:, None]
accumulator = acc1 * tl.sigmoid(acc1) * acc2
offs_outm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_outn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
out_ptrs = out_ptr + (stride_outm * offs_outm[:, None] + stride_outn *
offs_outn[None, :])
out_mask = (offs_outm[:, None] < M) & (offs_outn[None, :] < N)
tl.store(out_ptrs, accumulator, mask=out_mask)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Normalization",
"Activation Functions"
],
"Memory Access Pattern": [
"Coalesced",
"Strided Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/yann-Choho/projet_PPML/blob/9274e0561443b01f029ee6e0737f922f71d2da39/notebooks/triton_kernel.py |
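A dense PyTorch sketch of the fused computation; the per-row rsqrt factor commutes with the matmuls, which is why the kernel applies it only after accumulation.
import torch

def ff_llama_ref(x, w1, w3, rms_w, eps):
    # x: (M, K); w1, w3: (K, N); rms_w: (K,)
    norm = torch.rsqrt(x.float().pow(2).mean(dim=-1, keepdim=True) + eps)
    h1 = ((x * rms_w) @ w1) * norm
    h3 = ((x * rms_w) @ w3) * norm
    return h1 * torch.sigmoid(h1) * h3  # silu(h1) * h3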
2f039813-0a4f-434e-8684-dfa2962e6c20 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/rebased/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def parallel_rebased_bwd_kernel(q, k, v, do, dz, dq, dk, dv, s_k_h, s_k_t,
s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T:
tl.constexpr, K: tl.constexpr, V: tl.constexpr, BTL: tl.constexpr, BTS:
tl.constexpr, BK: tl.constexpr, BV: tl.constexpr):
i_kv, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
NV = tl.cdiv(V, BV)
i_k = i_kv // NV
i_v = i_kv % NV
i_h = i_bh % H
_parallel_rebased_bwd_dq(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dq,
s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B=B, H=H, T=T, K=K,
V=V, BTL=BTL, BTS=BTS, BK=BK, BV=BV)
tl.debug_barrier()
_parallel_rebased_bwd_dkv(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dk,
dv, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B=B, H=H, T=T,
K=K, V=V, BTL=BTL, BTS=BTS, BK=BK, BV=BV)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rebased/parallel.py |
fb256f38-277f-4402-8b55-cf02270b1533 | mlstm_matmul.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_matmul.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def matrix_mult(x, y, B):
return tl.dot(x, y) if B >= 16 else tl.sum(x[:, :, None] * y, 1)
| {
"Data Type": [
"fp32",
"int8"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py |
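The two branches are equivalent; the broadcast-sum path exists because tl.dot needs block dimensions of at least 16. A PyTorch equivalence sketch, float32 inputs assumed:
import torch

def matrix_mult_ref(x, y):
    # x: (M, B), y: (B, N); both lines compute the same (M, N) product
    out_dot = x @ y                        # the tl.dot path (B >= 16)
    out_sum = (x[:, :, None] * y).sum(1)   # the broadcast-sum fallback for small B
    assert torch.allclose(out_dot, out_sum, atol=1e-5)
    return out_dot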
c186aa78-7e7f-494d-bcc8-d002861dceb2 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BK', 'BT'])
@triton.jit
def chunk_rwkv6_fwd_A_kernel_intra_sub_intra(q, k, gi, ge, u, A, offsets,
indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, BT:
tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_t, i_i, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
i_j = i_i
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
if i_t * BT + i_i * BC >= T:
return
o_i = tl.arange(0, BC)
o_k = tl.arange(0, BK)
m_k = o_k < K
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
if HEAD_FIRST:
o_A = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC)
) * BT + i_j * BC
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT +
i_i * BC, 0), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (i_t *
BT + i_i * BC, 0), (BC, BK), (1, 0))
p_qj = tl.max_contiguous(tl.multiple_of(q + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
p_kj = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(gi + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
else:
o_A = (bos + i_t * BT + i_i * BC + tl.arange(0, BC)
) * H * BT + i_h * BT + i_j * BC
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H * K, 1
), (i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
p_qj = tl.max_contiguous(tl.multiple_of(q + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
p_kj = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(gi + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0, 1))
p_u = tl.make_block_ptr(u + i_h * K, (K,), (1,), (0,), (BK,), (0,))
b_u = tl.load(p_u, boundary_check=(0,))
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
b_qj = tl.load(p_qj, mask=m_k, other=0).to(tl.float32)
b_kj = tl.load(p_kj, mask=m_k, other=0).to(tl.float32)
b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32)
b_A = tl.sum(b_q * b_kj[None, :] * tl.exp(b_g - b_gk[None, :]), 1)
b_A = tl.where(o_i > j, b_A * scale, 0.0)
b_A = tl.where(o_i != j, b_A, tl.sum(b_qj * b_kj * b_u * scale))
tl.store(A + o_A + j, b_A, mask=m_A)
p_qj += K if HEAD_FIRST else H * K
p_kj += K if HEAD_FIRST else H * K
p_gk += K if HEAD_FIRST else H * K
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py |
48379ee8-f2ff-4497-a0e7-85d8537a7560 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_delta_rule_fwd_kernel_o(q, k, v, h, o, offsets, indices, scale, T:
tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl
.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_OFFSETS: tl.
constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_o = tl.zeros([BT, BV], dtype=tl.float32)
b_s = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (
V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_o += tl.dot(b_q, b_h, allow_tf32=False)
b_s += tl.dot(b_q, b_k, allow_tf32=False)
b_s = tl.where(m_s, b_s, 0)
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_o = b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py |
beebe0e4-2407-465c-b381-0707292d593f | conv_kernels.py | BobMcDear/attorch | attorch/conv_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=[conv2d_forward_config(128, 32, 128, n_warps=8,
n_stages=2), conv2d_forward_config(256, 32, 64, n_warps=8, n_stages=2),
conv2d_forward_config(256, 32, 32, n_warps=4, n_stages=4),
conv2d_forward_config(256, 64, 32, n_warps=4, n_stages=4),
conv2d_forward_config(256, 32, 16, n_warps=2, n_stages=4),
conv2d_forward_config(64, 32, 128, n_warps=8, n_stages=4),
conv2d_forward_config(128, 32, 64, n_warps=4, n_stages=4),
conv2d_forward_config(64, 32, 64, n_warps=4, n_stages=4),
conv2d_forward_config(128, 32, 16, n_warps=4, n_stages=4),
conv2d_forward_config(128, 128, 128, n_warps=8, n_stages=3),
conv2d_forward_config(256, 128, 64, n_warps=8, n_stages=3),
conv2d_forward_config(256, 128, 32, n_warps=4, n_stages=4),
conv2d_forward_config(64, 128, 128, n_warps=4, n_stages=4),
conv2d_forward_config(128, 128, 64, n_warps=4, n_stages=4),
conv2d_forward_config(128, 64, 32, n_warps=2, n_stages=4),
conv2d_forward_config(64, 64, 64, n_warps=2, n_stages=4)], key=[
'batch_dim', 'in_feat_dim', 'in_height', 'in_width', 'out_feat_dim',
'out_height', 'out_width', 'kernel_height', 'kernel_width',
'stride_height', 'stride_width', 'padding_height', 'padding_width',
'groups', 'fp16'])
@triton.heuristics({'tf32': lambda _: allow_tf32()})
@triton.jit
def conv2d_forward_kernel(input_pointer, weight_pointer, output_pointer,
batch_dim, in_feat_dim, in_height, in_width, out_feat_dim, out_height,
out_width, input_batch_stride, input_in_feat_stride,
input_height_stride, input_width_stride, weight_out_feat_stride,
weight_in_feat_stride, weight_height_stride, weight_width_stride,
output_batch_stride, output_out_feat_stride, output_height_stride,
output_width_stride, kernel_height: tl.constexpr, kernel_width: tl.
constexpr, stride_height: tl.constexpr, stride_width: tl.constexpr,
padding_height: tl.constexpr, padding_width: tl.constexpr, groups: tl.
constexpr, fp16: tl.constexpr, tf32: tl.constexpr,
BLOCK_SIZE_BATCH_HEIGHT_WIDTH: tl.constexpr, BLOCK_SIZE_IN_FEAT: tl.
constexpr, BLOCK_SIZE_OUT_FEAT: tl.constexpr):
"""
2D-convolves over the input using weights.
Args:
input_pointer: Pointer to the input to convolve over.
The input must be of shape [batch_dim, in_feat_dim, in_height, in_width].
weight_pointer: Pointer to the weights input is convolved over by.
The weights must be of shape [out_feat_dim, in_feat_dim, kernel_height, kernel_width].
output_pointer: Pointer to a container the result is written to.
The container must be of shape [batch_dim, out_feat_dim, out_height, out_width].
batch_dim: Batch dimension of the input and output.
in_feat_dim: Dimensionality of the input features.
in_height: Input height.
in_width: Input width.
out_feat_dim: Dimensionality of the output features.
out_height: Output height.
out_width: Output width.
input_batch_stride: Stride necessary to jump one element along the
input's batch dimension.
input_in_feat_stride: Stride necessary to jump one element along the
input's feature dimension.
input_height_stride: Stride necessary to jump one element along the
input's height dimension.
input_width_stride: Stride necessary to jump one element along the
input's width dimension.
weight_out_feat_stride: Stride necessary to jump one element along the
weights' output feature dimension.
weight_in_feat_stride: Stride necessary to jump one element along the
weights' input feature dimension.
weight_height_stride: Stride necessary to jump one element along the
weights' height dimension.
weight_width_stride: Stride necessary to jump one element along the
weights' width dimension.
output_batch_stride: Stride necessary to jump one element along the
output's batch dimension.
output_out_feat_stride: Stride necessary to jump one element along the
output's feature dimension.
output_height_stride: Stride necessary to jump one element along the
output's height dimension.
output_width_stride: Stride necessary to jump one element along the
output's width dimension.
kernel_height: Kernel height.
kernel_width: Kernel width.
stride_height: Stride of kernel across the height dimension.
stride_width: Stride of kernel across the width dimension.
padding_height: Padding applied to the input across the height dimension.
padding_width: Padding applied to the input across the width dimension.
groups: Number of groups for the convolution.
fp16: Flag for loading the input and weights in FP16.
tf32: Flag for performing matrix products in TF32.
BLOCK_SIZE_BATCH_HEIGHT_WIDTH: Block size across the batch, height, and
width dimensions.
BLOCK_SIZE_IN_FEAT: Block size across the input feature dimension.
BLOCK_SIZE_OUT_FEAT: Block size across the output feature dimension.
"""
batch_height_width_pid = tl.program_id(0)
out_feat_pid = tl.program_id(1)
group_pid = tl.program_id(2)
in_group_dim = in_feat_dim // groups
out_group_dim = out_feat_dim // groups
batch_height_width_offset = (batch_height_width_pid *
BLOCK_SIZE_BATCH_HEIGHT_WIDTH + tl.arange(0,
BLOCK_SIZE_BATCH_HEIGHT_WIDTH))
batch_height_offset = batch_height_width_offset // out_width
batch_offset = batch_height_offset // out_height
output_feat_offset = out_feat_pid * BLOCK_SIZE_OUT_FEAT + tl.arange(0,
BLOCK_SIZE_OUT_FEAT)
output_height_offset = batch_height_offset % out_height
output_width_offset = batch_height_width_offset % out_width
input_pointer += (input_batch_stride * batch_offset +
input_in_feat_stride * group_pid * in_group_dim)[:, None]
weight_pointer += (weight_out_feat_stride * output_feat_offset +
weight_out_feat_stride * group_pid * out_group_dim)[None, :]
accum = tl.zeros((BLOCK_SIZE_BATCH_HEIGHT_WIDTH, BLOCK_SIZE_OUT_FEAT),
dtype=tl.float32)
for h in range(kernel_height):
for w in range(kernel_width):
for c in range(0, in_group_dim, BLOCK_SIZE_IN_FEAT):
input_feat_offset = c + tl.arange(0, BLOCK_SIZE_IN_FEAT)
input_height_offset = (h - padding_height + stride_height *
output_height_offset)
input_width_offset = (w - padding_width + stride_width *
output_width_offset)
curr_input_pointer = input_pointer + (input_in_feat_stride *
input_feat_offset)[None, :] + (input_height_stride *
input_height_offset)[:, None] + (input_width_stride *
input_width_offset)[:, None]
curr_weight_pointer = weight_pointer + (weight_in_feat_stride *
input_feat_offset)[:, None
] + weight_height_stride * h + weight_width_stride * w
input_mask = (batch_offset < batch_dim)[:, None] & (
input_feat_offset < in_group_dim)[None, :] & (0 <=
input_height_offset)[:, None] & (input_height_offset <
in_height)[:, None] & (0 <= input_width_offset)[:, None
] & (input_width_offset < in_width)[:, None]
weight_mask = (input_feat_offset < in_group_dim)[:, None] & (
output_feat_offset < out_group_dim)[None, :]
input_block = tl.load(curr_input_pointer, mask=input_mask)
weight_block = tl.load(curr_weight_pointer, mask=weight_mask)
if fp16:
input_block = input_block.to(tl.float16)
weight_block = weight_block.to(tl.float16)
accum += tl.dot(input_block, weight_block, allow_tf32=tf32)
output_pointer += (output_batch_stride * batch_offset)[:, None] + (
output_out_feat_stride * (group_pid * out_group_dim +
output_feat_offset))[None, :] + (output_height_stride *
output_height_offset)[:, None] + (output_width_stride *
output_width_offset)[:, None]
output_mask = (batch_offset < batch_dim)[:, None] & (output_feat_offset <
out_group_dim)[None, :] & (output_height_offset < out_height)[:, None
] & (output_width_offset < out_width)[:, None]
tl.store(output_pointer, accum, mask=output_mask)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/conv_kernels.py |
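A note on the conv2d_forward_kernel above: it computes the convolution as an implicit GEMM, looping over kernel height, kernel width and input-channel blocks and accumulating tl.dot products of gathered input tiles with weight tiles. The PyTorch sketch below reproduces that formulation for groups=1 via unfold; all shapes and tolerances here are illustrative assumptions, not taken from attorch.

import torch
import torch.nn.functional as F

# Implicit-GEMM view of conv2d: gather in_feat*k_h*k_w patches, then one matmul.
x = torch.randn(2, 8, 16, 16)                      # (batch, in_feat, H, W)
w = torch.randn(4, 8, 3, 3)                        # (out_feat, in_feat, k_h, k_w)
patches = F.unfold(x, kernel_size=3, padding=1)    # (batch, in_feat*k_h*k_w, H*W)
out = (w.view(4, -1) @ patches).view(2, 4, 16, 16)
assert torch.allclose(out, F.conv2d(x, w, padding=1), atol=1e-3)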
54b68535-dd6a-428b-ba2c-aacf68f8e026 | triton_rms_norm.py | vladmandic/dcae | dcae/nn/triton_rms_norm.py | 5223970c7e6c6acfe282e18be7e3821b61511673 | 0 | @triton.jit
def _rms_norm_2d_fwd_fused(X, Y, W, B, Rrms, M, C, N, num_blocks, eps,
BLOCK_SIZE: tl.constexpr):
m_n = tl.program_id(0)
m, n = m_n // num_blocks, m_n % num_blocks
Y += m * C * N
X += m * C * N
cols = n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = cols < N
x_sum_square = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, C):
x = tl.load(X + off * N + cols, mask=mask, other=0.0).to(tl.float32)
x_sum_square += x * x
mean_square = x_sum_square / C
rrms = 1 / tl.sqrt(mean_square + eps)
tl.store(Rrms + m * N + cols, rrms, mask=mask)
for off in range(0, C):
pos = off * N + cols
w = tl.load(W + off)
b = tl.load(B + off)
x = tl.load(X + pos, mask=mask, other=0.0).to(tl.float32)
x_hat = x * rrms
y = x_hat * w + b
tl.store(Y + pos, y, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/vladmandic/dcae/blob/5223970c7e6c6acfe282e18be7e3821b61511673/dcae/nn/triton_rms_norm.py |
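For reference, _rms_norm_2d_fwd_fused above normalizes every (batch, spatial) position over the channel dimension C in two passes: a sum-of-squares accumulation, then a scale by 1/sqrt(mean + eps) with a per-channel affine. A rough PyTorch equivalent of that contract, assuming X is viewed as (M, C, N); this is my own sketch, not code from the dcae repo.

import torch

def rms_norm_2d_ref(x, weight, bias, eps):
    # x: (M, C, N); normalize across C independently for every (m, n) position.
    rrms = torch.rsqrt(x.pow(2).mean(dim=1, keepdim=True) + eps)   # (M, 1, N)
    y = x * rrms * weight[None, :, None] + bias[None, :, None]
    return y, rrms.squeeze(1)   # y and Rrms, matching what the kernel stores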
01a67365-f163-429d-bbd8-8c27946656e2 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/generalized_delta_rule/iplr/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_recurrent_bwd_kernel(q, k, v, alpha, beta, ha, dht, dh0, do, dq,
dk, dv, dalpha, dbeta, dha, h0, s_k_h, s_v_h, NK, scale, B, H, T, K: tl
.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, USE_DH0: tl.constexpr, USE_DHT: tl.
constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
mask_bk = i_k * BK + tl.arange(0, BK) < K
mask_bv = i_v * BV + tl.arange(0, BV) < V
p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_do = do + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V
p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V
p_ha = ha + i_bh * s_v_h + i_v * BV + tl.arange(0, BV) + (T - 1) * V
p_alpha = alpha + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_beta = beta + i_bh * s_k_h + i_k * BK + tl.arange(0, BK) + (T - 1) * K
p_dk = dk + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK) + (T
- 1) * K
p_dbeta = dbeta + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK
) + (T - 1) * K
p_dv = dv + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV) + (T
- 1) * V
p_dha = dha + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV
) + (T - 1) * V
d_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_DHT:
p_ht = dht + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
d_h += tl.load(p_ht, mask=mask_bk[:, None] & mask_bv[None, :], other=0
).to(tl.float32)
for _ in range(T):
b_q = tl.load(p_q, mask=mask_bk, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_bv, other=0).to(tl.float32)
b_beta = tl.load(p_beta, mask=mask_bk, other=0).to(tl.float32)
b_alpha = tl.load(p_alpha, mask=mask_bk, other=0).to(tl.float32)
b_ha = tl.load(p_ha, mask=mask_bv, other=0).to(tl.float32)
d_h += b_q[:, None] * b_do[None, :]
d_k = tl.sum(d_h * b_v[None, :], axis=1)
d_v = tl.sum(d_h * b_k[:, None], axis=0)
tl.store(p_dk, d_k.to(p_dk.dtype.element_ty), mask=mask_bk)
tl.store(p_dv, d_v.to(p_dv.dtype.element_ty), mask=mask_bv)
b_dha = tl.sum(d_h * b_beta[:, None], axis=0)
tl.store(p_dha, b_dha.to(p_dha.dtype.element_ty), mask=mask_bv)
b_dbeta = tl.sum(d_h * b_ha[None, :], axis=1)
tl.store(p_dbeta, b_dbeta.to(p_dbeta.dtype.element_ty), mask=mask_bk)
d_h += b_dha[None, :] * b_alpha[:, None]
p_do -= V
p_q -= K
p_k -= K
p_v -= V
p_dk -= K
p_dv -= V
p_beta -= K
p_dbeta -= K
p_alpha -= K
p_dha -= V
p_ha -= V
if USE_DH0:
p_dh0 = dh0 + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
tl.store(p_dh0, d_h.to(p_dh0.dtype.element_ty), mask=mask_bk[:,
None] & mask_bv[None, :])
tl.debug_barrier()
h = tl.zeros([BK, BV], dtype=tl.float32)
p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
p_beta = beta + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV)
p_ha = ha + i_bh * s_v_h + i_v * BV + tl.arange(0, BV)
p_do = do + i_bh * s_v_h + i_v * BV + tl.arange(0, BV)
p_dq = dq + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK)
p_dv = dv + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV)
p_dha = dha + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV)
p_alpha = alpha + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(0, BK
)
p_dalpha = dalpha + (i_bh + i_v * B * H) * s_k_h + i_k * BK + tl.arange(
0, BK)
if USE_INITIAL_STATE:
mask_kv = mask_bk[:, None] & mask_bv[None, :]
p_h0 = h0 + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[:, None]
) * V + (i_v * BV + tl.arange(0, BV)[None, :])
h += tl.load(p_h0, mask=mask_kv, other=0).to(tl.float32)
for i in range(0, T):
d_ha = tl.load(p_dha, mask=mask_bv, other=0).to(tl.float32)
d_alpha = tl.sum(d_ha[None, :] * h, axis=1)
tl.store(p_dalpha, d_alpha.to(p_dalpha.dtype.element_ty), mask=mask_bk)
b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask_bv, other=0).to(tl.float32)
b_beta = tl.load(p_beta, mask=mask_bk, other=0).to(tl.float32)
b_ha = tl.load(p_ha, mask=mask_bv, other=0).to(tl.float32)
h += b_k[:, None] * b_v[None, :] + b_beta[:, None] * b_ha[None, :]
_d_q = h * b_do[None, :]
d_q = tl.sum(_d_q, axis=1) * scale
tl.store(p_dq, d_q.to(p_dq.dtype.element_ty), mask=mask_bk)
p_k += K
p_do += V
p_v += V
p_dk += K
p_dalpha += K
p_dha += V
p_ha += V
p_dq += K
p_beta += K
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/generalized_delta_rule/iplr/fused_recurrent.py |
7e5bd5a2-7393-4fdc-8835-64d5a5604ecc | triton_kernels.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/triton_kernels.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def _triton_second_order_fwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl.
tensor, sh_1_0_ptr: tl.tensor, sh_1_1_ptr: tl.tensor, sh_1_2_ptr: tl.
tensor, sh_2_0_ptr: tl.tensor, sh_2_1_ptr: tl.tensor, sh_2_2_ptr: tl.
tensor, sh_2_3_ptr: tl.tensor, sh_2_4_ptr: tl.tensor, BLOCK_SIZE: tl.
constexpr, vector_length: tl.constexpr):
sqrt_3 = 3 ** 0.5
block_id = tl.program_id(0)
offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id
x_row_start = x_ptr + offset
y_row_start = y_ptr + offset
z_row_start = z_ptr + offset
x = tl.load(x_row_start, mask=offset < vector_length)
y = tl.load(y_row_start, mask=offset < vector_length)
z = tl.load(z_row_start, mask=offset < vector_length)
sh_1_0 = x * sqrt_3
sh_1_1 = y * sqrt_3
sh_1_2 = z * sqrt_3
sqrt_15 = 15 ** 0.5
sqrt_5 = 5 ** 0.5
sq_x = x * x
sq_y = y * y
sq_z = z * z
sh_2_0 = sqrt_15 * x * z
sh_2_1 = sqrt_15 * x * y
sh_2_2 = sqrt_5 * (sq_y - 0.5 * (sq_x + sq_z))
sh_2_3 = sqrt_15 * y * z
sh_2_4 = 0.5 * sqrt_15 * (sq_z - sq_x)
sh_1_0_start = sh_1_0_ptr + offset
sh_1_1_start = sh_1_1_ptr + offset
sh_1_2_start = sh_1_2_ptr + offset
sh_2_0_start = sh_2_0_ptr + offset
sh_2_1_start = sh_2_1_ptr + offset
sh_2_2_start = sh_2_2_ptr + offset
sh_2_3_start = sh_2_3_ptr + offset
sh_2_4_start = sh_2_4_ptr + offset
tl.store(sh_1_0_start, sh_1_0, mask=offset < vector_length)
tl.store(sh_1_1_start, sh_1_1, mask=offset < vector_length)
tl.store(sh_1_2_start, sh_1_2, mask=offset < vector_length)
tl.store(sh_2_0_start, sh_2_0, mask=offset < vector_length)
tl.store(sh_2_1_start, sh_2_1, mask=offset < vector_length)
tl.store(sh_2_2_start, sh_2_2, mask=offset < vector_length)
tl.store(sh_2_3_start, sh_2_3, mask=offset < vector_length)
tl.store(sh_2_4_start, sh_2_4, mask=offset < vector_length)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py |
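Transcribed to math, the first- and second-order outputs computed by _triton_second_order_fwd above are (constants exactly as they appear in the code):

\begin{aligned}
Y_{1,0} &= \sqrt{3}\,x, \qquad Y_{1,1} = \sqrt{3}\,y, \qquad Y_{1,2} = \sqrt{3}\,z,\\
Y_{2,0} &= \sqrt{15}\,xz, \qquad Y_{2,1} = \sqrt{15}\,xy, \qquad Y_{2,2} = \sqrt{5}\,\bigl(y^2 - \tfrac{1}{2}(x^2 + z^2)\bigr),\\
Y_{2,3} &= \sqrt{15}\,yz, \qquad Y_{2,4} = \tfrac{1}{2}\sqrt{15}\,\bigl(z^2 - x^2\bigr).
\end{aligned}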
2692045b-1381-44b0-bcd7-ec5e84568124 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for
num_warps in [1, 2, 4, 8] for num_stages in [2, 3, 4]], key=['BT'])
@triton.jit
def chunk_rwkv6_bwd_kernel_dh(q, gi, ge, do, dh, dht, dh0, offsets,
chunk_offsets, scale, T: tl.constexpr, HQ: tl.constexpr, H: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, NG: tl.constexpr,
STORE_INITIAL_STATE_GRADIENT: tl.constexpr, USE_FINAL_STATE_GRADIENT:
tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_nh // NG
i_n, i_hq = i_nh // HQ, i_nh % HQ
i_h = i_hq // NG
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT - 1, -1, -1):
if HEAD_FIRST:
p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
last_idx = min(i_t * BT + BT, T) - 1
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (K, T), (1,
HQ * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
if HEAD_FIRST:
p_gk = tl.make_block_ptr(ge + i_bg * T * K, (K, T), (1, K), (
i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = gi + (i_bg * T + last_idx) * K + i_k * BK + tl.arange(
0, BK)
else:
p_gk = tl.make_block_ptr(ge + (bos * H + i_h) * K, (K, T), (1,
H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = gi + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK)
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_q = (b_q * tl.exp(b_gk) * scale).to(b_q.dtype)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) < K,
other=0.0)
b_dh *= tl.exp(b_gk_last)[:, None]
b_dh += tl.dot(b_q, b_do)
if STORE_INITIAL_STATE_GRADIENT:
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Backpropagation"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py |
2845735e-85d3-4315-9d21-ce129b242704 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/based/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def _parallel_based_bwd_dkv(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dk,
dv, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale, BTL: tl.
constexpr, BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, K: tl
.constexpr, V: tl.constexpr):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c *
BTL, i_k * BK), (BTL, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_c *
BTL, i_v * BV), (BTL, BV), (1, 0))
b_k, b_v = tl.load(p_k, boundary_check=(0, 1)), tl.load(p_v,
boundary_check=(0, 1))
b_dk, b_dv = tl.zeros([BTL, BK], dtype=tl.float32), tl.zeros([BTL, BV],
dtype=tl.float32)
for i in range(tl.cdiv(T, BTS) * BTS - BTS, (i_c + 1) * BTL - BTS, -BTS):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i), (BK, BTS), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (V, T), (s_v_d, s_v_t),
(i_v * BV, i), (BV, BTS), (0, 1))
p_dz = dz + i_bh * T + i + tl.arange(0, BTS)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_dz = tl.load(p_dz, mask=i + tl.arange(0, BTS) < T)
b_s = tl.dot(b_k.to(b_q.dtype), b_q, allow_tf32=False) * scale
b_s2 = 1 + b_s + 0.5 * b_s * b_s
b_dv += tl.dot(b_s2.to(b_q.dtype), tl.trans(b_do), allow_tf32=False)
b_ds = tl.dot(b_v, b_do, allow_tf32=False) * scale
if i_v == 0:
b_ds += b_dz[None, :] * scale
else:
b_ds = b_ds
b_dk += tl.dot((b_ds + b_ds * b_s).to(b_q.dtype), tl.trans(b_q),
allow_tf32=False)
tl.debug_barrier()
o_q, o_k = tl.arange(0, BTS), tl.arange(0, BTL)
for i in range(i_c * BTL, (i_c + 1) * BTL, BTS):
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (
i_k * BK, i), (BK, BTS), (0, 1))
p_do = tl.make_block_ptr(do + i_bh * s_v_h, (V, T), (s_v_d, s_v_t),
(i_v * BV, i), (BV, BTS), (0, 1))
p_dz = dz + i_bh * T + i + tl.arange(0, BTS)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype)
b_dz = tl.load(p_dz, mask=i + tl.arange(0, BTS) < T)
m_s = o_k[:, None] <= o_q[None, :]
b_s = tl.dot(b_k, b_q, allow_tf32=False) * scale
b_s2 = 1 + b_s + 0.5 * b_s * b_s
b_s = tl.where(m_s, b_s, 0)
b_s2 = tl.where(m_s, b_s2, 0)
b_ds = tl.dot(b_v, b_do, allow_tf32=False)
if i_v == 0:
b_ds += b_dz[None, :]
else:
b_ds = b_ds
b_ds = tl.where(m_s, b_ds, 0) * scale
b_dv += tl.dot(b_s2.to(b_q.dtype), tl.trans(b_do), allow_tf32=False)
b_dk += tl.dot((b_ds + b_ds * b_s).to(b_q.dtype), tl.trans(b_q),
allow_tf32=False)
o_q += BTS
p_dk = tl.make_block_ptr(dk + (i_bh + B * H * i_v) * s_k_h, (T, K), (
s_k_t, s_k_d), (i_c * BTL, i_k * BK), (BTL, BK), (1, 0))
p_dv = tl.make_block_ptr(dv + (i_bh + B * H * i_k) * s_v_h, (T, V), (
s_v_t, s_v_d), (i_c * BTL, i_v * BV), (BTL, BV), (1, 0))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/based/parallel.py |
86340ed0-7aa9-45cf-8b18-cb8988e9b602 | masks.py | drisspg/transformer_nuggets | transformer_nuggets/flash/masks.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def inverse_causal_mask_triton(score, batch, head, seq_len_q, seq_len_kv):
score = tl.where(seq_len_q > seq_len_kv, float('-inf'), score)
return score
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Low Latency"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py |
aed79a3c-9b2f-4cb8-809d-b5eb89f04608 | associative_rnn_scan.py | TushaarGVS/linear-rnn | linear_rnn/triton/associative_rnn_scan.py | 48320589b73154484be7d09a144923a2b9e56b85 | 0 | @triton.jit
def _associative_rnn_scan_fwd_kernel(x_ptr, a_ptr, cum_a_ptr, out_ptr,
stride_x_batch, stride_x_len, stride_x_dim, stride_a_batch,
stride_a_len, stride_a_dim, stride_out_batch, stride_out_len,
stride_out_dim, stride_cum_a_batch, stride_cum_a_len, stride_cum_a_dim,
BLOCK_SIZE_LEN: tl.constexpr, BLOCK_SIZE_DIM: tl.constexpr):
pid_batch = tl.program_id(0)
pid_len = tl.program_id(1)
pid_dim = tl.program_id(2)
x_ptr += pid_batch * stride_x_batch
a_ptr += pid_batch * stride_a_batch
if cum_a_ptr is not None:
cum_a_ptr += pid_batch * stride_cum_a_batch
out_ptr += pid_batch * stride_out_batch
offsets_dim = pid_dim * BLOCK_SIZE_DIM + tl.arange(0, BLOCK_SIZE_DIM)
offsets_len = pid_len * BLOCK_SIZE_LEN + tl.arange(0, BLOCK_SIZE_LEN)
x_ptrs = x_ptr + offsets_dim[None, :] * stride_x_dim + offsets_len[:, None
] * stride_x_len
a_ptrs = a_ptr + offsets_dim[None, :] * stride_a_dim + offsets_len[:, None
] * stride_a_len
out_ptrs = out_ptr + offsets_dim[None, :] * stride_out_dim + offsets_len[
:, None] * stride_out_len
if cum_a_ptr is not None:
cum_a_ptrs = cum_a_ptr + offsets_dim[None, :
] * stride_cum_a_dim + offsets_len[:, None] * stride_cum_a_len
x = tl.load(x_ptrs).to(tl.float32)
a = tl.load(a_ptrs).to(tl.float32)
cum_a, all_hiddens = tl.associative_scan(input=(a, x), axis=0,
combine_fn=_associative_scan_op)
mask = (offsets_len == (pid_len + 1) * BLOCK_SIZE_LEN - 1)[:, None]
if cum_a_ptr is not None:
tl.store(cum_a_ptrs, cum_a.to(cum_a_ptr.dtype.element_ty), mask=mask)
tl.store(out_ptrs, all_hiddens.to(out_ptr.dtype.element_ty), mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/associative_rnn_scan.py |
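The scan above delegates to _associative_scan_op, which is defined elsewhere in the file and not shown here. For the recurrence h_t = a_t * h_(t-1) + x_t that this kernel targets, the standard associative combine is combine((a1, x1), (a2, x2)) = (a1*a2, a2*x1 + x2); the NumPy check below (my own sketch, not the repo's helper) verifies it reproduces the sequential recurrence.

import numpy as np

def combine(e1, e2):
    # Associative combine for h_t = a_t * h_{t-1} + x_t.
    a1, x1 = e1
    a2, x2 = e2
    return a1 * a2, a2 * x1 + x2

a = np.random.rand(8)
x = np.random.rand(8)

# Sequential recurrence (h_0 = 0).
h, hs = 0.0, []
for t in range(8):
    h = a[t] * h + x[t]
    hs.append(h)

# Inclusive scan with the combine above yields the same hidden states.
acc, scan = (a[0], x[0]), [x[0]]
for t in range(1, 8):
    acc = combine(acc, (a[t], x[t]))
    scan.append(acc[1])
assert np.allclose(hs, scan)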
4df492d6-8632-47e9-80d8-2308db2c2a20 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
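As a quick sanity check of the affine form documented above, a plain NumPy version with illustrative shapes:

import numpy as np

x = np.random.randn(4, 8).astype(np.float32)
mean = x.mean(axis=1, keepdims=True)
inv_std = 1.0 / x.std(axis=1, keepdims=True)
weight, bias = np.float32(2.0), np.float32(0.5)
y = weight * inv_std * (x - mean) + bias      # same expression as standardize(...)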
2e8a3dc0-e0ec-484c-ad3a-59cdc79b11ae | sgmv_shrink.py | IBM/vllm | vllm/lora/ops/sgmv_shrink.py | 99523dd62be2ecf6c6db15e8133aaaf7855e7e86 | 0 | @triton.jit
def _sgmv_shrink_kernel(input_ptr, lora_ptr, out_ptr, N, K, b_seq_start_loc,
seq_lens, lora_indices, scaling, xm_stride, xk_stride, l0_stride,
lora_k_stride, lora_n_stride, cm_stride, cn_stride, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, EVEN_K: tl.
constexpr, SPLIT_K: tl.constexpr):
"""
The sgmv's shrink triton kernel is based on GroupGEMM+SPLIT-K.
The GEMM of Multi-LoRA can be considered as GroupGEMM. Additionally,
introducing SPLIT-K can improve performance
"""
pid = tl.program_id(axis=0)
pid_sk = tl.program_id(axis=1)
cur_batch = tl.program_id(axis=2)
cta_n_num = tl.cdiv(N, BLOCK_N)
pid_m = pid // cta_n_num
pid_n = pid % cta_n_num
M = tl.load(seq_lens + cur_batch)
if pid_m * BLOCK_M > M:
return
lora_index = tl.load(lora_indices + cur_batch)
if lora_index == -1:
return
cur_seq_start = tl.load(b_seq_start_loc + cur_batch)
offset_m = tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
offset_n = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
offset_k = pid_sk * BLOCK_K + tl.arange(0, BLOCK_K)
ram = tl.max_contiguous(tl.multiple_of(offset_m % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
a_ptr = input_ptr + cur_seq_start * xm_stride + ram[:, None
] * xm_stride + offset_k[None, :] * xk_stride
b_ptr = lora_ptr + l0_stride * lora_index + rbn[None, :
] * lora_k_stride + offset_k[:, None] * lora_n_stride
accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
if EVEN_K:
tiled_a = tl.load(a_ptr)
tiled_b = tl.load(b_ptr)
else:
k_remaining = K - k * (BLOCK_K * SPLIT_K)
tiled_a = tl.load(a_ptr, mask=offset_k[None, :] < k_remaining,
other=0.0)
tiled_b = tl.load(b_ptr, mask=offset_k[:, None] < k_remaining,
other=0.0)
accumulator += tl.dot(tiled_a, tiled_b)
a_ptr += BLOCK_K * SPLIT_K * xk_stride
b_ptr += BLOCK_K * SPLIT_K * lora_n_stride
offset_cm = cur_seq_start + tl.arange(0, BLOCK_M) + pid_m * BLOCK_M
offset_cn = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
c_ptr = out_ptr + offset_cm[:, None] * cm_stride + offset_cn[None, :
] * cn_stride
c_mask = (offset_cm[:, None] < cur_seq_start + M) & (offset_cn[None, :] < N
)
accumulator *= scaling
if SPLIT_K == 1:
tl.store(c_ptr, accumulator, mask=c_mask)
else:
tl.atomic_add(c_ptr, accumulator, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/sgmv_shrink.py |
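The docstring above frames Multi-LoRA as a grouped GEMM with SPLIT-K: each program handles one slice of the K dimension and, when SPLIT_K > 1, combines its partial product into the output with tl.atomic_add. A toy NumPy illustration of why the partial K-slices compose (sizes are arbitrary):

import numpy as np

M, K, N, SPLIT_K = 4, 64, 8, 4
a = np.random.rand(M, K).astype(np.float32)
b = np.random.rand(K, N).astype(np.float32)

# Each split handles a disjoint slice of K and accumulates into C.
c = np.zeros((M, N), dtype=np.float32)
for s in range(SPLIT_K):
    ks = slice(s * (K // SPLIT_K), (s + 1) * (K // SPLIT_K))
    c += a[:, ks] @ b[ks, :]      # stands in for tl.atomic_add on the output tile
assert np.allclose(c, a @ b, atol=1e-4)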
b6a99fcb-ac7f-4a0f-ab1a-1f9af95e6c52 | sparse_linear.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_linear.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.autotune(configs=autotune_configs, key=['col_dim',
'inner_sparse_dim', 'sparse_dim'])
@triton.jit
def input_inner_sparse_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr,
expert_ends_ptr, row_dim: tl.constexpr, col_dim: tl.constexpr,
inner_sparse_dim: tl.constexpr, sparse_dim: tl.constexpr,
padded_sparse_dim: tl.constexpr, lhs_stride_row: tl.constexpr,
lhs_stride_inner: tl.constexpr, rhs_stride_inner: tl.constexpr,
rhs_stride_col: tl.constexpr, out_stride_row: tl.constexpr,
out_stride_col: tl.constexpr, accumulate: tl.constexpr, block_size_row:
tl.constexpr, block_size_col: tl.constexpr, block_size_inner: tl.
constexpr, group_size_row: tl.constexpr):
tl.static_assert(row_dim % block_size_row == 0)
tl.static_assert(col_dim % block_size_col == 0)
tl.static_assert(inner_sparse_dim % block_size_inner == 0)
pid_row, pid_col = tl.swizzle2d(tl.program_id(axis=0), tl.program_id(
axis=1), row_dim // block_size_row, col_dim // block_size_col,
group_size_row)
row_offset = pid_row * block_size_row
sparse_range = tl.arange(0, padded_sparse_dim)
expert_ends = tl.load(expert_ends_ptr + sparse_range, mask=sparse_range <
sparse_dim, other=row_dim)
sparse_index = tl.sum((expert_ends <= row_offset).to(tl.int64))
if sparse_index == sparse_dim:
return
inner_dense_offset = sparse_index * inner_sparse_dim
col_offset = pid_col * block_size_col
row_range = tl.arange(0, block_size_row)[:, None]
col_range = tl.arange(0, block_size_col)[None, :]
inner_range = tl.arange(0, block_size_inner)
lhs_ptr += (row_offset + row_range) * lhs_stride_row + inner_range[None, :
] * lhs_stride_inner
rhs_ptr += (inner_dense_offset + inner_range[:, None]
) * rhs_stride_inner + (col_offset + col_range) * rhs_stride_col
out_ptr += (row_offset + row_range) * out_stride_row + (col_offset +
col_range) * out_stride_col
out = tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr), out_dtype=tl.float32)
for k in range(1, inner_sparse_dim // block_size_inner):
lhs_ptr += block_size_inner * lhs_stride_inner
rhs_ptr += block_size_inner * rhs_stride_inner
out += tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr))
if accumulate:
out += tl.load(out_ptr)
tl.store(out_ptr, out)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Coalesced",
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py |
7118da2e-e1df-45f5-99f0-a46403452599 | gemm2.py | vedantroy/awq | examples/gemm2.py | a0e638f269862a78da4ea6a7f4c08bc54486018e | 0 | @triton.jit
def matmul_kernel_simple(a_ptr, qw_ptr, c_ptr, scales_ptr, zeros_ptr,
dbg_qwpacked_ptr, dbg_qwunpacked_ptr, dbg_dequant_ptr, dbg_scales_ptr,
dbg_unpacked_zeros_ptr, dbg_to_add_ptr, M, N, K, BLOCK_SIZE_M: tl.
constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, QUANT_GROUP_SIZE: tl.constexpr):
"""Kernel for computing the matmul C = A x qw (qweights). """
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
qw_shifter = offs_k % 8 * 4
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, 1):
a_offs = k * BLOCK_SIZE_K + (offs_am[:, None] * K + offs_k[None, :])
a = tl.load(a_ptr + a_offs, mask=offs_k[None, :] < K - k *
BLOCK_SIZE_K, other=0.0)
qw_offs = (k * BLOCK_SIZE_K + offs_k[:, None]) // 8 * N + offs_bn[
None, :]
qw_packed = tl.load(qw_ptr + qw_offs)
if pid == 0 and k == 0:
k_x_n = tl.arange(0, BLOCK_SIZE_K)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
tl.store(dbg_qwpacked_ptr + k_x_n, qw_packed)
qw_unpacked = qw_packed >> qw_shifter[:, None] & 15
if pid == 0 and k == 0:
k_x_n = tl.arange(0, BLOCK_SIZE_K)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
tl.store(dbg_qwunpacked_ptr + k_x_n, qw_unpacked)
k_iters_per_quant_group = QUANT_GROUP_SIZE // BLOCK_SIZE_K
grp_idx = k // k_iters_per_quant_group
grp_row_off = N * grp_idx
col_offs = offs_bn
scales = tl.load(scales_ptr + grp_row_off + col_offs)
if pid == 0 and k == 0:
tl.store(dbg_scales_ptr + tl.arange(0, BLOCK_SIZE_N), scales)
zeros_row_off = grp_row_off // 8
idx_within_packed = grp_idx % 8
packed_zeros = tl.load(zeros_ptr + zeros_row_off + col_offs)
unpacked_zeros = packed_zeros >> idx_within_packed * 4 & 15
if pid == 0 and k == 0:
tl.store(dbg_unpacked_zeros_ptr + tl.arange(0, BLOCK_SIZE_N),
unpacked_zeros)
dequantized = scales[None, :].to(tl.float32) * (qw_unpacked.to(tl.
float32) - unpacked_zeros[None, :].to(tl.float32))
if pid == 0 and k == 0:
k_x_n = tl.arange(0, BLOCK_SIZE_K)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
tl.store(dbg_dequant_ptr + k_x_n, dequantized)
to_add = tl.dot(a, dequantized.to(tl.float16))
if pid == 0 and k == 0:
m_x_n = tl.arange(0, BLOCK_SIZE_M)[:, None
] * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
tl.store(dbg_to_add_ptr + m_x_n, to_add)
accumulator += to_add
c = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
stride_cm = N
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/vedantroy/awq/blob/a0e638f269862a78da4ea6a7f4c08bc54486018e/examples/gemm2.py |
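matmul_kernel_simple above stores eight 4-bit weights in each int32 and recovers them with (packed >> 4*j) & 15 before dequantizing as scale * (q - zero). A minimal NumPy round trip of that nibble packing, using a plain sequential order along K to match the qw_shifter expression; this is an illustration only, not the full AWQ layout.

import numpy as np

q = np.random.randint(0, 16, size=8, dtype=np.int64)   # eight 4-bit values
packed = 0
for j, v in enumerate(q):
    packed |= int(v) << (4 * j)                         # value j lives in bits [4j, 4j+4)

unpacked = np.array([(packed >> (4 * j)) & 0xF for j in range(8)])
assert (unpacked == q).all()

# Dequantization as in the kernel: w = scale * (q - zero)
scale, zero = 0.02, 8
w = scale * (unpacked.astype(np.float32) - zero)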
cce543b6-f8b2-4791-8e56-9a8d72b6369f | dequant_kernel.py | drisspg/transformer_nuggets | transformer_nuggets/quant/dequant_kernel.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def dequant_nf4_tensor_kernel(inpt_ptr, output_ptr, quantized_scalers_ptr,
quantization_factor_ptr, scaler_mean_ptr, nf4_lut_ptr,
scaler_block_size: tl.constexpr, XBLOCK: tl.constexpr):
"""Dequantizes a tensor from nf4 to bfloat16"""
offset = tl.program_id(0) * XBLOCK
index = offset + tl.arange(0, XBLOCK)[:]
index = tl.max_contiguous(tl.multiple_of(index, XBLOCK), XBLOCK)
inpt = tl.load(inpt_ptr + index)
first_elements = inpt >> 4
second_elements = inpt & 15
dequantized_first = dequantize(first_elements, nf4_lut_ptr)
dequantized_second = dequantize(second_elements, nf4_lut_ptr)
block_scaler = dequantize_scalers(quantized_scalers_ptr,
quantization_factor_ptr, scaler_mean_ptr, XBLOCK, scaler_block_size)
scaled_first = dequantized_first * block_scaler
scaled_second = dequantized_second * block_scaler
store_indices = offset * 2 + tl.arange(0, XBLOCK * 2)[:]
interleaved = tl.interleave(scaled_first, scaled_second)
tl.store(output_ptr + store_indices, interleaved)
| {
"Data Type": [
"int8",
"bf16"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/quant/dequant_kernel.py |
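In dequant_nf4_tensor_kernel above, each byte carries two NF4 codes: the high nibble (inpt >> 4) and the low nibble (inpt & 15). Both are looked up in a 16-entry table, scaled by the dequantized block scaler, and written back interleaved, high nibble first. A small NumPy sketch of the nibble handling; the LUT and scaler values below are placeholders, not the real NF4 constants.

import numpy as np

lut = np.linspace(-1.0, 1.0, 16, dtype=np.float32)    # placeholder for the NF4 lookup table
packed = np.random.randint(0, 256, size=4, dtype=np.uint8)

high = packed >> 4
low = packed & 0xF
block_scaler = 0.5                                     # stand-in for dequantize_scalers(...)

out = np.empty(packed.size * 2, dtype=np.float32)
out[0::2] = lut[high] * block_scaler                   # interleave: high nibble first,
out[1::2] = lut[low] * block_scaler                    # then low nibble, as tl.interleave does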
75190c07-be32-4efd-b8f2-d8a6bee1648a | quantize.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/quantize.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def _kernel_quantize_mx4(A, out, rand_bits, M, K, GROUPS_PER_ROW,
GROUPS_PER_THREAD, ROW_PADDING, GROUP_SIZE: tl.constexpr, EBITS: tl.
constexpr, MBITS: tl.constexpr, ROUNDING_MODE: tl.constexpr,
STOCHASTIC_CASTING: tl.constexpr, FP4_EXP_BIAS: tl.constexpr,
GROUP_LOAD: tl.constexpr, USE_INT64: tl.constexpr) ->None:
"""Quantize a 1D float tensor into a packed MX4 tensor.
Args:
A (Tensor): [M] float tensor to be quantized.
out (Tensor): [M / 2 + M / GROUP_SIZE] output containing packed mx4 values.
rand_bits (Optional Tensor): [M, K / 2] random integers used for stochastic rounding.
M (int): Number of input rows.
K (int): Number of input columns.
GROUPS_PER_ROW (int): Number of groups in each row of the input.
GROUPS_PER_THREAD (int): Number of groups to process per thread.
ROW_PADDING (int): Number of elements of padding to insert into each row.
GROUP_SIZE (int): Size of chunks that use the same shared exponent.
EBITS (int): Number of exponent bits in target mx4 format.
MBITS (int): Number of mantissa bits in target mx4 format.
ROUNDING_MODE (int): Which rounding method to use when calculating shared exponent.
STOCHASTIC_CASTING (bool): Whether to use stochastic rounding when downcasting.
FP4_EXP_BIAS (int): Exponent bias of target mx4 format.
GROUP_LOAD (int): Number of groups to process simultaneously.
USE_INT64 (bool): Whether to use int64 for indexing. This is needed for large tensors.
"""
FP32_EXP_MASK: tl.constexpr = 2139095040
FP32_EXP_OFFSET: tl.constexpr = 23
FP32_EXP_BIAS: tl.constexpr = 127
FP32_SIGN_OFFSET: tl.constexpr = 31
SIGN_MASK: tl.constexpr = 1
FP32_MANTISSA_MASK: tl.constexpr = 8388607
MBITS_IMPLICIT: tl.constexpr = MBITS + 1
MAX_FP32_MANTISSA_BITS: tl.constexpr = 24
IMPLIED_1_BIT: tl.constexpr = 1 << 23
FP32_MIN_NORMAL: tl.constexpr = 2 ** -126
MANTISSA_OVERFLOW_THRESHOLD: tl.constexpr = (1 << MBITS_IMPLICIT) - 1
EXPONENT_OVERFLOW_THRESHOLD: tl.constexpr = (1 << EBITS) - 1
IMPLICIT_1_MASK = (1 << MBITS_IMPLICIT - 1) - 1
RAND_MASK: tl.constexpr = (1 << FP32_EXP_OFFSET - MBITS) - 1
pid = tl.program_id(0)
if USE_INT64:
pid = pid.to(tl.int64)
M = tl.cast(M, tl.int64)
K = tl.cast(K, tl.int64)
GROUPS_PER_THREAD = tl.cast(GROUPS_PER_THREAD, tl.int64)
PACKED_GROUP_SIZE: tl.constexpr = GROUP_SIZE // 2 + 1
NUM_GROUPS = M * GROUPS_PER_ROW
OUTPUT_CHUNK_SIZE = GROUPS_PER_THREAD * GROUP_SIZE // 2 + GROUPS_PER_THREAD
OUTPUT_SIZE = GROUP_SIZE * NUM_GROUPS // 2 + NUM_GROUPS
input_start = pid * (GROUPS_PER_THREAD * GROUP_SIZE)
output_start = pid * OUTPUT_CHUNK_SIZE
exp_start = output_start + GROUP_SIZE // 2
input_offset = tl.arange(0, GROUP_LOAD * GROUP_SIZE) + input_start
output_offset = tl.arange(0, GROUP_LOAD * (GROUP_SIZE // 2))
if ROUNDING_MODE == 3:
rand_bits_offset = tl.arange(0, GROUP_LOAD) + pid * GROUPS_PER_THREAD
else:
rand_bits_offset = pid * GROUPS_PER_THREAD
output_offset += output_offset // (GROUP_SIZE // 2) + output_start
exp_offset = tl.arange(0, GROUP_LOAD) * PACKED_GROUP_SIZE + exp_start
for _k in range(0, tl.cdiv(GROUPS_PER_THREAD, GROUP_LOAD)):
pad_mask = input_offset % (GROUPS_PER_ROW * GROUP_SIZE) < K
if ROW_PADDING != 0:
padded_input_offset = input_offset - input_offset // (
GROUPS_PER_ROW * GROUP_SIZE) * ROW_PADDING
else:
padded_input_offset = input_offset
a = tl.load(A + padded_input_offset, mask=(padded_input_offset < M *
K) & (padded_input_offset < (pid + 1) * GROUPS_PER_THREAD *
GROUP_SIZE) & pad_mask, other=0)
a_groups = tl.reshape(a, [GROUP_LOAD, GROUP_SIZE])
group_max = tl.max(tl.abs(a_groups), axis=1)
group_max = tl.where(group_max == 0, FP32_MIN_NORMAL, group_max)
group_rand_bits = None
if ROUNDING_MODE == 3 or STOCHASTIC_CASTING:
group_rand_bits = tl.load(rand_bits + rand_bits_offset, mask=
rand_bits_offset < K // GROUP_SIZE, other=0)
rand_bits_offset += GROUP_LOAD
group_exp = _compute_exp(group_max, ROUNDING_MODE, group_rand_bits,
MBITS)
group_exp = group_exp - EBITS
group_exp = tl.clamp(group_exp, -127, 125)
scale = tl.exp2(group_exp.to(tl.float64)).to(tl.float32)
scaled_a = tl.reshape(a, [GROUP_LOAD, GROUP_SIZE]) / tl.reshape(scale,
[GROUP_LOAD, 1])
scaled_a = tl.reshape(scaled_a, [GROUP_LOAD * GROUP_SIZE])
tl.store(out + exp_offset, (group_exp + FP32_EXP_BIAS).to(tl.int8),
mask=(exp_offset < OUTPUT_SIZE) & (exp_offset <
OUTPUT_CHUNK_SIZE * (pid + 1)))
scaled_a = scaled_a.to(tl.int32, bitcast=True)
if STOCHASTIC_CASTING:
philox_4x_offset = tl.split(tl.reshape(input_offset, [
GROUP_LOAD * GROUP_SIZE // 2, 2], can_reorder=True))
philox_4x_offset = tl.split(tl.reshape(philox_4x_offset, [
GROUP_LOAD * GROUP_SIZE // 4, 2], can_reorder=True))
a_4x, b_4x, c_4x, d_4x = tl.randint4x(group_rand_bits,
philox_4x_offset, n_rounds=7)
stochastic_round_bits = tl.join(tl.join(a_4x, b_4x), tl.join(
c_4x, d_4x))
stochastic_round_bits = tl.reshape(stochastic_round_bits, [
GROUP_LOAD * GROUP_SIZE]).to(tl.int32, bitcast=True)
scaled_a = scaled_a + (stochastic_round_bits & RAND_MASK)
sign_bit = scaled_a >> FP32_SIGN_OFFSET & SIGN_MASK
biased_exp = (scaled_a & FP32_EXP_MASK) >> FP32_EXP_OFFSET
trailing_mantissa = scaled_a & FP32_MANTISSA_MASK
new_biased_exp = biased_exp - FP32_EXP_BIAS + FP4_EXP_BIAS
exp_diff = tl.where(new_biased_exp <= 0, 1 - new_biased_exp, 0)
exp_diff = tl.minimum(exp_diff, MAX_FP32_MANTISSA_BITS)
is_subnorm = biased_exp == 0
mantissa = tl.where(is_subnorm, trailing_mantissa,
trailing_mantissa + IMPLIED_1_BIT)
fp32_sig_bits = tl.where(is_subnorm, 23, 24).to(tl.int32)
mantissa = mantissa >> fp32_sig_bits + exp_diff - MBITS_IMPLICIT - 1
mantissa = mantissa + 1 >> 1
overflow = mantissa > MANTISSA_OVERFLOW_THRESHOLD
mantissa = tl.where(overflow and not is_subnorm, mantissa >> 1,
mantissa)
new_biased_exp = tl.where(new_biased_exp <= 0 and mantissa == 2, 1,
new_biased_exp)
mantissa = mantissa & IMPLICIT_1_MASK
new_biased_exp = tl.where(overflow, new_biased_exp + 1, new_biased_exp)
mantissa = tl.where(new_biased_exp > EXPONENT_OVERFLOW_THRESHOLD, 1,
mantissa)
new_biased_exp = tl.maximum(tl.minimum(new_biased_exp,
EXPONENT_OVERFLOW_THRESHOLD), 0)
mx4_value = new_biased_exp << MBITS_IMPLICIT - 1 | mantissa
mx4_value = sign_bit << EBITS + MBITS | mx4_value
low_mx4, high_mx4 = tl.split(tl.reshape(mx4_value, [GROUP_LOAD *
GROUP_SIZE // 2, 2]))
packed_mx4 = (high_mx4 << 4 | low_mx4).to(tl.int8)
tl.store(out + output_offset, packed_mx4, mask=(output_offset <
OUTPUT_SIZE) & (output_offset < OUTPUT_CHUNK_SIZE * (pid + 1)))
input_offset += GROUP_LOAD * GROUP_SIZE
exp_offset += GROUP_LOAD * PACKED_GROUP_SIZE
output_offset += GROUP_LOAD * PACKED_GROUP_SIZE
| {
"Data Type": [
"int8"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/quantize.py |
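Conceptually, _kernel_quantize_mx4 above picks one shared exponent per GROUP_SIZE elements from the group's maximum magnitude, divides the group by 2**exponent, rounds each element to a 4-bit float, and packs two elements per output byte plus one exponent byte per group. The NumPy fragment below sketches only the shared-exponent step; the rounding modes, subnormal handling and stochastic casting in the kernel are deliberately omitted.

import numpy as np

GROUP_SIZE, EBITS = 32, 2
x = np.random.randn(GROUP_SIZE).astype(np.float32)

group_max = np.abs(x).max()
shared_exp = int(np.floor(np.log2(group_max))) - EBITS   # analogous to _compute_exp(...) - EBITS
scale = 2.0 ** shared_exp
scaled = x / scale        # these values are then cast to fp4 and packed two per byte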
e433ab40-e94b-424b-8781-6a02f5a372a2 | dx.py | Forkxz/TritonDeepLearningKernel | kernel/dropconnect/dx.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.jit
def dropconnect_dx_kernel(dy_ptr, w_ptr, dx_ptr, seed, M, K, N, stride_dym,
stride_dyn, stride_wk, stride_wn, stride_dm, stride_dk, stride_dn,
stride_xm, stride_xk, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, ALLOWTF32: tl.constexpr):
"""
dY_m = Y.grad
dO_m = dY_m.view(M,1,N).broadcast_to(M,K,N)
dx_m_cast = dO_m * WD_m
dx_m = dx_m_cast.sum(dim=2) """
pid_m = tl.program_id(0)
pid_k = tl.program_id(1)
offset_m = pid_m * BLOCK_SIZE_M
offset_n = 0
offset_k = pid_k * BLOCK_SIZE_K
dy_offsets = block_offsets_2d(M, N, stride_dym, stride_dyn, offset_m,
offset_n, BLOCK_SIZE_M, BLOCK_SIZE_N)
w_offsets = block_offsets_2d(K, N, stride_wk, stride_wn, offset_k,
offset_n, BLOCK_SIZE_K, BLOCK_SIZE_N)
d_offsets = block_offsets_3d(M, K, N, stride_dm, stride_dk, stride_dn,
offset_m, offset_k, offset_n, BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N)
dy_offsets = dy_offsets.reshape(BLOCK_SIZE_M, 1, BLOCK_SIZE_N)
w_offsets = w_offsets.reshape(1, BLOCK_SIZE_K, BLOCK_SIZE_N)
offs_n = tl.arange(0, BLOCK_SIZE_N)
dy_tile = dy_ptr + dy_offsets
w_tile = w_ptr + w_offsets
ASM: tl.constexpr = 'cvt.rna.tf32.f32 $0, $1;'
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
for n in range(0, tl.cdiv(N, BLOCK_SIZE_N)):
random_masks = tl.random.rand(seed, d_offsets) > 0.5
n_mask = offs_n[None, None, :] < N - n * BLOCK_SIZE_N
dy_load = tl.load(dy_tile, mask=n_mask, other=0.0)
w_load = tl.load(w_tile, mask=n_mask, other=0.0)
dy = tl.where(random_masks, dy_load, 0.0)
wd = tl.where(random_masks, w_load, 0.0)
mul = dy * wd
accumulator += tl.sum(mul, axis=2)
dy_tile += BLOCK_SIZE_N * stride_dyn
w_tile += BLOCK_SIZE_N * stride_wn
d_offsets += BLOCK_SIZE_N * stride_dn
dx_offset, dx_mask = block_offsets_2d(M, K, stride_xm, stride_xk,
offset_m, offset_k, BLOCK_SIZE_M, BLOCK_SIZE_K, True)
dx_tile = dx_ptr + dx_offset
dx = accumulator.to(dx_tile.dtype.element_ty)
tl.store(dx_tile, dx, mask=dx_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/dx.py |
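Written densely, the gradient assembled by dropconnect_dx_kernel above is dX[m, k] = sum_n mask[m, k, n] * dY[m, n] * W[k, n], with the Bernoulli(0.5) keep mask regenerated from the seed instead of being stored. A reference einsum with an explicit mask tensor (shapes are illustrative):

import torch

M, K, N = 8, 16, 32
dy = torch.randn(M, N)
w = torch.randn(K, N)
keep = (torch.rand(M, K, N) > 0.5).float()   # stands in for tl.random.rand(seed, offsets) > 0.5

dx = torch.einsum('mn,kn,mkn->mk', dy, w, keep)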
65eed763-e3c9-4f09-8b89-130070dd79d3 | sized_tuned_bwd.py | ROCm/aotriton | tritonsrc/sized_tuned_bwd.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_BWD_SIZED, key=['BLOCK_DMODEL',
'max_seqlen_q', 'max_seqlen_k'])
@triton.jit
def sized_tuned_bwd_kernel_dq(Q, K, V, B, sm_scale, Out, DO, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dqz, stride_dqh, stride_dqm, stride_dqk,
stride_dbz, stride_dbh, stride_dbm, stride_dbn, cu_seqlens_q,
cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k, head_dim,
dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.
constexpr):
bare_bwd_kernel_dq(Q, K, V, B, sm_scale, Out, DO, DQ, DB, L, D,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_bz, stride_bh, stride_bm, stride_bn, stride_oz, stride_oh,
stride_om, stride_ok, stride_dqz, stride_dqh, stride_dqm,
stride_dqk, stride_dbz, stride_dbh, stride_dbm, stride_dbn,
cu_seqlens_q, cu_seqlens_k, num_seqlens, max_seqlen_q, max_seqlen_k,
head_dim, dropout_p, philox_seed, philox_offset_base, BLOCK_M,
BLOCK_DMODEL, BLOCK_N, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD=
PADDED_HEAD, BIAS_TYPE=BIAS_TYPE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/sized_tuned_bwd.py |
29eb6850-1766-413b-8e3d-01a3060c34f7 | chunk_h.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0'
] is not None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not
None, 'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for
num_warps in [1, 2, 4, 8] for num_stages in [2, 3, 4]], key=['BT',
'USE_G', 'USE_GK', 'USE_GV'])
@triton.jit
def chunk_bwd_kernel_dh(q, g, gk, gv, do, dh, dht, dh0, offsets,
chunk_offsets, scale, T: tl.constexpr, HQ: tl.constexpr, H: tl.
constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, NG: tl.constexpr, USE_G: tl.constexpr,
USE_GK: tl.constexpr, USE_GV: tl.constexpr,
STORE_INITIAL_STATE_GRADIENT: tl.constexpr, USE_FINAL_STATE_GRADIENT:
tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_nh // NG
i_n, i_hq = i_nh // HQ, i_nh % HQ
i_h = i_hq // NG
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dht, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT - 1, -1, -1):
if HEAD_FIRST:
p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
last_idx = min(i_t * BT + BT, T) - 1
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * HQ + i_hq) * K, (K, T), (1,
HQ * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_do = tl.make_block_ptr(do + (bos * HQ + i_hq) * V, (T, V), (
HQ * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_do = tl.load(p_do, boundary_check=(0, 1))
if USE_G:
if HEAD_FIRST:
p_g = g + i_bg * T + i_t * BT + tl.arange(0, BT)
p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT)
b_g_last = tl.load(g + i_bg * T + last_idx)
else:
p_g = g + (bos + i_t * BT + tl.arange(0, BT)) * H + i_h
b_g_last = tl.load(g + (bos + last_idx) * H + i_h)
b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0)
b_q = (b_q * tl.exp(b_g)[None, :]).to(b_q.dtype)
b_dh *= tl.exp(b_g_last)
if USE_GK:
if HEAD_FIRST:
p_gk = tl.make_block_ptr(gk + i_bg * T * K, (K, T), (1, K),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = gk + (i_bg * T + last_idx
) * K + i_k * BK + tl.arange(0, BK)
else:
p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T),
(1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = gk + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK)
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_q = (b_q * tl.exp(b_gk)).to(b_q.dtype)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) <
K, other=0.0)
b_dh *= tl.exp(b_gk_last)[:, None]
if USE_GV:
if HEAD_FIRST:
p_gv = tl.make_block_ptr(gv + i_bg * T * V, (T, V), (V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_gv_last = gv + (i_bg * T + last_idx
) * V + i_v * BV + tl.arange(0, BV)
else:
p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V),
(H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_gv_last = gv + (bos + last_idx
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV)
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_do = (b_do * tl.exp(b_gv)).to(b_do.dtype)
b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) <
V, other=0.0)
b_dh *= tl.exp(b_gv_last)[None, :]
b_dh += tl.dot(b_q, b_do)
if STORE_INITIAL_STATE_GRADIENT:
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h.py |
1e1944d8-6b04-41df-b0ee-4bdf17a83a50 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv4/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_recurrent_rwkv4_backward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b,
state_s_abe, state_s_t, state_s_c, gwkv_ptr, gwkv_s_b, gwkv_s_t,
gwkv_s_c, gstate_out_ptr, gstate_out_s_b, gstate_out_s_abe,
gstate_out_s_c, gw_ptr, gw_s_c, gu_ptr, gu_s_c, gk_ptr, gk_s_b, gk_s_t,
gk_s_c, gv_ptr, gv_s_b, gv_s_t, gv_s_c, gstate_ptr, gstate_s_b,
gstate_s_abe, gstate_s_c, tsz, chans, BLOCK_SIZE_C: tl.constexpr):
b_idx = tl.program_id(0)
c_idx = tl.program_id(1)
cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
cmask = cs < chans
k_ptr = k_ptr + b_idx * k_s_b
v_ptr = v_ptr + b_idx * v_s_b
alpha_ptr = state_ptr + b_idx * state_s_b
beta_ptr = state_ptr + b_idx * state_s_b + state_s_abe
eps_ptr = state_ptr + b_idx * state_s_b + 2 * state_s_abe
gk_ptr = gk_ptr + b_idx * gk_s_b
gv_ptr = gv_ptr + b_idx * gv_s_b
gwkv_ptr = gwkv_ptr + b_idx * gwkv_s_b
galpha_out_ptr = gstate_out_ptr + b_idx * gstate_out_s_b
gbeta_out_ptr = gstate_out_ptr + b_idx * gstate_out_s_b + gstate_out_s_abe
geps_out_ptr = (gstate_out_ptr + b_idx * gstate_out_s_b + 2 *
gstate_out_s_abe)
galpha = tl.load(galpha_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl
.float32)
gbeta = tl.load(gbeta_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl.
float32)
geps = tl.load(geps_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl.
float32)
w = tl.load(w_ptr + w_s_c * cs, mask=cmask).to(tl.float32)
u = tl.load(u_ptr + u_s_c * cs, mask=cmask).to(tl.float32)
gw = tl.zeros_like(w)
gu = tl.zeros_like(u)
alpha_prev = tl.load(alpha_ptr + tsz * state_s_t + state_s_c * cs, mask
=cmask).to(tl.float32)
beta_prev = tl.load(beta_ptr + tsz * state_s_t + state_s_c * cs, mask=cmask
).to(tl.float32)
eps_prev = tl.load(eps_ptr + tsz * state_s_t + state_s_c * cs, mask=cmask
).to(tl.float32)
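    # Walk the sequence in reverse (tc = tsz-1 .. 0), back-propagating through the
    # (alpha, beta, eps) WKV state recurrence.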
for t in range(tsz):
tc = tsz - t - 1
kt = tl.load(k_ptr + tc * k_s_t + k_s_c * cs, mask=cmask).to(tl.float32
)
vt = tl.load(v_ptr + tc * v_s_t + v_s_c * cs, mask=cmask).to(tl.float32
)
alpha_curr = alpha_prev
beta_curr = beta_prev
eps_curr = eps_prev
alpha_prev = tl.load(alpha_ptr + tc * state_s_t + state_s_c * cs,
mask=cmask).to(tl.float32)
beta_prev = tl.load(beta_ptr + tc * state_s_t + state_s_c * cs,
mask=cmask).to(tl.float32)
eps_prev = tl.load(eps_ptr + tc * state_s_t + state_s_c * cs, mask=
cmask).to(tl.float32)
ukt = u + kt
tau = tl.maximum(ukt, eps_prev)
e1 = tl.exp(eps_prev - tau)
e2 = tl.exp(ukt - tau)
euke = tl.exp(ukt + eps_prev - 2 * tau)
denom = e1 * beta_prev + e2
denom_sq = denom * denom
gwkvt = tl.load(gwkv_ptr + tc * gwkv_s_t + gwkv_s_c * cs, mask=cmask
).to(tl.float32)
guk = gwkvt * e2 * (e1 * beta_prev * vt - e1 * alpha_prev) / denom_sq
gu += guk
gk = guk
gv = gwkvt * e2 / denom
galpha_wkv = gwkvt * e1 / denom
gbeta_wkv = -gwkvt * e1 * (e2 * vt + e1 * alpha_prev) / denom_sq
geps_wkv_denom = e1 * beta_prev + e2
geps_wkv = gwkvt * euke * (alpha_prev - vt * beta_prev) / (
geps_wkv_denom * geps_wkv_denom)
e1 = tl.exp(w + eps_prev - eps_curr)
e2 = tl.exp(kt - eps_curr)
galpha_we = galpha * e1 * alpha_prev
gw += galpha_we
gk += galpha * e2 * vt
gv += galpha * e2
geps += galpha * -alpha_curr
gbeta_we = gbeta * e1 * beta_prev
gw += gbeta_we
gk += gbeta * e2
geps += gbeta * -beta_curr
geps_mask = w + eps_prev > kt
geps_we = tl.where(geps_mask, geps, tl.zeros_like(geps))
gw += geps_we
gk += tl.where(geps_mask, tl.zeros_like(geps), geps)
tl.store(gk_ptr + tc * gk_s_t + gk_s_c * cs, gk, mask=cmask)
tl.store(gv_ptr + tc * gv_s_t + gv_s_c * cs, gv, mask=cmask)
galpha = galpha * e1 + galpha_wkv
gbeta = gbeta * e1 + gbeta_wkv
geps = galpha_we + gbeta_we + geps_we + geps_wkv
galpha_ptr = gstate_ptr + b_idx * gstate_s_b
gbeta_ptr = gstate_ptr + b_idx * gstate_s_b + gstate_s_abe
geps_ptr = gstate_ptr + b_idx * gstate_s_b + 2 * gstate_s_abe
tl.store(galpha_ptr + gstate_s_c * cs, galpha, mask=cmask)
tl.store(gbeta_ptr + gstate_s_c * cs, gbeta, mask=cmask)
tl.store(geps_ptr + gstate_s_c * cs, geps, mask=cmask)
gw_temp = tl.load(gw_ptr + gw_s_c * cs, mask=cmask).to(tl.float32)
gw_temp += gw
tl.store(gw_ptr + gw_s_c * cs, gw_temp, mask=cmask)
gu_temp = tl.load(gu_ptr + gu_s_c * cs, mask=cmask).to(tl.float32)
gu_temp += gu
tl.store(gu_ptr + gu_s_c * cs, gu_temp, mask=cmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv4/fused_recurrent.py |
4a398122-1442-4248-8842-7e8a278b8424 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/hgrn/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({'BD': 32}, num_warps=1), triton.
Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64},
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({
'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton
.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps
=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128},
num_warps=8)], key=['D'])
@triton.jit
def chunk_hgrn_fwd_kernel_h(x, g, gc, o, h0, T: tl.constexpr, D: tl.
constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.
constexpr):
i_d, i_t, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_x = x + i_b * T * D + i_t * BT * D + o_d
p_g = g + i_b * T * D + i_t * BT * D + o_d
p_gc = gc + i_b * T * D + i_t * BT * D + o_d
p_o = o + i_b * T * D + i_t * BT * D + o_d
b_h = tl.zeros([BD], dtype=tl.float32)
b_gc = tl.zeros([BD], dtype=tl.float32)
if USE_INITIAL_STATE:
if i_t == 0:
b_h += tl.load(h0 + i_b * D + o_d, mask=mask, other=0).to(tl.
float32)
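    # Within-chunk scan: h_t = exp(g_t) * h_{t-1} + x_t, with gc accumulating the
    # running sum of gate logs for the chunk.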
for i in range(0, BT):
mask_t = mask & (i_t * BT + i < T)
b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
b_h = tl.exp(b_g) * b_h + b_x
b_gc = b_gc + b_g
tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
p_x += D
p_g += D
p_gc += D
p_o += D
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/chunk.py |
67bfb3db-8a20-4dc2-925e-39148ea3e6d9 | rms_norm.py | dame-cell/Triformer | triformer/rms_norm.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE': 128, 'NUM_WARPS': 4}
), triton.Config({'BLOCK_SIZE': 256, 'NUM_WARPS': 8}), triton.Config({
'BLOCK_SIZE': 512, 'NUM_WARPS': 16}), triton.Config({'BLOCK_SIZE': 1024,
'NUM_WARPS': 16}), triton.Config({'BLOCK_SIZE': 2048, 'NUM_WARPS': 32}),
triton.Config({'BLOCK_SIZE': 4096, 'NUM_WARPS': 32}), triton.Config({
'BLOCK_SIZE': 8192, 'NUM_WARPS': 48})], key=['n_cols'])
@triton.jit
def _rms_layernorm_backward(dY, dY_row_stride, X, X_row_stride, W,
W_row_stride, r, r_row_stride, dX, dX_row_stride, dW, n_cols, eps,
BLOCK_SIZE: tl.constexpr, NUM_WARPS: tl.constexpr):
pid = tl.program_id(0)
num_pids = tl.num_programs(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
dY_ptr = dY + pid * dY_row_stride + col_offsets
X_ptr = X + pid * X_row_stride + col_offsets
dX_ptr = dX + pid * dX_row_stride + col_offsets
dY_row = tl.load(dY_ptr, mask=mask, other=0).to(tl.float32)
X_row = tl.load(X_ptr, mask=mask, other=0).to(tl.float32)
W_row = tl.load(W + col_offsets, mask=mask, other=0).to(tl.float32)
rms = tl.load(r + pid).to(tl.float32)
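    # r caches the per-row inverse RMS from the forward pass; it is reused here to
    # rebuild the normalized input without another reduction.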
X_norm = X_row * rms
dY_W = dY_row * W_row
sum_dY_X = tl.sum(dY_W * X_norm, axis=0)
dX = rms * (dY_W - X_norm * (sum_dY_X / n_cols))
dW_row = dY_row * X_norm
tl.atomic_add(dW + col_offsets, dW_row, mask=mask)
tl.store(dX_ptr, dX, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/rms_norm.py |
7596edff-211e-475d-872d-74ad640ee13a | inout_tensor.py | gmgu/study-triton | 2_inout_tensor/inout_tensor.py | 3a9a24fd3f1de3e7465535ffe72f6deac8a419bd | 0 | @triton.jit
def copy_kernel(in_ptr, out_ptr, n: tl.constexpr):
offsets = tl.arange(0, n)
x = tl.load(in_ptr + offsets)
    tl.store(out_ptr + offsets, x)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/2_inout_tensor/inout_tensor.py |
9c7ea44a-b658-498e-bf16-7647e1ef2be1 | cross_entropy.py | ardywibowo/triton-mode | kernels/cross_entropy.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_cross_entropy_backward(input_grad_ptr, input_stride,
grad_output_ptr, num_classes, BLOCK_SIZE: tl.constexpr):
row_id = tl.program_id(0).to(tl.int64)
input_grad_ptr += row_id * input_stride
grad_output = tl.load(grad_output_ptr)
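    # grad_output is a single scalar; the per-class values already stored in
    # input_grad are rescaled by it in place, block by block.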
for i in range(0, num_classes, BLOCK_SIZE):
input_offsets = i + tl.arange(0, BLOCK_SIZE)
input_grad_block = tl.load(input_grad_ptr + input_offsets, mask=
input_offsets < num_classes)
tl.store(input_grad_ptr + input_offsets, input_grad_block *
grad_output, mask=input_offsets < num_classes)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/cross_entropy.py |
90672e0c-650f-4d76-8b2b-6f1033c4bc1c | y_9.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_9.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def ninth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size:
tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr,
col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
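    # Hard-coded coefficients of the degree-9 (l = 9) real spherical harmonics;
    # the 19 outputs Y00..Y18 below span the 2l + 1 components.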
CONST000 = 1.93163963757558
CONST001 = 2.65478475211798
CONST002 = 1.72771101506082
CONST004 = 1.59908344719522
CONST005 = 6.39633378878088
CONST006 = 6.39633378878088
CONST007 = 8.63855507530412
CONST008 = 9.59450068317133
CONST009 = 4.35889894354067
CONST010 = 10.7269778688696
CONST011 = 10.7269778688696
CONST012 = 6.39633378878088
CONST013 = 15.0007324039945
CONST014 = 13.0937127087774
CONST016 = 14.45506743704
CONST017 = 14.45506743704
CONST018 = 13.3827919767794
CONST019 = 13.5214774630291
CONST020 = 23.8930627690618
CONST021 = 27.0429549260581
CONST022 = 29.2403830344269
CONST023 = 29.2403830344269
CONST024 = 30.001464807989
CONST025 = -480.023436927823
CONST026 = -480.023436927823
CONST029 = 42.9079114754785
CONST030 = -462.562157985281
CONST032 = -967.518168434061
CONST034 = 57.8202697481601
CONST035 = 58.9217071894985
CONST036 = 58.9217071894985
CONST037 = 62.4530292249704
CONST038 = 1081.71819704233
CONST039 = 64.3618672132178
CONST040 = 578.202697481601
CONST044 = 600.029296159779
CONST045 = -936.795438374555
CONST047 = 96.7518168434061
CONST049 = 115.64053949632
CONST051 = -392.811381263323
CONST053 = 137.14955340795
CONST055 = 150.007324039945
CONST056 = -343.263291803828
CONST058 = 11.2632978048796
CONST061 = -315.37233853663
CONST062 = -314.249105010659
CONST063 = 205.957975082297
CONST065 = -294.608535947493
CONST066 = 240.011718463912
CONST068 = 241.879542108515
CONST069 = 255.853351551235
CONST070 = 255.853351551235
CONST071 = -241.879542108515
CONST072 = -240.011718463912
CONST073 = -241.879542108515
CONST074 = 788.430846341574
CONST075 = 1.72771101506082
CONST076 = -1.93163963757558
CONST077 = -1249.06058449941
CONST078 = -223.00191917791
CONST080 = -216.343639408465
CONST081 = 300.01464807989
CONST082 = -204.682681240988
CONST083 = -204.682681240988
CONST084 = -204.682681240988
CONST086 = -196.405690631662
CONST087 = -191.890013663426
CONST088 = -191.890013663427
CONST089 = -187.359087674911
CONST090 = -693.843236977922
CONST091 = 334.502878766866
CONST092 = -176.765121568496
CONST093 = -150.007324039945
CONST094 = -144.5506743704
CONST095 = 374.718175349822
CONST096 = 374.718175349822
CONST097 = -649.030918225395
CONST099 = -630.744677073259
CONST100 = -115.64053949632
CONST101 = -114.421097267943
CONST102 = -115.64053949632
CONST103 = -104.74970167022
CONST104 = 411.915950164594
CONST105 = -95.5722510762473
CONST106 = -90.106382439037
CONST107 = -90.0043944239669
CONST109 = -80.2967518606762
CONST110 = -78.4601809837321
CONST111 = 435.383175795327
CONST112 = -589.217071894985
CONST113 = -78.4601809837321
CONST114 = 435.383175795328
CONST115 = -68.5747767039748
CONST116 = -63.9633378878088
CONST117 = -63.9633378878088
CONST118 = -62.4530292249704
CONST119 = -58.9217071894985
CONST120 = -1081.71819704233
CONST121 = -57.8202697481601
CONST122 = -57.8202697481601
CONST123 = -58.9217071894985
CONST124 = -54.0859098521163
CONST125 = 462.562157985281
CONST127 = -48.3759084217031
CONST128 = -48.375908421703
CONST129 = -38.6327927515116
CONST130 = -30.9062342012093
CONST131 = 483.759084217031
CONST132 = -30.001464807989
CONST133 = -30.001464807989
CONST134 = -27.0429549260581
CONST135 = -24.1879542108515
CONST136 = -24.1879542108515
CONST137 = -1.63671408859718
CONST138 = -15.0007324039945
CONST139 = -13.5214774630291
CONST140 = -13.8216881204866
CONST141 = -13.0937127087774
CONST142 = -13.3827919767794
CONST143 = -9.82028453158308
CONST144 = -4.91014226579154
CONST145 = 511.706703102471
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR01 = VAR07 * VAR07 * VAR07
VAR02 = VAR06 * VAR06
VAR03 = VAR06 * VAR07
VAR04 = VAR07 * VAR07
VAR05 = VAR07 * VAR08
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR10 = VAR16 * VAR16 * VAR16
VAR11 = VAR15 * VAR15
VAR12 = VAR15 * VAR16
VAR13 = VAR16 * VAR16
VAR14 = VAR16 * VAR17
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
VAR19 = VAR25 * VAR25 * VAR25
VAR20 = VAR24 * VAR24
VAR21 = VAR24 * VAR25
VAR22 = VAR25 * VAR25
VAR23 = VAR25 * VAR26
Y00 = (CONST001 * VAR01 + CONST020 * VAR20 * x + CONST078 * VAR07 *
VAR22 + CONST091 * VAR05 * VAR24 + CONST105 * VAR03 * VAR26)
Y01 = y * (-CONST099 * VAR05 * VAR25 + CONST099 * VAR07 * VAR23 +
CONST106 * VAR03 * z - CONST106 * VAR21 * x)
Y02 = CONST000 * VAR01 + VAR03 * (CONST129 * VAR26 + CONST130 * VAR17
) + VAR05 * (CONST021 * VAR24 - CONST097 * VAR17 * VAR26) + VAR07 * (
CONST120 * VAR17 * VAR24 - CONST124 * VAR22) + x * (-CONST080 *
VAR17 * VAR22 + CONST139 * VAR20)
Y03 = VAR16 * (CONST077 * VAR07 * VAR25 + CONST095 * VAR05 * z +
CONST096 * VAR23 * x) + y * (-CONST089 * VAR05 * VAR25 - CONST089 *
VAR07 * VAR23 + CONST109 * VAR03 * z + CONST109 * VAR21 * x)
Y04 = (CONST002 * VAR01 + CONST007 * VAR20 * x + CONST135 * VAR05 *
VAR24 + CONST140 * VAR03 * VAR26 + VAR15 * (CONST032 * VAR07 *
VAR26 + CONST047 * VAR05 + CONST131 * VAR24 * x) + VAR17 * (-
CONST071 * VAR07 * VAR24 + CONST071 * VAR22 * x + CONST111 * VAR05 *
VAR26 + CONST127 * VAR03))
Y05 = VAR14 * (CONST030 * VAR07 * z - CONST030 * VAR25 * x) + VAR16 * (
CONST030 * VAR23 * x + CONST125 * VAR05 * z) + y * (CONST034 *
VAR07 * VAR23 + CONST121 * VAR05 * VAR25 - CONST121 * VAR21 * x +
CONST122 * VAR03 * z)
Y06 = CONST119 * VAR03 * VAR17 - CONST137 * VAR01 + VAR05 * (CONST035 *
VAR17 * VAR26 - CONST086 * VAR15 + CONST143 * VAR24) + VAR07 * (
CONST051 * VAR15 * VAR26 - CONST065 * VAR17 * VAR24 + CONST103 *
VAR13 + CONST141 * VAR22) + x * (-CONST062 * VAR13 * VAR26 -
CONST092 * VAR17 * VAR22 + CONST112 * VAR15 * VAR24 + CONST144 * VAR20)
Y07 = CONST132 * VAR03 * y * z + VAR05 * (CONST081 * VAR16 * z +
CONST107 * VAR25 * y) + VAR07 * (CONST026 * VAR14 * z + CONST044 *
VAR16 * VAR25 + CONST107 * VAR23 * y) + x * (CONST025 * VAR14 *
VAR25 + CONST053 * VAR12 * z + CONST081 * VAR16 * VAR23 + CONST132 *
VAR21 * y)
Y08 = CONST004 * VAR01 + VAR03 * (CONST006 * VAR26 + CONST116 * VAR17
) + VAR05 * (CONST008 * VAR24 + CONST069 * VAR15 + CONST087 * VAR17 *
VAR26) + VAR07 * (CONST005 * VAR22 + CONST083 * VAR13 + CONST087 *
VAR17 * VAR24 + CONST145 * VAR15 * VAR26) + x * (CONST004 * VAR20 +
CONST022 * VAR11 + CONST069 * VAR15 * VAR24 + CONST082 * VAR13 *
VAR26 + CONST116 * VAR17 * VAR22)
Y09 = CONST009 * VAR10 + VAR12 * (CONST110 * VAR26 + CONST113 * VAR08
) + VAR14 * (CONST063 * VAR06 + CONST063 * VAR24 + CONST104 * VAR08 *
VAR26) + VAR16 * (CONST056 * VAR06 * VAR26 + CONST056 * VAR08 *
VAR24 + CONST101 * VAR04 + CONST101 * VAR22) + y * (CONST010 *
VAR20 + CONST011 * VAR02 + CONST029 * VAR04 * VAR26 + CONST029 *
VAR08 * VAR22 + CONST039 * VAR06 * VAR24)
Y10 = CONST004 * VAR19 + VAR21 * (CONST005 * VAR08 + CONST117 * VAR17
) + VAR23 * (CONST008 * VAR06 + CONST070 * VAR15 + CONST088 * VAR08 *
VAR17) + VAR25 * (CONST012 * VAR04 + CONST082 * VAR13 + CONST087 *
VAR06 * VAR17 + CONST145 * VAR08 * VAR15) + z * (CONST004 * VAR02 +
CONST023 * VAR11 + CONST070 * VAR06 * VAR15 + CONST084 * VAR08 *
VAR13 + CONST117 * VAR04 * VAR17)
Y11 = VAR12 * (CONST115 * VAR08 - CONST115 * VAR26) + VAR14 * (CONST066 *
VAR06 + CONST072 * VAR24) + VAR16 * (CONST055 * VAR08 * VAR24 +
CONST093 * VAR04 + CONST093 * VAR06 * VAR26 - CONST093 * VAR22) + y * (
CONST013 * VAR02 + CONST024 * VAR04 * VAR26 + CONST133 * VAR08 *
VAR22 + CONST138 * VAR20)
Y12 = CONST036 * VAR17 * VAR21 + CONST137 * VAR19 + VAR23 * (CONST086 *
VAR15 + CONST123 * VAR08 * VAR17 - CONST143 * VAR06) + VAR25 * (
CONST014 * VAR04 - CONST051 * VAR08 * VAR15 + CONST065 * VAR06 *
VAR17 - CONST103 * VAR13) + z * (CONST062 * VAR08 * VAR13 +
CONST092 * VAR04 * VAR17 - CONST112 * VAR06 * VAR15 - CONST144 * VAR02)
Y13 = VAR14 * (CONST049 * VAR06 + CONST049 * VAR24 + CONST090 * VAR08 *
VAR26) + VAR16 * (CONST040 * VAR06 * VAR26 + CONST040 * VAR08 *
VAR24 + CONST100 * VAR22 + CONST102 * VAR04) + y * (CONST016 *
VAR20 + CONST017 * VAR02 + CONST094 * VAR06 * VAR24 + CONST121 *
VAR04 * VAR26 + CONST122 * VAR08 * VAR22)
Y14 = (CONST007 * VAR02 * z + CONST075 * VAR19 + CONST136 * VAR06 *
VAR23 + CONST140 * VAR08 * VAR21 + VAR15 * (CONST032 * VAR08 *
VAR25 + CONST047 * VAR23 + CONST131 * VAR06 * z) + VAR17 * (
CONST068 * VAR06 * VAR25 + CONST073 * VAR04 * z + CONST114 * VAR08 *
VAR23 + CONST128 * VAR21))
Y15 = VAR16 * (CONST037 * VAR22 - CONST045 * VAR06 * VAR26 + CONST045 *
VAR08 * VAR24 + CONST118 * VAR04) + y * (CONST018 * VAR02 +
CONST089 * VAR04 * VAR26 - CONST089 * VAR08 * VAR22 + CONST142 * VAR20)
Y16 = (CONST019 * VAR02 * z + CONST076 * VAR19 + CONST124 * VAR04 *
VAR25 - CONST129 * VAR08 * VAR21 + CONST134 * VAR06 * VAR23 + VAR17 *
(CONST038 * VAR06 * VAR25 + CONST080 * VAR04 * z + CONST097 * VAR08 *
VAR23 - CONST130 * VAR21))
Y17 = y * (CONST058 * VAR02 + CONST058 * VAR20 + CONST061 * VAR04 *
VAR26 + CONST061 * VAR08 * VAR22 + CONST074 * VAR06 * VAR24)
Y18 = (CONST001 * VAR19 + CONST020 * VAR02 * z + CONST078 * VAR04 *
VAR25 + CONST091 * VAR06 * VAR23 + CONST105 * VAR08 * VAR21)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
tl.store(output_ptr + output_row_offset + 9, Y09, mask=
output_row_offset + 9 < output_numel)
tl.store(output_ptr + output_row_offset + 10, Y10, mask=
output_row_offset + 10 < output_numel)
tl.store(output_ptr + output_row_offset + 11, Y11, mask=
output_row_offset + 11 < output_numel)
tl.store(output_ptr + output_row_offset + 12, Y12, mask=
output_row_offset + 12 < output_numel)
tl.store(output_ptr + output_row_offset + 13, Y13, mask=
output_row_offset + 13 < output_numel)
tl.store(output_ptr + output_row_offset + 14, Y14, mask=
output_row_offset + 14 < output_numel)
tl.store(output_ptr + output_row_offset + 15, Y15, mask=
output_row_offset + 15 < output_numel)
tl.store(output_ptr + output_row_offset + 16, Y16, mask=
output_row_offset + 16 < output_numel)
tl.store(output_ptr + output_row_offset + 17, Y17, mask=
output_row_offset + 17 < output_numel)
tl.store(output_ptr + output_row_offset + 18, Y18, mask=
output_row_offset + 18 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_9.py |
e243a29b-b114-4f2d-a187-afd303c61af0 | special.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/special.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def joint_second_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_stride = 9
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = output_striding + block_size * output_stride * block_id
CONST_00 = 3.87298334620742
CONST_01 = 2.23606797749979
CONST_02 = 4.47213595499958
CONST_03 = tl.sqrt(3.0)
g_Y10 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_Y11 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_Y12 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_Y20 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_Y21 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_Y22 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
g_Y23 = tl.load(sph_grad_ptr + output_row_offset + 7, mask=
output_row_offset + 7 < output_numel)
g_Y24 = tl.load(sph_grad_ptr + output_row_offset + 8, mask=
output_row_offset + 8 < output_numel)
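    # Chain rule: fold the incoming first- and second-order spherical-harmonic
    # gradients back into Cartesian gradients for x, y and z.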
g_x = (CONST_00 * g_Y20 * z + CONST_00 * g_Y21 * y - CONST_01 * g_Y22 *
x - CONST_00 * g_Y24 * x + CONST_03 * g_Y10)
g_y = (CONST_00 * g_Y21 * x + CONST_02 * g_Y22 * y + CONST_00 * g_Y23 *
z + CONST_03 * g_Y11)
g_z = (CONST_00 * g_Y20 * x - CONST_01 * g_Y22 * z + CONST_00 * g_Y23 *
y + CONST_00 * g_Y24 * z + CONST_03 * g_Y12)
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/special.py |
12f42420-1a7f-46d4-adc4-5b7cb9b2a72f | triton_kernels.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/triton_kernels.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def _triton_fourth_order_fwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl.
tensor, sh_1_0_ptr: tl.tensor, sh_1_1_ptr: tl.tensor, sh_1_2_ptr: tl.
tensor, sh_2_0_ptr: tl.tensor, sh_2_1_ptr: tl.tensor, sh_2_2_ptr: tl.
tensor, sh_2_3_ptr: tl.tensor, sh_2_4_ptr: tl.tensor, sh_3_0_ptr: tl.
tensor, sh_3_1_ptr: tl.tensor, sh_3_2_ptr: tl.tensor, sh_3_3_ptr: tl.
tensor, sh_3_4_ptr: tl.tensor, sh_3_5_ptr: tl.tensor, sh_3_6_ptr: tl.
tensor, sh_4_0_ptr: tl.tensor, sh_4_1_ptr: tl.tensor, sh_4_2_ptr: tl.
tensor, sh_4_3_ptr: tl.tensor, sh_4_4_ptr: tl.tensor, sh_4_5_ptr: tl.
tensor, sh_4_6_ptr: tl.tensor, sh_4_7_ptr: tl.tensor, sh_4_8_ptr: tl.
tensor, BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr):
sqrt_3 = 3 ** 0.5
block_id = tl.program_id(0)
offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id
x_row_start = x_ptr + offset
y_row_start = y_ptr + offset
z_row_start = z_ptr + offset
x = tl.load(x_row_start, mask=offset < vector_length)
y = tl.load(y_row_start, mask=offset < vector_length)
z = tl.load(z_row_start, mask=offset < vector_length)
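    # First-order harmonics are the coordinates scaled by sqrt(3); each higher
    # order below is assembled recursively from the previous one.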
sh_1_0 = x * sqrt_3
sh_1_1 = y * sqrt_3
sh_1_2 = z * sqrt_3
sqrt_15 = 15 ** 0.5
sqrt_5 = 5 ** 0.5
sq_x = x * x
sq_y = y * y
sq_z = z * z
sh_2_0 = sqrt_15 * x * z
sh_2_1 = sqrt_15 * x * y
sh_2_2 = sqrt_5 * (sq_y - 0.5 * (sq_x + sq_z))
sh_2_3 = sqrt_15 * y * z
sh_2_4 = 0.5 * sqrt_15 * (sq_z - sq_x)
sqrt_42 = 42 ** 0.5
sqrt_168 = 168 ** 0.5
sqrt_7 = 7 ** 0.5
sh_3_0 = 1 / 6 * sqrt_42 * (sh_2_0 * z + sh_2_4 * x)
sh_3_1 = sqrt_7 * sh_2_0 * y
sh_3_2 = 1 / 8 * sqrt_168 * (4 * sq_y - (sq_x + sq_z)) * x
sh_3_3 = 0.5 * sqrt_7 * y * (2 * sq_y - 3 * (sq_x + sq_z))
sh_3_4 = 1 / 8 * sqrt_168 * z * (4 * sq_y - (sq_x + sq_z))
sh_3_5 = sqrt_7 * (sh_2_4 * y)
sh_3_6 = 1 / 6 * sqrt_42 * (sh_2_4 * z - sh_2_0 * x)
sqrt_2 = 2 ** 0.5
sqrt_210 = 210 ** 0.5
sqrt_14 = 14 ** 0.5
sqrt_21 = 21 ** 0.5
sqrt_70 = 70 ** 0.5
sqrt_105 = 105 ** 0.5
sqrt_6 = 6 ** 0.5
sh_4_0 = 3 / 4 * sqrt_2 * (sh_3_0 * z + sh_3_6 * x)
sh_4_1 = (3 / 4 * sh_3_0 * y + 3 / 8 * sqrt_6 * sh_3_1 * z + 3 / 8 *
sqrt_6 * sh_3_5 * x)
sh_4_2 = (-3 / 56 * sqrt_14 * sh_3_0 * z + 3 / 14 * sqrt_21 * sh_3_1 *
y + 3 / 56 * sqrt_210 * sh_3_2 * z + 3 / 56 * sqrt_210 * sh_3_4 * x +
3 / 56 * sqrt_14 * sh_3_6 * x)
sh_4_3 = (-3 / 56 * sqrt_42 * sh_3_1 * z + 3 / 28 * sqrt_105 * sh_3_2 *
y + 3 / 28 * sqrt_70 * sh_3_3 * x + 3 / 56 * sqrt_42 * sh_3_5 * x)
sh_4_4 = (-3 / 28 * sqrt_42 * sh_3_2 * x + 3 / 7 * sqrt_7 * sh_3_3 * y -
3 / 28 * sqrt_42 * sh_3_4 * z)
sh_4_5 = (-3 / 56 * sqrt_42 * sh_3_1 * x + 3 / 28 * sqrt_70 * sh_3_3 *
z + 3 / 28 * sqrt_105 * sh_3_4 * y - 3 / 56 * sqrt_42 * sh_3_5 * z)
sh_4_6 = (-3 / 56 * sqrt_14 * sh_3_0 * x - 3 / 56 * sqrt_210 * sh_3_2 *
x + 3 / 56 * sqrt_210 * sh_3_4 * z + 3 / 14 * sqrt_21 * sh_3_5 * y -
3 / 56 * sqrt_14 * sh_3_6 * z)
sh_4_7 = (-3 / 8 * sqrt_6 * sh_3_1 * x + 3 / 8 * sqrt_6 * sh_3_5 * z +
3 / 4 * sh_3_6 * y)
sh_4_8 = 3 / 4 * sqrt_2 * (-sh_3_0 * x + sh_3_6 * z)
sh_1_0_start = sh_1_0_ptr + offset
sh_1_1_start = sh_1_1_ptr + offset
sh_1_2_start = sh_1_2_ptr + offset
sh_2_0_start = sh_2_0_ptr + offset
sh_2_1_start = sh_2_1_ptr + offset
sh_2_2_start = sh_2_2_ptr + offset
sh_2_3_start = sh_2_3_ptr + offset
sh_2_4_start = sh_2_4_ptr + offset
sh_3_0_start = sh_3_0_ptr + offset
sh_3_1_start = sh_3_1_ptr + offset
sh_3_2_start = sh_3_2_ptr + offset
sh_3_3_start = sh_3_3_ptr + offset
sh_3_4_start = sh_3_4_ptr + offset
sh_3_5_start = sh_3_5_ptr + offset
sh_3_6_start = sh_3_6_ptr + offset
sh_4_0_start = sh_4_0_ptr + offset
sh_4_1_start = sh_4_1_ptr + offset
sh_4_2_start = sh_4_2_ptr + offset
sh_4_3_start = sh_4_3_ptr + offset
sh_4_4_start = sh_4_4_ptr + offset
sh_4_5_start = sh_4_5_ptr + offset
sh_4_6_start = sh_4_6_ptr + offset
sh_4_7_start = sh_4_7_ptr + offset
sh_4_8_start = sh_4_8_ptr + offset
tl.store(sh_1_0_start, sh_1_0, mask=offset < vector_length)
tl.store(sh_1_1_start, sh_1_1, mask=offset < vector_length)
tl.store(sh_1_2_start, sh_1_2, mask=offset < vector_length)
tl.store(sh_2_0_start, sh_2_0, mask=offset < vector_length)
tl.store(sh_2_1_start, sh_2_1, mask=offset < vector_length)
tl.store(sh_2_2_start, sh_2_2, mask=offset < vector_length)
tl.store(sh_2_3_start, sh_2_3, mask=offset < vector_length)
tl.store(sh_2_4_start, sh_2_4, mask=offset < vector_length)
tl.store(sh_3_0_start, sh_3_0, mask=offset < vector_length)
tl.store(sh_3_1_start, sh_3_1, mask=offset < vector_length)
tl.store(sh_3_2_start, sh_3_2, mask=offset < vector_length)
tl.store(sh_3_3_start, sh_3_3, mask=offset < vector_length)
tl.store(sh_3_4_start, sh_3_4, mask=offset < vector_length)
tl.store(sh_3_5_start, sh_3_5, mask=offset < vector_length)
tl.store(sh_3_6_start, sh_3_6, mask=offset < vector_length)
tl.store(sh_4_0_start, sh_4_0, mask=offset < vector_length)
tl.store(sh_4_1_start, sh_4_1, mask=offset < vector_length)
tl.store(sh_4_2_start, sh_4_2, mask=offset < vector_length)
tl.store(sh_4_3_start, sh_4_3, mask=offset < vector_length)
tl.store(sh_4_4_start, sh_4_4, mask=offset < vector_length)
tl.store(sh_4_5_start, sh_4_5, mask=offset < vector_length)
tl.store(sh_4_6_start, sh_4_6, mask=offset < vector_length)
tl.store(sh_4_7_start, sh_4_7, mask=offset < vector_length)
tl.store(sh_4_8_start, sh_4_8, mask=offset < vector_length)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py |
2330b2e5-9c58-4b9b-8328-2f4b391bd8bf | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_softmax/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_softmax_kernel_variable_length_loop_buffer_then_sum(
input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
buffer_max_all = tl.full((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), value=float
('-inf'), dtype=tl.float32)
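    # Pass 1: running elementwise max over all ragged rows of this batch, for a
    # numerically stable softmax.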
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
buffer_max_all = tl.maximum(buffer_max_all, input)
buffer_max = tl.max(buffer_max_all, axis=0, keep_dims=True)
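    # Pass 2: accumulate exp(x - max) to build the softmax denominator.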
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
buffer += tl.exp(input - buffer_max)
buffer_exp_sum = tl.sum(buffer, axis=0)
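    # Pass 3: recompute exp(x - max) and normalize by the summed denominator.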
for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED
):
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=float('-inf')
)
output = tl.fdiv(tl.exp(input - buffer_max), buffer_exp_sum)
tl.store(output_ptr + idxs, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_softmax/kernels.py |
a85dfd4f-2d51-4198-9b23-08c7173a6ea2 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8)], key=['BC', 'BK'])
@triton.jit
def chunk_gla_fwd_A_kernel_intra_sub_intra_split(q, k, g, A, offsets,
indices, scale, B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K:
tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC:
tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_tc, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
i_t, i_i = i_tc // NC, i_tc % NC
i_j = i_i
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
all = T
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
all = B * T
if i_t * BT + i_i * BC >= T:
return
o_i = tl.arange(0, BC)
o_k = i_k * BK + tl.arange(0, BK)
m_k = o_k < K
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
if HEAD_FIRST:
o_A = (i_k * B * H + i_bh) * T * BC + (i_t * BT + i_i * BC + tl.
arange(0, BC)) * BC
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT +
i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT +
i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_k = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT +
i_j * BC) * K + o_k, BK), BK)
else:
o_A = (i_k * all + bos + i_t * BT + i_i * BC + tl.arange(0, BC)
) * H * BC + i_h * BC
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
p_k = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
p_gk = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_j *
BC) * H * K + i_h * K + o_k, BK), BK)
b_q = tl.load(p_q, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0, 1))
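    # Build the intra-sub-chunk score block one column j at a time; tl.where(o_i >= j, ...)
    # keeps only the causal (lower-triangular) entries.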
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
b_A = tl.zeros([BC], dtype=tl.float32)
b_k = tl.load(p_k, mask=m_k, other=0).to(tl.float32)
b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32)
b_A += tl.sum(b_q * b_k[None, :] * tl.exp(b_g - b_gk[None, :]), 1)
b_A = tl.where(o_i >= j, b_A * scale, 0.0)
tl.store(A + o_A + j, b_A, mask=m_A)
p_k += K if HEAD_FIRST else H * K
p_gk += K if HEAD_FIRST else H * K
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
37d65854-b0fd-4bc2-a248-cd43fe18bd16 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_jagged_bmm_jagged_out_kernel(a_ptr, a_offset_ptr, b_ptr,
b_offset_ptr, c_ptr, offsets_mn_ptr, max_seq_len, num_blocks_n, K,
stride_am, stride_ak, stride_bk, stride_bn, allow_tf32: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K:
tl.constexpr):
"""
Kernel for computing C = A x B.
A has shape (sum_B(Mi), K), B has shape (K, sum_B(Ni))
and C has shape (sum_B(Mi * Ni))
"""
pid = tl.program_id(axis=0)
pid_batch = tl.program_id(axis=1)
begin_a = tl.load(a_offset_ptr + pid_batch)
end_a = tl.load(a_offset_ptr + pid_batch + 1)
begin_b = tl.load(b_offset_ptr + pid_batch)
end_b = tl.load(b_offset_ptr + pid_batch + 1)
offset_mn = tl.load(offsets_mn_ptr + pid_batch)
M = end_a - begin_a
M = tl.minimum(M, max_seq_len)
N = end_b - begin_b
N = tl.minimum(N, max_seq_len)
pid_m = pid // num_blocks_n
pid_n = pid % num_blocks_n
if pid_m * BLOCK_SIZE_M >= M or pid_n * BLOCK_SIZE_N >= N:
return
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak) + begin_a * stride_am
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] *
stride_bn) + begin_b * stride_bn
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
updated_offset = k + offs_k
a = tl.load(a_ptrs, mask=(updated_offset[None, :] < K) & (offs_am[:,
None] < M), other=0.0)
b = tl.load(b_ptrs, mask=(updated_offset[:, None] < K) & (offs_bn[
None, :] < N), other=0.0)
c += tl.dot(a, b, allow_tf32=allow_tf32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + offset_mn + N * offs_cm[:, None] + offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
9c9a0484-f69a-4208-aa24-d3d5c62c9050 | sb_varlen_bwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_bwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.jit
def _backward_one_row(seq_prog_id, seq_length, qk_scale, M_range, N_range,
D_range, D_mask, cm, DO_head_seq_ptr, stride_dom, stride_dod: tl.
constexpr, DR_head_seq_ptr, stride_drm, A_head_seq_ptr, stride_am: tl.
constexpr, Q_head_seq_ptr, stride_qm, stride_qd: tl.constexpr,
K_head_seq_ptr, stride_kn, stride_kd: tl.constexpr, V_head_seq_ptr,
stride_vn, stride_vd: tl.constexpr, DQ_head_seq_ptr, stride_dqm,
stride_dqd: tl.constexpr, DK_head_seq_ptr, stride_dkn, stride_dkd: tl.
constexpr, DV_head_seq_ptr, stride_dvn, stride_dvd: tl.constexpr,
KV_Lock_ptr, KV_Count_ptr, logit_scale, BLOCK_D: tl.constexpr,
NO_D_MASK: tl.constexpr, NO_M_MASK: tl.constexpr, ALLOW_TF32: tl.
constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, acc_dtype: tl.
constexpr=tl.float32, is_compiling: tl.constexpr=False, attend_current:
tl.constexpr=False):
block_start_offset = BLOCK_M * seq_prog_id
M_blk_idxs = block_start_offset + M_range
M_mask = M_blk_idxs < seq_length
NO_M_MASK = block_start_offset + BLOCK_M - 1 < seq_length
N_blk_idxs_start = 0
N_blk_idxs = N_blk_idxs_start + N_range
DO_blk_ptrs = DO_head_seq_ptr + (stride_dom * M_blk_idxs[:, None] +
stride_dod * D_range[None, :])
K_blk_ptrs = K_head_seq_ptr + (stride_kn * N_blk_idxs[:, None] +
stride_kd * D_range[None, :])
Q_blk_ptrs = Q_head_seq_ptr + (stride_qm * M_blk_idxs[:, None] +
stride_qd * D_range[None, :])
V_blk_ptrs = V_head_seq_ptr + (stride_vn * N_blk_idxs[:, None] +
stride_vd * D_range[None, :])
A_blk_ptrs = A_head_seq_ptr + stride_am * M_blk_idxs
DQ_blk_ptrs = DQ_head_seq_ptr + (stride_dqm * M_blk_idxs[:, None] +
stride_dqd * D_range[None, :])
DK_blk_ptrs = DK_head_seq_ptr + (stride_dkn * N_blk_idxs[:, None] +
stride_dkd * D_range[None, :])
DV_blk_ptrs = DV_head_seq_ptr + (stride_dvn * N_blk_idxs[:, None] +
stride_dvd * D_range[None, :])
DR_blk_ptrs = DR_head_seq_ptr + stride_drm * M_blk_idxs
if NO_D_MASK:
if NO_M_MASK:
q = tl.load(Q_blk_ptrs)
do = tl.load(DO_blk_ptrs)
dr = tl.load(DR_blk_ptrs)
neg_log_acc = tl.load(A_blk_ptrs, mask=M_mask)
else:
q = tl.load(Q_blk_ptrs, mask=M_mask[:, None])
do = tl.load(DO_blk_ptrs, mask=M_mask[:, None])
dr = tl.load(DR_blk_ptrs, mask=M_mask)
neg_log_acc = tl.load(A_blk_ptrs, mask=M_mask)
else:
MD_mask = M_mask[:, None] & D_mask[None, :]
q = tl.load(Q_blk_ptrs, mask=MD_mask)
do = tl.load(DO_blk_ptrs, mask=MD_mask)
dr = tl.load(DR_blk_ptrs, mask=M_mask)
neg_log_acc = tl.load(A_blk_ptrs, mask=M_mask)
neg_log_acc = neg_log_acc.to(dtype=acc_dtype)
grad_prev_acc = tl.zeros((BLOCK_M,), dtype=acc_dtype)
dq = tl.zeros((BLOCK_M, BLOCK_D), dtype=acc_dtype)
fwd_cm = tl.trans(cm)
iters = (block_start_offset + BLOCK_M) // BLOCK_N
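    # Sweep key/value blocks from the start of the sequence up through the current
    # query block.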
for i in range(iters):
on_band = iters - i - 1 < BLOCK_M // BLOCK_N
N_mask = N_blk_idxs < seq_length
NO_N_MASK = N_blk_idxs_start + BLOCK_N - 1 < seq_length
        k, v = load_kv(K_blk_ptrs, V_blk_ptrs, N_mask=N_mask, NO_N_MASK=
            NO_N_MASK, D_mask=D_mask, NO_D_MASK=NO_D_MASK)
p, log_om_beta, neg_log_acc = compute_block(q, k, qk_scale,
neg_log_acc, M_blk_idxs, N_blk_idxs, cm, on_band, ALLOW_TF32,
attend_current=attend_current, backward=True, is_compiling=
is_compiling)
if not NO_M_MASK:
neg_log_acc = tl.where(M_mask, neg_log_acc, 0.0)
att_dA = p * (tl.dot(do, tl.trans(v), allow_tf32=ALLOW_TF32) - dr[:,
None])
cumul_att_dA = tl.dot(att_dA.to(cm.dtype), fwd_cm, allow_tf32=
ALLOW_TF32) + grad_prev_acc[:, None]
grad_prev_acc += tl.sum(att_dA, axis=1)
beta = 1 - tl.exp2(log_om_beta)
dqk = att_dA - beta * cumul_att_dA
dq = tl.dot(dqk.to(k.dtype), k, acc=dq, allow_tf32=ALLOW_TF32)
block_dk = tl.dot(tl.trans(dqk).to(q.dtype), q, allow_tf32=ALLOW_TF32
) * logit_scale
block_dv = tl.dot(tl.trans(p), do.to(p.dtype), allow_tf32=ALLOW_TF32)
locked_add(KV_Lock_ptr + i, KV_Count_ptr + i, DK_blk_ptrs, block_dk,
DV_blk_ptrs, block_dv, N_mask, NO_N_MASK, D_mask, NO_D_MASK)
N_blk_idxs += BLOCK_N
N_blk_idxs_start += BLOCK_N
K_blk_ptrs += BLOCK_N * stride_kn
V_blk_ptrs += BLOCK_N * stride_vn
DK_blk_ptrs += BLOCK_N * stride_dkn
DV_blk_ptrs += BLOCK_N * stride_dvn
dq = (logit_scale * dq).to(DQ_head_seq_ptr.type.element_ty)
if NO_D_MASK:
tl.store(DQ_blk_ptrs, dq, mask=M_mask[:, None])
else:
tl.store(DQ_blk_ptrs, dq, mask=M_mask[:, None] & D_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Transposed Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_bwd.py |
0fa9d38c-c513-408f-a16f-741c7127f438 | flash_triton.py | MayDomine/Burst-Attention | burst_attn/flash_triton.py | b088c554072935074ea9c643de5ee363be5ab1f6 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[
'BLOCK_N'] == 0, 'EVEN_HEADDIM': lambda args: args['headdim'] == args[
'BLOCK_HEADDIM']})
@triton.jit
def _fwd_kernel(Q, K, V, Bias, Out, Lse, TMP, softmax_scale, stride_qb,
stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb,
stride_vh, stride_vn, stride_bb, stride_bh, stride_bm, stride_ob,
stride_oh, stride_om, nheads, seqlen_q, seqlen_k, seqlen_q_rounded,
headdim, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.
constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M:
tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M:
tl.constexpr, BLOCK_N: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] *
stride_qm + offs_d[None, :])
k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] *
stride_kn + offs_d[None, :])
v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] *
stride_vn + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:,
None] * stride_bm + offs_n[None, :])
t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
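    # Online-softmax state: running max (m_i), running log-sum-exp (lse_i) and the
    # un-normalized output accumulator (acc_o).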
if EVEN_M & EVEN_N:
if EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
elif EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[
None, :] < headdim), other=0.0)
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) *
BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn)
else:
k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None,
:] < headdim, other=0.0)
elif EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n +
offs_n)[:, None] < seqlen_k, other=0.0)
else:
k = tl.load(k_ptrs + start_n * stride_kn, mask=((start_n +
offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
if not EVEN_N:
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float
('-inf'))
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :],
0, float('-inf'))
if BIAS_TYPE != 'none':
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n <
seqlen_k, other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n, mask=(offs_m[:, None] <
seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k
), other=0.0).to(tl.float32)
qk = qk * softmax_scale + bias
m_ij = tl.maximum(tl.max(qk, 1), lse_i)
p = tl.exp(qk - m_ij[:, None])
else:
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
acc_o_scale = tl.exp(m_i - m_ij)
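        # Storing and immediately reloading acc_o_scale via TMP mirrors the reference
        # flash-attention Triton kernel, where it appears to work around a compiler
        # materialization issue rather than being required by the math.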
tl.store(t_ptrs, acc_o_scale)
acc_o_scale = tl.load(t_ptrs)
acc_o = acc_o * acc_o_scale[:, None]
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn)
else:
v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None,
:] < headdim, other=0.0)
elif EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n +
offs_n)[:, None] < seqlen_k, other=0.0)
else:
v = tl.load(v_ptrs + start_n * stride_vn, mask=((start_n +
offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
o_scale = tl.exp(m_i - lse_i)
tl.store(t_ptrs, o_scale)
o_scale = tl.load(t_ptrs)
acc_o = acc_o * o_scale[:, None]
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
tl.store(lse_ptrs, lse_i)
offs_d = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:,
None] * stride_om + offs_d[None, :])
if EVEN_M:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
elif EVEN_HEADDIM:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
else:
tl.store(out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (
offs_d[None, :] < headdim))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py |
8a6f7534-fbb7-4a16-8b55-555cc439bcf0 | paged_attn_v2.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn_v2.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _paged_attention_v2_reduce(out, exp_sums, max_logits, tmp_out,
context_lens, stride_exp_m, stride_exp_n, stride_out_m, stride_out_n,
stride_tmp_m, stride_tmp_n, stride_tmp_k, HEAD_SIZE: tl.constexpr,
NUM_PARTITIONS: tl.constexpr):
seq_idx = tl.program_id(axis=1)
head_idx = tl.program_id(axis=0)
context_len = tl.load(context_lens + seq_idx)
num_partitions = tl.cdiv(context_len, PARTITION_SIZE)
exp_sum = 0.0
max_logit = float('-inf')
offs_logit = seq_idx * stride_exp_m + head_idx * stride_exp_n
head_size_offs = tl.arange(0, HEAD_SIZE)
tmp_out_ptr = seq_idx * stride_tmp_m + head_idx * stride_tmp_n
out_ptr = seq_idx * stride_out_m + head_idx * stride_out_n + head_size_offs
acc = tl.zeros([HEAD_SIZE], dtype=tl.float32)
global_exp_sum = tl.zeros([1], dtype=tl.float32)
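    # Reduce over partitions: take the global max logit, rescale each partition's
    # exp-sum accordingly, and combine the partial outputs.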
logits = tl.load(max_logits + offs_logit + tl.arange(0, NUM_PARTITIONS),
mask=tl.arange(0, NUM_PARTITIONS) < num_partitions, other=float('-inf')
)
max_logit = tl.max(logits, axis=0)
exp_sum = tl.load(exp_sums + offs_logit + tl.arange(0, NUM_PARTITIONS),
mask=tl.arange(0, NUM_PARTITIONS) < num_partitions, other=0.0)
rescaled_exp_sum = exp_sum * tl.exp(logits - max_logit)
global_exp_sum += tl.sum(rescaled_exp_sum, axis=0)
tmp = tl.load(tmp_out + tmp_out_ptr + tl.arange(0, NUM_PARTITIONS)[:,
None] * stride_tmp_k + head_size_offs)
acc += tl.sum(tmp * rescaled_exp_sum[:, None], axis=0)
inv_sum = 1.0 / (global_exp_sum + 1e-06)
tl.store(out + out_ptr, acc * inv_sum)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn_v2.py |
8114d12c-96e5-4779-a4c3-39663c02465d | conv.py | chengzeyi/stable-fast | src/sfast/triton/ops/conv.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @conv_heuristics()
@triton.jit
def _kernel_delta_x(x, w, bias, y, stride_xn, stride_xc, stride_xh,
stride_xw, stride_wn, stride_wc, stride_wh, stride_ww, stride_yn,
stride_yc, stride_yh, stride_yw, delta_x_ptr, BATCH, IN_C, IN_H, IN_W,
KERNEL_N, KERNEL_H, KERNEL_W, OUT_H, OUT_W, stride_h, stride_w,
padding_h, padding_w, dilation_h, dilation_w, output_padding_h,
output_padding_w, groups, ACC_TYPE: tl.constexpr, CONV1X1_NHWC: tl.
constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.
constexpr, GROUP_H: tl.constexpr, WITH_BIAS: tl.constexpr):
"""
each program instance computes a [BLOCK_BATCH, BLOCK_N, BLOCK_H, BLOCK_W] block of y
"""
pid_nhw = tl.program_id(0)
pid_k = tl.program_id(1)
off_y_k = pid_k * BLOCK_N + tl.arange(0, BLOCK_N)
off_y_nhw = pid_nhw * BLOCK_M + tl.arange(0, BLOCK_M)
off_y_n = off_y_nhw // (OUT_H * OUT_W)
off_y_hw = off_y_nhw % (OUT_H * OUT_W)
off_y_h = off_y_hw // OUT_W + output_padding_h
off_y_w = off_y_hw % OUT_W + output_padding_w
off_x_n = off_y_n
off_x_h = off_y_h * stride_h - padding_h
off_x_w = off_y_w * stride_w - padding_w
off_x_nhw = off_x_n * stride_xn + off_x_h * stride_xh + off_x_w * stride_xw
off_x_crs = tl.arange(0, BLOCK_K)
CRS = IN_C * KERNEL_H * KERNEL_W
if not CONV1X1_NHWC:
delta_x_ptrs = delta_x_ptr + off_x_crs
off_x_crs_unpacked = tl.load(delta_x_ptrs, mask=off_x_crs < CRS)
x_ptrs = x + off_x_nhw[:, None] + off_x_crs_unpacked[None, :]
else:
x_ptrs = x + off_x_nhw[:, None] + off_x_crs[None, :]
mask_x = ((off_x_n < BATCH) & (off_x_h >= 0) & (off_x_h < IN_H) & (
off_x_w >= 0) & (off_x_w < IN_W))[:, None] & (off_x_crs < CRS)[None, :]
off_w_crs = tl.arange(0, BLOCK_K)
off_w_k = off_y_k
w_ptrs = w + off_w_crs[:, None] + off_w_k[None, :] * stride_wn
mask_w = (off_x_crs < CRS)[:, None] & (off_w_k < KERNEL_N)[None, :]
matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)
matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for crs in range(0, CRS, BLOCK_K):
acc += tl.dot(matrix_x, matrix_w, out_dtype=ACC_TYPE)
w_ptrs += BLOCK_K
if not CONV1X1_NHWC:
delta_x_ptrs += BLOCK_K
off_x_crs = crs + BLOCK_K + tl.arange(0, BLOCK_K)
off_x_crs_unpacked = tl.load(delta_x_ptrs, mask=off_x_crs < CRS,
other=0)
x_ptrs = x + off_x_nhw[:, None] + off_x_crs_unpacked[None, :]
else:
off_x_crs = crs + BLOCK_K + tl.arange(0, BLOCK_K)
x_ptrs += BLOCK_K
mask_x = ((off_x_n < BATCH) & (off_x_h >= 0) & (off_x_h < IN_H) & (
off_x_w >= 0) & (off_x_w < IN_W))[:, None] & (off_x_crs < CRS)[
None, :]
mask_w = (off_x_crs < CRS)[:, None] & (off_w_k < KERNEL_N)[None, :]
matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)
matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
if WITH_BIAS:
acc += tl.load(bias + off_y_k)[None, :]
acc = acc.to(y.dtype.element_ty)
off_y_k = pid_k * BLOCK_N + tl.arange(0, BLOCK_N)
off_y_nhw = pid_nhw * BLOCK_M + tl.arange(0, BLOCK_M)
off_y_n = off_y_nhw // (OUT_H * OUT_W)
off_y_hw = off_y_nhw % (OUT_H * OUT_W)
off_y_h = off_y_hw // OUT_W + output_padding_h
off_y_w = off_y_hw % OUT_W + output_padding_w
y_ptrs = y + off_y_n[:, None] * stride_yn + off_y_h[:, None
] * stride_yh + off_y_w[:, None] * stride_yw + off_y_k[None, :
] * stride_yc
mask_y = (off_y_n < BATCH)[:, None] & (off_y_h < OUT_H + output_padding_h)[
:, None] & (off_y_w < OUT_W + output_padding_w)[:, None] & (off_y_k <
KERNEL_N)[None, :]
tl.store(y_ptrs, acc, mask=mask_y)
return
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/conv.py |
bd8d9b19-0eb1-4606-abf9-07bc9b74d955 | logsumexp.py | sustcsonglin/flash-linear-attention | fla/ops/utils/logsumexp.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'HAS_SCALE': lambda args: args['scale'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16, 32]], key=['D'])
@triton.jit
def logsumexp_fwd_kernel(x, z, scale, D: tl.constexpr, B: tl.constexpr,
HAS_SCALE: tl.constexpr):
i_n, i_d = tl.program_id(0).to(tl.int64), tl.program_id(1).to(tl.int64)
o_d = i_d * B + tl.arange(0, B)
m_d = o_d < D
b_x = tl.load(x + i_n * D + o_d, mask=m_d, other=-float('inf'))
if HAS_SCALE:
b_x = b_x * scale
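    # Numerically stable log-sum-exp over this block: subtract the block max before
    # exponentiating, then add it back after the log.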
b_m = tl.max(b_x, 0)
b_z = tl.log(tl.sum(tl.exp(b_x - b_m), 0)) + b_m
tl.store(z + i_n * tl.cdiv(D, B) + i_d, b_z)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/logsumexp.py |
6a35f449-cfba-4f15-a4d9-9faf8f90396e | gemm_postop_addmatrix_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M':
256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4,
'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({
'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32
), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512,
'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages
=2, num_warps=32)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers(a_ptr, b_ptr, c_ptr, d_ptr, M: tl.
constexpr, N: tl.constexpr, K: tl.constexpr, stride_am: tl.constexpr,
stride_ak: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.
constexpr, stride_cm: tl.constexpr, stride_cn: tl.constexpr, stride_dm:
tl.constexpr, stride_dn: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
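    # Grouped launch order (GROUP_SIZE_M programs per super-group along M) improves
    # L2 reuse of the A and B tiles across neighboring programs.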
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(
stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(
stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
b = tl.load(b_block_ptr, boundary_check=(0, 1))
accumulator += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
d_block_ptr = tl.make_block_ptr(base=d_ptr, shape=(M, N), strides=(
stride_dm, stride_dn), offsets=(pid_m * BLOCK_SIZE_M, pid_n *
BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
d = tl.load(d_block_ptr, boundary_check=(0, 1))
c = accumulator + d
c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=(
stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M, pid_n *
BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py |
10debe65-3ac2-4a2b-b2a0-78f9bbae7964 | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def tensor_elementwise_add(x, y):
return x + y
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
965c0840-cbb1-4409-bcfe-384d35052963 | softmax.py | l1351868270/implicit_gemm.triton | triton_kernel/softmax.py | 64eb8548ccf4576883c928f6315be8b24680a455 | 0 | @triton.jit
def _ld_softmax_bwd_kernel(ds_ptr, p_ptr, dp_ptr, ds_row_stride,
p_row_stride, dp_row_stride, n_rows, n_cols, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
p_start_ptr = p_ptr + row_idx * p_row_stride
dp_start_ptr = dp_ptr + row_idx * dp_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
p_ptrs = p_start_ptr + col_offsets
dp_ptrs = dp_start_ptr + col_offsets
mask = col_offsets < n_cols
p_row = tl.load(p_ptrs, mask=mask, other=0)
dp_row = tl.load(dp_ptrs, mask=mask, other=0)
ds_row = p_row * (dp_row - tl.sum(p_row * dp_row, axis=0))
ds_start_ptr = ds_ptr + row_idx * ds_row_stride
ds_ptrs = ds_start_ptr + col_offsets
tl.store(ds_ptrs, ds_row, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_kernel/softmax.py |
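A minimal launch sketch for _ld_softmax_bwd_kernel above, assuming row-major 2-D tensors p (softmax output) and dp (upstream gradient); the wrapper name and the next-power-of-two BLOCK_SIZE choice are illustrative assumptions.

import torch
import triton

def ld_softmax_bwd(p: torch.Tensor, dp: torch.Tensor) -> torch.Tensor:
    # Hypothetical wrapper: one program per row; BLOCK_SIZE covers the full row,
    # with the in-kernel mask handling columns beyond n_cols.
    n_rows, n_cols = p.shape
    ds = torch.empty_like(p)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    _ld_softmax_bwd_kernel[(n_rows,)](
        ds, p, dp,
        ds.stride(0), p.stride(0), dp.stride(0),
        n_rows, n_cols,
        BLOCK_SIZE=BLOCK_SIZE,
    )
    return ds
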
23ae7635-b08c-4919-a8c0-55f2f7104fe2 | group_norm.py | chengzeyi/stable-fast | src/sfast/triton/ops/group_norm.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @eval(
"""triton.heuristics({
'ROW_SIZE':
lambda kwargs: triton.next_power_of_2(kwargs['C'] // kwargs['groups']),
'BLOCK_SIZE':
lambda kwargs: max(
1, min(triton.next_power_of_2(kwargs['HxW']),
4096 // (triton.next_power_of_2(kwargs['C'] // kwargs['groups']))
)),
})"""
)
@eval(
"""triton.heuristics({
'num_warps':
lambda kwargs: max(1, min(16, kwargs['ROW_SIZE'] * kwargs['BLOCK_SIZE'] // 128)),
'C_G': lambda kwargs: kwargs['C'] // kwargs['groups'],
})"""
)
@triton.jit
def group_norm_4d_channels_last_forward_collect_stats_kernel(input_ptr, N,
C, HxW, groups, eps, mean_ptr, rstd_ptr, C_G, ROW_SIZE: tl.constexpr,
BLOCK_SIZE: tl.constexpr):
group = tl.program_id(0)
pid_batch = tl.program_id(1)
offset = pid_batch * C * HxW + group * C_G
X = input_ptr + offset
_mean = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32)
_m2 = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32)
_weight = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32)
row = tl.arange(0, ROW_SIZE)
for off in range(0, HxW, BLOCK_SIZE):
r = off + tl.arange(0, BLOCK_SIZE)
m2_ = tl.zeros((BLOCK_SIZE, ROW_SIZE), dtype=tl.float32)
mask = (r < HxW)[:, None] & (row[None, :] < C_G)
weight_ = mask.to(tl.float32)
x = tl.load(X + (r * C)[:, None] + row[None, :], mask=mask).to(tl.
float32)
_mean, _m2, _weight = welford_combine(_mean, _m2, _weight, x, m2_,
weight_)
_mean = tl.view(_mean, (BLOCK_SIZE * ROW_SIZE,))
_m2 = tl.view(_m2, (BLOCK_SIZE * ROW_SIZE,))
_weight = tl.view(_weight, (BLOCK_SIZE * ROW_SIZE,))
mean, m2, weight = tl.reduce((_mean, _m2, _weight), 0, welford_combine)
var = m2 / weight
rstd = 1.0 / tl.sqrt(var + eps)
offset = pid_batch * groups + group
tl.store(mean_ptr + offset, mean)
tl.store(rstd_ptr + offset, rstd)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/group_norm.py |
770ec2b3-7745-432e-bbae-8d2c438cc02f | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_softmax_kernel(input_ptr, output_ptr, input_offsets_ptr,
input_row_stride, input_head_stride, output_row_stride,
output_head_stride, max_seq_len: tl.constexpr, BLOCK_SIZE: tl.constexpr):
"""
    input shape is [SUM_B, H]
output shape is [SUM_B, H]
"""
pid_batch = tl.program_id(0)
pid_head = tl.program_id(1)
row_begin = tl.load(input_offsets_ptr + pid_batch)
row_end = tl.load(input_offsets_ptr + pid_batch + 1)
N = tl.minimum(max_seq_len, row_end - row_begin)
if N == 0:
return
row_start_ptr = input_ptr + row_begin * input_row_stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = (row_start_ptr + col_offsets * input_row_stride + pid_head *
input_head_stride)
row = tl.load(input_ptrs, mask=col_offsets < N, other=-float('inf'))
row_mins_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_mins_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_begin * output_row_stride
output_ptrs = (output_row_start_ptr + col_offsets * output_row_stride +
pid_head * output_head_stride)
tl.store(output_ptrs, softmax_output, mask=col_offsets < N)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
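A minimal launch sketch for jagged_softmax_kernel above, assuming jagged values of shape (SUM_B, H) and a (B + 1)-element offsets tensor; the wrapper name and the BLOCK_SIZE choice are illustrative assumptions.

import torch
import triton

def jagged_softmax(values: torch.Tensor, offsets: torch.Tensor, max_seq_len: int) -> torch.Tensor:
    # Hypothetical wrapper: one program per (batch, head); BLOCK_SIZE must cover max_seq_len.
    sum_b, H = values.shape
    B = offsets.numel() - 1
    out = torch.empty_like(values)
    BLOCK_SIZE = triton.next_power_of_2(max_seq_len)
    jagged_softmax_kernel[(B, H)](
        values, out, offsets,
        values.stride(0), values.stride(1),
        out.stride(0), out.stride(1),
        max_seq_len, BLOCK_SIZE=BLOCK_SIZE,
    )
    return out
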
5c760d10-cb85-49c6-9d13-141637fb65e6 | matrix-vector-multiplication-bf16.py | northstreet12/triton-cpu | python/tutorials/matrix-vector-multiplication-bf16.py | bfb302ffc5fde3b9efe040cb452ddac0454dbb98 | 0 | @triton.jit
def gemv_kernel(Y, A, X, M, N, stride_am, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr):
start_m = tl.program_id(0)
rm = start_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
rn = tl.arange(0, BLOCK_SIZE_N)
A = A + (rm[:, None] * stride_am + rn[None, :])
X = X + rn
acc = tl.zeros((BLOCK_SIZE_M,), dtype=tl.float32)
for n in range(N, 0, -BLOCK_SIZE_N):
a = tl.load(A)
x = tl.load(X)
acc += tl.sum(a * x[None, :], axis=1)
A += BLOCK_SIZE_N
X += BLOCK_SIZE_N
y = acc.to(tl.bfloat16)
Y = Y + rm
tl.store(Y, y)
| {
"Data Type": [
"bf16",
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/northstreet12/triton-cpu/blob/bfb302ffc5fde3b9efe040cb452ddac0454dbb98/python/tutorials/matrix-vector-multiplication-bf16.py |
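A minimal launch sketch for gemv_kernel above, assuming a contiguous bf16 matrix A of shape (M, N) with M and N divisible by the block sizes, since the kernel performs unmasked loads; the wrapper name and block sizes are illustrative assumptions.

import torch
import triton

def gemv(A: torch.Tensor, X: torch.Tensor, BLOCK_SIZE_M: int = 16, BLOCK_SIZE_N: int = 64) -> torch.Tensor:
    # Hypothetical wrapper: A is (M, N), X is (N,); assumes M % BLOCK_SIZE_M == 0 and
    # N % BLOCK_SIZE_N == 0 because the kernel loads without masks.
    M, N = A.shape
    Y = torch.empty(M, dtype=torch.bfloat16, device=A.device)
    grid = (triton.cdiv(M, BLOCK_SIZE_M),)
    gemv_kernel[grid](Y, A, X, M, N, A.stride(0),
                      BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N)
    return Y
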
1e8d4b3f-525d-4e92-a1e7-b8c95e9d1b8e | bwd_kernel_dk_dv.py | ROCm/aotriton | tritonsrc/bwd_kernel_dk_dv.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L, D, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_bz,
stride_bh, stride_bm, stride_bn, stride_oz, stride_oh, stride_om,
stride_ok, stride_dkz, stride_dkh, stride_dkn, stride_dkk, stride_dvz,
stride_dvh, stride_dvk, stride_dvn, num_head_q: 'i32', num_head_k:
'i32', cu_seqlens_q, cu_seqlens_k, num_seqlens: 'i32', max_seqlen_q:
'i32', max_seqlen_k: 'i32', head_dim: 'i32', dropout_p, philox_seed_ptr,
philox_offset1: '*u32', philox_offset2: 'u32', BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, CAUSAL: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.
constexpr):
philox_seed = 0
philox_offset_base = philox_offset2
if ENABLE_DROPOUT:
philox_seed = tl.load(philox_seed_ptr)
philox_offset_base += tl.load(philox_offset1)
start_k = tl.program_id(0) * BLOCK_N
off_h_k = tl.program_id(1)
off_z = tl.program_id(2)
num_z = tl.num_programs(2)
offs_m = tl.arange(0, BLOCK_M)
offs_n = start_k + tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
ld_offs_d = None if not PADDED_HEAD else tl.arange(0, BLOCK_DMODEL)
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
seqlen_q = max_seqlen_q
seqlen_k = max_seqlen_k
batch_index = off_z
if num_seqlens > 0:
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
if start_k >= seqlen_k:
return
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
batch_index = 0
if num_seqlens < 0:
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
if start_k >= seqlen_k:
return
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
batch_index = off_z
k_offset = (off_h_k * stride_kh + batch_index * stride_kz +
cu_seqlens_k_start * stride_kn)
K += k_offset
kt_ptrs = K + offs_d[:, None] * stride_kk + offs_n[None, :] * stride_kn
if start_k + BLOCK_N <= seqlen_k:
kt = load_fn(kt_ptrs, ld_offs_d, None, head_dim, seqlen_k)
else:
kt = load_fn(kt_ptrs, ld_offs_d, offs_n, head_dim, seqlen_k)
v_offset = (off_h_k * stride_vh + batch_index * stride_vz +
cu_seqlens_k_start * stride_vk)
V += v_offset
vt_ptrs = V + offs_d[:, None] * stride_vn + offs_n[None, :] * stride_vk
if start_k + BLOCK_N <= seqlen_k:
vt = load_fn(vt_ptrs, ld_offs_d, None, head_dim, seqlen_k)
else:
vt = load_fn(vt_ptrs, ld_offs_d, offs_n, head_dim, seqlen_k)
if BIAS_TYPE == 0:
B_block_ptr = 0
elif BIAS_TYPE == 1:
B_block_ptr = tl.make_block_ptr(base=B + off_h_k * stride_bh +
batch_index * stride_bz, shape=(seqlen_q, seqlen_k), strides=(
stride_bm, stride_bn), offsets=(0, start_k), block_shape=(
BLOCK_M, BLOCK_N), order=(1, 0))
else:
tl.static_assert(False, f'Unsupported BIAS_TYPE {BIAS_TYPE}')
dk_offset = (off_h_k * stride_dkh + batch_index * stride_dkz +
cu_seqlens_k_start * stride_dkn)
DK += dk_offset
dv_offset = (off_h_k * stride_dvh + batch_index * stride_dvz +
cu_seqlens_k_start * stride_dvk)
DV += dv_offset
dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
qk_scale = sm_scale * 1.44269504089
bias_scale = 1.0 / sm_scale
group_size = num_head_q // num_head_k
q_lo = start_k if CAUSAL else 0
q_hi = seqlen_q
real_seqlen_q = q_hi - q_lo
n_blocks = tl.cdiv(q_hi - q_lo, BLOCK_M)
n_extra_tokens = 0
if real_seqlen_q < BLOCK_M:
n_extra_tokens = BLOCK_M - real_seqlen_q
elif real_seqlen_q % BLOCK_M:
n_extra_tokens = real_seqlen_q % BLOCK_M
is_irregular_q = n_extra_tokens != 0
leading_masked_blocks = 0
trailing_masked_blocks = 0
if CAUSAL:
leading_masked_blocks = tl.cdiv(BLOCK_N, BLOCK_M)
trailing_masked_blocks = 1 if is_irregular_q else 0
else:
leading_masked_blocks = 0
trailing_masked_blocks = 1 if is_irregular_q else 0
n_full_blocks = n_blocks - leading_masked_blocks - trailing_masked_blocks
dropout_scale = 1.0 / (1.0 - dropout_p) if ENABLE_DROPOUT else 1.0
for off_h_q in range(off_h_k * group_size, off_h_k * group_size +
group_size):
off_zh = off_z * num_head_q + off_h_q * 1
if ENABLE_DROPOUT:
batch_philox_offset = (philox_offset_base + off_zh *
max_seqlen_q * max_seqlen_k)
else:
batch_philox_offset = 0
D_ptrs = D + off_zh * max_seqlen_q
l_ptrs = L + off_zh * max_seqlen_q
q_offset = (off_h_q * stride_qh + batch_index * stride_qz +
cu_seqlens_q_start * stride_qm)
q_ptrs = Q + q_offset + offs_m[:, None] * stride_qm + offs_d[None, :
] * stride_qk
do_offset = (off_h_q * stride_oh + batch_index * stride_oz +
cu_seqlens_q_start * stride_om)
do_ptrs = DO + do_offset + offs_m[:, None] * stride_om + offs_d[None, :
] * stride_ok
lo = 0
hi = 0
if leading_masked_blocks > 0:
lo = q_lo
hi = lo + leading_masked_blocks * BLOCK_M
overflow_size = 0 if hi < q_hi else hi - q_hi
dk, dv = bwd_inner_dk_dv(dk, dv, qk_scale, bias_scale, q_ptrs,
stride_qm, kt, vt, B_block_ptr, do_ptrs, stride_om, l_ptrs,
D_ptrs, seqlen_q, seqlen_k, head_dim, start_k, lo, hi,
overflow_size, dropout_p, dropout_scale, philox_seed,
batch_philox_offset, max_seqlen_k, BLOCK_M, BLOCK_DMODEL,
BLOCK_N, False, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD, BIAS_TYPE)
tl.debug_barrier()
if n_full_blocks > 0:
lo = q_lo + leading_masked_blocks * BLOCK_M
hi = lo + n_full_blocks * BLOCK_M
dk, dv = bwd_inner_dk_dv(dk, dv, qk_scale, bias_scale, q_ptrs,
stride_qm, kt, vt, B_block_ptr, do_ptrs, stride_om, l_ptrs,
D_ptrs, seqlen_q, seqlen_k, head_dim, start_k, lo, hi, 0,
dropout_p, dropout_scale, philox_seed, batch_philox_offset,
max_seqlen_k, BLOCK_M, BLOCK_DMODEL, BLOCK_N, True, False,
ENABLE_DROPOUT, PADDED_HEAD, BIAS_TYPE)
if n_full_blocks >= 0 and trailing_masked_blocks > 0:
tl.debug_barrier()
lo = (q_lo + leading_masked_blocks * BLOCK_M + n_full_blocks *
BLOCK_M)
hi = q_hi
overflow_size = lo + trailing_masked_blocks * BLOCK_M - q_hi
dk, dv = bwd_inner_dk_dv(dk, dv, qk_scale, bias_scale, q_ptrs,
stride_qm, kt, vt, B_block_ptr, do_ptrs, stride_om, l_ptrs,
D_ptrs, seqlen_q, seqlen_k, head_dim, start_k, lo, hi,
overflow_size, dropout_p, dropout_scale, philox_seed,
batch_philox_offset, max_seqlen_k, BLOCK_M, BLOCK_DMODEL,
BLOCK_N, False, CAUSAL, ENABLE_DROPOUT, PADDED_HEAD, BIAS_TYPE)
dk = (dk * sm_scale).to(kt.type.element_ty)
dv = dv.to(vt.type.element_ty)
mstore2d(dk, BLOCK_N, BLOCK_DMODEL, o_base=DK, o_start_row=start_k,
o_start_col=0, o_rows=seqlen_k, o_cols=head_dim, stride_row=
stride_dkn, stride_col=stride_dkk)
mstore2d(dv, BLOCK_N, BLOCK_DMODEL, o_base=DV, o_start_row=start_k,
o_start_col=0, o_rows=seqlen_k, o_cols=head_dim, stride_row=
stride_dvk, stride_col=stride_dvn)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_kernel_dk_dv.py |
ee05de93-5deb-4808-b6ef-0af8f24d4eda | triton_fused_vq_attn.py | LouChao98/vqtree | ops/triton_fused_vq_attn.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[
'BLOCK_M'] == 0})
@triton.jit
def _vq_fwd_kernel(Q, K_VQ, K_VQ_CNT, V_VQ, V_VQ_INDEX, Out, L,
softmax_scale, stride_q_b, stride_q_h, stride_q_m, stride_kvq_h,
stride_kvq_c, stride_kvqc_b, stride_kvqc_h, stride_kvqc_n, stride_vvq_b,
stride_vvq_h, stride_vvq_n, stride_vvqi_b, stride_vvqi_h, stride_vvqi_n,
stride_o_b, stride_o_h, stride_o_m, nheads, seqlen_q, codebook_size,
CACHE_KEY_SEQLEN_Q, WINDOW_SIZE: tl.constexpr, BLOCK_HEADDIM: tl.
constexpr, EVEN_M: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.
constexpr, WRITE_LSE: tl.constexpr):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
offs_m = start_m * BLOCK_M + WINDOW_SIZE + BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF
acc = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_q_b + off_h *
stride_q_h), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_q_m,
1), offsets=(start_m * BLOCK_M + WINDOW_SIZE + BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_HEADDIM), order=(1, 0))
K_VQ_block_ptr = tl.make_block_ptr(base=K_VQ + off_h * stride_kvq_h,
shape=(BLOCK_HEADDIM, codebook_size), strides=(1, stride_kvq_c),
offsets=(0, 0), block_shape=(BLOCK_HEADDIM, BLOCK_N), order=(0, 1))
K_VQC_block_ptr = tl.make_block_ptr(base=K_VQ_CNT + (off_b *
stride_kvqc_b + off_h * stride_kvqc_h + start_m * stride_kvqc_n),
shape=(codebook_size,), strides=(1,), offsets=(0,), block_shape=(
BLOCK_N,), order=(0,))
VVQI_block_ptr = tl.make_block_ptr(base=V_VQ_INDEX + (off_b *
stride_vvqi_b + off_h * stride_vvqi_h + start_m * stride_vvqi_n),
shape=(codebook_size,), strides=(1,), offsets=(0,), block_shape=(
BLOCK_N,), order=(0,))
V_VQ += off_b * stride_vvq_b + off_h * stride_vvq_h
if EVEN_M:
q = tl.load(Q_block_ptr)
else:
q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero')
acc, l_i, m_i = _vq_attn_fwd_inner(acc, l_i, m_i, q, softmax_scale,
K_VQ_block_ptr, K_VQC_block_ptr, VVQI_block_ptr, V_VQ, stride_vvq_n,
codebook_size, BLOCK_HEADDIM, BLOCK_N)
acc = acc / l_i[:, None]
if WRITE_LSE:
l_ptrs = L + off_hb * seqlen_q + offs_m
if EVEN_M:
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
else:
tl.store(l_ptrs, m_i + tl.math.log2(l_i), mask=offs_m < seqlen_q)
out_ptrs = Out + off_b * stride_o_b + off_h * stride_o_h + (offs_m[:,
None] * stride_o_m + offs_d[None, :])
if EVEN_M:
tl.store(out_ptrs, acc)
else:
tl.store(out_ptrs, acc, mask=offs_m[:, None] < seqlen_q)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Softmax",
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_vq_attn.py |
a1c210f1-dae7-43a1-b05d-d236f1f87998 | real_rnn_tie_input_gate.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def fwd_sequential_scan_fused(v, f1, hidden, B, L, C, BLOCK_M: tl.constexpr):
offset_b = tl.program_id(0)
if offset_b >= B:
return
offset_n = tl.program_id(1)
ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + offset_n * BLOCK_M
h1 = tl.zeros([BLOCK_M], dtype=tl.float32)
for _ in range(L):
x0 = tl.load(v + ptr).to(tl.float32)
decay1 = tl.load(f1 + ptr).to(tl.float32)
decay1 = tl.sigmoid(decay1)
h1 = (h1 - x0) * decay1 + x0
tl.store(hidden + ptr, h1.to(hidden.dtype.element_ty))
ptr += C
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py |
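A minimal launch sketch for fwd_sequential_scan_fused above, assuming contiguous (B, L, C) inputs with C divisible by BLOCK_M; the wrapper name and block size are illustrative assumptions.

import torch

def fwd_sequential_scan(v: torch.Tensor, f1: torch.Tensor, BLOCK_M: int = 256) -> torch.Tensor:
    # Hypothetical wrapper: each program scans one BLOCK_M-wide channel slice of one batch
    # element sequentially over the L timesteps.
    B, L, C = v.shape
    assert C % BLOCK_M == 0, "kernel indexing assumes C is a multiple of BLOCK_M"
    hidden = torch.empty_like(v)
    grid = (B, C // BLOCK_M)
    fwd_sequential_scan_fused[grid](v, f1, hidden, B, L, C, BLOCK_M=BLOCK_M)
    return hidden
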
4e1ef67f-1573-4d17-8121-3a7f8e148d1b | shape.py | 2niuhe/triton_utils | src/triton_utils/shape.py | 6184906ac3b86dac3ccbfac128ec393ccecde5df | 0 | @triton.jit
def load_full_2d(ptr, sz0: tl.constexpr, sz1: tl.constexpr, stride0=None,
stride1=1):
"""Load 2d block [0,...,sz0-1] x [0,...,sz1-1]"""
stride0 = stride0 or sz1
offs = get_2d_offset(tl.arange(0, sz0), tl.arange(0, sz1), stride0, stride1
)
mask = get_2d_mask(tl.arange(0, sz0), tl.arange(0, sz1), sz0, sz1)
return tl.load(ptr + offs, mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency"
]
} | [
"Apache"
] | https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py |
399b5bd6-779f-481e-b641-a97817c65b63 | k_softmax.py | kimiasa/Experiments | src/ops/triton/k_softmax.py | c4e73bfefd8290695ec52b6386b6b81838ca94a1 | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['K'])
@triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])})
@triton.heuristics({'IS_FP16': lambda nargs: nargs['Y'].dtype == torch.float16}
)
@triton.jit
def _softmax(Y, X, M, stride_ym, stride_yn, stride_xm, stride_xn, stride_m,
K, LOG: tl.constexpr, MASK_TYPE: tl.constexpr, CAUSAL: tl.constexpr,
DEPTH: tl.constexpr, IS_FP16: tl.constexpr):
"""
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
    Note: if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
    the kernel is run for the first time.
"""
m = tl.program_id(0)
n = tl.program_id(1)
k = tl.arange(0, DEPTH)
x_ptrs = X + m * stride_xm + n * stride_xn + k
io_mask = k < K
if CAUSAL:
io_mask = io_mask & (k <= n)
x = tl.load(x_ptrs, mask=io_mask, other=float('-inf'))
if CAUSAL:
off = float('-inf')
off = off.to(x.dtype)
x = tl.where(k > n, off, x)
if MASK_TYPE is not None:
if MASK_TYPE == 'qk':
mask_ptrs = M + n * stride_m + k
elif MASK_TYPE == 'bk':
mask_ptrs = M + m * stride_m + k
add_mask = tl.load(mask_ptrs, io_mask, other=float('-inf'))
x += add_mask
z = x - tl.max(x, axis=0)
if IS_FP16:
z = z.to(tl.float32)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
if LOG:
y = z - tl.log(denom)
else:
y = num / denom
y_ptrs = Y + m * stride_ym + n * stride_yn + k
tl.store(y_ptrs, y, mask=k < K)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/ops/triton/k_softmax.py |
a5f58e11-541c-49c1-aad8-632c55cafadf | mamba_ssm.py | Charlie-XIAO/sparse-vllm | vllm/model_executor/layers/mamba/ops/mamba_ssm.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.heuristics({'HAS_DT_BIAS': lambda args: args['dt_bias_ptr'] is not
None})
@triton.heuristics({'HAS_D': lambda args: args['D_ptr'] is not None})
@triton.heuristics({'HAS_Z': lambda args: args['z_ptr'] is not None})
@triton.heuristics({'HAS_STATE_BATCH_INDICES': lambda args: args[
'state_batch_indices_ptr'] is not None})
@triton.heuristics({'BLOCK_SIZE_DSTATE': lambda args: triton.
next_power_of_2(args['dstate'])})
@triton.jit
def _selective_scan_update_kernel(state_ptr, x_ptr, dt_ptr, dt_bias_ptr,
A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, state_batch_indices_ptr,
batch, nheads, dim, dstate, nheads_ngroups_ratio, stride_state_batch,
stride_state_head, stride_state_dim, stride_state_dstate,
stride_x_batch, stride_x_head, stride_x_dim, stride_dt_batch,
stride_dt_head, stride_dt_dim, stride_dt_bias_head, stride_dt_bias_dim,
stride_A_head, stride_A_dim, stride_A_dstate, stride_B_batch,
stride_B_group, stride_B_dstate, stride_C_batch, stride_C_group,
stride_C_dstate, stride_D_head, stride_D_dim, stride_z_batch,
stride_z_head, stride_z_dim, stride_out_batch, stride_out_head,
stride_out_dim, DT_SOFTPLUS: tl.constexpr, TIE_HDIM: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, HAS_DT_BIAS: tl.constexpr, HAS_D: tl.
constexpr, HAS_Z: tl.constexpr, HAS_STATE_BATCH_INDICES: tl.constexpr,
BLOCK_SIZE_DSTATE: tl.constexpr):
pid_m = tl.program_id(axis=0)
pid_b = tl.program_id(axis=1)
pid_h = tl.program_id(axis=2)
if HAS_STATE_BATCH_INDICES:
state_batch_indices_ptr += pid_b
state_batch_idx = tl.load(state_batch_indices_ptr)
state_ptr += (state_batch_idx * stride_state_batch + pid_h *
stride_state_head)
else:
state_ptr += pid_b * stride_state_batch + pid_h * stride_state_head
x_ptr += pid_b * stride_x_batch + pid_h * stride_x_head
dt_ptr += pid_b * stride_dt_batch + pid_h * stride_dt_head
if HAS_DT_BIAS:
dt_bias_ptr += pid_h * stride_dt_bias_head
A_ptr += pid_h * stride_A_head
B_ptr += (pid_b * stride_B_batch + pid_h // nheads_ngroups_ratio *
stride_B_group)
C_ptr += (pid_b * stride_C_batch + pid_h // nheads_ngroups_ratio *
stride_C_group)
if HAS_Z:
z_ptr += pid_b * stride_z_batch + pid_h * stride_z_head
out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = tl.arange(0, BLOCK_SIZE_DSTATE)
state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[
None, :] * stride_state_dstate)
x_ptrs = x_ptr + offs_m * stride_x_dim
dt_ptrs = dt_ptr + offs_m * stride_dt_dim
if HAS_DT_BIAS:
dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim
if HAS_D:
D_ptr += pid_h * stride_D_head
A_ptrs = A_ptr + (offs_m[:, None] * stride_A_dim + offs_n[None, :] *
stride_A_dstate)
B_ptrs = B_ptr + offs_n * stride_B_dstate
C_ptrs = C_ptr + offs_n * stride_C_dstate
if HAS_D:
D_ptrs = D_ptr + offs_m * stride_D_dim
if HAS_Z:
z_ptrs = z_ptr + offs_m * stride_z_dim
out_ptrs = out_ptr + offs_m * stride_out_dim
state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None,
:] < dstate), other=0.0)
x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
if not TIE_HDIM:
dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
if HAS_DT_BIAS:
dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl
.float32)
if DT_SOFTPLUS:
dt = softplus(dt)
A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] <
dstate), other=0.0).to(tl.float32)
dA = tl.exp(A * dt[:, None])
else:
dt = tl.load(dt_ptr).to(tl.float32)
if HAS_DT_BIAS:
dt += tl.load(dt_bias_ptr).to(tl.float32)
if DT_SOFTPLUS:
dt = softplus(dt)
A = tl.load(A_ptr).to(tl.float32)
dA = tl.exp(A * dt)
B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32)
C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32)
if HAS_D:
D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
if HAS_Z:
z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
dB = B[None, :] * dt[:, None] if not TIE_HDIM else B * dt
state = state * dA + dB * x[:, None]
tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None,
:] < dstate))
out = tl.sum(state * C[None, :], axis=1)
if HAS_D:
out += x * D
if HAS_Z:
out *= z * tl.sigmoid(z)
tl.store(out_ptrs, out, mask=offs_m < dim)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/mamba/ops/mamba_ssm.py |
bfa71336-4d07-4f50-b917-10440a567a7d | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [2, 4, 8]], key=['BK'])
@triton.jit
def fwd_prepare_wy_repr_kernel_chunk64(k, g, beta, Aw, Au, offsets, indices,
T: tl.constexpr, K: tl.constexpr, H: tl.constexpr, BT: tl.constexpr, BK:
tl.constexpr, BC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
b_Aw = tl.zeros([BC, BC], dtype=tl.float32)
b_Aw2 = tl.zeros([BC, BC], dtype=tl.float32)
b_Aw3 = tl.zeros([BC, BC], dtype=tl.float32)
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BC,), (0,))
p_beta2 = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT +
BC,), (BC,), (0,))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BC,), (0,))
p_beta2 = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT + BC,), (BC,), (0,))
b_beta = tl.load(p_beta, boundary_check=(0,))
b_beta2 = tl.load(p_beta2, boundary_check=(0,))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BC, BK), (1, 0))
p_k2 = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT + BC, i_k * BK), (BC, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BC, BK), (1, 0))
p_k2 = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT + BC, i_k * BK), (BC, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_k2 = tl.load(p_k2, boundary_check=(0, 1))
b_kb2 = (b_k2 * b_beta2[:, None]).to(b_k2.dtype)
b_Aw += tl.dot(b_kb, tl.trans(b_k))
b_Aw2 += tl.dot(b_kb2, tl.trans(b_k2))
b_Aw3 += tl.dot(b_kb2, tl.trans(b_k))
b_Aw = -tl.where(tl.arange(0, BC)[:, None] > tl.arange(0, BC)[None, :],
b_Aw, 0)
b_Aw2 = -tl.where(tl.arange(0, BC)[:, None] > tl.arange(0, BC)[None, :],
b_Aw2, 0)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BC,
), (0,))
p_g2 = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT + BC,),
(BC,), (0,))
else:
p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,),
(BC,), (0,))
p_g2 = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT +
BC,), (BC,), (0,))
b_g = tl.load(p_g, boundary_check=(0,))
b_g2 = tl.load(p_g2, boundary_check=(0,))
b_Au = b_Aw * tl.exp(b_g[:, None] - b_g[None, :])
b_Au2 = b_Aw2 * tl.exp(b_g2[:, None] - b_g2[None, :])
b_Au3 = b_Aw3 * tl.exp(b_g2[:, None] - b_g[None, :])
for i in range(1, BC):
mask = tl.arange(0, BC) == i
b_aw = tl.sum(tl.where(mask[:, None], b_Aw, 0), 0)
b_aw2 = tl.sum(tl.where(mask[:, None], b_Aw2, 0), 0)
b_au = tl.sum(tl.where(mask[:, None], b_Au, 0), 0)
b_au2 = tl.sum(tl.where(mask[:, None], b_Au2, 0), 0)
b_aw = b_aw + tl.sum(b_aw[:, None] * b_Aw, 0) * (tl.arange(0, BC) < i)
b_aw2 = b_aw2 + tl.sum(b_aw2[:, None] * b_Aw2, 0) * (tl.arange(0,
BC) < i)
b_au = b_au + tl.sum(b_au[:, None] * b_Au, 0) * (tl.arange(0, BC) < i)
b_au2 = b_au2 + tl.sum(b_au2[:, None] * b_Au2, 0) * (tl.arange(0,
BC) < i)
b_Aw = tl.where(mask[:, None], b_aw, b_Aw)
b_Aw2 = tl.where(mask[:, None], b_aw2, b_Aw2)
b_Au = tl.where(mask[:, None], b_au, b_Au)
b_Au2 = tl.where(mask[:, None], b_au2, b_Au2)
b_Aw += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_Aw2 += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_Aw3 = -tl.dot(tl.dot(b_Aw2, b_Aw3, allow_tf32=False), b_Aw,
allow_tf32=False)
b_Au += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_Au2 += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_Au3 = -tl.dot(tl.dot(b_Au2, b_Au3, allow_tf32=False), b_Au,
allow_tf32=False)
if HEAD_FIRST:
p_Aw1 = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT, 0), (BC, BC), (1, 0))
p_Aw2 = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT + BC, BC), (BC, BC), (1, 0))
p_Aw3 = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT + BC, 0), (BC, BC), (1, 0))
p_Aw4 = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT, BC), (BC, BC), (1, 0))
p_Au1 = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT, 0), (BC, BC), (1, 0))
p_Au2 = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT + BC, BC), (BC, BC), (1, 0))
p_Au3 = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT + BC, 0), (BC, BC), (1, 0))
p_Au4 = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT, BC), (BC, BC), (1, 0))
else:
p_Aw1 = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BC, BC), (1, 0))
p_Aw2 = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + BC, BC), (BC, BC), (1, 0))
p_Aw3 = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + BC, 0), (BC, BC), (1, 0))
p_Aw4 = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, BC), (BC, BC), (1, 0))
p_Au1 = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, 0), (BC, BC), (1, 0))
p_Au2 = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + BC, BC), (BC, BC), (1, 0))
p_Au3 = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT + BC, 0), (BC, BC), (1, 0))
p_Au4 = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H *
BT, 1), (i_t * BT, BC), (BC, BC), (1, 0))
tl.store(p_Aw1, b_Aw.to(p_Aw1.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Aw2, b_Aw2.to(p_Aw2.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Aw3, b_Aw3.to(p_Aw3.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Aw4, tl.zeros([BC, BC], dtype=tl.float32).to(p_Aw4.dtype.
element_ty), boundary_check=(0, 1))
tl.store(p_Au1, b_Au.to(p_Au1.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Au2, b_Au2.to(p_Au2.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Au3, b_Au3.to(p_Au3.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_Au4, tl.zeros([BC, BC], dtype=tl.float32).to(p_Au4.dtype.
element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Quantization",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py |
382c7df7-c231-4082-9738-316b2060d437 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/retention/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def fused_recurrent_retention_fwd_kernel(q, k, v, o, h0, ht, offsets, scale,
B: tl.constexpr, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V:
tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, REVERSE: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_k, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int64), tl.load(offsets +
i_n + 1).to(tl.int64)
all = T
T = eos - bos
else:
bos, eos = i_n * T, i_n * T + T
all = B * T
b_b = 1 - tl.math.exp2(-5 - i_h * 1.0)
if HEAD_FIRST:
p_q = q + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_k = k + i_nh * T * K + i_k * BK + tl.arange(0, BK)
p_v = v + i_nh * T * V + i_v * BV + tl.arange(0, BV)
p_o = o + (i_k * B * H + i_nh) * T * V + i_v * BV + tl.arange(0, BV)
else:
p_q = q + (bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_k = k + (bos + (T - 1 if REVERSE else 0)
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_v = v + (bos + (T - 1 if REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_o = o + (i_k * all + bos + (T - 1 if REVERSE else 0)
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
mask_k = i_k * BK + tl.arange(0, BK) < K
mask_v = i_v * BV + tl.arange(0, BV) < V
mask_h = mask_k[None, :] & mask_v[:, None]
b_h = tl.zeros([BV, BK], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = h0 + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
b_h += tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
for _ in range(0, T):
b_q = tl.load(p_q, mask=mask_k, other=0).to(tl.float32) * scale
b_k = tl.load(p_k, mask=mask_k, other=0).to(tl.float32)
b_v = tl.load(p_v, mask=mask_v, other=0).to(tl.float32)
b_h = b_b * b_h + b_k[None, :] * b_v[:, None]
b_o = b_h * b_q[None, :]
b_o = tl.sum(b_o, axis=1)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), mask=mask_v)
p_q += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_k += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * K
p_v += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
p_o += (-1 if REVERSE else 1) * (1 if HEAD_FIRST else H) * V
if STORE_FINAL_STATE:
p_ht = ht + i_nh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]
) * V + (i_v * BV + tl.arange(0, BV)[:, None])
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/fused_recurrent.py |
cdef9924-80d7-4787-9792-40f7c0314224 | triton_kernels.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/triton_kernels.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def _triton_third_order_fwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl.
tensor, sh_1_0_ptr: tl.tensor, sh_1_1_ptr: tl.tensor, sh_1_2_ptr: tl.
tensor, sh_2_0_ptr: tl.tensor, sh_2_1_ptr: tl.tensor, sh_2_2_ptr: tl.
tensor, sh_2_3_ptr: tl.tensor, sh_2_4_ptr: tl.tensor, sh_3_0_ptr: tl.
tensor, sh_3_1_ptr: tl.tensor, sh_3_2_ptr: tl.tensor, sh_3_3_ptr: tl.
tensor, sh_3_4_ptr: tl.tensor, sh_3_5_ptr: tl.tensor, sh_3_6_ptr: tl.
tensor, BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr):
sqrt_3 = 3 ** 0.5
block_id = tl.program_id(0)
offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id
x_row_start = x_ptr + offset
y_row_start = y_ptr + offset
z_row_start = z_ptr + offset
x = tl.load(x_row_start, mask=offset < vector_length)
y = tl.load(y_row_start, mask=offset < vector_length)
z = tl.load(z_row_start, mask=offset < vector_length)
sh_1_0 = x * sqrt_3
sh_1_1 = y * sqrt_3
sh_1_2 = z * sqrt_3
sqrt_15 = 15 ** 0.5
sqrt_5 = 5 ** 0.5
sq_x = x * x
sq_y = y * y
sq_z = z * z
sh_2_0 = sqrt_15 * x * z
sh_2_1 = sqrt_15 * x * y
sh_2_2 = sqrt_5 * (sq_y - 0.5 * (sq_x + sq_z))
sh_2_3 = sqrt_15 * y * z
sh_2_4 = 0.5 * sqrt_15 * (sq_z - sq_x)
sqrt_42 = 42 ** 0.5
sqrt_168 = 168 ** 0.5
sqrt_7 = 7 ** 0.5
sh_3_0 = 1 / 6 * sqrt_42 * (sh_2_0 * z + sh_2_4 * x)
sh_3_1 = sqrt_7 * sh_2_0 * y
sh_3_2 = 1 / 8 * sqrt_168 * (4 * sq_y - (sq_x + sq_z)) * x
sh_3_3 = 0.5 * sqrt_7 * y * (2 * sq_y - 3 * (sq_x + sq_z))
sh_3_4 = 1 / 8 * sqrt_168 * z * (4 * sq_y - (sq_x + sq_z))
sh_3_5 = sqrt_7 * (sh_2_4 * y)
sh_3_6 = 1 / 6 * sqrt_42 * (sh_2_4 * z - sh_2_0 * x)
sh_1_0_start = sh_1_0_ptr + offset
sh_1_1_start = sh_1_1_ptr + offset
sh_1_2_start = sh_1_2_ptr + offset
sh_2_0_start = sh_2_0_ptr + offset
sh_2_1_start = sh_2_1_ptr + offset
sh_2_2_start = sh_2_2_ptr + offset
sh_2_3_start = sh_2_3_ptr + offset
sh_2_4_start = sh_2_4_ptr + offset
sh_3_0_start = sh_3_0_ptr + offset
sh_3_1_start = sh_3_1_ptr + offset
sh_3_2_start = sh_3_2_ptr + offset
sh_3_3_start = sh_3_3_ptr + offset
sh_3_4_start = sh_3_4_ptr + offset
sh_3_5_start = sh_3_5_ptr + offset
sh_3_6_start = sh_3_6_ptr + offset
tl.store(sh_1_0_start, sh_1_0, mask=offset < vector_length)
tl.store(sh_1_1_start, sh_1_1, mask=offset < vector_length)
tl.store(sh_1_2_start, sh_1_2, mask=offset < vector_length)
tl.store(sh_2_0_start, sh_2_0, mask=offset < vector_length)
tl.store(sh_2_1_start, sh_2_1, mask=offset < vector_length)
tl.store(sh_2_2_start, sh_2_2, mask=offset < vector_length)
tl.store(sh_2_3_start, sh_2_3, mask=offset < vector_length)
tl.store(sh_2_4_start, sh_2_4, mask=offset < vector_length)
tl.store(sh_3_0_start, sh_3_0, mask=offset < vector_length)
tl.store(sh_3_1_start, sh_3_1, mask=offset < vector_length)
tl.store(sh_3_2_start, sh_3_2, mask=offset < vector_length)
tl.store(sh_3_3_start, sh_3_3, mask=offset < vector_length)
tl.store(sh_3_4_start, sh_3_4, mask=offset < vector_length)
tl.store(sh_3_5_start, sh_3_5, mask=offset < vector_length)
tl.store(sh_3_6_start, sh_3_6, mask=offset < vector_length)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py |
8a07eb48-f388-411a-8bea-4db66f7e583a | test_triton.py | pytorch/xla | test/test_triton.py | 40efdb7b6571ce92797b5ba42619b79c1b147b3e | 0 | @triton.jit
def add_kernel(x_ptr, y_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch/xla/blob/40efdb7b6571ce92797b5ba42619b79c1b147b3e/test/test_triton.py |
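A minimal host-side launch sketch for add_kernel above, following the standard Triton vector-add pattern; the wrapper name and the BLOCK_SIZE of 1024 are illustrative choices.

import torch
import triton

def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # One program handles BLOCK_SIZE elements; the in-kernel mask covers the tail.
    output = torch.empty_like(x)
    n_elements = output.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
    add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
    return output
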
73d4eb91-5118-44b5-a7cc-d60243dd1659 | fused_recurrent.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/fused_recurrent.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_recurrent_gsa_inference_kernel(q, k, v, s, g, o, hk0, hv0, hkt,
hvt, scale, K: tl.constexpr, V: tl.constexpr, M: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, NG: tl.constexpr):
i_bh = tl.program_id(0)
i_bg = i_bh // NG
b_s = tl.load(s + i_bg * M + tl.arange(0, M)).to(tl.float32)
b_g = tl.load(g + i_bg * M + tl.arange(0, M)).to(tl.float32)
b_g = tl.exp(b_g)
b_ok = tl.zeros([M], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
o_k = i_k * BK + tl.arange(0, BK)
p_hk0 = hk0 + i_bg * K * M + o_k[None, :] * M + tl.arange(0, M)[:, None
]
mask_k = o_k < K
mask_hk = (tl.arange(0, M) < M)[:, None] & mask_k[None, :]
b_hk = tl.load(p_hk0, mask=mask_hk, other=0.0).to(tl.float32)
b_q = tl.load(q + i_bh * K + o_k, mask=mask_k, other=0.0).to(tl.float32
) * scale
b_k = tl.load(k + i_bg * K + o_k, mask=mask_k, other=0.0).to(tl.float32
)
b_hk = b_hk * b_g[:, None] + b_k[None, :] * b_s[:, None]
b_ok += tl.sum(b_hk * b_q[None, :], axis=1)
if i_bh % NG == 0:
p_hkt = hkt + i_bg * K * M + o_k[None, :] * M + tl.arange(0, M)[
:, None]
tl.store(p_hkt, b_hk.to(p_hkt.dtype.element_ty), mask=mask_hk)
b_qv = tl.softmax(b_ok)
for i_v in range(tl.cdiv(V, BV)):
o_v = i_v * BV + tl.arange(0, BV)
p_hv0 = hv0 + i_bg * M * V + tl.arange(0, M)[None, :] * V + o_v[:, None
]
mask_v = o_v < V
mask_hv = mask_v[:, None] & (tl.arange(0, M) < M)[None, :]
b_hv = tl.load(p_hv0, mask=mask_hv, other=0).to(tl.float32)
b_v = tl.load(v + i_bg * V + o_v, mask=mask_v, other=0).to(tl.float32)
b_hv = b_hv * b_g[None, :] + b_s[None, :] * b_v[:, None]
b_ov = tl.sum(b_hv * b_qv[None, :], axis=1)
tl.store(o + i_bh * V + o_v, b_ov.to(o.dtype.element_ty), mask=mask_v)
if i_bh % NG == 0:
p_hvt = hvt + i_bg * M * V + tl.arange(0, M)[None, :] * V + o_v[
:, None]
tl.store(p_hvt, b_hv.to(p_hvt.dtype.element_ty), mask=mask_hv)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/fused_recurrent.py |
d007ef85-87e3-4ce0-8e46-7b94cff34ce5 | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsOpt)), key=['N_CTX'])
@triton.jit
def _attn_fwd_opt(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl.
constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr,
ENABLE_WS: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
pid = tl.program_id(0)
off_hz = tl.program_id(1)
_attn_fwd_compute(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid,
Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA,
LOOP_SCHEDULE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
858e4bb7-3ed2-490f-84f5-a9677c3f962b | lstm_fw.py | NX-AI/flashrnn | flashrnn/flashrnn/triton_fused/lstm_fw.py | 3fca666a81c8740af4878d7bc5e2a51900e4fe14 | 0 | @triton.autotune(configs, key=['siz_B', 'T', 'B', 'NH', 'DH'])
@triton.jit
def _forward_sequence_kernel(states_initial, Wx, R, b, states_all,
gates_all, T: tl.constexpr, NS: tl.constexpr, B: tl.constexpr, NH: tl.
constexpr, DH: tl.constexpr, NGI: tl.constexpr, NGR: tl.constexpr,
siz_B: tl.constexpr, OUTPUT_GATES: tl.constexpr, DTYPE: tl.constexpr=tl
.float32):
idx_b_NH, idx_b_B = tl.program_id(0), tl.program_id(1)
str_matWx_NH = T * NGI * B * DH
str_matWx_T = NGI * B * DH
str_matStatesAll_NH = (T + 1) * NS * B * DH
str_matStatesAll_T = NS * B * DH
str_matGatesAll_NH = T * NGI * B * DH
str_matGatesAll_T = NGI * B * DH
matHtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 0 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matHtrans = tl.load(matHtrans_initial_ptr).to(tl.float32)
matCtrans_initial_ptr = tl.make_block_ptr(base=states_initial +
idx_b_NH * NS * B * DH + 1 * B * DH, shape=(B, DH), strides=(DH, 1),
offsets=(idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
matCtrans = tl.load(matCtrans_initial_ptr).to(tl.float32)
matHtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 0 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matHtrans_initial_store_ptr, matHtrans.to(DTYPE))
matCtrans_initial_store_ptr = tl.make_block_ptr(base=states_all +
idx_b_NH * str_matStatesAll_NH + 0 * str_matStatesAll_T + 1 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0),
block_shape=(siz_B, DH), order=(0, 1))
tl.store(matCtrans_initial_store_ptr, matCtrans.to(DTYPE))
matRtrans_i_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
0 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_i = tl.load(matRtrans_i_ptr)
matRtrans_f_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
1 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_f = tl.load(matRtrans_f_ptr)
matRtrans_z_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
2 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_z = tl.load(matRtrans_z_ptr)
matRtrans_o_ptr = tl.make_block_ptr(base=R + idx_b_NH * DH * NGR * DH +
3 * DH * DH, shape=(DH, DH), strides=(DH, 1), offsets=(0, 0),
block_shape=(DH, DH), order=(0, 1))
matRtrans_o = tl.load(matRtrans_o_ptr)
vecB_i_ptr = b + idx_b_NH * NGI * DH + 0 * DH + tl.arange(0, DH)
vecB_i = tl.load(vecB_i_ptr)
vecB_f_ptr = b + idx_b_NH * NGI * DH + 1 * DH + tl.arange(0, DH)
vecB_f = tl.load(vecB_f_ptr)
vecB_z_ptr = b + idx_b_NH * NGI * DH + 2 * DH + tl.arange(0, DH)
vecB_z = tl.load(vecB_z_ptr)
vecB_o_ptr = b + idx_b_NH * NGI * DH + 3 * DH + tl.arange(0, DH)
vecB_o = tl.load(vecB_o_ptr)
for idx_t in range(T):
matIxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 0 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matIxtrans = tl.load(matIxtrans_ptr)
matFxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 1 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matFxtrans = tl.load(matFxtrans_ptr)
matZxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 2 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matZxtrans = tl.load(matZxtrans_ptr)
matOxtrans_ptr = tl.make_block_ptr(base=Wx + idx_b_NH *
str_matWx_NH + idx_t * str_matWx_T + 3 * B * DH, shape=(B, DH),
strides=(DH, 1), offsets=(idx_b_B * siz_B, 0), block_shape=(
siz_B, DH), order=(0, 1))
matOxtrans = tl.load(matOxtrans_ptr)
matRhtrans_i = tl.dot(matHtrans.to(DTYPE), matRtrans_i)
matRhtrans_f = tl.dot(matHtrans.to(DTYPE), matRtrans_f)
matRhtrans_z = tl.dot(matHtrans.to(DTYPE), matRtrans_z)
matRhtrans_o = tl.dot(matHtrans.to(DTYPE), matRtrans_o)
matIbar = matIxtrans + matRhtrans_i + vecB_i[None, :]
matFbar = matFxtrans + matRhtrans_f + vecB_f[None, :]
matZbar = matZxtrans + matRhtrans_z + vecB_z[None, :]
matObar = matOxtrans + matRhtrans_o + vecB_o[None, :]
matI = tl.sigmoid(matIbar)
matF = tl.sigmoid(matFbar)
matZ = triton_tanh(matZbar)
matO = tl.sigmoid(matObar)
matCtrans_next = matF * matCtrans + matI * matZ
matHtrans_next = matO * triton_tanh(matCtrans_next)
matHtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 0 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matHtrans_next_ptr, matHtrans_next.to(DTYPE))
matCtrans_next_ptr = tl.make_block_ptr(base=states_all + idx_b_NH *
str_matStatesAll_NH + (idx_t + 1) * str_matStatesAll_T + 1 * B *
DH, shape=(B, DH), strides=(DH, 1), offsets=(idx_b_B * siz_B, 0
), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matCtrans_next_ptr, matCtrans_next.to(DTYPE))
if OUTPUT_GATES:
matGatesItrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
0 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesItrans_ptr, matI.to(DTYPE))
matGatesFtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
1 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesFtrans_ptr, matF.to(DTYPE))
matGatesZtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
2 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesZtrans_ptr, matZ.to(DTYPE))
matGatesOtrans_ptr = tl.make_block_ptr(base=gates_all +
idx_b_NH * str_matGatesAll_NH + idx_t * str_matGatesAll_T +
3 * B * DH, shape=(B, DH), strides=(DH, 1), offsets=(
idx_b_B * siz_B, 0), block_shape=(siz_B, DH), order=(0, 1))
tl.store(matGatesOtrans_ptr, matO.to(DTYPE))
matCtrans = matCtrans_next
matHtrans = matHtrans_next
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Batch-Oriented"
]
} | [
"MIT",
"BSD"
] | https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/lstm_fw.py |
87908dfa-3000-45a4-bd58-e284ba835528 | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=[Config({'BLOCK_SIZE': 512}), Config({'BLOCK_SIZE':
1024}), Config({'BLOCK_SIZE': 2048}), Config({'BLOCK_SIZE': 4096}),
Config({'BLOCK_SIZE': 8192})], key=['K'])
@triton.jit
def _kernel_quantize_fp8_row(A, A_scale, A_fp8, scale_ub, B, M, N, K,
stride_ab, stride_am, stride_an, stride_ak, stride_ob, stride_om,
stride_on, stride_ok, TL_FP8_DTYPE: tl.constexpr, MAX_FP8: tl.constexpr,
EPS: tl.constexpr, CLAMP_MAX: tl.constexpr, BLOCK_SIZE: tl.constexpr,
USE_INT64: tl.constexpr) ->None:
"""Quantize and scale each row.
Scale per row i is computed as MAX_FP8 / max(abs(A[i, :]))
Kernel naively iterates through matrix with [1, BLOCK_SIZE] tiles
in a max pass then scale/quantize pass.
Todo:
* Better tiling schemes.
Args:
A (Tensor): higher precision input tensor of 4 dimension.
A_scale (Tensor): [B * M * N] reciprocal scale tensor per row.
A_fp8 (Tensor): fp8 scaled tensor. A_fp8 = A / a_scale
scale_ub (Tensor): [1] Maximum value allowed for scale.
        B (int): Size of dimension 0
        M (int): Size of dimension 1
        N (int): Size of dimension 2
        K (int): Size of dimension 3
stride_ab (int): Stride of b dimension of A.
stride_am (int): Stride of m dimension of A.
stride_an (int): Stride of n dimension of A.
stride_ak (int): Stride of k dimension of A.
stride_ob (int): Stride of b dimension of output.
stride_om (int): Stride of m dimension of output.
stride_on (int): Stride of n dimension of output.
stride_ok (int): Stride of k dimension of output.
TL_FP8_DTYPE (tl.dtype): Target fp8 datatype.
        MAX_FP8 (float): Maximum expressible value for FP8.
EPS (float): Epsilon value for numerical stability.
        CLAMP_MAX (bool): Whether to apply scale_ub.
BLOCK_SIZE (int): Block size for reduction.
USE_INT64 (bool): Whether to use int64 indexing for large inputs.
"""
pid = tl.program_id(0)
if USE_INT64:
pid = pid.to(tl.int64)
n_offset = tl.arange(0, BLOCK_SIZE)
a_offset_base = pid // (M * N) * stride_ab + pid % (M * N
) // N * stride_am + pid % (M * N) % N * stride_an
a_fp8_offset_base = pid // (M * N) * stride_ob + pid % (M * N
) // N * stride_om + pid % (M * N) % N * stride_on
cur_max = 0.0
for _k in range(0, tl.cdiv(K, BLOCK_SIZE)):
a = tl.load(A + a_offset_base + n_offset * stride_ak, mask=n_offset <
K, other=0.0)
tile_max = tl.max(tl.abs(a))
cur_max = tl.maximum(tile_max, cur_max)
n_offset += BLOCK_SIZE
if CLAMP_MAX:
ub = tl.load(scale_ub)
cur_max = tl.clamp(cur_max, EPS, ub)
else:
cur_max = tl.maximum(cur_max, EPS)
a_scale = MAX_FP8 / cur_max
tl.store(A_scale + pid, 1.0 / a_scale)
n_offset = tl.arange(0, BLOCK_SIZE)
for _k in range(0, tl.cdiv(K, BLOCK_SIZE)):
a = tl.load(A + a_offset_base + n_offset * stride_ak, mask=n_offset <
K, other=0.0)
a_fp8 = a * a_scale
a_fp8 = tl.clamp(a_fp8, -MAX_FP8, MAX_FP8)
a_fp8.to(TL_FP8_DTYPE)
tl.store(A_fp8 + a_fp8_offset_base + n_offset * stride_ok, a_fp8,
mask=n_offset < K)
n_offset += BLOCK_SIZE
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.