uuid | file_name | repo_name | file_path | commit_hash | starcount | input | category | licenses | github_url |
---|---|---|---|---|---|---|---|---|---|
c5940182-0af6-4902-b164-e9d9776bf1ee | _quantize.py | IBM/qattn | qattn/nn/functional/_quantize.py | 07ceda0aceb9afd299d622325944c0c0471827fe | 0 | @triton.jit
def clamp(x: tl.tensor, min_val, max_val) -> tl.tensor:
"""Clamps all elements in `x` into the range [min_val, max_val].
Args:
x (tl.tensor): the input tensor.
min_val (Number): lower bound of the range.
max_val (Number): upper bound of the range.
Returns:
tl.tensor: the output tensor.
"""
return tl.math.min(tl.math.max(x, min_val), max_val)
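
# Illustrative usage sketch (an assumption, not part of the source file):
# `clamp` is a @triton.jit helper, so it is meant to be called from inside
# other kernels. The wrapper kernel below is hypothetical and assumes the
# usual `import triton` / `import triton.language as tl` module imports.
@triton.jit
def clamp_kernel(x_ptr, out_ptr, n, min_val, max_val, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, clamp(x, min_val, max_val), mask=mask)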
| {
"Data Type": [],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_quantize.py |
cb753c39-8d4d-492d-af54-91bdf9d9f3a8 | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsTmaWS)), key=['N_CTX'])
@triton.jit
def _attn_fwd_tma_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr,
STAGE: tl.constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.
constexpr, ENABLE_WS: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
pid = tl.program_id(0)
off_hz = tl.program_id(1)
_attn_fwd_compute_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid,
Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA,
LOOP_SCHEDULE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
9391ce5d-adac-414a-92ce-2ca0faecd484 | copy_strided.py | triton-lang/triton | python/examples/copy_strided.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def kernel(X, stride_xm, Z, stride_zn, BLOCK_M: tl.constexpr, BLOCK_N: tl.
constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * 1
Zs = Z + off_m[:, None] * 1 + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
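
# Hedged launch sketch (hypothetical, not from the example file): copies one
# BLOCK_M x BLOCK_N tile from a row-major X into a column-major Z. The tensor
# shapes and the single-program grid below are illustrative assumptions.
import torch

X = torch.randn(64, 64, device='cuda')       # row-major: stride_xm = 64
Z = torch.empty(64, 64, device='cuda').t()   # column-major view: strides (1, 64)
kernel[(1,)](X, X.stride(0), Z, Z.stride(1), BLOCK_M=64, BLOCK_N=64)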
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access",
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/examples/copy_strided.py |
04bd883d-3bc8-46e4-9259-5f5271e53a83 | y_7.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_7.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def seventh_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor,
block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.
constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr):
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST002 = 3.87298334620742
CONST008 = 11.7655316231354
CONST010 = 16.5555704843566
CONST012 = 20.4939015319192
CONST013 = 20.4939015319192
CONST014 = 22.0740939791422
CONST015 = 23.5310632462709
CONST017 = 36.7901566319036
CONST019 = 38.4260653723485
CONST020 = 38.4260653723485
CONST021 = 38.4260653723485
CONST023 = -4.9916923169903
CONST025 = 47.0621264925418
CONST026 = 50.8329064189723
CONST028 = 55.1852349478554
CONST029 = 56.2781179722634
CONST030 = 56.2781179722634
CONST032 = 66.5558975598707
CONST033 = 75.2994023880668
CONST037 = 101.665812837945
CONST038 = 110.370469895711
CONST041 = 147.160626527614
CONST042 = -1.66389743899677
CONST043 = -9.37968632871057
CONST044 = -1.66389743899677
CONST045 = -220.740939791422
CONST046 = -220.740939791422
CONST047 = -1.60108605718119
CONST048 = -187.593726574211
CONST049 = -9.1975391579759
CONST050 = -1.83950783159518
CONST051 = -1.83950783159518
CONST052 = -4.80325817154356
CONST053 = -147.160626527614
CONST054 = -140.695294930659
CONST055 = -133.111795119741
CONST056 = -125.499003980111
CONST057 = -125.499003980111
CONST058 = -99.833846339806
CONST059 = -87.7389315936062
CONST060 = -76.852130744697
CONST061 = -66.5558975598707
CONST062 = -62.7495019900557
CONST063 = -52.6433589561637
CONST064 = -44.1481879582843
CONST065 = -44.3705983732471
CONST066 = -40.6663251351779
CONST067 = -40.6663251351779
CONST068 = -8.31948719498384
CONST069 = -37.6497011940334
CONST070 = -33.2779487799353
CONST071 = -25.4164532094862
CONST072 = -25.4164532094862
CONST073 = -17.5477863187212
CONST074 = -11.7655316231354
CONST075 = -11.0370469895711
CONST076 = -9.1975391579759
CONST077 = -8.47215106982872
CONST078 = -4.80325817154356
CONST079 = -2.50682661696018
CONST080 = -1.60108605718119
VAR06 = x * x * x * x
VAR07 = x * x * x
VAR08 = x * x
VAR03 = VAR06 * VAR07
VAR04 = VAR07 * VAR07
VAR05 = VAR07 * VAR08
VAR15 = y * y * y * y
VAR16 = y * y * y
VAR17 = y * y
VAR12 = VAR15 * VAR16
VAR13 = VAR16 * VAR16
VAR14 = VAR16 * VAR17
VAR24 = z * z * z * z
VAR25 = z * z * z
VAR26 = z * z
VAR21 = VAR24 * VAR25
VAR22 = VAR25 * VAR25
VAR23 = VAR25 * VAR26
Y00 = (CONST059 * VAR07 * VAR24 - CONST063 * VAR05 * VAR26 - CONST073 *
VAR22 * x + CONST079 * VAR03)
Y01 = y * (CONST029 * VAR23 * x + CONST030 * VAR05 * z + CONST048 *
VAR07 * VAR25)
Y02 = CONST050 * VAR03 + VAR05 * (CONST010 * VAR26 + CONST014 * VAR17
) + VAR07 * (CONST045 * VAR17 * VAR26 - CONST076 * VAR24) + x * (
CONST038 * VAR17 * VAR24 + CONST076 * VAR22)
Y03 = VAR16 * (CONST041 * VAR25 * x + CONST053 * VAR07 * z) + y * (-
CONST064 * VAR05 * z + CONST064 * VAR23 * x)
Y04 = CONST042 * VAR03 + VAR05 * (-CONST042 * VAR26 - CONST070 * VAR17
) + VAR07 * (CONST061 * VAR17 * VAR26 + CONST065 * VAR15 - CONST068 *
VAR24) + x * (-CONST023 * VAR22 - CONST055 * VAR15 * VAR26 +
CONST058 * VAR17 * VAR24)
Y05 = CONST015 * VAR05 * y * z + VAR07 * (CONST025 * VAR25 * y +
CONST057 * VAR16 * z) + x * (CONST015 * VAR23 * y + CONST033 *
VAR14 * z + CONST056 * VAR16 * VAR25)
Y06 = CONST047 * VAR03 + VAR05 * (CONST020 * VAR17 + CONST078 * VAR26
) + VAR07 * (CONST052 * VAR24 + CONST060 * VAR15 - CONST060 * VAR17 *
VAR26) + x * (CONST012 * VAR13 + CONST019 * VAR17 * VAR24 +
CONST060 * VAR15 * VAR26 + CONST080 * VAR22)
Y07 = CONST002 * VAR12 + VAR14 * (CONST066 * VAR08 + CONST067 * VAR26
) + VAR16 * (CONST026 * VAR06 + CONST026 * VAR24 + CONST037 * VAR08 *
VAR26) + y * (CONST071 * VAR06 * VAR26 + CONST072 * VAR08 * VAR24 +
CONST077 * VAR04 + CONST077 * VAR22)
Y08 = CONST047 * VAR21 + VAR23 * (CONST020 * VAR17 + CONST052 * VAR08
) + VAR25 * (CONST052 * VAR06 - CONST060 * VAR08 * VAR17 + CONST060 *
VAR15) + z * (CONST013 * VAR13 + CONST021 * VAR06 * VAR17 +
CONST047 * VAR04 + CONST060 * VAR08 * VAR15)
Y09 = VAR14 * (CONST069 * VAR08 - CONST069 * VAR26) + VAR16 * (-
CONST062 * VAR06 + CONST062 * VAR24) + y * (CONST008 * VAR08 *
VAR24 + CONST074 * VAR04 + CONST074 * VAR06 * VAR26 - CONST074 * VAR22)
Y10 = -CONST042 * VAR21 + VAR23 * (CONST044 * VAR08 + CONST070 * VAR17
) + VAR25 * (CONST032 * VAR08 * VAR17 - CONST065 * VAR15 + CONST068 *
VAR06) + z * (CONST023 * VAR04 + CONST055 * VAR08 * VAR15 -
CONST058 * VAR06 * VAR17)
Y11 = VAR16 * (CONST017 * VAR06 + CONST017 * VAR24 + CONST046 * VAR08 *
VAR26) + y * (CONST028 * VAR06 * VAR26 + CONST028 * VAR08 * VAR24 +
CONST075 * VAR04 + CONST075 * VAR22)
Y12 = CONST051 * VAR21 + VAR23 * (CONST010 * VAR08 + CONST014 * VAR17
) + VAR25 * (CONST045 * VAR08 * VAR17 - CONST049 * VAR06) + z * (
CONST038 * VAR06 * VAR17 + CONST049 * VAR04)
Y13 = y * (CONST043 * VAR04 - CONST043 * VAR22 - CONST054 * VAR06 *
VAR26 + CONST054 * VAR08 * VAR24)
Y14 = (-CONST059 * VAR06 * VAR25 + CONST063 * VAR08 * VAR23 + CONST073 *
VAR04 * z - CONST079 * VAR21)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
tl.store(output_ptr + output_row_offset + 7, Y07, mask=
output_row_offset + 7 < output_numel)
tl.store(output_ptr + output_row_offset + 8, Y08, mask=
output_row_offset + 8 < output_numel)
tl.store(output_ptr + output_row_offset + 9, Y09, mask=
output_row_offset + 9 < output_numel)
tl.store(output_ptr + output_row_offset + 10, Y10, mask=
output_row_offset + 10 < output_numel)
tl.store(output_ptr + output_row_offset + 11, Y11, mask=
output_row_offset + 11 < output_numel)
tl.store(output_ptr + output_row_offset + 12, Y12, mask=
output_row_offset + 12 < output_numel)
tl.store(output_ptr + output_row_offset + 13, Y13, mask=
output_row_offset + 13 < output_numel)
tl.store(output_ptr + output_row_offset + 14, Y14, mask=
output_row_offset + 14 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_7.py |
ad543ac2-d2ac-447e-8a53-c871539d272e | y_2.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_2.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def second_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
CONST_00 = 3.87298334620742
CONST_01 = 2.23606797749979
CONST_02 = 4.47213595499958
g_Y20 = tl.load(sph_grad_ptr + output_row_offset, mask=
output_row_offset < output_numel)
g_Y21 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_Y22 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_Y23 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_Y24 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += (CONST_00 * g_Y20 * z + CONST_00 * g_Y21 * y - CONST_01 * g_Y22 *
x - CONST_00 * g_Y24 * x)
g_y += CONST_00 * g_Y21 * x + CONST_02 * g_Y22 * y + CONST_00 * g_Y23 * z
g_z += (CONST_00 * g_Y20 * x - CONST_01 * g_Y22 * z + CONST_00 * g_Y23 *
y + CONST_00 * g_Y24 * z)
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_2.py |
28f5b112-be8c-4f0b-8096-f9ab7e3bb85c | mhmoe.py | dtadpole/triton-playground | mhmoe.py | 2d317976722d63080133b1bf88b1f0cdec98f831 | 0 | @triton.jit
def _mlp_wide_kernel_bwd_dx(dx, pid_h, pid_b, x_ptr, w1_ptr, w2_ptr, o_ptr,
dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D: tl.constexpr, E, stride_xb,
stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d, stride_ob,
stride_od, stride_dxb, stride_dxd, stride_dw1d, stride_dw1e,
stride_dw2e, stride_dw2d, stride_dob, stride_dod, BLOCK_SIZE_B: tl.
constexpr, BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr):
"""Kernel for computing the mlp_bwd_dx
Z = X @ W1, H = f(Z), O = H @ W2
- X has shape (B, D)
- W1 has shape (D, E)
- W2 has shape (E, D)
- O has shape (B, D)
- dX has shape (B, D)
- dW1 has shape (D, E)
- dW2 has shape (E, D)
- dO has shape (B, D)
"""
TARGET_TYPE = x_ptr.type.element_ty
offs_b = tl.arange(0, BLOCK_SIZE_B)
offs_d = tl.arange(0, D)
offs_e = tl.arange(0, BLOCK_SIZE_E)
x_ptrs = x_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:, None]) *
stride_xb + offs_d[None, :] * stride_xd)
x_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[None, :
] < D)
do_ptrs = do_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:, None]
) * stride_dob + offs_d[None, :] * stride_dod)
do_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[None,
:] < D)
w1_ptrs = w1_ptr + ((pid_h * D + offs_d[:, None]) * stride_w1d + offs_e
[None, :] * stride_w1e)
w2_ptrs = w2_ptr + ((pid_h * E + offs_e[:, None]) * stride_w2e + offs_d
[None, :] * stride_w2d)
dw1_ptrs = dw1_ptr + ((pid_h * D + offs_d[:, None]) * stride_dw1d +
offs_e[None, :] * stride_dw1e)
dw2_ptrs = dw2_ptr + ((pid_h * E + offs_e[:, None]) * stride_dw2e +
offs_d[None, :] * stride_dw2d)
x = tl.load(x_ptrs, mask=x_mask, other=0.0)
do = tl.load(do_ptrs, mask=do_mask, other=0.0)
for e in range(0, tl.cdiv(E, BLOCK_SIZE_E)):
w1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - e *
BLOCK_SIZE_E)
w2_mask = (offs_e[:, None] < E - e * BLOCK_SIZE_E) & (offs_d[None,
:] < D)
w1 = tl.load(w1_ptrs, mask=w1_mask, other=0.0)
w2 = tl.load(w2_ptrs, mask=w2_mask, other=0.0)
z = tl.dot(x, w1, out_dtype=tl.float32)
if ACTIVATION == 'leaky_relu':
h = leaky_relu(z).to(TARGET_TYPE)
elif ACTIVATION == 'silu':
h = silu(z).to(TARGET_TYPE)
elif ACTIVATION == 'sigmoid':
h = tl.sigmoid(z).to(TARGET_TYPE)
else:
h = z.to(TARGET_TYPE)
dh = tl.dot(do, tl.trans(w2), out_dtype=tl.float32)
if ACTIVATION == 'leaky_relu':
dz = (dh * d_leacky_relu(z)).to(TARGET_TYPE)
elif ACTIVATION == 'silu':
dz = (dh * d_silu(z, h)).to(TARGET_TYPE)
elif ACTIVATION == 'sigmoid':
dz = (dh * d_sigmoid(h)).to(TARGET_TYPE)
else:
dz = dh.to(TARGET_TYPE)
dx += tl.dot(dz, tl.trans(w1), out_dtype=tl.float32)
w1_ptrs += BLOCK_SIZE_E * stride_w1e
w2_ptrs += BLOCK_SIZE_E * stride_w2e
dw1_ptrs += BLOCK_SIZE_E * stride_dw1e
dw2_ptrs += BLOCK_SIZE_E * stride_dw2e
return dx
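
# Hedged sketch (an assumption) of the activation helpers the kernel above
# references; they are defined elsewhere in mhmoe.py. `d_leacky_relu` keeps
# the spelling used at the call site. Derivatives are the standard closed forms.
@triton.jit
def leaky_relu(x):
    return tl.where(x >= 0, x, 0.01 * x)

@triton.jit
def d_leacky_relu(x):
    return tl.where(x >= 0, 1.0, 0.01)

@triton.jit
def silu(x):
    return x * tl.sigmoid(x)

@triton.jit
def d_silu(x, h):
    # d/dx [x * sigmoid(x)] = sigmoid(x) + h * (1 - sigmoid(x)), with h = silu(x)
    sig = tl.sigmoid(x)
    return sig + h * (1 - sig)

@triton.jit
def d_sigmoid(h):
    # h is already sigmoid(x), so the derivative is h * (1 - h)
    return h * (1 - h)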
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Activation Functions",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py |
9a05112d-23d7-43f7-92f8-e3e2e3cd9ee0 | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def _bwd_kernel(Q, K, V, S, sm_scale, DO, DQ, DK, DV, M, D, stride_qz,
stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn,
stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, stride_doz,
stride_doh, stride_dom, stride_dok, stride_dqz, stride_dqh, stride_dqm,
stride_dqk, stride_dkz, stride_dkh, stride_dkn, stride_dkk, stride_dvz,
stride_dvh, stride_dvn, stride_dvk, stride_mz, stride_mh, stride_mm,
stride_dz, stride_dh, stride_dm, M_Q, N_CTX, BLOCK_DMODEL: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr, CAUSAL: tl.constexpr, USE_ALIBI: tl.constexpr):
off_h = tl.program_id(0)
off_z = tl.program_id(1)
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_kz + off_h * stride_kh
V += off_z * stride_vz + off_h * stride_vh
DO += off_z * stride_doz + off_h * stride_doh
DQ += off_z * stride_dqz + off_h * stride_dqh
DK += off_z * stride_dkz + off_h * stride_dkh
DV += off_z * stride_dvz + off_h * stride_dvh
if USE_ALIBI:
slope = tl.load(S + off_h)
for start_n in range(0, N_CTX, BLOCK_N):
offs_n_curr = start_n + tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
offs_m = tl.arange(0, BLOCK_M)
k_ptrs = K + (offs_n_curr[:, None] * stride_kn + offs_d[None, :] *
stride_kk)
v_ptrs = V + (offs_n_curr[:, None] * stride_vn + offs_d[None, :] *
stride_vk)
q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
)
dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_d[None, :] *
stride_dqk)
do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_d[None, :] *
stride_dok)
m_ptrs = M + off_z * stride_mz + off_h * stride_mh + offs_m * stride_mm
D_ptrs = D + off_z * stride_dz + off_h * stride_dh + offs_m * stride_dm
dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
if EVEN_N:
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
else:
k = tl.load(k_ptrs, mask=offs_n_curr[:, None] < N_CTX, other=0)
v = tl.load(v_ptrs, mask=offs_n_curr[:, None] < N_CTX, other=0)
k = k.to(tl.float16)
v = v.to(tl.float16)
if CAUSAL:
begin = start_n + M_Q - N_CTX
dq_ptrs += begin * stride_dqm
q_ptrs += begin * stride_qm
do_ptrs += begin * stride_dom
m_ptrs += begin * stride_mm
D_ptrs += begin * stride_dm
else:
begin = 0
for start_m in range(begin, M_Q, BLOCK_M):
offs_m_curr = start_m + offs_m
if EVEN_M:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < M_Q, other=0)
q = q.to(tl.float16)
qk = tl.dot(q, k, trans_b=True)
qk *= sm_scale
if USE_ALIBI & CAUSAL:
qk += causal_alibi_mask(slope, offs_m_curr, offs_n_curr,
M_Q, N_CTX, EVEN_M, EVEN_N)
elif USE_ALIBI:
qk += symmetric_alibi_mask(slope, offs_m_curr, offs_n_curr,
M_Q, N_CTX, EVEN_M, EVEN_N)
elif CAUSAL:
qk += causal_mask(offs_m_curr, offs_n_curr, M_Q, N_CTX,
EVEN_M, EVEN_N)
if EVEN_M:
m = tl.load(m_ptrs)
Di = tl.load(D_ptrs)
do = tl.load(do_ptrs)
else:
m = tl.load(m_ptrs, mask=offs_m_curr < M_Q, other=0)
Di = tl.load(D_ptrs, mask=offs_m_curr < M_Q, other=0)
do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < M_Q, other=0)
do = do.to(tl.float16)
p = tl.exp(qk - m[:, None])
dv += tl.dot(p.to(tl.float16), do, trans_a=True)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, v, trans_b=True)
ds = p * dp * sm_scale
dk += tl.dot(ds.to(tl.float16), q, trans_a=True)
if EVEN_M:
dq = tl.load(dq_ptrs, eviction_policy='evict_last')
else:
dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < M_Q,
other=0, eviction_policy='evict_last')
dq += tl.dot(ds.to(tl.float16), k)
if EVEN_M:
tl.store(dq_ptrs, dq, eviction_policy='evict_last')
else:
tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < M_Q,
eviction_policy='evict_last')
q_ptrs += BLOCK_M * stride_qm
dq_ptrs += BLOCK_M * stride_dqm
do_ptrs += BLOCK_M * stride_dom
m_ptrs += BLOCK_M * stride_mm
D_ptrs += BLOCK_M * stride_dm
offs_d = tl.arange(0, BLOCK_DMODEL)
dv_ptrs = DV + (offs_n_curr[:, None] * stride_dvn + offs_d[None, :] *
stride_dvk)
dk_ptrs = DK + (offs_n_curr[:, None] * stride_dkn + offs_d[None, :] *
stride_dkk)
if EVEN_N:
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
else:
tl.store(dv_ptrs, dv, mask=offs_n_curr[:, None] < N_CTX)
tl.store(dk_ptrs, dk, mask=offs_n_curr[:, None] < N_CTX)
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
4db0e91c-60b7-4181-91a9-99d1f7995e1e | flash_triton.py | MayDomine/Burst-Attention | burst_attn/flash_triton.py | b088c554072935074ea9c643de5ee363be5ab1f6 | 0 | @triton.jit
def _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k,
headdim, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.
constexpr):
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
else:
tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
elif EVEN_HEADDIM:
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
else:
tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[
None, :] < headdim))
tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[
None, :] < headdim))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py |
743113b7-b1b8-41f3-ae0c-36089061283a | matmul.py | sustcsonglin/flash-linear-attention | fla/ops/utils/matmul.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'HAS_INPUT': lambda args: args['input'] is not None,
'HAS_ALPHA': lambda args: args['alpha'] is not None, 'HAS_BETA': lambda
args: args['beta'] is not None})
@triton.autotune(configs=[triton.Config({'BM': 128, 'BK': 64, 'BN': 256,
'G': 4}, num_stages=3, num_warps=8), triton.Config({'BM': 64, 'BK': 32,
'BN': 256, 'G': 4}, num_stages=4, num_warps=4), triton.Config({'BM':
128, 'BK': 32, 'BN': 128, 'G': 4}, num_stages=4, num_warps=4), triton.
Config({'BM': 128, 'BK': 32, 'BN': 64, 'G': 4}, num_stages=4, num_warps
=4), triton.Config({'BM': 64, 'BK': 32, 'BN': 128, 'G': 4}, num_stages=
4, num_warps=4), triton.Config({'BM': 128, 'BK': 32, 'BN': 32, 'G': 4},
num_stages=4, num_warps=4), triton.Config({'BM': 64, 'BK': 32, 'BN': 32,
'G': 4}, num_stages=5, num_warps=2), triton.Config({'BM': 32, 'BK': 32,
'BN': 64, 'G': 4}, num_stages=5, num_warps=2), triton.Config({'BM': 128,
'BK': 128, 'BN': 256, 'G': 4}, num_stages=3, num_warps=8), triton.
Config({'BM': 256, 'BK': 128, 'BN': 128, 'G': 4}, num_stages=3,
num_warps=8), triton.Config({'BM': 256, 'BK': 128, 'BN': 64, 'G': 4},
num_stages=4, num_warps=4), triton.Config({'BM': 64, 'BK': 128, 'BN':
256, 'G': 4}, num_stages=4, num_warps=4), triton.Config({'BM': 128,
'BK': 128, 'BN': 128, 'G': 4}, num_stages=4, num_warps=4), triton.
Config({'BM': 128, 'BK': 64, 'BN': 64, 'G': 4}, num_stages=4, num_warps
=4), triton.Config({'BM': 64, 'BK': 64, 'BN': 128, 'G': 4}, num_stages=
4, num_warps=4), triton.Config({'BM': 128, 'BK': 64, 'BN': 32, 'G': 4},
num_stages=4, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel(a, b, c, input, alpha, beta, M, N, K, s_am, s_ak, s_bk,
s_bn, s_cm, s_cn, BM: tl.constexpr, BK: tl.constexpr, BN: tl.constexpr,
G: tl.constexpr, ACTIVATION: tl.constexpr, HAS_INPUT: tl.constexpr,
HAS_ALPHA: tl.constexpr, HAS_BETA: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
NM, NN = tl.num_programs(0), tl.num_programs(1)
i_m, i_n = tl.program_id(0), tl.program_id(1)
i_m, i_n = tl.swizzle2d(i_m, i_n, NM, NN, G)
o_am = (i_m * BM + tl.arange(0, BM)) % M
o_bn = (i_n * BN + tl.arange(0, BN)) % N
o_k = tl.arange(0, BK)
p_a = a + (o_am[:, None] * s_am + o_k[None, :] * s_ak)
p_b = b + (o_k[:, None] * s_bk + o_bn[None, :] * s_bn)
b_acc = tl.zeros((BM, BN), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BK)):
b_a = tl.load(p_a, mask=o_k[None, :] < K - k * BK, other=0.0)
b_b = tl.load(p_b, mask=o_k[:, None] < K - k * BK, other=0.0)
b_acc += tl.dot(b_a, b_b, allow_tf32=False)
p_a += BK * s_ak
p_b += BK * s_bk
o_cm = i_m * BM + tl.arange(0, BM)
o_cn = i_n * BN + tl.arange(0, BN)
mask = (o_cm[:, None] < M) & (o_cn[None, :] < N)
b_c = b_acc
if ACTIVATION == 'leaky_relu':
b_c = leaky_relu(b_c)
if HAS_ALPHA:
b_c *= tl.load(alpha)
if HAS_INPUT:
p_i = input + s_cm * o_cm[:, None] + s_cn * o_cn[None, :]
b_i = tl.load(p_i, mask=mask, other=0.0).to(tl.float32)
if HAS_BETA:
b_i *= tl.load(beta)
b_c += b_i
p_c = c + s_cm * o_cm[:, None] + s_cn * o_cn[None, :]
tl.store(p_c, b_c.to(c.dtype.element_ty), mask=mask)
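
# Hedged host-side launch sketch (hypothetical wrapper, not the library's
# public API): the grid is 2D over output tiles, and the autotuner supplies
# BM/BN/BK/G, so the grid is a lambda over the chosen meta-parameters.
import torch
import triton

def matmul(a: torch.Tensor, b: torch.Tensor, activation: str = '') -> torch.Tensor:
    M, K = a.shape
    _, N = b.shape
    c = a.new_empty(M, N)
    grid = lambda meta: (triton.cdiv(M, meta['BM']), triton.cdiv(N, meta['BN']))
    matmul_kernel[grid](a, b, c, None, None, None, M, N, K,
                        a.stride(0), a.stride(1), b.stride(0), b.stride(1),
                        c.stride(0), c.stride(1), ACTIVATION=activation)
    return c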
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/matmul.py |
41d7f9ea-8351-46ef-aa36-57ca4cdd7d75 | 06-fused-attention.py | triton-lang/triton | python/tutorials/06-fused-attention.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _attn_bwd_dq(dq, q, K, V, do, m, D, stride_tok, stride_d, H, N_CTX,
BLOCK_M2: tl.constexpr, BLOCK_N2: tl.constexpr, HEAD_DIM: tl.constexpr,
start_m, start_n, num_steps, MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
Di = tl.load(D + offs_m)
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = offs_m[:, None] >= offs_n[None, :]
p = tl.where(mask, p, 0.0)
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
dq += tl.dot(ds, tl.trans(kT))
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access",
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py |
f12f4856-2ba8-4fdf-accd-606056062839 | sparse_linear.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_linear.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.autotune(configs=autotune_configs, key=['row_dense_dim',
'row_sparse_dim', 'col_dim'])
@triton.jit
def input_row_sparse_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr,
expert_ends_ptr, expert_pad_begins_ptr, row_dense_dim: tl.constexpr,
row_sparse_dim: tl.constexpr, col_dim: tl.constexpr, inner_dim: tl.
constexpr, lhs_stride_row: tl.constexpr, lhs_stride_inner: tl.constexpr,
rhs_stride_inner: tl.constexpr, rhs_stride_col: tl.constexpr,
out_stride_row: tl.constexpr, out_stride_col: tl.constexpr, accumulate:
tl.constexpr, block_size_row: tl.constexpr, block_size_col: tl.
constexpr, block_size_inner: tl.constexpr, group_size_row: tl.constexpr):
tl.static_assert(row_sparse_dim % block_size_row == 0)
tl.static_assert(col_dim % block_size_col == 0)
tl.static_assert(inner_dim % block_size_inner == 0)
tl.static_assert(row_dense_dim % row_sparse_dim == 0)
pid_row, pid_col = tl.swizzle2d(tl.program_id(axis=0), tl.program_id(
axis=1), row_dense_dim // block_size_row, col_dim // block_size_col,
group_size_row)
row_dense_offset = pid_row * block_size_row
sparse_index = row_dense_offset // row_sparse_dim
row_sparse_offset = row_dense_offset % row_sparse_dim
col_offset = pid_col * block_size_col
inner_begin = tl.load(expert_ends_ptr + sparse_index - 1, mask=
sparse_index > 0, other=0)
inner_end = tl.load(expert_pad_begins_ptr + sparse_index)
inner_offset = inner_begin // block_size_inner * block_size_inner
row_range = tl.arange(0, block_size_row)[:, None]
col_range = tl.arange(0, block_size_col)[None, :]
inner_range = tl.arange(0, block_size_inner) + inner_offset
lhs_ptr += (row_sparse_offset + row_range) * lhs_stride_row
rhs_ptr += (col_offset + col_range) * rhs_stride_col
out_ptr += (row_dense_offset + row_range) * out_stride_row + (col_offset +
col_range) * out_stride_col
mask = (inner_begin <= inner_range) & (inner_range < inner_end)
out = tl.dot(tl.load(lhs_ptr + inner_range[None, :] * lhs_stride_inner,
mask=mask[None, :], other=0), tl.load(rhs_ptr + inner_range[:, None
] * rhs_stride_inner, mask=mask[:, None], other=0))
for i in range(1, tl.cdiv(inner_end - inner_offset, block_size_inner)):
inner_range += block_size_inner
mask = (inner_begin <= inner_range) & (inner_range < inner_end)
out += tl.dot(tl.load(lhs_ptr + inner_range[None, :] *
lhs_stride_inner, mask=mask[None, :], other=0), tl.load(rhs_ptr +
inner_range[:, None] * rhs_stride_inner, mask=mask[:, None],
other=0))
if accumulate:
out += tl.load(out_ptr)
tl.store(out_ptr, out)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py |
45c898a1-7e30-4f2d-bdf7-18b7ae8654ae | shape.py | 2niuhe/triton_utils | src/triton_utils/shape.py | 6184906ac3b86dac3ccbfac128ec393ccecde5df | 0 | @triton.jit
def load_2d(ptr, sz0: tl.constexpr, sz1: tl.constexpr, n0, n1, max0, max1,
stride0=None, stride1=1):
"""Chunk 2d matrix (defined by ptr) into 2d grid, where each chunk has size (sz0,sz1).
Load the (n0,n1)th chunk. Ie, load [n0*sz0,...,(n0+1)*sz0-1] x [n1*sz1,...,(n1+1)*sz1-1].
"""
stride0 = stride0 or sz1
offs0 = get_1d_offest(sz0, n0)
offs1 = get_1d_offest(sz1, n1)
offs = get_2d_offset(offs0, offs1, stride0, stride1)
mask = get_2d_mask(offs0, offs1, max0, max1)
return tl.load(ptr + offs, mask)
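
# Hedged sketch (an assumption) of the helpers `load_2d` relies on; they are
# defined elsewhere in shape.py. `get_1d_offest` keeps the spelling used above.
@triton.jit
def get_1d_offest(sz, n):
    return n * sz + tl.arange(0, sz)

@triton.jit
def get_2d_offset(offs0, offs1, stride0, stride1=1):
    return offs0[:, None] * stride0 + offs1[None, :] * stride1

@triton.jit
def get_2d_mask(offs0, offs1, max0, max1):
    return (offs0[:, None] < max0) & (offs1[None, :] < max1)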
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py |
5558131b-8910-430e-bfcb-0a8e3e1e1a06 | pointwise.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/pointwise.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def triton_add_kernel(input_ptr, other_ptr, out_ptr, numel: tl.constexpr,
block_size: tl.constexpr):
block_start = tl.program_id(axis=0).to(tl.int64) * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < numel
input_ = tl.load(input_ptr + offsets, mask=mask)
other = tl.load(other_ptr + offsets, mask=mask)
tl.store(out_ptr + offsets, input_ + other, mask=mask)
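
# Hedged host-side wrapper (an illustrative assumption, not Fast-LLM's API):
# one program per block of 1024 contiguous elements.
import torch
import triton

def triton_add(input_: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(input_)
    numel = out.numel()
    block_size = 1024
    grid = (triton.cdiv(numel, block_size),)
    triton_add_kernel[grid](input_, other, out, numel, block_size)
    return out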
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py |
3ba95bb3-c419-4af2-b7ce-b7e0d81c14fd | group_norm.py | chengzeyi/stable-fast | src/sfast/triton/ops/group_norm.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @eval(
"""triton.heuristics({
'BLOCK_SIZE':
lambda kwargs: triton.next_power_of_2(kwargs['cluster_num']),
})"""
)
@eval(
"""triton.heuristics({
'num_warps':
lambda kwargs: max(1, min(16, kwargs['BLOCK_SIZE'] // 128)),
})"""
)
@triton.jit
def group_norm_4d_channels_last_forward_collect_stats_kernel_stage_2(
cluster_mean_ptr, cluster_m2_ptr, cluster_weight_ptr, N, groups,
cluster_num, eps, mean_ptr, rstd_ptr, BLOCK_SIZE: tl.constexpr):
group = tl.program_id(0)
pid_batch = tl.program_id(1)
block = tl.arange(0, BLOCK_SIZE)
mask = block < cluster_num
offset = pid_batch * groups * cluster_num + group * cluster_num + block
cluster_mean = tl.load(cluster_mean_ptr + offset, mask=mask)
cluster_m2 = tl.load(cluster_m2_ptr + offset, mask=mask)
cluster_weight = tl.load(cluster_weight_ptr + offset, mask=mask)
mean, m2, weight = tl.reduce((cluster_mean, cluster_m2, cluster_weight),
0, welford_combine)
var = m2 / weight
rstd = 1.0 / tl.sqrt(var + eps)
offset = pid_batch * groups + group
tl.store(mean_ptr + offset, mean)
tl.store(rstd_ptr + offset, rstd)
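
# Hedged sketch (an assumption) of the `welford_combine` reduction operator
# used above, following Chan et al.'s pairwise mean/variance combination:
@triton.jit
def welford_combine(mean_1, m2_1, weight_1, mean_2, m2_2, weight_2):
    delta = mean_2 - mean_1
    new_weight = weight_1 + weight_2
    w2_over_w = weight_2 / new_weight
    return (
        mean_1 + delta * w2_over_w,
        m2_1 + m2_2 + delta * delta * weight_1 * w2_over_w,
        new_weight,
    )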
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/group_norm.py |
5e7d7488-136e-4938-aca1-8982d4c280bf | paged_attn.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/paged_attn.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _inner_paged_attn_unroll_2_kernel(q, k_cache, v_cache, stride_km,
block_base_ptrs, base_offs_kv, alibi_slope, block_offs, seq_len, qkv,
qk_max, exp_sum, BLOCK_SIZE: tl.constexpr, LO: tl.constexpr, HI: tl.
constexpr):
for block_idx in range(LO, HI, 2):
offs_kv_0 = tl.load(block_base_ptrs + block_idx + 0
) * stride_km + base_offs_kv
offs_kv_1 = tl.load(block_base_ptrs + block_idx + 1
) * stride_km + base_offs_kv
k_0 = tl.load(k_cache + offs_kv_0)
k_1 = tl.load(k_cache + offs_kv_1)
v_0 = tl.load(v_cache + offs_kv_0)
v_1 = tl.load(v_cache + offs_kv_1)
_qk_0 = tl.sum((q[None, :] * k_0).to(tl.float32), axis=1)
_qk_1 = tl.sum((q[None, :] * k_1).to(tl.float32), axis=1)
if alibi_slope is not None:
_qk_0 += alibi_slope * ((block_idx + 0) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_1 += alibi_slope * ((block_idx + 1) * BLOCK_SIZE +
block_offs - seq_len + 1)
_qk_max = tl.maximum(tl.max(_qk_0, axis=0), qk_max)
_qk_max = tl.maximum(tl.max(_qk_1, axis=0), _qk_max)
exp_tmp = tl.exp(_qk_0 - _qk_max) + tl.exp(_qk_1 - _qk_max)
_exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(exp_tmp, axis=0)
qkv_sum_tmp = tl.exp(_qk_0[:, None] - _qk_max).to(v_cache.dtype.
element_ty) * v_0 + tl.exp(_qk_1[:, None] - _qk_max).to(v_cache
.dtype.element_ty) * v_1
qkv = (qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + qkv_sum_tmp
) / _exp_sum
qk_max = _qk_max
exp_sum = _exp_sum
return qkv, qk_max, exp_sum
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py |
3db55c15-f4b1-4305-b51b-6e73a82136ea | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not
None, 'USE_INITIAL_STATE': lambda args: args['dh0'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4]], key=['BT', 'BK', 'BV'])
@triton.jit
def chunk_delta_rule_bwd_kernel_dhu(q, k, d, dht, dh0, do, dh, dv, dv2,
offsets, chunk_offsets, scale, T: tl.constexpr, H: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, USE_FINAL_STATE_GRADIENT: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST:
tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_dh = tl.zeros([BK, BV], dtype=tl.float32)
if USE_FINAL_STATE_GRADIENT:
p_dht = tl.make_block_ptr(dht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_dh += tl.load(p_dht, boundary_check=(0, 1))
for i_t in range(NT - 1, -1, -1):
if HEAD_FIRST:
p_dh = tl.make_block_ptr(dh + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_dh = tl.make_block_ptr(dh + ((boh + i_t) * H + i_h) * K * V,
(K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1))
b_dh_tmp = tl.zeros([BK, BV], dtype=tl.float32)
for i_c in range(tl.cdiv(BT, BC) - 1, -1, -1):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_nh * T * K, (K, T), (1, K), (
i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
p_k = tl.make_block_ptr(k + i_nh * T * K, (T, K), (K, 1), (
i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
p_d = tl.make_block_ptr(d + i_nh * T * K, (K, T), (1, K), (
i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
p_dv = tl.make_block_ptr(dv + i_nh * T * V, (T, V), (V, 1),
(i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_nh * T * V, (T, V), (V, 1),
(i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
p_dv2 = tl.make_block_ptr(dv2 + i_nh * T * V, (T, V), (V, 1
), (i_t * BT + i_c * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1,
H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT + i_c * BC, i_k * BK), (BC, BK), (1, 0))
p_d = tl.make_block_ptr(d + (bos * H + i_h) * K, (K, T), (1,
H * K), (i_k * BK, i_t * BT + i_c * BC), (BK, BC), (0, 1))
p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V),
(H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV),
(1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V),
(H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV),
(1, 0))
p_dv2 = tl.make_block_ptr(dv2 + (bos * H + i_h) * V, (T, V),
(H * V, 1), (i_t * BT + i_c * BC, i_v * BV), (BC, BV),
(1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_k = tl.load(p_k, boundary_check=(0, 1))
b_d = tl.load(p_d, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_dv = tl.load(p_dv, boundary_check=(0, 1))
b_dv += tl.dot(b_k, b_dh.to(b_k.dtype), allow_tf32=False)
tl.store(p_dv2, b_dv.to(p_dv.dtype.element_ty), boundary_check=
(0, 1))
b_dh_tmp += tl.dot(b_q, b_do.to(b_q.dtype), allow_tf32=False)
b_dh_tmp -= tl.dot(b_d, b_dv.to(b_q.dtype), allow_tf32=False)
b_dh += b_dh_tmp
if USE_INITIAL_STATE:
p_dh0 = tl.make_block_ptr(dh0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_dh0, b_dh.to(p_dh0.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py |
1e01284c-e8e1-42a2-b16b-7529e9d7459b | cross_entropy_loss.py | tdrussell/qlora-pipe | kernels/cross_entropy_loss.py | 6fb7c8eeae52a0e36c41f00628985f29d8330684 | 0 | @triton.heuristics({'DO_LOGIT_SCALING': lambda args: args['DO_LOGIT_SCALING']})
@triton.jit
def _cross_entropy_forward(logits_ptr, logits_row_stride, loss_ptr,
logsumexp_ptr, labels_ptr, VOCAB_SIZE: tl.constexpr, BLOCK_SIZE: tl.
constexpr, DO_LOGIT_SCALING: tl.constexpr, LOGIT_SCALE: tl.constexpr):
"""
Cross Entropy Loss = 1/n sum [ -yi log(Pi) ]
Pi = exp(xi) / sum(exp(xi))
CE_i = -y log(p) = -y log[ exp(x) / sum(exp(x)) ]
= -y [ x - log[sum(exp(x))] ]
= y * (log[sum(exp(x))] - x)
If y == 0: CE_i = 0
If y == 1: CE_i = logsumexp - x
logsumexp itself can also be computed stably.
Take L = log[sum(exp(x))]
exp(L) = sum(exp(x))
exp(L) = sum(exp(x - c)*exp(c))     since e^(x-c)*e^c = e^x
exp(L) = exp(c)*sum(exp(x - c))
L = log(exp(c)*sum(exp(x - c)))
L = c + log[sum(exp(x - c))]
This means we can set c = max(x), so that every term
exp(x - c) = exp(x - max(x)) is at most exp(0) = 1,
which keeps the sum from overflowing.
"""
row_idx = tl.program_id(0)
logits_ptr += row_idx * logits_row_stride.to(tl.int64)
loss_ptr += row_idx
logsumexp_ptr += row_idx
labels_ptr += row_idx
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < VOCAB_SIZE
label_idx = tl.load(labels_ptr).to(tl.int32)
logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float('inf')
).to(tl.float32)
if DO_LOGIT_SCALING:
logits = LOGIT_SCALE * logits
pass
c = tl.max(logits, 0)
logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0))
if label_idx != -100:
x = tl.load(logits_ptr + label_idx).to(tl.float32)
if DO_LOGIT_SCALING:
x = LOGIT_SCALE * x
pass
loss = logsumexp - x
else:
loss = 0.0
tl.store(logsumexp_ptr, logsumexp)
tl.store(loss_ptr, loss)
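
# Hedged host-side sketch (names and shapes are assumptions): one program per
# row of the logits matrix; BLOCK_SIZE must be a power of two covering the vocab.
import torch
import triton

def cross_entropy_forward(logits: torch.Tensor, labels: torch.Tensor):
    n_rows, vocab_size = logits.shape
    losses = torch.empty(n_rows, dtype=torch.float32, device=logits.device)
    logsumexp = torch.empty(n_rows, dtype=torch.float32, device=logits.device)
    _cross_entropy_forward[(n_rows,)](
        logits, logits.stride(0), losses, logsumexp, labels,
        VOCAB_SIZE=vocab_size,
        BLOCK_SIZE=triton.next_power_of_2(vocab_size),
        DO_LOGIT_SCALING=False, LOGIT_SCALE=1.0,
    )
    return losses, logsumexp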
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax",
"Cross Entropy"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/tdrussell/qlora-pipe/blob/6fb7c8eeae52a0e36c41f00628985f29d8330684/kernels/cross_entropy_loss.py |
da158db8-4576-47cc-be40-fffdcc99f725 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/rwkv6/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[
    triton.Config({'BS': 16}, num_warps=2), triton.Config({'BS': 16}, num_warps=4),
    triton.Config({'BS': 16}, num_warps=8), triton.Config({'BS': 32}, num_warps=2),
    triton.Config({'BS': 32}, num_warps=4), triton.Config({'BS': 32}, num_warps=8),
    triton.Config({'BS': 64}, num_warps=2), triton.Config({'BS': 64}, num_warps=4),
    triton.Config({'BS': 64}, num_warps=8),
], key=['S', 'BT'])
@triton.jit
def chunk_rwkv6_fwd_cumsum_kernel(s, oi, oe, offsets, indices, T: tl.
constexpr, H: tl.constexpr, S: tl.constexpr, BT: tl.constexpr, BS: tl.
constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl.constexpr):
i_s, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
m_i = tl.where(o_i[:, None] >= o_i[None, :], 1.0, 0.0)
m_e = tl.where(o_i[:, None] > o_i[None, :], 1.0, 0.0)
if HEAD_FIRST:
p_s = tl.make_block_ptr(s + i_bh * T * S, (T, S), (S, 1), (i_t * BT,
i_s * BS), (BT, BS), (1, 0))
p_oi = tl.make_block_ptr(oi + i_bh * T * S, (T, S), (S, 1), (i_t *
BT, i_s * BS), (BT, BS), (1, 0))
p_oe = tl.make_block_ptr(oe + i_bh * T * S, (T, S), (S, 1), (i_t *
BT, i_s * BS), (BT, BS), (1, 0))
else:
p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H * S, 1),
(i_t * BT, i_s * BS), (BT, BS), (1, 0))
p_oi = tl.make_block_ptr(oi + (bos * H + i_h) * S, (T, S), (H * S,
1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
p_oe = tl.make_block_ptr(oe + (bos * H + i_h) * S, (T, S), (H * S,
1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
b_oi = tl.dot(m_i, b_s, allow_tf32=False)
b_oe = tl.dot(m_e, b_s, allow_tf32=False)
tl.store(p_oi, b_oi.to(p_oi.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_oe, b_oe.to(p_oe.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py |
8312c2c2-b06b-4313-beab-8941ed39ad2b | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged_jagged_elementwise_arithmetic_ops(x_ptr, y_ptr, M: tl.constexpr,
N: tl.constexpr, stride_row: tl.constexpr, stride_col: tl.constexpr,
output, thread_block_row_size: tl.constexpr, thread_block_col_size: tl.
constexpr, ops_func: tl.constexpr) ->None:
pid = tl.program_id(0)
num_group_n = (N + thread_block_col_size - 1) // thread_block_col_size
pid_n = pid % num_group_n
pid_m = pid // num_group_n
offset_m = pid_m * thread_block_row_size + tl.arange(0,
thread_block_row_size)
offset_n = pid_n * thread_block_col_size + tl.arange(0,
thread_block_col_size)
mask = (offset_m[:, None] < M) & (offset_n[None, :] < N)
offset = offset_m[:, None] * stride_row + offset_n[None, :] * stride_col
x_ptr += offset
y_ptr += offset
x = tl.load(x_ptr, mask=mask)
y = tl.load(y_ptr, mask=mask)
if ops_func == 'add':
z = tensor_elementwise_add(x, y)
else:
z = tensor_elementwise_mul(x, y)
output += offset
tl.store(output, z, mask=mask)
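
# Hedged sketch (an assumption) of the elementwise helpers dispatched above;
# they live elsewhere in triton_jagged_tensor_ops.py.
@triton.jit
def tensor_elementwise_add(x, y):
    return x + y

@triton.jit
def tensor_elementwise_mul(x, y):
    return x * y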
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
e91acd67-c457-4e0b-98c0-f49971d6f6da | simulate.py | Aalanli/AMDGPUExperiments | simulate.py | 2a6fd9e1e81d1916e3d87db4dda930e2fa417527 | 0 | @triton.jit
def simulate_kernel(output, n_steps, seed, p, start, block_size: tl.constexpr):
n_program = tl.num_programs(axis=0)
pid = tl.program_id(axis=0)
block_start = pid * block_size
offsets = block_start + tl.arange(0, block_size)
state = tl.full([block_size], start, dtype=tl.uint32)
for _ in range(n_steps):
this_seed = tl.randint(seed, pid)
rand = tl.rand(this_seed, offsets)
state = tl.where(state == 0, state, tl.where(rand < p, state - 1,
state + 1))
pid += n_program
fall_off = state == 0
prob = tl.sum(fall_off.to(tl.int64))
tl.store(output + tl.program_id(0), prob)
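
# Hedged launch sketch (assumptions throughout): each program simulates
# `block_size` independent +1/-1 random walks with absorption at state 0 and
# writes the number of absorbed walkers to its slot of `output`.
import torch

n_blocks, block_size = 64, 1024
out = torch.empty(n_blocks, dtype=torch.int64, device='cuda')
simulate_kernel[(n_blocks,)](out, 1000, 42, 0.5, 10, block_size=block_size)
absorbed_fraction = out.sum().item() / (n_blocks * block_size)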
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Aalanli/AMDGPUExperiments/blob/2a6fd9e1e81d1916e3d87db4dda930e2fa417527/simulate.py |
caf594ae-0007-42f3-a11f-df8a6e031d08 | triton_fused_local_attn2.py | LouChao98/vqtree | ops/triton_fused_local_attn2.py | 27a53274df7a804bce27dffcce5f5be73f64b6f3 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr, V_block_ptr,
start_m, offs_m, offs_n, SEQLEN_K: tl.constexpr, WINDOW_SIZE: tl.
constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_MN: tl.
constexpr, STAGE: tl.constexpr):
if STAGE == 1:
lo = start_m * BLOCK_M - WINDOW_SIZE
hi = start_m * BLOCK_M
if lo < 0:
lo = 0
else:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
hi = min(hi, SEQLEN_K)
EVEN_MASK_FREE = EVEN_MN & ((STAGE == 1) | (STAGE == 2))
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
if EVEN_MASK_FREE:
k = tl.load(K_block_ptr)
else:
k = tl.load(K_block_ptr, boundary_check=(1,), padding_option='zero'
)
qk = tl.dot(q, k) * (sm_scale * RCP_LN2)
if STAGE == 3:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk += tl.where(mask, 0, NEGINF)
if not EVEN_MASK_FREE:
qk += tl.where((start_n + offs_n)[None, :] < SEQLEN_K, 0, NEGINF)
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
acc *= alpha[:, None]
if EVEN_MASK_FREE:
v = tl.load(V_block_ptr)
else:
v = tl.load(V_block_ptr, boundary_check=(1,), padding_option='zero'
)
acc += tl.dot(p.to(V_block_ptr.dtype.element_ty), v)
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
return acc, l_i, m_i
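
# Note (assumption): RCP_LN2 and NEGINF are module-level constants in
# triton_fused_local_attn2.py; definitions consistent with the exp2-based
# softmax above would be:
import math

RCP_LN2 = 1.0 / math.log(2.0)  # rewrites exp(x) as exp2(x * RCP_LN2)
NEGINF = float('-inf')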
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn2.py |
85d4ae25-63a1-4dfe-83dc-04295de1183a | copy.py | chengzeyi/stable-fast | src/sfast/triton/ops/copy.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @eval(
"""triton.heuristics({
'BLOCK_M': lambda kwargs: min(64, triton.next_power_of_2(kwargs['size_inp_0'])),
'BLOCK_N': lambda kwargs: min(64, triton.next_power_of_2(kwargs['size_inp_1'])),
'BATCH_STRIDE_INP_IS_1': lambda kwargs: kwargs['batch_stride_inp'] == 1,
'STRIDE_INP_0_IS_1': lambda kwargs: kwargs['stride_inp_0'] == 1,
'STRIDE_INP_1_IS_1': lambda kwargs: kwargs['stride_inp_1'] == 1,
'BATCH_STRIDE_OUT_IS_1': lambda kwargs: kwargs['batch_stride_out'] == 1,
'STRIDE_OUT_0_IS_1': lambda kwargs: kwargs['stride_out_0'] == 1,
'STRIDE_OUT_1_IS_1': lambda kwargs: kwargs['stride_out_1'] == 1,
})"""
)
@eval(
"""triton.heuristics({
'num_warps': lambda kwargs: max(1, min(16, kwargs['BLOCK_M'] * kwargs['BLOCK_N'] // 32)),
})"""
)
@triton.jit
def copy_3d_kernel(output_ptr, input_ptr, bs, size_inp_0, size_inp_1,
batch_stride_inp, stride_inp_0, stride_inp_1, batch_stride_out,
stride_out_0, stride_out_1, BATCH_STRIDE_INP_IS_1: tl.constexpr,
STRIDE_INP_0_IS_1: tl.constexpr, STRIDE_INP_1_IS_1: tl.constexpr,
BATCH_STRIDE_OUT_IS_1: tl.constexpr, STRIDE_OUT_0_IS_1: tl.constexpr,
STRIDE_OUT_1_IS_1: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.
constexpr):
pid = tl.program_id(0)
pid_batch = tl.program_id(1)
grid_m = tl.cdiv(size_inp_0, BLOCK_M)
grid_n = tl.cdiv(size_inp_1, BLOCK_N)
pid_m = pid // grid_n
pid_n = pid - pid_m * grid_n
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
A = input_ptr + (1 if BATCH_STRIDE_INP_IS_1 else batch_stride_inp
) * pid_batch + (rm[:, None] * (1 if STRIDE_INP_0_IS_1 else
stride_inp_0) + rn[None, :] * (1 if STRIDE_INP_1_IS_1 else
stride_inp_1))
B = output_ptr + (1 if BATCH_STRIDE_OUT_IS_1 else batch_stride_out
) * pid_batch + (rm[:, None] * (1 if STRIDE_OUT_0_IS_1 else
stride_out_0) + rn[None, :] * (1 if STRIDE_OUT_1_IS_1 else
stride_out_1))
mask = (rm < size_inp_0)[:, None] & (rn < size_inp_1)[None, :]
a = tl.load(A, mask=mask)
tl.store(B, a, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/copy.py |
0ba1eec5-4ae0-4399-a078-20d80cf2ab46 | gemm_postop_gelu_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.jit
def gelu(x):
"""
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1 + tanh(kAlpha * (x + 0.044715 * x * x * x)))
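
# Note (assumption): `tanh` and `kAlpha` come from the surrounding benchmark
# module. kAlpha is the sqrt(2 / pi) constant of the tanh GELU approximation;
# a minimal tanh built from tl.sigmoid would be:
import math

kAlpha = math.sqrt(2.0 / math.pi)

@triton.jit
def tanh(x):
    return 2 * tl.sigmoid(2 * x) - 1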
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py |
1e8d66ac-783c-4e08-bacd-8c8629067011 | softmax_online_v2_spec_rev.py | iclementine/optimize_softmax | softmax_online_v2_spec_rev.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr
):
pid_m = tl.program_id(0)
m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.
element_ty)
z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty)
prev_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, prev_multiple, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs).to(output_ptr.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
for start_n in range(prev_multiple, N, TILE_N):
n_offsets = start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
new_m = tl.maximum(m, inp)
new_z = tl.exp(m - new_m) * z + tl.exp(inp - new_m)
m = new_m
z = new_z
final_m = tl.max(m, 0)
z = tl.sum(tl.exp(m - final_m) * z)
m = final_m
prev_multiple = prev_multiple_of(N, TILE_N)
for start_n in range(0, TILE_N, TILE_N):
n_offsets = prev_multiple - start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
for start_n in range(TILE_N, N, TILE_N):
n_offsets = prev_multiple - start_n + tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
inp = tl.load(input_ptrs).to(output_ptr.dtype.element_ty)
e = tl.exp(inp - m)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out)
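
# Hedged sketch (an assumption) of the helper used above: the start offset of
# the last (possibly partial) TILE_N tile, i.e. cdiv(a, b) * b - b, so the
# masked tail loops above always have exactly one boundary tile to handle.
@triton.jit
def prev_multiple_of(a, b):
    return tl.cdiv(a, b) * b - b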
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_spec_rev.py |
a454fb7a-be17-4207-aa7f-74a5ac3b9ee8 | attn_qk_int8_per_block_hd128_causal.py | rodjjo/editorium | editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py | 7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694 | 0 | @triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs, K_scale_ptr, V_ptrs,
start_m, BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.
constexpr, STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.
constexpr, N_CTX: tl.constexpr):
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
K_scale_ptr += lo // BLOCK_N
K_ptrs += HEAD_DIM * lo
V_ptrs += HEAD_DIM * lo
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k_mask = offs_n[None, :] < N_CTX - start_n
k = tl.load(K_ptrs, mask=k_mask)
k_scale = tl.load(K_scale_ptr)
qk = tl.dot(q, k).to(tl.float32) * q_scale * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= start_n + offs_n[None, :]
qk = qk + tl.where(mask, 0, -1000000.0)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
acc = acc * alpha[:, None]
v = tl.load(V_ptrs, mask=offs_n[:, None] < N_CTX - start_n)
p = p.to(tl.float16)
acc += tl.dot(p, v, out_dtype=tl.float16)
m_i = m_ij
K_ptrs += BLOCK_N * HEAD_DIM
K_scale_ptr += 1
V_ptrs += BLOCK_N * HEAD_DIM
return acc, l_i, m_i
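# The (m_i, l_i, acc) triple implements online softmax: each new key block is
# folded in by rescaling the old state with alpha = exp2(m_i - m_ij). exp2
# matches the true softmax only if log2(e) has been folded into the scales
# upstream, which is not visible in this excerpt.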
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py |
4149e45f-a98a-46f3-9ba5-1d39130b86ec | sparse_linear.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_linear.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.autotune(configs=autotune_configs, key=['col_sparse_dim',
'inner_dim', 'sparse_dim'])
@triton.jit
def output_sparse_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr, expert_ends_ptr,
row_dim: tl.constexpr, col_sparse_dim: tl.constexpr, inner_dim: tl.
constexpr, sparse_dim: tl.constexpr, padded_sparse_dim: tl.constexpr,
lhs_stride_row: tl.constexpr, lhs_stride_inner: tl.constexpr,
rhs_stride_inner: tl.constexpr, rhs_stride_col: tl.constexpr,
out_stride_row: tl.constexpr, out_stride_col: tl.constexpr, accumulate:
tl.constexpr, block_size_row: tl.constexpr, block_size_col: tl.
constexpr, block_size_inner: tl.constexpr, group_size_row: tl.constexpr):
tl.static_assert(row_dim % block_size_row == 0)
tl.static_assert(col_sparse_dim % block_size_col == 0)
tl.static_assert(inner_dim % block_size_inner == 0)
tl.static_assert(sparse_dim <= padded_sparse_dim)
pid_row, pid_col = tl.swizzle2d(tl.program_id(axis=0), tl.program_id(
axis=1), row_dim // block_size_row, col_sparse_dim //
block_size_col, group_size_row)
row_offset = pid_row * block_size_row
col_sparse_offset = pid_col * block_size_col
sparse_range = tl.arange(0, padded_sparse_dim)
expert_ends = tl.load(expert_ends_ptr + sparse_range, mask=sparse_range <
sparse_dim, other=row_dim)
sparse_index = tl.sum((expert_ends <= row_offset).to(tl.int64))
if sparse_index == sparse_dim:
return
col_dense_offset = col_sparse_offset + sparse_index * col_sparse_dim
row_range = tl.arange(0, block_size_row)[:, None]
col_range = tl.arange(0, block_size_col)[None, :]
inner_range = tl.arange(0, block_size_inner)
lhs_ptr += (row_offset + row_range) * lhs_stride_row + inner_range[None, :
] * lhs_stride_inner
rhs_ptr += inner_range[:, None] * rhs_stride_inner + (col_dense_offset +
col_range) * rhs_stride_col
out_ptr += (row_offset + row_range) * out_stride_row + (col_sparse_offset +
col_range) * out_stride_col
out = tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr), out_dtype=tl.float32)
for k in range(1, inner_dim // block_size_inner):
lhs_ptr += block_size_inner * lhs_stride_inner
rhs_ptr += block_size_inner * rhs_stride_inner
out += tl.dot(tl.load(lhs_ptr), tl.load(rhs_ptr))
if accumulate:
out += tl.load(out_ptr)
tl.store(out_ptr, out)
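# Reading note: `expert_ends` holds the cumulative row boundary of each expert
# segment, so `sparse_index = sum(expert_ends <= row_offset)` is the index of
# the expert owning this row block, and the early return skips padding rows
# past the last expert. This implicitly assumes every `block_size_row` block
# of rows lies inside a single expert segment.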
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access",
"Tiled"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py |
c4bde555-085f-4158-9b7e-4d8f039195cf | mlstm_matmul.py | LukasBluebaum/xLSTM-Triton-CUDA-Implementation | mlstm_matmul.py | 6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b | 0 | @triton.jit
def mlstm_matmul_kernel_df(dF, F, NH: tl.constexpr, S: tl.constexpr):
bh_id = tl.program_id(0)
batch_id = bh_id // NH
head_id = bh_id % NH
batch_offset_f = batch_id * NH * S + head_id * S
offset_f = tl.arange(0, S)
df = tl.load(dF + batch_offset_f + offset_f, offset_f < S)
df = tl.associative_scan(df, 0, scan_add_op)
f = tl.load(F + batch_offset_f + offset_f, offset_f < S)
df = tl.sigmoid(-f) * df
tl.store(dF + batch_offset_f + offset_f, df, offset_f < S)
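# A minimal launch sketch (an assumption, not from the source file). The scan
# uses `scan_add_op`, a @triton.jit helper defined alongside this kernel that
# is presumably elementwise addition, making the first step an inclusive
# cumulative sum of dF along the sequence before the sigmoid(-f) chain-rule
# factor is applied. S must be a power of two for tl.arange.
#
#   grid = (B * NH,)  # one program per (batch, head)
#   mlstm_matmul_kernel_df[grid](dF, F, NH=NH, S=S)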
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Recurrent Neural Networks",
"Elementwise Operations",
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py |
5b75cd56-ee47-4e6f-a821-a5604495112c | test_triton_varargs.py | facebookresearch/xformers | tests/test_triton_varargs.py | a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc | 0 | @triton.jit
def sumN(output_ptr, scaling_ptr, *inputs, BLOCK_SIZE: tl.constexpr):
offset = tl.arange(0, BLOCK_SIZE)
output = tl.zeros([BLOCK_SIZE], tl.float32)
scaling: 'VAR_ARGS_ARRAY'
for i in range(len(scaling)):
scaling[i] = tl.load(scaling_ptr + i)
for i in range(2):
for j in range(len(inputs)):
output = output + tl.load(inputs[j] + offset) * scaling[j]
tl.store(output_ptr + offset, output)
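# Usage sketch (an assumption, based on how this test exercises xformers'
# varargs support): the `VAR_ARGS_ARRAY` annotation is expanded by the
# unroll_varargs helper (name assumed) before launch, specializing the kernel
# for a fixed number of inputs. Note the inner range(2) loop doubles the sum.
#
#   # out = 2 * sum_j scaling[j] * inputs[j] over one BLOCK_SIZE tile
#   sumN[(1,)](out, scaling_buf, a, b, c, BLOCK_SIZE=128)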
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD"
] | https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py |
f62e413e-399f-406a-a6fb-44c3e664e00d | dw.py | Forkxz/TritonDeepLearningKernel | kernel/dropconnect/dw.py | add54b6318e8fa5fdbf8c7b47659de9fceaa5691 | 0 | @triton.jit
def dropconnect_dx_kernel(dy_ptr, x_ptr, dw_ptr, seed, M, K, N, stride_dym,
stride_dyn, stride_xm, stride_xk, stride_dm, stride_dk, stride_dn,
stride_dwk, stride_dwn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, ALLOWTF32: tl.constexpr):
"""
dY_m = Y.grad
dO_m = dY_m.view(M,1,N).broadcast_to(M,K,N)
dWD_m = dO_m * x_m_cast
dw_m_cast = dWD_m * D
dw_m = dw_m_cast.sum(dim=0) """
pid_k = tl.program_id(0)
pid_n = tl.program_id(1)
offset_m = 0
offset_k = pid_k * BLOCK_SIZE_K
offset_n = pid_n * BLOCK_SIZE_N
dy_offsets = block_offsets_2d(M, N, stride_dym, stride_dyn, offset_m,
offset_n, BLOCK_SIZE_M, BLOCK_SIZE_N)
x_offsets = block_offsets_2d(M, K, stride_xm, stride_xk, offset_m,
offset_k, BLOCK_SIZE_M, BLOCK_SIZE_K)
d_offsets = block_offsets_3d(M, K, N, stride_dm, stride_dk, stride_dn,
offset_m, offset_k, offset_n, BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N)
dy_offsets = dy_offsets.reshape(BLOCK_SIZE_M, 1, BLOCK_SIZE_N)
x_offsets = x_offsets.reshape(BLOCK_SIZE_M, BLOCK_SIZE_K, 1)
offs_m = tl.arange(0, BLOCK_SIZE_M)
dy_tile = dy_ptr + dy_offsets
x_tile = x_ptr + x_offsets
ASM: tl.constexpr = 'cvt.rna.tf32.f32 $0, $1;'
accumulator = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
for m in range(0, tl.cdiv(M, BLOCK_SIZE_M)):
random_masks = tl.random.rand(seed, d_offsets) > 0.5
m_mask = offs_m[:, None, None] < M - m * BLOCK_SIZE_M
dy_load = tl.load(dy_tile, mask=m_mask, other=0.0)
x_load = tl.load(x_tile, mask=m_mask, other=0.0)
dy = tl.where(random_masks, dy_load, 0.0)
wd = tl.where(random_masks, x_load, 0.0)
mul = dy * wd
accumulator += tl.sum(mul, axis=0)
dy_tile += BLOCK_SIZE_M * stride_dym
x_tile += BLOCK_SIZE_M * stride_xm
d_offsets += BLOCK_SIZE_M * stride_dm
dw_offset, dw_mask = block_offsets_2d(K, N, stride_dwk, stride_dwn,
offset_k, offset_n, BLOCK_SIZE_K, BLOCK_SIZE_N, True)
dw_tile = dw_ptr + dw_offset
dw = accumulator.to(dw_tile.dtype.element_ty)
tl.store(dw_tile, dw, mask=dw_mask)
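# Correctness note: the Bernoulli mask is regenerated from (seed, d_offsets)
# instead of being stored, so the forward pass must use the same seed and the
# same 3-D offset layout for the dropped positions to line up. The keep
# probability is fixed at 0.5 here (rand > 0.5).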
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/dw.py |
0ef41aca-fbb5-44bb-ae73-f97ff2bac77e | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4)], key=['BT', 'BK'])
@triton.jit
def fused_chunk_delta_rule_fwd_kernel(q, k, v, v_new, d, o, initial_state,
final_state, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale,
BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, DK: tl.constexpr,
DV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE:
tl.constexpr, CHECK: tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_h = tl.zeros([BK, BV], dtype=tl.float32)
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, DK), (s_k_t, s_k_d), (0,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (DK, T), (s_k_d, s_k_t), (i_k *
BK, 0), (BK, BT), (0, 1))
p_d = tl.make_block_ptr(d + i_bh * s_k_h, (T, DK), (s_k_t, s_k_d), (0,
i_k * BK), (BT, BK), (1, 0))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, DV), (s_v_t, s_v_d), (0,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (i_bh + i_k * B * H) * s_v_h, (T, DV), (
s_v_t, s_v_d), (0, i_v * BV), (BT, BV), (1, 0))
p_v_new = tl.make_block_ptr(v_new + i_bh * s_v_h, (T, DV), (s_v_t,
s_v_d), (0, i_v * BV), (BT, BV), (1, 0))
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(initial_state + i_bh * DK * DV, (DK, DV), (
DV, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_h = tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
for i in range(0, tl.cdiv(T, BT)):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_d = tl.load(p_d, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_k.dtype)
b_s = tl.dot(b_q, b_k, allow_tf32=False)
b_s = tl.where(m_s, b_s, 0)
b_v_prime = tl.dot(b_d, b_h.to(b_q.dtype), allow_tf32=False)
b_v = b_v - b_v_prime
tl.store(p_v_new, b_v.to(p_v.dtype.element_ty), boundary_check=(0, 1))
b_o = tl.dot(b_s.to(b_q.dtype), b_v.to(b_q.dtype), allow_tf32=False)
if CHECK and i == 0:
b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_k, b_v.to(b_k.dtype), allow_tf32=False)
else:
b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False)
b_h = b_h + tl.dot(b_k, b_v.to(b_k.dtype), allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
p_q = tl.advance(p_q, (BT, 0))
p_k = tl.advance(p_k, (0, BT))
p_v = tl.advance(p_v, (BT, 0))
p_v_new = tl.advance(p_v_new, (BT, 0))
p_o = tl.advance(p_o, (BT, 0))
p_d = tl.advance(p_d, (BT, 0))
if STORE_FINAL_STATE:
p_final = tl.make_block_ptr(final_state + i_bh * DK * DV, (DK, DV),
(DV, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_final, b_h.to(p_final.dtype.element_ty), boundary_check=
(0, 1))
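# Recurrence note: each program owns one (key-block, value-block, head) tile of
# the running state b_h in [BK, BV]. Per chunk, the delta rule first corrects
# the values (v <- v - d @ h), then forms o = causal(q k^T) v + q h and updates
# the state h <- h + k^T v, with all accumulation in fp32.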
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced",
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/fused_chunk.py |
07031280-262d-4433-ae3a-7a2222f9099b | gemm_postop_gelu_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M':
256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4,
'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({
'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32
), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512,
'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages
=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N':
128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'},
num_stages=2, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers_batched(a_ptr, b_ptr, c_ptr, B: tl.
constexpr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, stride_az:
tl.constexpr, stride_am: tl.constexpr, stride_ak: tl.constexpr,
stride_bz: tl.constexpr, stride_bk: tl.constexpr, stride_bn: tl.
constexpr, stride_cz: tl.constexpr, stride_cm: tl.constexpr, stride_cn:
tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
bid = tl.program_id(axis=0)
pid = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offset_a = bid.to(tl.int64) * stride_az
offset_b = bid.to(tl.int64) * stride_bz
a_block_ptr = tl.make_block_ptr(base=a_ptr + offset_a, shape=(M, K),
strides=(stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr + offset_b, shape=(K, N),
strides=(stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
b = tl.load(b_block_ptr, boundary_check=(0, 1))
accumulator += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
c = gelu(accumulator)
offset_c = bid.to(tl.int64) * stride_cz
c_block_ptr = tl.make_block_ptr(base=c_ptr + offset_c, shape=(M, N),
strides=(stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M,
pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N),
order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
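# A minimal launch sketch (an assumption, not part of the benchmark file):
# axis 0 walks the batch and axis 1 walks the swizzled (M, N) tile grid, with
# block sizes chosen by the autotuner.
#
#   grid = lambda META: (B, triton.cdiv(M, META["BLOCK_SIZE_M"])
#                           * triton.cdiv(N, META["BLOCK_SIZE_N"]))
#   matmul_kernel_with_block_pointers_batched[grid](
#       a, b, c, B, M, N, K,
#       a.stride(0), a.stride(1), a.stride(2),
#       b.stride(0), b.stride(1), b.stride(2),
#       c.stride(0), c.stride(1), c.stride(2))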
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Matrix Multiplication",
"Activation Functions"
],
"Memory Access Pattern": [
"Blocked Access",
"Tiled"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py |
1fa87111-7ad0-4833-a0b9-b14824dfba0a | dequant_kernel.py | drisspg/transformer_nuggets | transformer_nuggets/quant/dequant_kernel.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def dequantize(inputs, nf4_lut):
"""Dequantizes the nf4 data to bfloat16"""
return tl.load(nf4_lut + inputs)
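# Usage note: `inputs` are 4-bit NF4 codes already widened to integers in
# [0, 16), and `nf4_lut` is the 16-entry NormalFloat4 codebook stored in
# bfloat16, so dequantization reduces to a single gather per element.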
| {
"Data Type": [
"bf16"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/quant/dequant_kernel.py |
c184d15a-84bd-4dc9-90d1-69aa80226eab | gemm_postop_addmatrix_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256,
'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode':
'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M':
256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4,
'grf_mode': 'large'}, num_stages=3, num_warps=32), triton.Config({
'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K':
32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32
), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N': 512,
'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'}, num_stages
=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 8, 'BLOCK_SIZE_N':
128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 1, 'grf_mode': 'large'},
num_stages=2, num_warps=4)], key=['M', 'N', 'K'])
@triton.jit
def matmul_kernel_with_block_pointers_batched(a_ptr, b_ptr, c_ptr, d_ptr, B:
tl.constexpr, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
stride_az: tl.constexpr, stride_am: tl.constexpr, stride_ak: tl.
constexpr, stride_bz: tl.constexpr, stride_bk: tl.constexpr, stride_bn:
tl.constexpr, stride_cz: tl.constexpr, stride_cm: tl.constexpr,
stride_cn: tl.constexpr, stride_dz: tl.constexpr, stride_dm: tl.
constexpr, stride_dn: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M:
tl.constexpr):
bid = tl.program_id(axis=0)
pid = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
offset_a = bid.to(tl.int64) * stride_az
offset_b = bid.to(tl.int64) * stride_bz
a_block_ptr = tl.make_block_ptr(base=a_ptr + offset_a, shape=(M, K),
strides=(stride_am, stride_ak), offsets=(pid_m * BLOCK_SIZE_M, 0),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr + offset_b, shape=(K, N),
strides=(stride_bk, stride_bn), offsets=(0, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N), order=(1, 0))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for _ in range(0, K, BLOCK_SIZE_K):
a = tl.load(a_block_ptr, boundary_check=(0, 1))
b = tl.load(b_block_ptr, boundary_check=(0, 1))
accumulator += tl.dot(a, b)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
offset_d = bid.to(tl.int64) * stride_dz
d_block_ptr = tl.make_block_ptr(base=d_ptr + offset_d, shape=(M, N),
strides=(stride_dm, stride_dn), offsets=(pid_m * BLOCK_SIZE_M,
pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N),
order=(1, 0))
d = tl.load(d_block_ptr, boundary_check=(0, 1))
c = accumulator + d
offset_c = bid.to(tl.int64) * stride_cz
c_block_ptr = tl.make_block_ptr(base=c_ptr + offset_c, shape=(M, N),
strides=(stride_cm, stride_cn), offsets=(pid_m * BLOCK_SIZE_M,
pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N),
order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py |
e1abcb7f-d624-4f0f-adf5-1a9471e35d14 | fp8_gemm.py | pytorch/FBGEMM | fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.autotune(configs=MATMUL_CONFIGS, key=['m_key', 'n_key', 'k_key'])
@triton.jit
def _kernel_matmul_fp8_row(A_ptr, B_ptr, C_ptr, M, N, K, m_key, n_key,
k_key, A_scale, B_scale, Bias, stride_am, stride_ak, stride_bn,
stride_bk, stride_cm, stride_cn, dot_out_dtype: tl.constexpr,
allow_tf32: tl.constexpr, fp8_fast_accum: tl.constexpr, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.
constexpr, SPLIT_K: tl.constexpr, USE_BIAS: tl.constexpr, AB_DTYPE: tl.
constexpr, NUM_SMS: tl.constexpr) ->None:
"""Matmul kernel of [M, K] @ [N, K] with row-wise scales
    Performs a swizzled matmul over [BLOCK_M, BLOCK_K] x [BLOCK_K, BLOCK_N] tiles.
Args:
A (TensorWrapper): [M, K] input tensor.
B (TensorWrapper): [N, K] input tensor.
C (TensorWrapper): [M, N] output tensor.
M (int): M dimension of input tensor.
N (int): N dimension of input tensor.
K (int): K dimension of input tensor.
m_key (int): Autotuning key for M dimension of input tensor.
n_key (int): Autotuning key for N dimension of input tensor.
k_key (int): Autotuning key for K dimension of input tensor.
A_scale (TensorWrapper): [M] reciprocal scale tensor per row. A * A_scale = original A.
B_scale (TensorWrapper): [N] reciprocal scale tensor per row. B * B_scale = original B.
Bias (tensorWrapper): [N] Optional bias tensor.
stride_am (int): Stride of M dimension of A.
stride_ak (int): Stride of K dimension of A.
stride_bn (int): Stride of N dimension of B.
stride_bk (int): Stride of K dimension of B.
stride_cm (int): Stride of M dimension of C.
stride_cn (int): Stride of N dimension of C.
dot_out_dtype (torch.dtype): Output type of tensor core.
allow_tf32 (bool): Whether to use TF32 for tensor core.
fp8_fast_accum (bool): Whether to use fast accumulation for tensor core.
BLOCK_M (int): Block size for M dimension.
BLOCK_N (int): Block size for N dimension.
BLOCK_K (int): Block size for K dimension.
GROUP_M (int): Number of groups for M dimension swizzle.
        SPLIT_K (int): Number of SMs to launch per row.
        USE_BIAS (bool): Whether to use bias.
        AB_DTYPE (bool): Whether to cast A and B to C.dtype before tensor core.
        NUM_SMS (int): Number of streaming multiprocessors, used to schedule
            persistent tiles.
"""
start_pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M)
num_pid_n = tl.cdiv(N, BLOCK_N)
k_tiles = tl.cdiv(K, BLOCK_K)
num_tiles = num_pid_m * num_pid_n
tiles_per_SM = num_tiles // NUM_SMS
if start_pid < num_tiles % NUM_SMS:
tiles_per_SM += 1
tile_id = start_pid - NUM_SMS
ki = -1
offs_k_for_mask = tl.arange(0, BLOCK_K)
num_pid_in_group = GROUP_M * num_pid_n
pid_m = 0
pid_n = 0
offs_am = tl.arange(0, BLOCK_M)
offs_bn = tl.arange(0, BLOCK_N)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
for _ in range(0, k_tiles * tiles_per_SM):
ki = tl.where(ki == k_tiles - 1, 0, ki + 1)
if ki == 0:
tile_id += NUM_SMS
group_id = tile_id // num_pid_in_group
first_pid_m = group_id * GROUP_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_M)
pid_m = first_pid_m + tile_id % group_size_m
pid_n = tile_id % num_pid_in_group // group_size_m
start_m = pid_m * BLOCK_M
start_n = pid_n * BLOCK_N
offs_am = start_m + tl.arange(0, BLOCK_M)
offs_bn = start_n + tl.arange(0, BLOCK_N)
offs_am = tl.where(offs_am < M, offs_am, 0)
offs_bn = tl.where(offs_bn < N, offs_bn, 0)
offs_am = tl.max_contiguous(tl.multiple_of(offs_am, BLOCK_M),
BLOCK_M)
offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn, BLOCK_N),
BLOCK_N)
offs_k = ki * BLOCK_K + tl.arange(0, BLOCK_K)
A = A_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak
)
B = B_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn
)
a = tl.load(A, mask=offs_k_for_mask[None, :] < K - ki * BLOCK_K,
other=0.0)
b = tl.load(B, mask=offs_k_for_mask[:, None] < K - ki * BLOCK_K,
other=0.0)
acc = tl.dot(a, b, acc, out_dtype=dot_out_dtype, allow_tf32=allow_tf32)
if ki == k_tiles - 1:
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
a_scale = tl.load(A_scale + rm, mask=rm < M)
b_scale = tl.load(B_scale + rn, mask=rn < N)
scale = a_scale[:, None] * b_scale[None, :]
acc *= scale
if USE_BIAS:
bias = tl.load(Bias + rn, mask=rn < N)
acc += bias[None, :]
acc = acc.to(C_ptr.dtype.element_ty)
C = C_ptr + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.store(C, acc, mask=mask)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype)
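# Persistent-kernel launch sketch (an assumption, not from the source file):
# the grid holds exactly NUM_SMS programs, each walking `tiles_per_SM` output
# tiles and resetting `acc` after finishing a tile.
#
#   import torch
#   NUM_SMS = torch.cuda.get_device_properties(0).multi_processor_count
#   grid = (NUM_SMS,)  # pass the same value as the NUM_SMS constexpr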
| {
"Data Type": [
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py |
2923d2d3-cffb-4408-8fa9-e9794bb2be61 | 05-layer-norm.py | triton-lang/triton | python/tutorials/05-layer-norm.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _layer_norm_bwd_dwdb(DW, DB, FINAL_DW, FINAL_DB, M, N, BLOCK_SIZE_M: tl
.constexpr, BLOCK_SIZE_N: tl.constexpr):
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.0)
db += tl.load(DB + offs, mask=mask, other=0.0)
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
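# A minimal launch sketch (an assumption): one program per column block; each
# program strides down all row blocks of the partial [M, N] dW/dB buffers and
# reduces them into the final [N] vectors.
#
#   grid = lambda META: (triton.cdiv(N, META["BLOCK_SIZE_N"]),)
#   _layer_norm_bwd_dwdb[grid](dw_part, db_part, dw, db, M, N,
#                              BLOCK_SIZE_M=32, BLOCK_SIZE_N=128)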
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/05-layer-norm.py |
d99050b5-9c96-4233-a447-a34ac46ac1f1 | _semi_structured_conversions.py | huyz2023/2by4-pretrain | sparse/_semi_structured_conversions.py | 9e330125dea71e5a3dee235f4efb8869f9e4cdd0 | 0 | @triton.jit
def _sparse_semi_structured_to_dense_kernel(sparse_ptr, meta_reordered_ptr,
dense_ptr, m, k, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
group, interweave = 32, 4
dest_row = row_idx // 32 * 32 + row_idx % 8 * 4 + row_idx % group // 8
if dest_row % 2 == 0:
dest_row_ = (row_idx // 32 * 32 + row_idx % 8 * 4 + row_idx % group //
8 + tl.arange(0, BLOCK_SIZE // 16) % 2)
dest_col_ = tl.arange(0, BLOCK_SIZE // 16) // 2 * 2
index = dest_col_ // 2 * m * 2 + dest_row_ * 2 + dest_col_ % 2
meta = tl.load(meta_reordered_ptr + index, mask=tl.arange(0,
BLOCK_SIZE // 16) < k // 16, other=-float('inf'))
else:
dest_row_ = (row_idx // 32 * 32 + row_idx % 8 * 4 + row_idx % group //
8 - (tl.arange(0, BLOCK_SIZE // 16) + 1) % 2)
dest_col_ = tl.arange(0, BLOCK_SIZE // 16) // 2 * 2 + 1
index = dest_col_ // 2 * m * 2 + dest_row_ * 2 + dest_col_ % 2
meta = tl.load(meta_reordered_ptr + index, mask=tl.arange(0,
BLOCK_SIZE // 16) < k // 16, other=-float('inf'))
meta_20 = (meta & 3) + (row_idx * k + 16 * tl.arange(0, BLOCK_SIZE // 16))
meta_21 = (meta >> 2 & 3) + (row_idx * k + 16 * tl.arange(0, BLOCK_SIZE //
16))
meta_22 = (meta >> 4 & 3) + (row_idx * k + 16 * tl.arange(0, BLOCK_SIZE //
16) + 4)
meta_23 = (meta >> 6 & 3) + (row_idx * k + 16 * tl.arange(0, BLOCK_SIZE //
16) + 4)
meta_24 = (meta >> 8 & 3) + (row_idx * k + 16 * tl.arange(0, BLOCK_SIZE //
16) + 8)
meta_25 = (meta >> 10 & 3) + (row_idx * k + 16 * tl.arange(0,
BLOCK_SIZE // 16) + 8)
meta_26 = (meta >> 12 & 3) + (row_idx * k + 16 * tl.arange(0,
BLOCK_SIZE // 16) + 12)
meta_27 = (meta >> 14 & 3) + (row_idx * k + 16 * tl.arange(0,
BLOCK_SIZE // 16) + 12)
row0 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16), mask=tl.arange(0, BLOCK_SIZE // 16) < k // 16,
other=-float('inf'))
row1 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 1, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row2 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 2, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row3 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 3, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row4 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 4, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row5 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 5, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row6 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 6, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
row7 = tl.load(sparse_ptr + row_idx * k // 2 + 8 * tl.arange(0,
BLOCK_SIZE // 16) + 7, mask=tl.arange(0, BLOCK_SIZE // 16) < k //
16, other=-float('inf'))
tl.store(dense_ptr + meta_20, row0, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_21, row1, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_22, row2, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_23, row3, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_24, row4, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_25, row5, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_26, row6, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
tl.store(dense_ptr + meta_27, row7, mask=tl.arange(0, BLOCK_SIZE // 16) <
k // 16)
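# Decoding note: each 16-bit `meta` word packs eight 2-bit column indices for
# one 16-wide group of a 2:4-sparse row (8 kept values per 16 columns), which
# is why the kernel peels meta_20..meta_27 with successive 2-bit shifts and
# scatters row0..row7 to the decoded dense positions.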
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Transposed Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Memory-Bound"
]
} | [
"BSD"
] | https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/_semi_structured_conversions.py |
0fff7131-f1c9-4588-b8ed-1ddc2f4a032b | mas_triton.py | uthree/tts_impl | src/tts_impl/functional/monotonic_align/mas_triton.py | a9d9a66b26a0de4694e502dedfdff7be26d99ddd | 0 | @triton.jit
def _maximum_path(path, value, t_x, t_y, B, T, S, max_neg_val, BLOCK_SIZE_X:
tl.constexpr):
batch = tl.program_id(axis=0)
path += batch * T * S
value += batch * T * S
x_length = tl.load(t_x + batch)
y_length = tl.load(t_y + batch)
offs_prev = tl.arange(0, BLOCK_SIZE_X)
init = tl.where(offs_prev == 0, tl.load(value), max_neg_val)
tl.store(value + offs_prev * S, init, mask=offs_prev < x_length)
for j in range(1, y_length, 1):
v_cur = tl.load(value + offs_prev * S + (j - 1), mask=offs_prev <
x_length, other=max_neg_val)
v_prev = tl.load(value + (offs_prev - 1) * S + (j - 1), mask=(0 <
offs_prev) & (offs_prev < x_length), other=max_neg_val)
v = tl.maximum(v_cur, v_prev) + tl.load(value + offs_prev * S + j,
mask=offs_prev < x_length)
tl.store(value + offs_prev * S + j, v, mask=offs_prev < x_length)
index = x_length - 1
for j in range(y_length - 1, -1, -1):
tl.store(path + index * S + j, 1)
if index > 0:
v_left = tl.load(value + index * S + j - 1)
v_leftdown = tl.load(value + (index - 1) * S + j - 1)
            if v_left < v_leftdown:
                index -= 1
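# Algorithm note: this is monotonic alignment search. The first loop fills the
# DP table in place, value[i, j] <- max(value[i, j-1], value[i-1, j-1]) +
# value[i, j]; the second loop backtracks from (x_length - 1, y_length - 1) and
# writes 1s along the best monotone path. One program handles one batch item,
# so the grid is simply (B,).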
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/uthree/tts_impl/blob/a9d9a66b26a0de4694e502dedfdff7be26d99ddd/src/tts_impl/functional/monotonic_align/mas_triton.py |
031b0c3d-1c3a-4448-87a8-dbf9c6afc3b9 | cross_entropy_loss.py | tdrussell/qlora-pipe | kernels/cross_entropy_loss.py | 6fb7c8eeae52a0e36c41f00628985f29d8330684 | 0 | @triton.heuristics({'DO_LOGIT_SCALING': lambda args: args['DO_LOGIT_SCALING']})
@triton.jit
def _cross_entropy_backward(logits_ptr, logits_row_stride, dloss_ptr,
dloss_row_stride, logsumexp_ptr, labels_ptr, VOCAB_SIZE: tl.constexpr,
BLOCK_SIZE: tl.constexpr, DO_LOGIT_SCALING: tl.constexpr, LOGIT_SCALE:
tl.constexpr):
"""
    CE_i = -log(P_label) = logsumexp(x) - x_label
    From https://en.wikipedia.org/wiki/LogSumExp:
        d/dx_j logsumexp(x) = exp(x_j) / sum(exp(x)) = softmax(x)_j
                            = exp(x_j - logsumexp)
    Hence, with a one-hot target at index `label`:
        dCE/dx_j = exp(x_j - logsumexp) - 1   if j == label
        dCE/dx_j = exp(x_j - logsumexp)       otherwise
    Rows whose label equals the ignore index (-100) receive a zero upstream
    gradient and therefore a zero logit gradient.
    """
row_idx = tl.program_id(0)
block_idx = tl.program_id(1)
logits_ptr += row_idx * logits_row_stride.to(tl.int64)
dloss_ptr += row_idx * dloss_row_stride
col_offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = col_offsets < VOCAB_SIZE
label_idx = tl.load(labels_ptr + row_idx).to(tl.int32)
if label_idx != -100:
dloss = tl.load(dloss_ptr)
else:
dloss = 0.0
x = tl.load(logits_ptr + col_offsets, mask=mask, other=-float('inf')).to(tl
.float32)
    if DO_LOGIT_SCALING:
        x = LOGIT_SCALE * x
logsumexp = tl.load(logsumexp_ptr + row_idx)
y = tl.exp(x - logsumexp)
y = tl.where(col_offsets == label_idx, y - 1.0, y)
    if DO_LOGIT_SCALING:
        y = LOGIT_SCALE * y
tl.store(logits_ptr + col_offsets, dloss * y, mask=mask)
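# A minimal launch sketch (an assumption, not part of the source file): the
# gradient is written back over the logits buffer in place, one program per
# (row, vocab-block) pair; BLOCK_SIZE below is just an example value.
#
#   grid = (n_rows, triton.cdiv(VOCAB_SIZE, BLOCK_SIZE))
#   _cross_entropy_backward[grid](logits, logits.stride(0), dloss,
#       dloss.stride(0), logsumexp, labels, VOCAB_SIZE=VOCAB_SIZE,
#       BLOCK_SIZE=4096, DO_LOGIT_SCALING=False, LOGIT_SCALE=1.0)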
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/tdrussell/qlora-pipe/blob/6fb7c8eeae52a0e36c41f00628985f29d8330684/kernels/cross_entropy_loss.py |
0269e925-e1dc-4528-851c-455458868afd | sb_varlen_fwd.py | shawntan/stickbreaking-attention | stickbreaking_attention/sb_varlen/sb_varlen_fwd.py | 8dd32ad5e58f0ee0232fd4782dc53d354ff8d283 | 0 | @triton.jit
def load_kv(K_blk_ptrs, V_blk_ptrs, N_mask, NO_N_MASK, D_mask, NO_D_MASK:
tl.constexpr):
if NO_D_MASK:
if NO_N_MASK:
k = tl.load(K_blk_ptrs)
v = tl.load(V_blk_ptrs)
else:
k = tl.load(K_blk_ptrs, mask=N_mask[:, None])
v = tl.load(V_blk_ptrs, mask=N_mask[:, None])
else:
mask = N_mask[:, None] & D_mask[None, :]
k = tl.load(K_blk_ptrs, mask=mask)
v = tl.load(V_blk_ptrs, mask=mask)
return k, v
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py |
d237c845-8362-47df-b7e1-eaff14494c4f | mhmoe.py | dtadpole/triton-playground | mhmoe.py | 2d317976722d63080133b1bf88b1f0cdec98f831 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E':
32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_B': 64,
'BLOCK_SIZE_E': 32}, num_stages=2, num_warps=4), triton.Config({
'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 64}, num_stages=2, num_warps=4),
triton.Config({'BLOCK_SIZE_B': 64, 'BLOCK_SIZE_E': 64}, num_stages=2,
num_warps=4)], key=['H', 'B', 'D', 'E'])
@triton.jit
def mlp_wide_kernel_bwd2(x_ptr, w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr,
dw2_ptr, do_ptr, H, B, D: tl.constexpr, E, stride_xb, stride_xd,
stride_w1d, stride_w1e, stride_w2e, stride_w2d, stride_ob, stride_od,
stride_dxb, stride_dxd, stride_dw1d, stride_dw1e, stride_dw2e,
stride_dw2d, stride_dob, stride_dod, BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr):
"""Kernel for computing the mlp
Z = X @ W1, H = f(Z), O = H @ W2
- X has shape (B, D)
- W1 has shape (D, E)
- W2 has shape (E, D)
- O has shape (B, D)
- dX has shape (B, D)
- dW1 has shape (D, E)
- dW2 has shape (E, D)
- dO has shape (B, D)
"""
pid = tl.program_id(axis=0)
pid_x_w = 0
batch_groups_e = tl.cdiv(E, BLOCK_SIZE_E)
batch_groups_b = tl.cdiv(B, BLOCK_SIZE_B)
idx = pid % (batch_groups_e + batch_groups_b)
pid_h = pid // (batch_groups_e + batch_groups_b)
TARGET_TYPE = x_ptr.type.element_ty
offs_b = tl.arange(0, BLOCK_SIZE_B)
offs_d = tl.arange(0, D)
offs_e = tl.arange(0, BLOCK_SIZE_E)
if idx >= batch_groups_e:
pid_b = idx - batch_groups_e
dx_ptrs = dx_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:,
None]) * stride_dxb + offs_d[None, :] * stride_dxd)
dx_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[
None, :] < D)
dx = tl.zeros((BLOCK_SIZE_B, D), dtype=tl.float32)
dx = _mlp_wide_kernel_bwd_dx(dx, pid_h, pid_b, x_ptr, w1_ptr,
w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D, E,
stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e,
stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd,
stride_dw1d, stride_dw1e, stride_dw2e, stride_dw2d, stride_dob,
stride_dod, BLOCK_SIZE_B, BLOCK_SIZE_E, ACTIVATION)
tl.store(dx_ptrs, dx.to(TARGET_TYPE), mask=dx_mask)
else:
pid_e = idx
dw1_ptrs = dw1_ptr + ((pid_h * D + offs_d[:, None]) * stride_dw1d +
(pid_e * BLOCK_SIZE_E + offs_e[None, :]) * stride_dw1e)
dw1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - pid_e *
BLOCK_SIZE_E)
dw2_ptrs = dw2_ptr + ((pid_h * E + pid_e * BLOCK_SIZE_E + offs_e[:,
None]) * stride_dw2e + offs_d[None, :] * stride_dw2d)
dw2_mask = (offs_e[:, None] < E - pid_e * BLOCK_SIZE_E) & (offs_d[
None, :] < D)
dw1 = tl.zeros((D, BLOCK_SIZE_E), dtype=tl.float32)
dw2 = tl.zeros((BLOCK_SIZE_E, D), dtype=tl.float32)
dw1, dw2 = _mlp_wide_kernel_bwd_dw1w2(dw1, dw2, pid_h, pid_e, x_ptr,
w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B,
D, E, stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e,
stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd,
stride_dw1d, stride_dw1e, stride_dw2e, stride_dw2d, stride_dob,
stride_dod, BLOCK_SIZE_B, BLOCK_SIZE_E, ACTIVATION)
tl.store(dw1_ptrs, dw1.to(TARGET_TYPE), mask=dw1_mask)
tl.store(dw2_ptrs, dw2.to(TARGET_TYPE), mask=dw2_mask)
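# Scheduling note: one flat grid covers all heads; within a head the first
# cdiv(E, BLOCK_SIZE_E) program indices compute (dW1, dW2) column blocks and
# the remaining cdiv(B, BLOCK_SIZE_B) indices compute dX row blocks. The heavy
# lifting lives in _mlp_wide_kernel_bwd_dx and _mlp_wide_kernel_bwd_dw1w2,
# helpers defined alongside this kernel but not shown in this excerpt.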
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py |
a20e0fd8-9354-44cf-b64c-1476d6ce796a | fused_norm_gate.py | sustcsonglin/flash-linear-attention | fla/modules/fused_norm_gate.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'RECOMPUTE_OUTPUT': lambda args: args['Y'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['N', 'HAS_DRESIDUAL', 'STORE_DRESIDUAL',
'IS_RMS_NORM', 'HAS_BIAS'])
@triton.jit
def layer_norm_bwd_kernel(X, O, W, B, Y, DY, DX, DO, DW, DB, DRESIDUAL,
DRESIDUAL_IN, Mean, Rstd, stride_x_row, stride_y_row, stride_dy_row,
stride_dx_row, stride_dres_row, stride_dres_in_row, M, N, eps,
rows_per_program, IS_RMS_NORM: tl.constexpr, BLOCK_N: tl.constexpr,
HAS_DRESIDUAL: tl.constexpr, STORE_DRESIDUAL: tl.constexpr, HAS_WEIGHT:
tl.constexpr, HAS_BIAS: tl.constexpr, RECOMPUTE_OUTPUT: tl.constexpr):
row_block_id = tl.program_id(0)
row_start = row_block_id * rows_per_program
cols = tl.arange(0, BLOCK_N)
mask = cols < N
X += row_start * stride_x_row
O += row_start * stride_x_row
if HAS_DRESIDUAL:
DRESIDUAL += row_start * stride_dres_row
if STORE_DRESIDUAL:
DRESIDUAL_IN += row_start * stride_dres_in_row
DY += row_start * stride_dy_row
DX += row_start * stride_dx_row
DO += row_start * stride_dx_row
if RECOMPUTE_OUTPUT:
Y += row_start * stride_y_row
if HAS_WEIGHT:
w = tl.load(W + cols, mask=mask).to(tl.float32)
dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
if RECOMPUTE_OUTPUT and HAS_BIAS:
b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
if HAS_BIAS:
db = tl.zeros((BLOCK_N,), dtype=tl.float32)
row_end = min((row_block_id + 1) * rows_per_program, M)
for row in range(row_start, row_end):
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
o = tl.load(O + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
if not IS_RMS_NORM:
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
xhat = tl.where(mask, xhat, 0.0)
y = xhat * w if HAS_WEIGHT else xhat
if HAS_BIAS:
y = y + b
if RECOMPUTE_OUTPUT:
tl.store(Y + cols, y, mask=mask)
sigmoid_o = tl.sigmoid(o)
do = dy * y * (sigmoid_o + o * sigmoid_o * (1 - sigmoid_o))
dy = dy * o * sigmoid_o
wdy = dy
if HAS_WEIGHT:
wdy = dy * w
dw += dy * xhat
if HAS_BIAS:
db += dy
if not IS_RMS_NORM:
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
else:
c1 = tl.sum(xhat * wdy, axis=0) / N
dx = (wdy - xhat * c1) * rstd
if HAS_DRESIDUAL:
dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
dx += dres
if STORE_DRESIDUAL:
tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
tl.store(DX + cols, dx, mask=mask)
tl.store(DO + cols, do, mask=mask)
X += stride_x_row
O += stride_x_row
if HAS_DRESIDUAL:
DRESIDUAL += stride_dres_row
if STORE_DRESIDUAL:
DRESIDUAL_IN += stride_dres_in_row
if RECOMPUTE_OUTPUT:
Y += stride_y_row
DY += stride_dy_row
DX += stride_dx_row
DO += stride_dx_row
if HAS_WEIGHT:
tl.store(DW + row_block_id * N + cols, dw, mask=mask)
if HAS_BIAS:
tl.store(DB + row_block_id * N + cols, db, mask=mask)
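# Gating-derivative note: the forward applies y * swish(o) with
# swish(o) = o * sigmoid(o), so by the product and chain rules
#   d(swish)/do = sigmoid(o) + o * sigmoid(o) * (1 - sigmoid(o)),
# which is exactly the factor used for `do` above, while `dy` is rescaled by
# swish(o) before flowing into the layer-norm backward.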
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_norm_gate.py |
d5f138ed-d6b1-4147-92bd-4d9d2bf1a5a2 | parallel.py | sustcsonglin/flash-linear-attention | fla/ops/simple_gla/parallel.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'NV': lambda args: triton.cdiv(args['V'], args['BV']),
'OUTPUT_ATTENTIONS': lambda args: args['attn'] is not None})
@triton.jit
def parallel_simple_gla_fwd_kernel(q, k, v, g, o, attn, s_k_h, s_k_t, s_v_h,
s_v_t, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr, BK: tl.
constexpr, BV: tl.constexpr, NV: tl.constexpr, OUTPUT_ATTENTIONS: tl.
constexpr):
i_kv, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_k, i_v = i_kv // NV, i_kv % NV
p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + (i_k * B * H + i_bh) * T * T, (T, T),
(T, 1), (i_t * BT, 0), (BT, BS), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_q.dtype)
b_o = tl.zeros([BT, BV], dtype=tl.float32)
for i_s in range(0, i_t * BT, BS):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (1, s_k_t), (i_k *
BK, i_s), (BK, BS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, 1), (i_s,
i_v * BV), (BS, BV), (1, 0))
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_g = tl.load(p_g, boundary_check=(0,))
b_gn = tl.load(g + i_bh * T + min(i_s + BS, T) - 1)
b_gp = tl.load(g + i_bh * T + i_s - 1) if i_s % BT > 0 else 0.0
b_kg = (b_k * tl.exp(b_gn - b_g)).to(b_k.dtype)
b_s = tl.dot(b_q, b_kg, allow_tf32=False)
if i_s > 0:
b_o = b_o * tl.exp(b_gn - b_gp)
b_o += tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)
if OUTPUT_ATTENTIONS:
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
p_a = tl.advance(p_a, (0, BS))
tl.debug_barrier()
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,))
b_gq = tl.load(p_g, boundary_check=(0,))
b_o *= tl.exp(b_gq)[:, None]
if OUTPUT_ATTENTIONS:
p_a = tl.make_block_ptr(attn + (i_k * B * H + i_bh) * T * T, (T, T),
(T, 1), (i_t * BT, i_t * BT), (BT, BS), (1, 0))
o_q = i_t * BT + tl.arange(0, BT)
o_k = i_t * BT + tl.arange(0, BS)
for i_s in range(i_t * BT, min((i_t + 1) * BT, T), BS):
p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (1, s_k_t), (i_k *
BK, i_s), (BK, BS), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, 1), (i_s,
i_v * BV), (BS, BV), (1, 0))
p_gk = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_s,), (BS,), (0,))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_gk = tl.load(p_gk, boundary_check=(0,))
m_s = o_q[:, None] >= o_k[None, :]
b_s = tl.where(m_s, tl.dot(b_q, b_k, allow_tf32=False) * tl.exp(
b_gq[:, None] - b_gk[None, :]), 0)
b_o += tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
if OUTPUT_ATTENTIONS:
tl.store(p_a, b_s.to(p_a.dtype.element_ty), boundary_check=(0, 1))
p_a = tl.advance(p_a, (0, BS))
o_k += BS
p_o = tl.make_block_ptr(o + (i_bh + B * H * i_k) * s_v_h, (T, V), (
s_v_t, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
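# Numerical note: the gates g are kept in log space, so cross-block
# contributions are rescaled by exp(g_n - g_prev) factors and the final output
# is multiplied by exp(g_q); allow_tf32=False keeps the dot products in full
# fp32 precision.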
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/parallel.py |
24e6a887-0de6-4d46-817d-187228e035a0 | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gsa/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.jit
def chunk_gsa_fwd_k_kernel_intra(v, g, o, A, offsets, indices, T: tl.
constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.constexpr, BT: tl.
constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl.constexpr, NG: tl
.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_bg = i_bh // NG
i_b, i_hq = i_bh // HQ, i_bh % HQ
i_h = i_hq // NG
i_t, i_i = i_c // NC, i_c % NC
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
o_v = i_v * BV + tl.arange(0, BV)
m_v = o_v < V
if i_t * BT + i_i * BC > T:
return
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + min(i_t *
BT + i_i * BC, T) * V + o_v, BV), BV)
else:
p_g = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + min(i_t * BT +
i_i * BC, T)) * H * V + i_h * V + o_v, BV), BV)
b_gn = tl.load(p_gn, mask=m_v, other=0)
b_o = tl.zeros([BC, BV], dtype=tl.float32)
for i_j in range(0, i_i):
if HEAD_FIRST:
p_A = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (
i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
p_v = tl.make_block_ptr(v + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_gv = tl.make_block_ptr(g + i_bg * T * V, (T, V), (V, 1), (i_t *
BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_A = tl.make_block_ptr(A + (bos * HQ + i_hq) * BT, (T, BT), (
HQ * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
p_gv = tl.make_block_ptr(g + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT + i_j * BC, i_v * BV), (BC, BV), (1, 0))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_vg = (b_v * tl.exp(b_gn[None, :] - b_gv)).to(b_v.dtype)
b_A = tl.load(p_A, boundary_check=(0, 1))
b_o += tl.dot(b_A, b_vg)
b_g = tl.load(p_g, boundary_check=(0, 1))
b_o *= tl.exp(b_g - b_gn[None, :])
o_i = tl.arange(0, BC)
if HEAD_FIRST:
o_A = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC)
) * BT + i_i * BC
else:
o_A = (bos + i_t * BT + i_i * BC + tl.arange(0, BC)
) * HQ * BT + i_hq * BT + i_i * BC
m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T
for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
if HEAD_FIRST:
p_v = tl.max_contiguous(tl.multiple_of(v + i_bg * T * V + (i_t *
BT + i_i * BC + j) * V + o_v, BV), BV)
p_gv = tl.max_contiguous(tl.multiple_of(g + i_bg * T * V + (i_t *
BT + i_i * BC + j) * V + o_v, BV), BV)
else:
p_v = tl.max_contiguous(tl.multiple_of(v + (bos + i_t * BT +
i_i * BC + j) * H * V + i_h * V + o_v, BV), BV)
p_gv = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT +
i_i * BC + j) * H * V + i_h * V + o_v, BV), BV)
b_A = tl.load(A + o_A + j, mask=m_A, other=0)
b_v = tl.load(p_v, mask=m_v, other=0).to(tl.float32)
b_gv = tl.load(p_gv, mask=m_v, other=0).to(tl.float32)
b_vg = b_v[None, :] * tl.exp(b_g - b_gv[None, :])
b_o += tl.where(o_i[:, None] >= j, b_A[:, None] * b_vg, 0.0)
if HEAD_FIRST:
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT +
i_i * BC, i_v * BV), (BC, BV), (1, 0))
else:
p_o = tl.make_block_ptr(o + (bos * HQ + i_hq) * V, (T, V), (HQ * V,
1), (i_t * BT + i_i * BC, i_v * BV), (BC, BV), (1, 0))
b_o += tl.load(p_o, boundary_check=(0, 1))
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py |
7a21ee06-6228-4111-8cca-8a85dc7cb97b | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def jagged2_to_padded_dense_kernel(x_ptr, lengths_ptr, offsets_ptr,
output_dense_ptr, stride_b, stride_m, stride_n, max_length, BLOCK_M: tl
.constexpr, BLOCK_N: tl.constexpr):
pid_batch = tl.program_id(2)
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
begin = tl.load(offsets_ptr + pid_batch)
seqlen = tl.load(lengths_ptr + pid_batch)
seqlen = tl.minimum(seqlen, max_length)
if seqlen == 0:
return
offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
x_ptrs = x_ptr + begin + offs_m[:, None] * seqlen + offs_n[None, :]
x = tl.load(x_ptrs, mask=(offs_m[:, None] < seqlen) & (offs_n[None, :] <
seqlen))
out_ptrs = output_dense_ptr + pid_batch * stride_b + offs_m[:, None
] * stride_m + offs_n[None, :] * stride_n
tl.store(out_ptrs, x, mask=(offs_m[:, None] < seqlen) & (offs_n[None, :
] < seqlen))
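# A minimal launch sketch (an assumption): axes 0 and 1 tile the
# (seqlen, seqlen) square of one batch item and axis 2 walks the batch;
# positions past each item's clamped length are simply never written.
#
#   grid = (triton.cdiv(max_length, BLOCK_M),
#           triton.cdiv(max_length, BLOCK_N), B)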
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [
"Strided Access",
"Blocked Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
1bcc5bf5-0201-4106-9714-19fc02c62ac9 | p_loss_kernels.py | BobMcDear/attorch | attorch/p_loss_kernels.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.autotune(configs=element_wise_kernel_configs(), key=['size'])
@triton.jit
def p_loss_backward_kernel(output_grad_pointer, input_pointer,
target_pointer, input_grad_pointer, target_grad_pointer, size, p_loss:
tl.constexpr, reduction: tl.constexpr, BLOCK_SIZE: tl.constexpr):
"""
    Calculates the input and target gradients of the mean absolute error or
    mean squared error.
Args:
output_grad_pointer: Pointer to the error's output gradients.
The output gradients must be a scalar or of shape [size].
input_pointer: Pointer to the input.
The input must be of shape [size].
target_pointer: Pointer to the target.
The target must be of shape [size].
input_grad_pointer: Pointer to a container the input's gradients are written to.
The container must be of shape [size].
target_grad_pointer: Pointer to a container the target's gradients are written to.
The container must be of shape [size].
size: Number of elements in the input and target.
p_loss: p-norm used to compute the error whose gradient is calculated.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output whose gradient is calculated.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
BLOCK_SIZE: Block size.
"""
pid = tl.program_id(axis=0)
offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offset < size
output_grad_mask = None
if reduction == 'none':
output_grad_pointer += offset
output_grad_mask = mask
input = tl.load(input_pointer + offset, mask=mask).to(tl.float32)
target = tl.load(target_pointer + offset, mask=mask).to(tl.float32)
output_grad = tl.load(output_grad_pointer, mask=output_grad_mask).to(tl
.float32)
if p_loss == 1:
input_grad = tl.where(target <= input, 1, -1)
elif p_loss == 2:
input_grad = 2 * (input - target)
if reduction == 'mean':
input_grad /= size
input_grad *= output_grad
tl.store(input_grad_pointer + offset, input_grad, mask=mask)
tl.store(target_grad_pointer + offset, -input_grad, mask=mask)
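# Derivative note: for p_loss == 1 the kernel uses the subgradient
# sign(input - target), taking +1 at equality; for p_loss == 2 it uses
# 2 * (input - target). The target gradient is the exact negation, which is
# why only `-input_grad` is stored for it.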
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/p_loss_kernels.py |
1e4b6bf4-c537-4599-9578-cf786aca1b31 | single.py | shawntan/scattermoe | scattermoe/kernels/single.py | 63b76a2f5f28c052fb4cd7c34479a54158354052 | 0 | @triton.jit
def _single2scatter(X_ptr, stride_xm, stride_xk, W_ptr, stride_we,
stride_wk, stride_wn, Y_ptr, stride_ym, stride_yn, expert_idxs_ptr,
FAN_OUT: tl.constexpr, K: tl.constexpr, N: tl.constexpr, E: tl.
constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ACC_TYPE: tl.
constexpr):
pid0 = tl.program_id(axis=0)
pid1 = tl.program_id(axis=1)
N_block_id = pid0
if FAN_OUT == 1:
in_idx = pid1
else:
in_idx = 0
out_idx = pid1
K_block = tl.arange(0, BLOCK_K)
N_block = tl.max_contiguous(tl.multiple_of((N_block_id * BLOCK_N + tl.
arange(0, BLOCK_N)) % N, BLOCK_N), BLOCK_N)
E_idx = tl.load(expert_idxs_ptr + pid1)
X_blk_ptrs = X_ptr + in_idx * stride_xm + K_block[:, None] * stride_xk
W_blk_ptrs = W_ptr + E_idx * stride_we + K_block[:, None
] * stride_wk + N_block[None, :] * stride_wn
acc = tl.zeros((1, BLOCK_N), dtype=ACC_TYPE)
for K_block_id in range(0, tl.cdiv(K, BLOCK_K)):
x = tl.load(X_blk_ptrs)
w = tl.load(W_blk_ptrs)
acc += tl.sum(x * w, axis=0)[None, :]
X_blk_ptrs += BLOCK_K * stride_xk
W_blk_ptrs += BLOCK_K * stride_wk
Y_blk_ptrs = Y_ptr + out_idx * stride_ym + N_block[None, :] * stride_yn
tl.store(Y_blk_ptrs, acc)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/single.py |
4a649b03-1ebe-4b88-b47e-1980960298a0 | W4A16_MatMul.py | MDK8888/GPTFast | GPTFast/Kernels/Triton/GPTQ/Matmul/W4A16_MatMul.py | 926b7553cfbaf1ec2a702a4bfb477132ce98c2e1 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_M': 16, 'BLOCK_N': 32,
'BLOCK_K': 64}, num_stages=2, num_warps=2), triton.Config({'BLOCK_M':
16, 'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=3, num_warps=4), triton.
Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=2,
num_warps=2), triton.Config({'BLOCK_M': 16, 'BLOCK_N': 32, 'BLOCK_K':
64}, num_stages=3, num_warps=4)], key=[])
@triton.jit
def int4_matmul_kernel_3d(a_ptr, b_ptr, c_ptr, scales_ptr, zeros_ptr,
stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn,
stride_scales_n, stride_scales_g, stride_zeros_n, stride_zeros_g, M, N,
K, groupsize, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl
.constexpr):
m_id = tl.program_id(0)
n_id = tl.program_id(1)
k_id = tl.program_id(2)
offs_am = m_id * BLOCK_M + tl.arange(0, BLOCK_M)
offs_bn = n_id * BLOCK_N + tl.arange(0, BLOCK_N)
offs_k = k_id * BLOCK_K + tl.arange(0, BLOCK_K)
mask_m = offs_am < M
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_ptrs = b_ptr + offs_k[:, None] // 8 * stride_bk + offs_bn[None, :
] * stride_bn
scales_ptrs = scales_ptr + offs_bn[None, :] * stride_scales_n + offs_k[
:, None] // groupsize * stride_scales_g
zeros_ptrs = zeros_ptr + offs_bn[None, :] * stride_zeros_n + offs_k[:, None
] // groupsize * stride_zeros_g
a = tl.load(a_ptrs, mask=mask_m[:, None], other=0.0)
b = tl.load(b_ptrs)
scales = tl.load(scales_ptrs)
zeros = tl.load(zeros_ptrs)
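    # note: 'mask' below is a shift amount rather than a load mask; each int32 of b
    # packs eight 4-bit weights, and row k extracts bits 4*(k % 8) .. 4*(k % 8)+3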
mask = tl.arange(0, BLOCK_K)[:, None] % 8
b = b >> mask * 4 & 15
b = (b - 8) * scales + zeros
acc = tl.dot(a, b.to(tl.float16))
c_ptrs = c_ptr + (offs_am[:, None] * stride_cm + offs_bn[None, :] *
stride_cn)
tl.atomic_add(c_ptrs, acc.to(tl.float16))
| {
"Data Type": [
"int8"
],
"Functionality": [
"Matrix Multiplication",
"Quantization"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/MDK8888/GPTFast/blob/926b7553cfbaf1ec2a702a4bfb477132ce98c2e1/GPTFast/Kernels/Triton/GPTQ/Matmul/W4A16_MatMul.py |
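A hedged launch sketch for the autotuned kernel above. The output must be zero-initialized because the kernel accumulates across the K grid axis with tl.atomic_add, and M, N, K should be multiples of the block sizes since most accesses are unmasked. The tensor layouts mirror the stride arithmetic the kernel performs (scales and zeros of shape (N, K // groupsize)); names and sizes are illustrative:

import torch
import triton

M, N, K = 64, 128, 256
groupsize = 64
a = torch.randn(M, K, device='cuda', dtype=torch.float16)
b = torch.randint(0, 2**31 - 1, (K // 8, N), device='cuda', dtype=torch.int32)  # 8 x 4-bit per int32
scales = torch.ones(N, K // groupsize, device='cuda', dtype=torch.float16)
zeros = torch.zeros(N, K // groupsize, device='cuda', dtype=torch.float16)
c = torch.zeros(M, N, device='cuda', dtype=torch.float16)  # zeroed: kernel accumulates via atomic_add

grid = lambda META: (triton.cdiv(M, META['BLOCK_M']),
                     triton.cdiv(N, META['BLOCK_N']),
                     triton.cdiv(K, META['BLOCK_K']))
int4_matmul_kernel_3d[grid](
    a, b, c, scales, zeros,
    a.stride(0), a.stride(1), b.stride(0), b.stride(1),
    c.stride(0), c.stride(1),
    scales.stride(0), scales.stride(1), zeros.stride(0), zeros.stride(1),
    M, N, K, groupsize,
)
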
ecca4ed7-6a0c-4e5e-845f-085135f8040b | ops.py | shawntan/scattermoe | scattermoe/kernels/ops.py | 63b76a2f5f28c052fb4cd7c34479a54158354052 | 0 | @triton.autotune(configs=_config_XtY(), key=['M', 'N', 'K'])
@triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] ==
0, 'NO_N_MASK': lambda args: args['N'] % args['BLOCK_N'] == 0})
@triton.jit
def _groupXtY(DY_ptr, stride_dym, stride_dyk, X_ptr, stride_xm, stride_xn,
DW_ptr, stride_dwe, stride_dwk, stride_dwn, expert_offsets_ptr, M, K:
tl.constexpr, N: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.
constexpr, BLOCK_K: tl.constexpr, ACC_TYPE: tl.constexpr, allow_tf32:
tl.constexpr, NO_K_MASK: tl.constexpr, NO_N_MASK: tl.constexpr):
pid0 = tl.program_id(axis=0)
pid1 = tl.program_id(axis=1)
num0 = tl.num_programs(0)
num1 = tl.num_programs(1)
pid0, pid1 = tl.swizzle2d(pid0, pid1, num0, num1, 4)
K_BLOCK_COUNT = tl.cdiv(K, BLOCK_K)
E_idx = pid0 // K_BLOCK_COUNT
K_block_id = pid0 % K_BLOCK_COUNT
N_block_id = pid1
if E_idx == 0:
start_idx = 0
else:
start_idx = tl.load(expert_offsets_ptr + E_idx - 1).to(tl.int32)
end_idx = tl.load(expert_offsets_ptr + E_idx).to(tl.int32)
if end_idx > start_idx:
M_block = tl.max_contiguous(start_idx + tl.arange(0, BLOCK_M), BLOCK_M)
K_block = K_block_id * BLOCK_K + tl.arange(0, BLOCK_K)
K_mask = K_block < K
K_block = tl.max_contiguous(tl.multiple_of(K_block % K, BLOCK_K),
BLOCK_K)
N_block = N_block_id * BLOCK_N + tl.arange(0, BLOCK_N)
N_mask = N_block < N
N_block = tl.max_contiguous(tl.multiple_of(N_block % N, BLOCK_N),
BLOCK_N)
M_idxs = M_block
xt_blk_ptrs = X_ptr + K_block[:, None] * stride_xn + M_idxs[None, :
] * stride_xm
dy_blk_ptrs = DY_ptr + M_idxs[:, None] * stride_dym + N_block[None, :
] * stride_dyk
acc = tl.zeros((BLOCK_K, BLOCK_N), dtype=ACC_TYPE)
iters = tl.cdiv(end_idx - start_idx, BLOCK_M)
for i in range(0, iters):
M_mask = i * BLOCK_M + M_block < end_idx
if NO_K_MASK:
xt = tl.load(xt_blk_ptrs, mask=M_mask[None, :])
else:
xt = tl.load(xt_blk_ptrs, mask=K_mask[:, None] & M_mask[
None, :])
if NO_N_MASK:
dy = tl.load(dy_blk_ptrs, mask=M_mask[:, None])
else:
dy = tl.load(dy_blk_ptrs, mask=M_mask[:, None] & N_mask[
None, :])
xt_blk_ptrs += BLOCK_M * stride_xm
dy_blk_ptrs += BLOCK_M * stride_dym
acc += tl.dot(xt, dy, out_dtype=ACC_TYPE, allow_tf32=allow_tf32)
DW_blk_ptrs = DW_ptr + E_idx * stride_dwe + K_block[:, None
] * stride_dwk + N_block[None, :] * stride_dwn
acc = acc.to(DW_blk_ptrs.dtype.element_ty)
tl.store(DW_blk_ptrs, acc, mask=K_mask[:, None] & N_mask[None, :])
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py |
87fdafb5-a70c-4967-b40f-bb70db039d6c | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def apply_dropout(x, offsets, p, seed, mask_val=float('-inf')):
rand = tl.rand(seed, offsets)
scale = 1 / (1 - p)
return tl.where(rand > p, x * scale, mask_val)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
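The 1 / (1 - p) factor in apply_dropout is the usual inverted-dropout scale: surviving elements are scaled up so the expected activation is unchanged. A one-line check in plain Python:

p = 0.1
scale = 1 / (1 - p)
# E[kept * scale * x] = (1 - p) * scale * x = x
assert abs((1 - p) * scale - 1.0) < 1e-12
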
0f85b722-d956-44c9-8c49-e98decba5b86 | y_3.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_3.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def third_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor,
sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.
constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr,
output_stride: tl.constexpr):
block_id = tl.program_id(0)
coord_stride = 3
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset <
output_numel)
g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask=
output_row_offset + 1 < output_numel)
g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask=
output_row_offset + 2 < output_numel)
g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask=
output_row_offset + 3 < output_numel)
g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask=
output_row_offset + 4 < output_numel)
g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask=
output_row_offset + 5 < output_numel)
g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask=
output_row_offset + 6 < output_numel)
CONST002 = 6.48074069840786
CONST005 = 12.9614813968157
CONST007 = -3.96862696659689
CONST008 = -12.5499003980111
CONST009 = -10.2469507659596
CONST010 = -7.93725393319377
CONST011 = -6.27495019900557
CONST012 = -5.1234753829798
CONST013 = -4.8605555238059
CONST014 = -3.24037034920393
CONST015 = -1.62018517460197
VAR08 = x * x
VAR17 = y * y
VAR26 = z * z
g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask=
coord_row_offset + 1 < coord_numel)
g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask=
coord_row_offset + 2 < coord_numel)
g_x += (CONST008 * g_6 * x * z - CONST009 * g_1 * y * z + CONST009 *
g_5 * x * y + CONST010 * g_3 * x * y + CONST014 * g_4 * x * z + g_0 *
(CONST011 * VAR08 - CONST011 * VAR26) + g_2 * (CONST002 * VAR17 +
CONST013 * VAR08 + CONST015 * VAR26))
g_y += (CONST005 * g_2 * x * y + CONST005 * g_4 * y * z - CONST009 *
g_1 * x * z + g_3 * (CONST007 * VAR08 + CONST007 * VAR26 - CONST010 *
VAR17) + g_5 * (CONST012 * VAR08 - CONST012 * VAR26))
g_z += (-CONST008 * g_0 * x * z - CONST009 * g_1 * x * y - CONST009 *
g_5 * y * z + CONST010 * g_3 * y * z + CONST014 * g_2 * x * z + g_4 *
(CONST002 * VAR17 + CONST013 * VAR26 + CONST015 * VAR08) + g_6 * (
CONST011 * VAR08 - CONST011 * VAR26))
tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset <
coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask=
coord_row_offset + 1 < coord_numel)
tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask=
coord_row_offset + 2 < coord_numel)
| {
"Data Type": [],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_3.py |
08bb518c-fbda-4604-9322-dfac40c7b421 | apply_token_bitmask_inplace_triton.py | mlc-ai/xgrammar | python/xgrammar/kernels/apply_token_bitmask_inplace_triton.py | 49655f4e5992a0c00183c9bd43d78b49c4e668ab | 0 | @triton.jit
def apply_token_bitmask_inplace_kernel(logits_ptr, bitmask_ptr, indices_ptr,
num_rows, vocab_size, bitmask_size, NUM_SMS: tl.constexpr, BLOCK_SIZE:
tl.constexpr):
pid = tl.program_id(0)
num_blocks = tl.cdiv(vocab_size, BLOCK_SIZE)
for work_id in tl.range(pid, num_rows * num_blocks, NUM_SMS):
block_offset = work_id % num_blocks * BLOCK_SIZE
row_id = work_id // num_blocks
batch_id = tl.load(indices_ptr + row_id)
offsets = block_offset + tl.arange(0, BLOCK_SIZE)
bitmask_offsets = block_offset // 32 + tl.arange(0, BLOCK_SIZE // 32)
vocab_mask = offsets < vocab_size
packed_bitmask_mask = bitmask_offsets < bitmask_size
packed_bitmask = tl.load(bitmask_ptr + batch_id * bitmask_size +
bitmask_offsets, packed_bitmask_mask)
bitmask = packed_bitmask[:, None] >> tl.arange(0, 32)[None, :] & 1 == 0
bitmask = bitmask.reshape(BLOCK_SIZE)
tl.store(logits_ptr + batch_id * vocab_size + offsets, -float('inf'
), vocab_mask & bitmask)
| {
"Data Type": [],
"Functionality": [
"Softmax",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"Apache"
] | https://github.com/mlc-ai/xgrammar/blob/49655f4e5992a0c00183c9bd43d78b49c4e668ab/python/xgrammar/kernels/apply_token_bitmask_inplace_triton.py |
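A hedged host-side sketch for the bitmask kernel above (PyTorch CUDA tensors; an all-ones bitmask leaves every logit untouched since tokens are masked where their bit is 0, and BLOCK_SIZE must be a multiple of 32 because 32 tokens are packed per int32; names and sizes are illustrative):

import torch
import triton

batch, vocab_size = 4, 32000
bitmask_size = triton.cdiv(vocab_size, 32)   # 32 tokens packed per int32
logits = torch.randn(batch, vocab_size, device='cuda')
bitmask = torch.full((batch, bitmask_size), -1, dtype=torch.int32, device='cuda')  # all bits set
indices = torch.arange(batch, dtype=torch.int32, device='cuda')

NUM_SMS = torch.cuda.get_device_properties(0).multi_processor_count
apply_token_bitmask_inplace_kernel[(NUM_SMS,)](
    logits, bitmask, indices,
    batch, vocab_size, bitmask_size,
    NUM_SMS=NUM_SMS, BLOCK_SIZE=4096,
)
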
e7510983-2f6d-4849-8c80-611f47f2a9cc | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gla/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps) for BK in [32, 64] for BV in [64, 128] for num_warps in [2,
4, 8]], key=['BT'])
@triton.jit
def chunk_gla_bwd_kernel_inter(q, k, v, h, g, do, dh, dq, dk, dq2, dk2, dg,
offsets, indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.
constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.
constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_k = i_k * BK + tl.arange(0, BK)
m_k = o_k < K
if HEAD_FIRST:
p_gk = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + i_bh * T * K + (min(T,
i_t * BT + BT) - 1) * K + o_k, BK), BK)
else:
p_gk = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1
), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + min(T, i_t * BT +
BT) - 1) * H * K + i_h * K + o_k, BK), BK)
b_gn = tl.load(p_gn, mask=m_k, other=0)
b_dq = tl.zeros([BT, BK], dtype=tl.float32)
b_dk = tl.zeros([BT, BK], dtype=tl.float32)
b_dgk = tl.zeros([BK], dtype=tl.float32)
for i_v in range(tl.cdiv(V, BV)):
if HEAD_FIRST:
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (
i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + i_bh * NT * K * V + i_t * K * V, (V,
K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + i_bh * NT * K * V + i_t * K * V,
(V, K), (1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
else:
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H *
V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (V, K), (
1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (V, K),
(1, V), (i_v * BV, i_k * BK), (BV, BK), (0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_do = tl.load(p_do, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_dh = tl.load(p_dh, boundary_check=(0, 1))
b_dgk += tl.sum(b_h * b_dh, axis=0)
b_dq += tl.dot(b_do, b_h.to(b_do.dtype))
b_dk += tl.dot(b_v, b_dh.to(b_v.dtype))
b_dgk *= tl.exp(b_gn)
b_dq *= scale
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_dq = b_dq * tl.exp(b_gk)
b_dk = b_dk * tl.exp(b_gn[None, :] - b_gk)
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT,
i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1),
(i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_dgk += tl.sum(b_dk * b_k, axis=0)
b_dq += tl.load(p_dq, boundary_check=(0, 1))
b_dk += tl.load(p_dk, boundary_check=(0, 1))
b_dg = b_q * b_dq - b_k * b_dk
b_dg = b_dg - tl.cumsum(b_dg, axis=0) + tl.sum(b_dg, axis=0)[None, :
] + b_dgk[None, :]
if HEAD_FIRST:
p_dq = tl.make_block_ptr(dq2 + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk2 + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
else:
p_dq = tl.make_block_ptr(dq2 + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dk = tl.make_block_ptr(dk2 + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_dg = tl.make_block_ptr(dg + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py |
735840ce-064d-4ebd-af98-c2aba99b4e2a | copy.py | chengzeyi/stable-fast | src/sfast/triton/ops/copy.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @eval(
"""triton.heuristics({
'BLOCK_M': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_0'])),
'BLOCK_N': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_1'])),
'BLOCK_K': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_2'])),
'BATCH_STRIDE_INP_IS_1': lambda kwargs: kwargs['batch_stride_inp'] == 1,
'STRIDE_INP_0_IS_1': lambda kwargs: kwargs['stride_inp_0'] == 1,
'STRIDE_INP_1_IS_1': lambda kwargs: kwargs['stride_inp_1'] == 1,
'STRIDE_INP_2_IS_1': lambda kwargs: kwargs['stride_inp_2'] == 1,
'BATCH_STRIDE_OUT_IS_1': lambda kwargs: kwargs['batch_stride_out'] == 1,
'STRIDE_OUT_0_IS_1': lambda kwargs: kwargs['stride_out_0'] == 1,
'STRIDE_OUT_1_IS_1': lambda kwargs: kwargs['stride_out_1'] == 1,
'STRIDE_OUT_2_IS_1': lambda kwargs: kwargs['stride_out_2'] == 1,
})"""
)
@eval(
"""triton.heuristics({
'num_warps': lambda kwargs: max(1, min(16, kwargs['BLOCK_M'] * kwargs['BLOCK_N'] * kwargs['BLOCK_K'] // 32)),
})"""
)
@triton.jit
def copy_4d_kernel(output_ptr, input_ptr, bs, size_inp_0, size_inp_1,
size_inp_2, batch_stride_inp, stride_inp_0, stride_inp_1, stride_inp_2,
batch_stride_out, stride_out_0, stride_out_1, stride_out_2,
BATCH_STRIDE_INP_IS_1: tl.constexpr, STRIDE_INP_0_IS_1: tl.constexpr,
STRIDE_INP_1_IS_1: tl.constexpr, STRIDE_INP_2_IS_1: tl.constexpr,
BATCH_STRIDE_OUT_IS_1: tl.constexpr, STRIDE_OUT_0_IS_1: tl.constexpr,
STRIDE_OUT_1_IS_1: tl.constexpr, STRIDE_OUT_2_IS_1: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr):
pid = tl.program_id(0)
pid_batch = tl.program_id(1)
grid_m = tl.cdiv(size_inp_0, BLOCK_M)
grid_n = tl.cdiv(size_inp_1, BLOCK_N)
grid_k = tl.cdiv(size_inp_2, BLOCK_K)
pid_m = pid // (grid_n * grid_k)
pid_nk = pid - pid_m * (grid_n * grid_k)
pid_n = pid_nk // grid_k
pid_k = pid_nk - pid_n * grid_k
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
rk = pid_k * BLOCK_K + tl.arange(0, BLOCK_K)
A = input_ptr + (1 if BATCH_STRIDE_INP_IS_1 else batch_stride_inp
) * pid_batch + (rm[:, None, None] * (1 if STRIDE_INP_0_IS_1 else
stride_inp_0) + rn[None, :, None] * (1 if STRIDE_INP_1_IS_1 else
stride_inp_1) + rk[None, None, :] * (1 if STRIDE_INP_2_IS_1 else
stride_inp_2))
B = output_ptr + (1 if BATCH_STRIDE_OUT_IS_1 else batch_stride_out
) * pid_batch + (rm[:, None, None] * (1 if STRIDE_OUT_0_IS_1 else
stride_out_0) + rn[None, :, None] * (1 if STRIDE_OUT_1_IS_1 else
stride_out_1) + rk[None, None, :] * (1 if STRIDE_OUT_2_IS_1 else
stride_out_2))
mask = (rm < size_inp_0)[:, None, None] & (rn < size_inp_1)[None, :, None
] & (rk < size_inp_2)[None, None, :]
a = tl.load(A, mask=mask)
tl.store(B, a, mask=mask)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/copy.py |
7856aa29-242e-4335-8d5c-122814a3f128 | attn_torch_function.py | ROCm/aotriton | tritonsrc/attn_torch_function.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.autotune(configs=TRITON_CONFIG_LIST_FWD, key=['max_seqlen_q',
'max_seqlen_k', 'CAUSAL'])
@triton.jit
def tuned_attn_fwd(Q, K, V, B, sm_scale, M, Out, stride_qz, stride_qh,
stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn, stride_bz, stride_bh,
stride_bm, stride_bn, stride_oz, stride_oh, stride_om, stride_on,
num_head_q, num_head_k, cu_seqlens_q, cu_seqlens_k, num_seqlens,
max_seqlen_q, max_seqlen_k, head_dim, dropout_p, philox_seed_ptr,
philox_offset1, philox_offset2, philox_seed_output,
philox_offset_output, encoded_softmax, CAUSAL: tl.constexpr, BLOCK_M:
tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
pre_load_v: tl.constexpr, ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr, PADDED_HEAD: tl.constexpr,
BIAS_TYPE: tl.constexpr):
bare_attn_fwd(Q, K, V, B, sm_scale, M, Out, stride_qz, stride_qh,
stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn, stride_bz, stride_bh,
stride_bm, stride_bn, stride_oz, stride_oh, stride_om, stride_on,
num_head_q, num_head_k, cu_seqlens_q, cu_seqlens_k, num_seqlens,
max_seqlen_q, max_seqlen_k, head_dim, dropout_p, philox_seed_ptr,
philox_offset1, philox_offset2, philox_seed_output,
philox_offset_output, encoded_softmax, CAUSAL, BLOCK_M,
BLOCK_DMODEL, BLOCK_N, pre_load_v, ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX, PADDED_HEAD, BIAS_TYPE=BIAS_TYPE)
| {
"Data Type": [],
"Functionality": [
"Attention Mechanisms",
"Softmax"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/attn_torch_function.py |
cda10c52-c03b-4cd3-bcd2-bf5e77672cfe | triton_rms_norm.py | vladmandic/dcae | dcae/nn/triton_rms_norm.py | 5223970c7e6c6acfe282e18be7e3821b61511673 | 0 | @triton.jit
def _rms_norm_2d_bwd_dx_fused(DX, DY, DW, DB, X, W, B, Rrms, M, C, N,
num_blocks, eps, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr,
BLOCK_SIZE_C: tl.constexpr):
m_n = tl.program_id(0)
m, n = m_n // num_blocks, m_n % num_blocks
X += m * C * N
DY += m * C * N
DX += m * C * N
Rrms += m * N
cols = n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = cols < N
DW = DW + m_n * C
DB = DB + m_n * C
rrms = tl.load(Rrms + cols, mask=mask, other=1)
c1 = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, C):
pos = off * N + cols
x = tl.load(X + pos, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + pos, mask=mask, other=0).to(tl.float32)
w = tl.load(W + off).to(tl.float32)
xhat = x * rrms
wdy = w * dy
xhat = tl.where(mask, xhat, 0.0)
wdy = tl.where(mask, wdy, 0.0)
c1 += xhat * wdy
tl.store(DW + off, tl.sum((dy * xhat).to(w.dtype), axis=0))
tl.store(DB + off, tl.sum(dy.to(w.dtype), axis=0))
c1 /= C
for off in range(0, C):
pos = off * N + cols
x = tl.load(X + pos, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + pos, mask=mask, other=0).to(tl.float32)
w = tl.load(W + off).to(tl.float32)
xhat = x * rrms
wdy = w * dy
dx = (wdy - xhat * c1) * rrms
tl.store(DX + pos, dx, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/vladmandic/dcae/blob/5223970c7e6c6acfe282e18be7e3821b61511673/dcae/nn/triton_rms_norm.py |
f37fbd18-5828-4317-9496-ead47311db58 | partition_k.py | pytorch-labs/tritonbench | tritonbench/operators/gemm/partition_k.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N':
32, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=2), triton.Config({
'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64}, num_stages
=5, num_warps=2), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 64}, num_stages=6, num_warps=2), triton.Config({
'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128},
num_stages=4, num_warps=2), triton.Config({'BLOCK_SIZE_M': 32,
'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K':
128}, num_stages=6, num_warps=2)], key=['M', 'N', 'K', 'PK'])
@triton.jit
def _matmul_partition_k(a_ptr, b_ptr, c_buf_ptr, M, N, K, PK, PK_SIZE,
stride_am, stride_ak, stride_bk, stride_bn, stride_cb_m, stride_cb_n,
stride_cb_k, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
pid_pk = tl.program_id(axis=2)
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = (pid_pk * PK_SIZE + tl.arange(0, BLOCK_SIZE_K)) % K
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] *
stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] *
stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(PK_SIZE, BLOCK_SIZE_K)):
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
acc = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_ck = pid_pk
c_buf_ptrs = c_buf_ptr + stride_cb_m * offs_cm[:, None, None
] + stride_cb_n * offs_cn[None, :, None] + stride_cb_k * offs_ck[
None, None, :]
tl.store(c_buf_ptrs, acc[:, :, None])
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Blocked Access"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/gemm/partition_k.py |
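A hedged launch sketch for the partition-K kernel above (PyTorch CUDA tensors; K must be divisible by PK, and M, N, PK_SIZE should be multiples of the block sizes since loads wrap with modulo and the store is unmasked; names and sizes are illustrative):

import torch
import triton

M, N, K = 256, 256, 4096
PK = 8                      # number of K partitions
PK_SIZE = K // PK           # slice of K handled by each partition
a = torch.randn(M, K, device='cuda', dtype=torch.float16)
b = torch.randn(K, N, device='cuda', dtype=torch.float16)
c_buf = torch.empty(M, N, PK, device='cuda', dtype=torch.float16)

grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']),
                     triton.cdiv(N, META['BLOCK_SIZE_N']), PK)
_matmul_partition_k[grid](
    a, b, c_buf, M, N, K, PK, PK_SIZE,
    a.stride(0), a.stride(1), b.stride(0), b.stride(1),
    c_buf.stride(0), c_buf.stride(1), c_buf.stride(2),
)
c = c_buf.sum(dim=-1)       # reduce the partial products along the PK axis
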
c8196583-7032-4955-b22f-e6dfe1c1f392 | RzLinearBackward.py | apd10/RzLinear | python/rz_linear/impl/RzLinearBackward.py | eb56657b2de0a97f398f88af421b0fbcbc5469c9 | 0 | @triton.jit
def rz_linear_backward_weight_grad_kernel_notune(a_ptr, b_ptr, c_ptr,
init_factor, M, N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7:
int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int,
allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr):
rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=
c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=
stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=
stride_bn, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0,
allow_tf32=allow_tf32, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=
BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py |
4ee02cfd-0063-451e-9188-dd563dfe40c5 | fused_chunk.py | sustcsonglin/flash-linear-attention | fla/ops/retention/fused_chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.jit
def fused_chunk_retention_fwd_kernel(q, k, v, o, h0, ht, scale, B: tl.
constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr, CHECK:
tl.constexpr):
i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h = i_bh % H
o_i = tl.arange(0, BT)
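    # per-head retention decay rate, kept in log2 space: b_b = log2(1 - 2^(-5 - head)),
    # so tl.math.exp2(n * b_b) below computes the decay factor (1 - 2^(-5-h))^n cheaply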
b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0))
d_b, d_o, d_h = tl.math.exp2(BT * b_b), tl.math.exp2((o_i + 1) * b_b
), tl.math.exp2((BT - o_i - 1) * b_b)
m_s = o_i[:, None] >= o_i[None, :]
d_s = tl.where(m_s, tl.math.exp2((o_i[:, None] - o_i[None, :]) * b_b), 0)
b_h = tl.zeros([BK, BV], dtype=tl.float32)
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (0, i_k * BK),
(BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK, 0),
(BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (0, i_v * BV),
(BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (i_bh + i_k * B * H) * T * V, (T, V), (V, 1
), (0, i_v * BV), (BT, BV), (1, 0))
if USE_INITIAL_STATE:
p_h = tl.make_block_ptr(h0 + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h = tl.load(p_h, boundary_check=(0, 1)).to(tl.float32)
NT = tl.cdiv(T, BT)
for i in range(0, NT):
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_q = (b_q * scale).to(b_k.dtype)
b_s = tl.dot(b_q, b_k, allow_tf32=False) * d_s
b_o = tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
if CHECK and i == 0:
b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False) * d_o[:,
None]
b_h = d_b * b_h + tl.dot(b_k, (b_v * d_h[:, None]).to(b_k.dtype
), allow_tf32=False)
else:
b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False) * d_o[:,
None]
if i == NT - 1 and T % BT != 0:
d_b = tl.math.exp2(T % BT * b_b)
d_h = tl.math.exp2((T % BT - o_i - 1) * b_b)
b_h = d_b * b_h + tl.dot(b_k, (b_v * d_h[:, None]).to(b_k.dtype
), allow_tf32=False)
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
p_q = tl.advance(p_q, (BT, 0))
p_k = tl.advance(p_k, (0, BT))
p_v = tl.advance(p_v, (BT, 0))
p_o = tl.advance(p_o, (BT, 0))
if STORE_FINAL_STATE:
p_ht = tl.make_block_ptr(ht + i_bh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/fused_chunk.py |
dbd509cd-7029-42a7-9a9f-50d56a9e82f4 | normalization.py | ai-compiler-study/triton-kernels | triton_kernels/kernels/normalization.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def _rms_norm_bwd(dY, dX, dW, X, W, Rstd, stride, N, BLOCK_SIZE: tl.constexpr):
row = tl.program_id(0)
X += row * stride
dY += row * stride
dX += row * stride
dW += row * stride
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < N
dy = tl.load(dY + cols, mask=mask, other=0.0)
x = tl.load(X + cols, mask=mask, other=0.0)
w = tl.load(W + cols, mask=mask, other=0.0)
rstd = tl.load(Rstd + row)
m = dy * w
dx = rstd * m
dx += rstd * -(1 / N) * rstd * rstd * tl.sum(m * x, axis=0) * x
dw = dy * (x * rstd)
tl.store(dX + cols, dx, mask=mask)
tl.store(dW + cols, dw, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/normalization.py |
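A hedged per-row launch sketch for _rms_norm_bwd (it assumes PyTorch CUDA tensors and that Rstd was saved by the forward pass; dW here is a per-row partial buffer reduced on the host, which matches the kernel's dW += row * stride indexing; names are illustrative):

import torch
import triton

M, N = 32, 512
BLOCK_SIZE = triton.next_power_of_2(N)
x = torch.randn(M, N, device='cuda')
w = torch.randn(N, device='cuda')
dy = torch.randn(M, N, device='cuda')
rstd = torch.rand(M, device='cuda')            # saved from the forward pass
dx = torch.empty_like(x)
dw_partial = torch.empty(M, N, device='cuda')  # per-row partials, summed on the host

_rms_norm_bwd[(M,)](dy, dx, dw_partial, x, w, rstd,
                    x.stride(0), N, BLOCK_SIZE=BLOCK_SIZE)
dw = dw_partial.sum(dim=0)                     # final weight gradient
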
10c2ce56-9173-429f-9c4d-35c04a889c0f | triton_ops.py | imoneoi/bf16_fused_adam | bf16_fused_adam/triton_ops.py | 66375343b528a00a483646a58a8a851a90834f9e | 0 | @triton.jit
def bit_split_kernel(x_ptr, output_hi_ptr, output_lo_ptr, n_elements,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask).to(tl.uint32, bitcast=True)
output_hi = (x >> 16).to(tl.uint16).to(tl.bfloat16, bitcast=True)
output_lo = x.to(tl.uint16).to(tl.bfloat16, bitcast=True)
tl.store(output_hi_ptr + offsets, output_hi, mask=mask)
tl.store(output_lo_ptr + offsets, output_lo, mask=mask)
| {
"Data Type": [
"bf16"
],
"Functionality": [
"Quantization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/imoneoi/bf16_fused_adam/blob/66375343b528a00a483646a58a8a851a90834f9e/bf16_fused_adam/triton_ops.py |
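A hedged launch sketch for bit_split_kernel (PyTorch CUDA tensors; names and sizes are illustrative):

import torch
import triton

x = torch.randn(1000, device='cuda', dtype=torch.float32)
hi = torch.empty_like(x, dtype=torch.bfloat16)
lo = torch.empty_like(x, dtype=torch.bfloat16)

n = x.numel()
BLOCK_SIZE = 1024
bit_split_kernel[(triton.cdiv(n, BLOCK_SIZE),)](x, hi, lo, n, BLOCK_SIZE=BLOCK_SIZE)

# hi holds the top 16 bits of each fp32 word (a truncated bf16 copy of x);
# lo holds the bottom 16 bits reinterpreted as bf16. Together they losslessly
# encode the fp32 value, which is what lets a bf16 fused Adam keep exact
# fp32 master weights in two bf16 tensors.
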
f5633053-7e55-4be5-a160-add1d765c266 | test_autodiff.py | srush/triton-autodiff | tests/test_autodiff.py | f9d1a04d048e3252bfd222646db7175ad60a3c7c | 0 | @triton.jit
def ub1(X, Y):
r = tl.arange(0, 16)
r2 = tl.arange(0, 32)
x = tl.load(X + 16 * r2[:, None] + r)
y = triton_unbroadcast(x, tl.arange(0, 16).shape)
tl.store(Y + r, y)
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py |
0a8208c8-85ba-4efb-b976-49ac81225dd3 | chunk_h.py | sustcsonglin/flash-linear-attention | fla/ops/common/chunk_h.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not
None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None,
'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps=
num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for
num_warps in [1, 2, 4, 8] for num_stages in [2, 3, 4]], key=['BT',
'USE_G', 'USE_GK', 'USE_GV'])
@triton.jit
def chunk_fwd_kernel_h(k, v, h, g, gk, gv, h0, ht, offsets, chunk_offsets,
T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT:
tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_G: tl.constexpr,
USE_GK: tl.constexpr, USE_GV: tl.constexpr, USE_INITIAL_STATE: tl.
constexpr, STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr,
HEAD_FIRST: tl.constexpr):
i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_n, i_h = i_nh // H, i_nh % H
if USE_OFFSETS:
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
boh = tl.load(chunk_offsets + i_n).to(tl.int32)
else:
bos, eos = i_n * T, i_n * T + T
NT = tl.cdiv(T, BT)
boh = i_n * NT
b_h = tl.zeros([BK, BV], dtype=tl.float32)
if USE_INITIAL_STATE:
p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32)
for i_t in range(NT):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_nh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + i_nh * T * V, (T, V), (V, 1), (i_t *
BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + (i_nh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V,
1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_h = tl.make_block_ptr(h + ((boh + i_t) * H + i_h) * K * V, (K,
V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_v = tl.load(p_v, boundary_check=(0, 1))
last_idx = min((i_t + 1) * BT, T) - 1
if USE_G:
if HEAD_FIRST:
b_g_last = tl.load(g + i_nh * T + last_idx)
p_g = g + i_nh * T + i_t * BT + tl.arange(0, BT)
p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT)
else:
b_g_last = tl.load(g + bos * H + last_idx * H + i_h)
p_g = g + bos * H + (i_t * BT + tl.arange(0, BT)) * H + i_h
b_h *= tl.exp(b_g_last)
b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0)
b_v = (b_v * tl.exp(b_g_last - b_g)[:, None]).to(b_v.dtype)
if USE_GK:
if HEAD_FIRST:
p_gk = tl.make_block_ptr(gk + i_nh * T * K, (K, T), (1, K),
(i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = (gk + i_nh * T * K + last_idx * K + i_k * BK +
tl.arange(0, BK))
else:
p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T),
(1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_gk_last = gk + (bos + last_idx
) * H * K + i_h * K + i_k * BK + tl.arange(0, BK)
p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK)
b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) <
K, other=0.0)
b_h *= tl.exp(b_gk_last)[:, None]
b_gk = tl.load(p_gk, boundary_check=(0, 1))
b_k = (b_k * tl.exp(b_gk_last[:, None] - b_gk)).to(b_k.dtype)
if USE_GV:
if HEAD_FIRST:
p_gv = tl.make_block_ptr(gv + i_nh * T * V, (T, V), (V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_gv_last = (gv + i_nh * T * V + last_idx * V + i_v * BV +
tl.arange(0, BV))
else:
p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V),
(H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_gv_last = gv + (bos + last_idx
) * H * V + i_h * V + i_v * BV + tl.arange(0, BV)
p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV)
b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) <
V, other=0.0)
b_h *= tl.exp(b_gv_last)[None, :]
b_gv = tl.load(p_gv, boundary_check=(0, 1))
b_v = (b_v * tl.exp(b_gv_last[None, :] - b_gv)).to(b_v.dtype)
b_h += tl.dot(b_k, b_v)
if STORE_FINAL_STATE:
p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k *
BK, i_v * BV), (BK, BV), (1, 0))
tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h.py |
bd50fa8d-7a93-45f1-be5a-1848956128a7 | matmul.py | jax-ml/jax-triton | examples/matmul.py | 859cc392bec876d132bd0790ea6c00b6c246dd2b | 0 | @triton.jit
def relu(x):
return tl.where(x >= 0, x, 0)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"Apache"
] | https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/examples/matmul.py |
24bff4b0-3174-46f7-90f5-71dbb3e12644 | softmax_naive.py | iclementine/optimize_softmax | softmax_naive.py | 6ddeee3481dd5e63f4a30b946c417e97bc4494bf | 0 | @triton.jit
def softmax_kernel(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr):
pid_m = tl.program_id(0)
n_offsets = tl.arange(0, TILE_N)
offset = pid_m * N + n_offsets
input_ptrs = input_ptr + offset
mask = n_offsets < N
inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr
.dtype.element_ty)
m = tl.max(inp, 0)
e = tl.exp(inp - m)
z = tl.sum(e, 0)
out = e / z
output_ptrs = output_ptr + offset
tl.store(output_ptrs, out, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_naive.py |
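A hedged launch sketch for the naive softmax kernel above: one program per row, with a single tile covering the whole row, so TILE_N must be a power of two no smaller than N (names are illustrative):

import torch
import triton

M, N = 64, 1000
x = torch.randn(M, N, device='cuda')
y = torch.empty_like(x)
TILE_N = triton.next_power_of_2(N)   # one tile must cover a full row
softmax_kernel[(M,)](y, x, M, N, TILE_N=TILE_N)

assert torch.allclose(y, torch.softmax(x, dim=-1), atol=1e-6)
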
8a498a79-9878-41b0-9e3c-3a83716b2886 | swiglu.py | shauray8/continuity | continuity/diffusion_engine/models/triton_kernels/swiglu.py | a52ad077e4ed3162576c7417f302e792ccdf5eca | 0 | @triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE: tl.constexpr):
block_idx = tl.program_id(0)
offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
e_row = tl.load(e + offsets, mask=mask, other=0).to(tl.float32)
g_row = tl.load(g + offsets, mask=mask, other=0)
f_row = e_row * tl.sigmoid(e_row)
f_row = f_row.to(g_row.dtype)
h_row = f_row * g_row
tl.store(h + offsets, h_row, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Backpropagation",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/shauray8/continuity/blob/a52ad077e4ed3162576c7417f302e792ccdf5eca/continuity/diffusion_engine/models/triton_kernels/swiglu.py |
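A hedged launch sketch for the SwiGLU gate kernel above, which computes h = silu(e) * g elementwise (PyTorch CUDA tensors; names and sizes are illustrative):

import torch
import triton

e = torch.randn(4096, device='cuda', dtype=torch.float16)
g = torch.randn(4096, device='cuda', dtype=torch.float16)
h = torch.empty_like(g)

n = e.numel()
BLOCK_SIZE = 1024
_fg_kernel[(triton.cdiv(n, BLOCK_SIZE),)](e, g, h, n, BLOCK_SIZE=BLOCK_SIZE)

# reference mirrors the kernel: silu in fp32, cast to g's dtype, then gate
ref = torch.nn.functional.silu(e.float()).to(g.dtype) * g
assert torch.allclose(h, ref, atol=1e-3)
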
891bbd59-6bf6-4383-a905-e03f4293b257 | ops.py | srush/triton-autodiff | triton_autodiff/ops.py | f9d1a04d048e3252bfd222646db7175ad60a3c7c | 0 | @triton.jit
def add_grad(left, right):
right = triton_unbroadcast(right, left.shape)
return left + right
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/triton_autodiff/ops.py |
f4342a00-1b63-4e9d-90eb-9cd669123177 | activation.py | chengzeyi/stable-fast | src/sfast/triton/ops/activation.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @triton.jit
def gelu(x):
return 0.5 * x * (1.0 + tl.tanh(0.7978845608028654 * (x + 0.044715 * x *
x * x)))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [],
"Performance Objective": [
"High Throughput"
]
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py |
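The magic constants above are the standard tanh approximation of GELU: 0.7978845608028654 is sqrt(2/pi) and 0.044715 is the usual cubic correction term. A quick check in plain Python:

import math
assert abs(0.7978845608028654 - math.sqrt(2 / math.pi)) < 1e-15
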
1f58dd3e-6916-457c-a58e-f4b968d69485 | activation.py | chengzeyi/stable-fast | src/sfast/triton/ops/activation.py | 3a6f35c7045f8f6812515957ca62ef37260ff080 | 0 | @triton.jit
def identity(x):
return x
| {
"Data Type": [],
"Functionality": [],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": []
} | [
"MIT"
] | https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py |
810d1a75-8358-49ce-890d-2b567c12cecf | chunk.py | sustcsonglin/flash-linear-attention | fla/ops/gated_delta_rule/chunk.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=4)], key=['BT', 'BK',
'BV'])
@triton.jit
def chunk_gated_delta_rule_fwd_kernel_o(q, k, v, h, g, o, offsets, indices,
scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.
constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr,
USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr):
i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_tg = i_t
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
NT = tl.cdiv(T, BT)
else:
NT = tl.cdiv(T, BT)
i_tg = i_b * NT + i_t
bos, eos = i_b * T, i_b * T + T
o_i = tl.arange(0, BT)
m_s = o_i[:, None] >= o_i[None, :]
b_o = tl.zeros([BT, BV], dtype=tl.float32)
b_s = tl.zeros([BT, BT], dtype=tl.float32)
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k *
BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V),
(V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
else:
p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H *
K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), (
V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
b_q = tl.load(p_q, boundary_check=(0, 1))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_h = tl.load(p_h, boundary_check=(0, 1))
b_o += tl.dot(b_q, b_h, allow_tf32=False)
b_s += tl.dot(b_q, b_k, allow_tf32=False)
if HEAD_FIRST:
p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT,
), (0,))
p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT,
i_v * BV), (BT, BV), (1, 0))
else:
p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,),
(BT,), (0,))
p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
p_o = tl.make_block_ptr(o + (bos * H + i_h) * V, (T, V), (H * V, 1),
(i_t * BT, i_v * BV), (BT, BV), (1, 0))
b_g = tl.load(p_g, boundary_check=(0,))
b_o = b_o * tl.exp(b_g)[:, None]
b_s = b_s * tl.exp(b_g[:, None] - b_g[None, :])
b_s = tl.where(m_s, b_s, 0)
b_v = tl.load(p_v, boundary_check=(0, 1))
b_o = (b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) * scale
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32",
"fp16"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py |
f634d81f-eff7-4c83-ac4b-f2cf21f8ff28 | rope.py | ardywibowo/triton-mode | kernels/rope.py | 5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1 | 0 | @triton.jit
def triton_rope(q_buffer, q_buffer_stride, k_buffer, k_buffer_stride,
cos_values, cos_values_stride, sin_values, sin_values_stride,
seq_length, batch_size: tl.constexpr, num_q_heads: tl.constexpr,
num_k_heads: tl.constexpr, head_dim: tl.constexpr, padded_num_q_heads:
tl.constexpr, padded_num_k_heads: tl.constexpr, padded_head_dim: tl.
constexpr, TILE_SIZE: tl.constexpr, IS_BACKWARD: tl.constexpr=False):
prog_id = tl.program_id(0)
q_buffer = q_buffer + prog_id * q_buffer_stride
k_buffer = k_buffer + prog_id * k_buffer_stride
cos_index = prog_id % seq_length
cos_values = cos_values + cos_index * cos_values_stride
sin_values = sin_values + cos_index * sin_values_stride
cos_indices = tl.arange(0, padded_head_dim // 2)
cos_active_mask = cos_indices < head_dim // 2
cos_vec = tl.load(cos_values + cos_indices, mask=cos_active_mask, other=0)
sin_vec = tl.load(sin_values + cos_indices, mask=cos_active_mask, other=0)
q_half_offsets = tl.arange(0, padded_num_q_heads)[:, None
] * head_dim + tl.arange(0, padded_head_dim // 2)[None, :]
k_half_offsets = tl.arange(0, padded_num_k_heads)[:, None
] * head_dim + tl.arange(0, padded_head_dim // 2)[None, :]
q_mask = (tl.arange(0, padded_num_q_heads)[:, None] < num_q_heads) & (tl
.arange(0, padded_head_dim // 2)[None, :] < head_dim // 2)
k_mask = (tl.arange(0, padded_num_k_heads)[:, None] < num_k_heads) & (tl
.arange(0, padded_head_dim // 2)[None, :] < head_dim // 2)
q_tile_part1 = tl.load(q_buffer + q_half_offsets, mask=q_mask, other=0).to(
sin_vec.dtype)
k_tile_part1 = tl.load(k_buffer + k_half_offsets, mask=k_mask, other=0).to(
sin_vec.dtype)
q_half2_offsets = q_half_offsets + head_dim // 2
k_half2_offsets = k_half_offsets + head_dim // 2
q_half2_mask = q_mask
k_half2_mask = k_mask
q_tile_part2 = tl.load(q_buffer + q_half2_offsets, mask=q_half2_mask,
other=0).to(sin_vec.dtype)
k_tile_part2 = tl.load(k_buffer + k_half2_offsets, mask=k_half2_mask,
other=0).to(sin_vec.dtype)
if not IS_BACKWARD:
updated_q_part1 = q_tile_part1 * cos_vec - q_tile_part2 * sin_vec
tl.store(q_buffer + q_half_offsets, updated_q_part1, mask=q_mask)
updated_q_part2 = q_tile_part2 * cos_vec + q_tile_part1 * sin_vec
tl.store(q_buffer + q_half2_offsets, updated_q_part2, mask=q_half2_mask
)
updated_k_part1 = k_tile_part1 * cos_vec - k_tile_part2 * sin_vec
tl.store(k_buffer + k_half_offsets, updated_k_part1, mask=k_mask)
updated_k_part2 = k_tile_part2 * cos_vec + k_tile_part1 * sin_vec
tl.store(k_buffer + k_half2_offsets, updated_k_part2, mask=k_half2_mask
)
else:
reversed_q_part1 = q_tile_part1 * cos_vec + q_tile_part2 * sin_vec
tl.store(q_buffer + q_half_offsets, reversed_q_part1, mask=q_mask)
reversed_q_part2 = q_tile_part2 * cos_vec - q_tile_part1 * sin_vec
tl.store(q_buffer + q_half2_offsets, reversed_q_part2, mask=
q_half2_mask)
reversed_k_part1 = k_tile_part1 * cos_vec + k_tile_part2 * sin_vec
tl.store(k_buffer + k_half_offsets, reversed_k_part1, mask=k_mask)
reversed_k_part2 = k_tile_part2 * cos_vec - k_tile_part1 * sin_vec
tl.store(k_buffer + k_half2_offsets, reversed_k_part2, mask=
k_half2_mask)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/rope.py |
9636248c-a664-44e8-a77b-6cdc6b314b7e | silu.py | ai-compiler-study/triton-kernels | triton_kernels/ops/silu.py | 2308e5e9d965059fe2d19b4d535debac4970b69e | 0 | @triton.jit
def triton_silu(x_ptr, b_ptr, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x = tl.load(x_ptr + x0, mask=xmask).to(tl.float32)
output = (x * tl.sigmoid(x)).to(tl.float32)
tl.store(b_ptr + x0, output, xmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [
"Coalesced"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/silu.py |
736db1ea-c332-43df-a318-da529e0545d7 | rms_norm.py | dame-cell/Triformer | triformer/rms_norm.py | 0712537d576166b93fa09aa9509b2661b9ed8a68 | 0 | @triton.jit
def rmsnorm_forward(Y, Y_row_stride, X, X_row_stride, W, r, n_cols, eps,
BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
mask = col_offsets < n_cols
Y_ptr = Y + row_idx * Y_row_stride
X_ptr = X + row_idx * X_row_stride
r_ptr = r + row_idx
X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0).to(tl.float32)
W_row = tl.load(W + col_offsets, mask=mask, other=0).to(tl.float32)
X_squared = X_row * X_row
mean_X_squared = tl.sum(X_squared, axis=0) / n_cols
rms = tl.math.rsqrt(mean_X_squared + eps)
tl.store(r_ptr, rms)
output = X_row * rms * W_row
tl.store(Y_ptr + col_offsets, output, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/rms_norm.py |
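A hedged per-row launch sketch for rmsnorm_forward (PyTorch CUDA tensors; r receives the per-row reciprocal RMS, typically saved for the backward pass; names and sizes are illustrative):

import torch
import triton

n_rows, n_cols = 16, 768
eps = 1e-6
X = torch.randn(n_rows, n_cols, device='cuda')
W = torch.ones(n_cols, device='cuda')
Y = torch.empty_like(X)
r = torch.empty(n_rows, device='cuda')       # per-row 1/rms, saved for backward

BLOCK_SIZE = triton.next_power_of_2(n_cols)
rmsnorm_forward[(n_rows,)](Y, Y.stride(0), X, X.stride(0), W, r,
                           n_cols, eps, BLOCK_SIZE=BLOCK_SIZE)
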
395d36cc-ba19-422f-b9e3-4d3f977cec4b | block_sparse_attention_lut.py | sparklesea/sparse-quant | sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py | e3d8b6ecab208c31b744913ed8c3caaa43605f86 | 0 | @triton.jit
def _sparse_attention_decode_fwd_kernel(Q, K, V, sm_scale, Out, L, M,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, stride_luth,
NNZ: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, lut):
start_n = tl.program_id(0)
off_hz = tl.program_id(1)
start_nnz = tl.load(lut + off_hz * stride_luth + start_n)
kv_offset = off_hz * stride_kh
q_offset = off_hz * stride_qh
o_offset = off_hz * stride_oh
Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(1,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(0, 0),
block_shape=(1, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + kv_offset, shape=(BLOCK_DMODEL,
N_CTX), strides=(stride_kk, stride_kn), offsets=(0, start_nnz *
BLOCK_N), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + kv_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(start_nnz *
BLOCK_N, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(NNZ,
BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_n, 0),
block_shape=(1, BLOCK_DMODEL), order=(1, 0))
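    # 1.44269504 is log2(e); folding it into the scale lets the kernel use exp2 below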
qk_scale = sm_scale * 1.44269504
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(tl.float16)
k = tl.load(K_block_ptr)
qk = tl.expand_dims(tl.sum(tl.trans(q) * k, axis=0), axis=0).to(tl.float32)
k_indices = start_nnz * BLOCK_N + tl.arange(0, BLOCK_N)
qk = tl.where(k_indices < N_CTX, qk, float('-inf'))
m = tl.max(qk, axis=1, return_indices=False)
p = tl.math.exp2(qk - m)
l = tl.sum(p, axis=1)
v = tl.load(V_block_ptr).to(tl.float16)
p = p.to(tl.float16)
acc = tl.expand_dims(tl.sum(tl.trans(p) * v, axis=0), axis=0)
tl.store(O_block_ptr, acc)
l_ptrs = L + off_hz * NNZ + start_n + tl.arange(0, 1)
m_ptrs = M + off_hz * NNZ + start_n + tl.arange(0, 1)
tl.store(l_ptrs, l)
tl.store(m_ptrs, m)
| {
"Data Type": [
"fp16",
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache",
"BSD"
] | https://github.com/sparklesea/sparse-quant/blob/e3d8b6ecab208c31b744913ed8c3caaa43605f86/sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py |
89a686ef-90d0-45ff-bfe3-19e9d5250c8f | wy_fast.py | sustcsonglin/flash-linear-attention | fla/ops/delta_rule/wy_fast.py | 5968de9a22c096326b19859cfe05dac36155c31d | 0 | @triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for
num_warps in [1, 2, 4, 8, 16]], key=['BK'])
@triton.jit
def fwd_prepare_wy_repr_kernel_chunk64(k, beta, A, offsets, indices, T: tl.
constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.
constexpr, BC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.
constexpr):
i_t, i_bh = tl.program_id(0), tl.program_id(1)
i_b, i_h = i_bh // H, i_bh % H
if USE_OFFSETS:
i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices +
i_t * 2 + 1).to(tl.int32)
bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets +
i_n + 1).to(tl.int32)
T = eos - bos
else:
bos, eos = i_b * T, i_b * T + T
b_A = tl.zeros([BC, BC], dtype=tl.float32)
b_A2 = tl.zeros([BC, BC], dtype=tl.float32)
b_A3 = tl.zeros([BC, BC], dtype=tl.float32)
if HEAD_FIRST:
p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,),
(BC,), (0,))
else:
p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT,), (BC,), (0,))
b_beta = tl.load(p_beta, boundary_check=(0,))
if HEAD_FIRST:
p_beta2 = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT +
BC,), (BC,), (0,))
else:
p_beta2 = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t *
BT + BC,), (BC,), (0,))
b_beta2 = tl.load(p_beta2, boundary_check=(0,))
for i_k in range(tl.cdiv(K, BK)):
if HEAD_FIRST:
p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT, i_k * BK), (BC, BK), (1, 0))
p_k2 = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t *
BT + BC, i_k * BK), (BC, BK), (1, 0))
else:
p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K,
1), (i_t * BT, i_k * BK), (BC, BK), (1, 0))
p_k2 = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H *
K, 1), (i_t * BT + BC, i_k * BK), (BC, BK), (1, 0))
b_k = tl.load(p_k, boundary_check=(0, 1))
b_kb = (b_k * b_beta[:, None]).to(b_k.dtype)
b_k2 = tl.load(p_k2, boundary_check=(0, 1))
b_kb2 = (b_k2 * b_beta2[:, None]).to(b_k2.dtype)
b_A += tl.dot(b_kb, tl.trans(b_k), allow_tf32=False)
b_A2 += tl.dot(b_kb2, tl.trans(b_k2), allow_tf32=False)
b_A3 += tl.dot(b_kb2, tl.trans(b_k), allow_tf32=False)
b_A = -tl.where(tl.arange(0, BC)[:, None] > tl.arange(0, BC)[None, :],
b_A, 0)
b_A2 = -tl.where(tl.arange(0, BC)[:, None] > tl.arange(0, BC)[None, :],
b_A2, 0)
for i in range(1, BC):
mask = tl.arange(0, BC) == i
b_a = tl.sum(tl.where(mask[:, None], b_A, 0), 0)
b_a2 = tl.sum(tl.where(mask[:, None], b_A2, 0), 0)
b_a = b_a + tl.sum(b_a[:, None] * b_A, 0) * (tl.arange(0, BC) < i)
b_a2 = b_a2 + tl.sum(b_a2[:, None] * b_A2, 0) * (tl.arange(0, BC) < i)
b_A = tl.where(mask[:, None], b_a, b_A)
b_A2 = tl.where(mask[:, None], b_a2, b_A2)
b_A += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_A2 += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :]
b_A3 = -tl.dot(tl.dot(b_A2, b_A3, allow_tf32=False), b_A, allow_tf32=False)
if HEAD_FIRST:
p_A1 = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, 0), (BC, BC), (1, 0))
p_A2 = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + BC, BC), (BC, BC), (1, 0))
p_A3 = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT + BC, 0), (BC, BC), (1, 0))
p_A4 = tl.make_block_ptr(A + i_bh * T * BT, (T, BT), (BT, 1), (i_t *
BT, BC), (BC, BC), (1, 0))
else:
p_A1 = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, 0), (BC, BC), (1, 0))
p_A2 = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT + BC, BC), (BC, BC), (1, 0))
p_A3 = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT + BC, 0), (BC, BC), (1, 0))
p_A4 = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (H * BT,
1), (i_t * BT, BC), (BC, BC), (1, 0))
tl.store(p_A1, b_A.to(p_A1.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_A2, b_A2.to(p_A2.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_A3, b_A3.to(p_A3.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_A4, tl.zeros([BC, BC], dtype=tl.float32).to(p_A4.dtype.
element_ty), boundary_check=(0, 1))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access",
"Transposed Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py |
6ed4de31-dcc6-4dc9-a02d-409ffb40b3d9 | math.py | BobMcDear/attorch | attorch/math.py | da06cb6236bb47195e33fe3986ed21c675ed94cc | 0 | @triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Single Instance"
]
} | [
"MIT"
] | https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py |
f6af2c0f-1b6e-4f7e-a6be-7ccf22bd5782 | y_3.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/direct/y_3.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def third_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size:
tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr,
col_offset: tl.constexpr, output_stride: tl.constexpr):
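    # Evaluates the seven degree-3 real spherical-harmonic components
    # (Y00..Y06) of each (x, y, z) coordinate in the block and writes them to
    # consecutive output columns starting at col_offset.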
coord_stride = 3
block_id = tl.program_id(0)
coord_striding = tl.arange(0, block_size) * coord_stride
coord_row_offset = coord_striding + block_size * coord_stride * block_id
x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset <
coord_numel)
y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 <
coord_numel)
z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 <
coord_numel)
CONST000 = 2.64575131106459
CONST002 = 5.1234753829798
CONST004 = 6.48074069840786
CONST005 = 10.2469507659596
CONST006 = -2.09165006633519
CONST007 = -1
CONST008 = -6.27495019900557
CONST009 = -3.96862696659689
CONST010 = -1.62018517460197
VAR07 = x * x * x
VAR08 = x * x
VAR16 = y * y * y
VAR17 = y * y
VAR25 = z * z * z
VAR26 = z * z
Y00 = CONST006 * VAR07 - CONST008 * VAR26 * x
Y01 = CONST005 * x * y * z
Y02 = CONST010 * VAR07 + x * (CONST004 * VAR17 + CONST010 * VAR26)
Y03 = CONST000 * VAR16 + CONST009 * VAR08 * y + CONST009 * VAR26 * y
Y04 = CONST010 * VAR25 + z * (CONST004 * VAR17 + CONST010 * VAR08)
Y05 = CONST002 * y * (CONST007 * VAR08 + VAR26)
Y06 = -CONST006 * VAR25 + CONST008 * VAR08 * z
output_striding = tl.arange(0, block_size) * output_stride
output_row_offset = (output_striding + block_size * output_stride *
block_id + col_offset)
tl.store(output_ptr + output_row_offset, Y00, mask=output_row_offset <
output_numel)
tl.store(output_ptr + output_row_offset + 1, Y01, mask=
output_row_offset + 1 < output_numel)
tl.store(output_ptr + output_row_offset + 2, Y02, mask=
output_row_offset + 2 < output_numel)
tl.store(output_ptr + output_row_offset + 3, Y03, mask=
output_row_offset + 3 < output_numel)
tl.store(output_ptr + output_row_offset + 4, Y04, mask=
output_row_offset + 4 < output_numel)
tl.store(output_ptr + output_row_offset + 5, Y05, mask=
output_row_offset + 5 < output_numel)
tl.store(output_ptr + output_row_offset + 6, Y06, mask=
output_row_offset + 6 < output_numel)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_3.py |
318e31bb-32df-41cc-97f5-6c170ad3dd15 | triton_kernels.py | IntelLabs/EquiTriton | src/equitriton/sph_harm/triton_kernels.py | 1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c | 0 | @triton.jit
def _triton_third_order_bwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl.
tensor, g_x_ptr: tl.tensor, g_y_ptr: tl.tensor, g_z_ptr: tl.tensor,
g_1_0_ptr: tl.tensor, g_1_1_ptr: tl.tensor, g_1_2_ptr: tl.tensor,
g_2_0_ptr: tl.tensor, g_2_1_ptr: tl.tensor, g_2_2_ptr: tl.tensor,
g_2_3_ptr: tl.tensor, g_2_4_ptr: tl.tensor, g_3_0_ptr: tl.tensor,
g_3_1_ptr: tl.tensor, g_3_2_ptr: tl.tensor, g_3_3_ptr: tl.tensor,
g_3_4_ptr: tl.tensor, g_3_5_ptr: tl.tensor, g_3_6_ptr: tl.tensor,
BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr):
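    # Chain rule for spherical-harmonic embeddings: accumulates the upstream
    # gradients of the first-, second-, and third-order terms into the
    # coordinate gradients g_x, g_y, g_z.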
sqrt_3 = 3 ** 0.5
sqrt_5 = 5 ** 0.5
sqrt_15 = 15 ** 0.5
block_id = tl.program_id(0)
offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id
x_row_start = x_ptr + offset
y_row_start = y_ptr + offset
z_row_start = z_ptr + offset
x = tl.load(x_row_start, mask=offset < vector_length)
y = tl.load(y_row_start, mask=offset < vector_length)
z = tl.load(z_row_start, mask=offset < vector_length)
g_1_0 = tl.load(g_1_0_ptr + offset, mask=offset < vector_length)
g_1_1 = tl.load(g_1_1_ptr + offset, mask=offset < vector_length)
g_1_2 = tl.load(g_1_2_ptr + offset, mask=offset < vector_length)
g_x = sqrt_3 * g_1_0
g_y = sqrt_3 * g_1_1
g_z = sqrt_3 * g_1_2
g_2_0 = tl.load(g_2_0_ptr + offset, mask=offset < vector_length)
g_2_1 = tl.load(g_2_1_ptr + offset, mask=offset < vector_length)
g_2_2 = tl.load(g_2_2_ptr + offset, mask=offset < vector_length)
g_2_3 = tl.load(g_2_3_ptr + offset, mask=offset < vector_length)
g_2_4 = tl.load(g_2_4_ptr + offset, mask=offset < vector_length)
g_x += sqrt_15 * z * g_2_0
g_z += sqrt_15 * x * g_2_0
g_x += sqrt_15 * y * g_2_1
g_y += sqrt_15 * x * g_2_1
g_y += sqrt_15 * z * g_2_2
g_z += sqrt_15 * y * g_2_2
g_x += -1.0 * sqrt_5 * x * g_2_3
g_y += 2.0 * sqrt_5 * y * g_2_3
g_z += -1.0 * sqrt_5 * z * g_2_3
g_x += -1.0 * sqrt_15 * x * g_2_4
g_z += sqrt_15 * z * g_2_4
g_3_0 = tl.load(g_3_0_ptr + offset, mask=offset < vector_length)
g_3_1 = tl.load(g_3_1_ptr + offset, mask=offset < vector_length)
g_3_2 = tl.load(g_3_2_ptr + offset, mask=offset < vector_length)
g_3_3 = tl.load(g_3_3_ptr + offset, mask=offset < vector_length)
g_3_4 = tl.load(g_3_4_ptr + offset, mask=offset < vector_length)
g_3_5 = tl.load(g_3_5_ptr + offset, mask=offset < vector_length)
g_3_6 = tl.load(g_3_6_ptr + offset, mask=offset < vector_length)
sq_x = x * x
sq_y = y * y
sq_z = z * z
g_x += sqrt_15 * g_3_0 * (-1.62018517460196 * sq_x + 1.08012344973464 *
sq_z + 0.540061724867322 * sq_z)
g_x += 2.64575131106459 * sqrt_15 * g_3_1 * y * z
g_x -= g_3_2 * (4.8605555238059 * sq_x - 6.48074069840786 * sq_y +
1.62018517460197 * sq_z)
g_x -= 7.93725393319377 * g_3_3 * x * y
g_x -= 3.24037034920393 * g_3_4 * x * z
g_x -= 2.64575131106459 * sqrt_15 * g_3_5 * x * y
g_x -= sqrt_15 * g_3_6 * z * (1.08012344973464 * x + 2.16024689946929 * x)
g_y += 2.64575131106459 * sqrt_15 * g_3_1 * x * z
g_y += 12.9614813968157 * g_3_2 * x * y
g_y -= g_3_3 * (3.96862696659689 * sq_x - 7.93725393319377 * sq_y +
3.96862696659689 * sq_z)
g_y += 12.9614813968157 * g_3_4 * y * z
g_y -= 1.3228756555323 * sqrt_15 * g_3_5 * (sq_x - sq_z)
g_z += sqrt_15 * g_3_0 * x * (1.08012344973464 * z + 2.16024689946929 * z)
g_z += 2.64575131106459 * sqrt_15 * g_3_1 * x * y
g_z -= 3.24037034920393 * g_3_2 * x * z
g_z -= 7.93725393319377 * g_3_3 * y * z
g_z -= g_3_4 * (1.62018517460197 * sq_x - 6.48074069840786 * sq_y +
4.8605555238059 * sq_z)
g_z += 2.64575131106459 * sqrt_15 * g_3_5 * y * z
g_z -= sqrt_15 * g_3_6 * (1.08012344973464 * sq_x + 0.540061724867322 *
sq_x - 1.62018517460196 * sq_z)
tl.store(g_x_ptr + offset, g_x, mask=offset < vector_length)
tl.store(g_y_ptr + offset, g_y, mask=offset < vector_length)
tl.store(g_z_ptr + offset, g_z, mask=offset < vector_length)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"Apache"
] | https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py |
6927aecc-280a-412c-8568-2e3046c2cd1c | triton_jagged_tensor_ops.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def triton_jagged_to_dense_optimization_2d(input_jagged_values_ptr,
input_jagged_offset_ptr, input_jagged_row_stride, output_dense_ptr,
output_dense_row_stride, output_dense_matrix_stride,
thread_block_row_size: tl.constexpr, thread_block_col_size: tl.
constexpr, padded_value, operation_function: tl.constexpr, operation_dense
) ->None:
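    # One program per jagged row: copies its variable-length values into the
    # padded dense output, tiling the copy in (thread_block_row_size x
    # thread_block_col_size) blocks, substituting padded_value outside the
    # jagged extent, and optionally fusing an elementwise operation_function
    # against an auxiliary dense tensor.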
pid = tl.program_id(0)
offset_idx = pid
begin = tl.load(input_jagged_offset_ptr + offset_idx)
end = tl.load(input_jagged_offset_ptr + offset_idx + 1)
cur_jagged_tensor_row_size = end - begin
output_dense_ptr += pid * output_dense_matrix_stride
input_jagged_values_ptr += begin * input_jagged_row_stride
if operation_function is not None:
operation_dense += pid * output_dense_matrix_stride
offset_row = tl.arange(0, thread_block_row_size)
dense_col_size = output_dense_row_stride
dense_row_size = output_dense_matrix_stride // output_dense_row_stride
for _i in range(0, dense_row_size, thread_block_row_size):
offset_col = tl.arange(0, thread_block_col_size)
block_offset = offset_row[:, None
] * output_dense_row_stride + offset_col[None, :]
for _j in range(0, dense_col_size, thread_block_col_size):
dense_mask = (offset_row[:, None] < dense_row_size) & (offset_col
[None, :] < dense_col_size)
jagged_mask = (offset_row[:, None] < cur_jagged_tensor_row_size
) & (offset_col[None, :] < input_jagged_row_stride)
jagged_val = tl.load(input_jagged_values_ptr + block_offset,
mask=jagged_mask, other=padded_value)
if operation_function is not None:
operation_dense_val = tl.load(operation_dense +
block_offset, mask=dense_mask, other=0.0)
jagged_val = operation_function(operation_dense_val, jagged_val
)
tl.store(output_dense_ptr + block_offset, jagged_val, mask=
dense_mask)
offset_col += thread_block_col_size
block_offset += thread_block_col_size
offset_row += thread_block_row_size
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py |
d73689df-125b-497e-b758-19f7ab9950f4 | 06-fused-attention.py | 2lambda123/triton | python/tutorials/06-fused-attention.py | 09e27725b89043a07f49c440db6a9aedcfba8432 | 0 | @triton.jit
def _bwd_preprocess(Out, DO, Delta, BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
tl.store(Delta + off_m, delta)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/2lambda123/triton/blob/09e27725b89043a07f49c440db6a9aedcfba8432/python/tutorials/06-fused-attention.py |
8f081f68-2198-4cce-a6f5-0e54650bd725 | triton_sll.py | pytorch/FBGEMM | fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py | fe980ab54a6e28818d81c8694b6564e7f804418b | 0 | @triton.jit
def padded_dense_to_jagged2_kernel(x_ptr, lengths_ptr, offsets_ptr,
output_jagged_ptr, stride_b, stride_m, stride_n, max_length, BLOCK_M:
tl.constexpr, BLOCK_N: tl.constexpr):
pid_batch = tl.program_id(2)
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
begin = tl.load(offsets_ptr + pid_batch)
seqlen = tl.load(lengths_ptr + pid_batch)
seqlen = tl.minimum(seqlen, max_length)
if seqlen == 0:
return
offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
x_ptrs = x_ptr + pid_batch * stride_b + offs_m[:, None
] * stride_m + offs_n[None, :] * stride_n
x = tl.load(x_ptrs, mask=(offs_m[:, None] < seqlen) & (offs_n[None, :] <
seqlen))
out_ptrs = output_jagged_ptr + begin + offs_m[:, None] * seqlen + offs_n[
None, :]
tl.store(out_ptrs, x, mask=(offs_m[:, None] < seqlen) & (offs_n[None, :
] < seqlen))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"BSD",
"MIT"
] | https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py |
6d81ba3a-93a7-42c4-af16-ccbd76925f55 | 05-layer-norm.py | triton-lang/triton | python/tutorials/05-layer-norm.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.jit
def _layer_norm_bwd_dx_fused(DX, DY, DW, DB, X, W, Mean, Rstd, Lock, stride,
N, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.0)
wdy = tl.where(mask, wdy, 0.0)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
tl.store(DX + cols, dx, mask=mask)
partial_dw = (dy * xhat).to(w.dtype)
partial_db = dy.to(w.dtype)
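    # Partial dW/dB are accumulated into one of GROUP_SIZE_M shared buffers,
    # serialized by a per-buffer spin lock; Count records whether the buffer
    # has been written before (first writer stores, later writers add).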
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
tl.atomic_xchg(Lock, 0)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Normalization",
"Backpropagation"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/05-layer-norm.py |
5d85d5a2-0693-4087-ad1a-4430132d743a | gemm_splitk_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_splitk_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_M': 256, 'BLOCK_N': 256,
'BLOCK_K': 32, 'GROUP_M': 4, 'SPLIT_K': 4, 'grf_mode': 'large'},
num_stages=4, num_warps=32)], key=['M', 'N', 'K'])
@triton.jit
def _kernel(A, B, C, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
stride_am: tl.constexpr, stride_ak: tl.constexpr, stride_bk: tl.
constexpr, stride_bn: tl.constexpr, stride_cm: tl.constexpr, stride_cn:
tl.constexpr, acc_dtype: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N:
tl.constexpr, BLOCK_K: tl.constexpr, GROUP_M: tl.constexpr, SPLIT_K: tl
.constexpr):
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = tl.cdiv(M, BLOCK_M)
grid_n = tl.cdiv(N, BLOCK_N)
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + pid % group_size
pid_n = pid % width // group_size
a_block_ptr = tl.make_block_ptr(base=A, shape=(M, K), strides=(
stride_am, stride_ak), offsets=(pid_m * BLOCK_M, pid_z * BLOCK_K),
block_shape=(BLOCK_M, BLOCK_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=B, shape=(K, N), strides=(
stride_bk, stride_bn), offsets=(pid_z * BLOCK_K, pid_n * BLOCK_N),
block_shape=(BLOCK_K, BLOCK_N), order=(1, 0))
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=acc_dtype)
for _ in range(0, K, BLOCK_K * SPLIT_K):
a = tl.load(a_block_ptr)
b = tl.load(b_block_ptr)
acc += tl.dot(a, b, out_dtype=acc_dtype)
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_K * SPLIT_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_K * SPLIT_K, 0))
acc = acc.to(C.dtype.element_ty)
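    # SPLIT_K == 1 writes each tile exactly once through a block pointer;
    # otherwise every K-split holds a partial product that is reduced into C
    # with masked atomic adds.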
if SPLIT_K == 1:
c_block_ptr = tl.make_block_ptr(base=C, shape=(M, N), strides=(
stride_cm, stride_cn), offsets=(pid_m * BLOCK_M, pid_n *
BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0))
tl.store(c_block_ptr, acc, boundary_check=(0, 1))
else:
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.atomic_add(C, acc, mask=mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled",
"Coalesced"
],
"Parallelization Strategy": [
"Persistent Kernels"
],
"Performance Objective": [
"High Throughput",
"Memory-Bound"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_splitk_benchmark.py |
ff71555a-07ff-4b43-bde0-81003adfcfab | flash_attention.py | falkaer/multi-scale-music | seq/flash_attention.py | a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d | 0 | @triton.jit
def causal_mask(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.
constexpr):
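    # Bottom-right-aligned causal mask: with shift = N - M, query row m may
    # attend to key column n only when n <= m + shift; disallowed positions
    # get -inf.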
shift = N - M
mask = shift + offs_m[:, None] >= offs_n[None, :]
if not EVEN_M & EVEN_N:
mask = mask & make_bounds(offs_m, offs_n, M, N, EVEN_M, EVEN_N)
return tl.where(mask, 0, float('-inf'))
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Latency Sensitive"
]
} | [
"MIT"
] | https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py |
658e3aea-d371-4a6d-8906-df02e8f90b5c | blocksparse_attention_kernel.py | Charlie-XIAO/sparse-vllm | vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py | d228909a30b0c245c35417fb7d2acdf9a3690042 | 0 | @triton.jit
def _fwd_kernel_inner(acc, l_i, m_i, q, Q, k_block_col_idx, layout_col_ptr,
layout_col_stride_h, layout_col_stride_m, k_ptrs, v_ptrs, off_h, offs_m,
offs_n, offs_d, stride_kt, stride_vt, sm_scale, k_seqlen, past_len,
LAST_K_BLOCK: tl.constexpr, BLOCK_M_LOADING: tl.constexpr, BLOCK_N: tl.
constexpr, D_HEAD: tl.constexpr, EVEN_D: tl.constexpr, M_LT_N: tl.constexpr
):
k_block_id = tl.load(layout_col_ptr + off_h * layout_col_stride_h +
k_block_col_idx * layout_col_stride_m).to(tl.int32)
start_n = k_block_id * BLOCK_N
if LAST_K_BLOCK:
if EVEN_D:
k = tl.load(k_ptrs + start_n * stride_kt, mask=offs_n[None, :] +
start_n < k_seqlen)
else:
k = tl.load(k_ptrs + start_n * stride_kt, mask=(offs_n[None, :] +
start_n < k_seqlen) & (offs_d[:, None] < D_HEAD))
elif EVEN_D:
k = tl.load(k_ptrs + start_n * stride_kt)
else:
k = tl.load(k_ptrs + start_n * stride_kt, mask=offs_d[:, None] < D_HEAD
)
qk = tl.zeros([BLOCK_M_LOADING, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
qk *= sm_scale
if LAST_K_BLOCK | M_LT_N:
qk += tl.where(offs_m[:, None] + past_len >= start_n + offs_n[None,
:], 0, float('-inf'))
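    # Streaming-softmax update in base 2: the running max, accumulator, and
    # normalizer are rescaled by exp2(m_old - m_new) before this block is
    # folded in (log2(e) is presumably folded into sm_scale by the caller).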
m_ij = tl.maximum(m_i, tl.max(qk, 1))
p = tl.math.exp2(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
m_i = m_ij
l_i = l_i * alpha + l_ij
p = p.to(Q.dtype.element_ty)
if LAST_K_BLOCK:
if EVEN_D:
v = tl.load(v_ptrs + start_n * stride_vt, mask=offs_n[:, None] +
start_n < k_seqlen)
else:
v = tl.load(v_ptrs + start_n * stride_vt, mask=(offs_n[:, None] +
start_n < k_seqlen) & (offs_d[None, :] < D_HEAD))
elif EVEN_D:
v = tl.load(v_ptrs + start_n * stride_vt)
else:
v = tl.load(v_ptrs + start_n * stride_vt, mask=offs_d[None, :] < D_HEAD
)
acc += tl.dot(p, v)
return acc, l_i, m_i
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Latency Sensitive"
]
} | [
"Apache"
] | https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py |
ca7c4674-22d1-465d-8d7e-f908478548b8 | sparse_copy.py | ServiceNow/Fast-LLM | fast_llm/functional/triton/sparse_copy.py | 8b46289079da67cba99628448a6b6083dac083cf | 0 | @triton.jit
def sparse_map_kernel(top_experts_ptr, expert_ends_ptr,
expert_pad_begins_ptr, sparse_rows_ptr, num_sparse_rows: tl.constexpr,
num_experts: tl.constexpr, pad_to_multiple: tl.constexpr, block_size:
tl.constexpr, block_size_expert: tl.constexpr, dtype: tl.constexpr):
"""
Since the methods we want (histogram, argsort) are not readily available in triton,
we use a one-hot representation to get the quantities we want.
TODO: Next triton release will support tl.histogram, maybe argsort.
"""
block_range = tl.arange(0, block_size)
expert_range = tl.arange(0, block_size_expert)
expert_mask = (None if block_size_expert == num_experts else
expert_range < num_experts)
if num_sparse_rows >= block_size:
expert_index = tl.load(top_experts_ptr + block_range)
else:
expert_index = tl.load(top_experts_ptr + block_range, mask=
block_range < num_sparse_rows, other=num_experts)
expert_counts = tl.sum((expert_index[:, None] == expert_range[None, :])
.to(dtype), 0)
for i in range(1, tl.cdiv(num_sparse_rows, block_size)):
block_range += block_size
if num_sparse_rows % block_size == 0:
expert_index = tl.load(top_experts_ptr + block_range)
else:
expert_index = tl.load(top_experts_ptr + block_range, mask=
block_range < num_sparse_rows, other=num_experts)
expert_counts += tl.sum((expert_index[:, None] == expert_range[None,
:]).to(dtype), 0)
if pad_to_multiple is None:
expert_counts_padded = expert_counts
else:
expert_counts_padded = (expert_counts + pad_to_multiple - 1
) // pad_to_multiple * pad_to_multiple
expert_ends = tl.cumsum(expert_counts_padded)
expert_begins = expert_ends - expert_counts_padded
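    # expert_begins/expert_ends bound each expert's padded slot in the
    # flattened row order; expert_pad_begins (stored below) marks where real
    # rows end and padding starts inside each slot.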
if expert_ends_ptr is not None:
tl.store(expert_ends_ptr + expert_range, expert_ends, mask=expert_mask)
if expert_pad_begins_ptr is not None:
tl.store(expert_pad_begins_ptr + expert_range, expert_begins +
expert_counts, mask=expert_mask)
if sparse_rows_ptr is not None:
block_range = tl.arange(0, block_size)
for i in range(tl.cdiv(num_sparse_rows, block_size)):
if num_sparse_rows % block_size == 0:
mask = None
expert_index = tl.load(top_experts_ptr + block_range)
else:
mask = block_range < num_sparse_rows
expert_index = tl.load(top_experts_ptr + block_range, mask=
mask, other=num_experts)
expert_one_hot = (expert_index[:, None] == expert_range[None, :]
).to(dtype)
expert_offsets = (tl.cumsum(expert_one_hot, 0) + expert_begins[
None, :]) * expert_one_hot
tl.store(sparse_rows_ptr + block_range, tl.sum(expert_offsets,
1) - 1, mask=mask)
expert_begins += tl.sum(expert_one_hot, 0)
block_range += block_size
| {
"Data Type": [],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound"
]
} | [
"Apache"
] | https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_copy.py |
8a2be484-de09-45f5-adcd-8c5f11a156e6 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES, BLOCK_SIZES, NUM_WARPS, NUM_STAGES)],
key=['M'])
@triton.jit
def triton_jagged_sum_kernel_simple_fused_buffer_then_sum(input_ptr_values,
input_ptr_offsets, output_ptr, M, MAX_SEQLEN, BLOCK_SIZE_RAGGED: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr):
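    # Each program owns one (ragged batch, M-block) pair: it accumulates
    # BLOCK_SIZE_RAGGED rows at a time into a 2-D buffer and reduces the
    # buffer to a single row of column sums only once, at the end.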
pid = tl.program_id(axis=0)
pid_ragged = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_ragged
), tl.load(input_ptr_offsets + (pid_ragged + 1))
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
buffer += tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer_sum = tl.sum(buffer, axis=0)
buffer_view = buffer_sum.reshape((BLOCK_SIZE_M,))
output_offsets = offsets_m + pid_ragged * M
output_mask = output_offsets < M * (pid_ragged + 1)
tl.store(output_ptr + output_offsets, buffer_view, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_sum/kernels.py |
942e12b2-81c0-40e4-966c-b945f0b6e59f | parallel_scan.py | chengkai-liu/RecBLR | parallel_scan.py | 66e520c26e28c05a5425ba2e81c9169b7e0176e2 | 0 | @triton.jit
def forward_scan(gates, tokens, outputs, SEQUENCE_LENGTH: tl.constexpr):
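    # Parallel (associative) scan over one sequence; assuming the externally
    # defined pack64/unpack64/first_order_op helpers implement a first-order
    # combine, this evaluates the recurrence h_t = gate_t * h_{t-1} + token_t.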
sequence_id = tl.num_programs(axis=1) * tl.program_id(axis=0
) + tl.program_id(axis=1)
strides = tl.arange(0, SEQUENCE_LENGTH) + sequence_id * SEQUENCE_LENGTH
tokens_ = tl.load(tokens + strides)
gates_ = tl.load(gates + strides)
tuples = pack64(tokens_, gates_)
output_tuples_ = tl.associative_scan(tuples, axis=0, combine_fn=
first_order_op)
output_tokens_, output_gates_ = unpack64(output_tuples_)
tl.store(outputs + strides, output_tokens_)
| {
"Data Type": [],
"Functionality": [
"Recurrent Neural Networks"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"Low Latency"
]
} | [
"MIT"
] | https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py |
7d8a010b-7cc8-4d82-8e78-3e637e19e741 | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/jagged_mean/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r,
'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in
itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS,
NUM_STAGES)], key=['M'])
@triton.jit
def triton_jagged_mean_kernel_simple_fused_sum_then_buffer(input_ptr_values,
input_ptr_offsets, output_ptr, M, MAX_SEQLEN, BLOCK_SIZE_RAGGED: tl.
constexpr, BLOCK_SIZE_M: tl.constexpr):
pid = tl.program_id(axis=0)
pid_b = pid // tl.cdiv(M, BLOCK_SIZE_M)
pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M)
buffer = tl.zeros((1, BLOCK_SIZE_M), dtype=tl.float32)
block_start_m = pid_m * BLOCK_SIZE_M
offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M)
mask_m = offsets_m < M
ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_b), tl.load(
input_ptr_offsets + (pid_b + 1))
ragged_len = ragged_end - ragged_start
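    # The mean divides the accumulated column sums by this sequence's true
    # (unpadded) length, not by MAX_SEQLEN.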
for block_pos in range(0, MAX_SEQLEN, BLOCK_SIZE_RAGGED):
block_start_ragged = ragged_start + block_pos
offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED)
mask_ragged = offsets_ragged < ragged_end
idxs = offsets_ragged[:, None] * M + offsets_m
mask = mask_ragged[:, None] & mask_m
input = tl.load(input_ptr_values + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_M,))
buffer_view_mean = buffer_view * (1 / ragged_len)
output_offsets = offsets_m + pid_b * M
output_mask = output_offsets < M * (pid_b + 1)
tl.store(output_ptr + output_offsets, buffer_view_mean, mask=output_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py |
fbeec137-387e-4b5f-a767-2a50d629e0ce | bwd_inner_dk_dv.py | ROCm/aotriton | tritonsrc/bwd_inner_dk_dv.py | 016f733e8ff746450e066f78bed68709ccd93e60 | 0 | @triton.jit
def bwd_inner_dk_dv(dk, dv, qk_scale, bias_scale, q_ptrs, q_stride, kt, vt,
B_block_ptr, do_ptrs, do_stride, l_ptrs, D_ptrs, seqlen_q, seqlen_k,
head_dim, start_k, lo, hi, overflow_size, dropout_p, dropout_scale,
philox_seed, batch_philox_offset, max_seqlen_k, BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, FULL_BLOCKS: tl.
constexpr, CAUSAL: tl.constexpr, ENABLE_DROPOUT: tl.constexpr,
PADDED_HEAD: tl.constexpr, BIAS_TYPE: tl.constexpr):
offs_k = start_k + tl.arange(0, BLOCK_N)
offs_q = tl.arange(0, BLOCK_M)
ld_offs_d = None if not PADDED_HEAD else tl.arange(0, BLOCK_DMODEL)
q_ptrs += lo * q_stride
do_ptrs += lo * do_stride
if BIAS_TYPE == 1:
B_block_ptr = tl.advance(B_block_ptr, (lo, 0))
"""
K1 K2 (d)V dO
Q1 qk11 qk12 (d)v1 dO1
Q2 qk21 qk22 (d)v2 dO2
QK: (seqlen_q, seqlen_k)
dO: (seqlen_q, hdim)
dV: (seqlen_k, hdim)
dV = (QK)^T dO
dV1 = qk11 dO1 + qk21 dO2 = q1 k1 dO1 + q2 k1 dO2
dV2 = qk12 dO1 + qk22 dO2 = q1 k2 dO1 + q2 k2 dO2
~~~~~ = 0
start_k: select k and dV
start_q: select q and dO
"""
for start_q in range(lo, hi, BLOCK_M):
offs_q_curr = offs_q[:, None] + start_q
if not FULL_BLOCKS:
q = load_fn(q_ptrs, offs_q + start_q, ld_offs_d, seqlen_q, head_dim
)
else:
q = load_fn(q_ptrs, None, ld_offs_d, seqlen_q, head_dim)
if not FULL_BLOCKS:
do = load_fn(do_ptrs, offs_q + start_q, ld_offs_d, seqlen_q,
head_dim)
else:
do = load_fn(do_ptrs, None, ld_offs_d, seqlen_q, head_dim)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if not FULL_BLOCKS:
if overflow_size > 0:
boundary_n = tl.full((BLOCK_N,), seqlen_q, dtype=tl.int32)
mask = offs_q_curr < boundary_n[None, :]
qk = tl.where(mask, qk, float('-inf'))
if CAUSAL:
qk = tl.where(offs_q_curr >= offs_k[None, :], qk, float('-inf'))
if BIAS_TYPE == 0:
pass
elif BIAS_TYPE == 1:
bias = tl.load(B_block_ptr, boundary_check=(0, 1),
padding_option='zero')
qk += bias * bias_scale
else:
tl.static_assert(False, f'Unsupported BIAS_TYPE {BIAS_TYPE}')
qk += dot(BLOCK_M, BLOCK_DMODEL, BLOCK_DMODEL, q, kt)
if FULL_BLOCKS:
Di = tl.load(D_ptrs + offs_q_curr)
l_i = tl.load(l_ptrs + offs_q_curr)
else:
boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=
tl.int32)
d_lse_ptrs_mask = boundary > tl.arange(0, BLOCK_M)
d_lse_padding = tl.full((BLOCK_M,), 0, dtype=tl.float32)
Di = tl.load(D_ptrs + offs_q_curr, mask=d_lse_ptrs_mask[:, None
], other=d_lse_padding[:, None])
l_i = tl.load(l_ptrs + offs_q_curr, mask=d_lse_ptrs_mask[:,
None], other=d_lse_padding[:, None])
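        # Recover the attention probabilities from the stored log-sum-exp in
        # base-2 form: p = exp2(qk_scale * qk - l_i).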
p = tl.math.exp2(qk_scale * qk - l_i)
if not FULL_BLOCKS or CAUSAL:
if qk_scale == 0.0:
p = tl.where(libdevice.isnan(p), 0.0, p)
if ENABLE_DROPOUT:
philox_offset = (batch_philox_offset + start_q * max_seqlen_k +
start_k)
keep = dropout_mask(philox_seed, philox_offset, dropout_p,
BLOCK_M, BLOCK_N, max_seqlen_k)
if BLOCK_M == 1:
dv += tl.where(keep, p * dropout_scale, 0.0).to(q_ptrs.
dtype.element_ty) * do
else:
dv += tl.dot(tl.trans(tl.where(keep, p * dropout_scale, 0.0
)).to(q_ptrs.dtype.element_ty), do)
elif BLOCK_M == 1:
dv += p.to(q_ptrs.dtype.element_ty) * do
else:
dv += tl.dot(tl.trans(p.to(do.dtype)), do)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
dp += tl.dot(do, vt)
if ENABLE_DROPOUT:
dp = tl.where(keep, dp * dropout_scale, 0)
ds = p * (dp - Di)
if BLOCK_M == 1:
dk += ds.to(q_ptrs.dtype.element_ty) * q
else:
dk += tl.dot(tl.trans(ds.to(q_ptrs.dtype.element_ty)), q)
q_ptrs += q_stride * BLOCK_M
do_ptrs += do_stride * BLOCK_M
if BIAS_TYPE == 1:
B_block_ptr = tl.advance(B_block_ptr, (BLOCK_M, 0))
return dk, dv
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_inner_dk_dv.py |
b20ebc56-6a89-4549-b7b7-605c742067fe | flash_attention.py | drisspg/transformer_nuggets | transformer_nuggets/flash/flash_attention.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def masked_row(rows):
"""rows is BLOCK_M slice of the QK score
Returns:
BLOCK_M vector of boolean values indicating whether this
Query x Key position is fully masked
"""
return rows == float('-inf')
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": []
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/flash_attention.py |
3536e65e-9891-4926-b200-b5960ac48099 | triton_attention.py | pytorch-labs/tritonbench | tritonbench/operators/template_attention/triton_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64,
'BLOCK_DMODEL': 64}, num_stages=3, num_warps=4)], key=['num_queries'])
@triton.jit
def triton_tem_fused_no_exp2(arg_Q, arg_K, arg_V, out_ptr0, num_queries: tl
.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_DMODEL:
tl.constexpr):
Q = arg_Q
K = arg_K
V = arg_V
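    # Template-specialized kernel: the strides, batch/head counts (Z = 16,
    # H = 16), context length (N_CTX = 4096), and qk_scale are baked in as
    # constants instead of being passed as arguments.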
stride_qz = 4194304
stride_qh = 262144
stride_qm = 64
stride_qk = 1
stride_kz = 4194304
stride_kh = 262144
stride_kn = 64
stride_kk = 1
stride_vz = 4194304
stride_vh = 262144
stride_vk = 64
stride_vn = 1
Z = 16
H = 16
N_CTX = 4096
qk_scale = 1.0
MATMUL_PRECISION = tl.float16
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
qkv_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(base=Q + qkv_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m *
BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0))
K_block_ptr = tl.make_block_ptr(base=K + qkv_offset, shape=(
BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0
), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1))
V_block_ptr = tl.make_block_ptr(base=V + qkv_offset, shape=(N_CTX,
BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0))
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf')
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(MATMUL_PRECISION)
lo = 0
hi = N_CTX
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k.to(MATMUL_PRECISION))
tmp0 = tl.full([1], 1024, tl.int64)
tmp1 = offs_m[:, None] <= tmp0
tmp2 = start_n + offs_n[None, :] <= tmp0
tmp3 = tmp1 & tmp2
tmp4 = offs_m[:, None] >= start_n + offs_n[None, :]
tmp5 = tmp3 | tmp4
tmp6 = float('-inf')
tmp7 = tmp6.to(tl.float32)
tmp8 = tl.where(tmp5, qk, tmp7)
qk = tmp8
row_max = tl.max(qk, 1)
m_i_new = tl.maximum(m_i, row_max)
masked_out_rows = m_i_new == float('-inf')
alpha = tl.math.exp(m_i - m_i_new)
alpha = tl.where(masked_out_rows, 0, alpha)
p = tl.math.exp(qk - m_i_new[:, None])
p = tl.where(masked_out_rows[:, None], 0, p)
acc_scale = l_i * 0 + alpha
acc *= acc_scale[:, None]
acc += tl.dot(p.to(MATMUL_PRECISION), v.to(MATMUL_PRECISION))
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
acc = acc / l_i[:, None]
idx_z = tl.program_id(1) // H
idx_h = tl.program_id(1) % H
idx_m = offs_m[:, None]
idx_d = tl.arange(0, BLOCK_DMODEL)[None, :]
mask = (idx_m != -1) & (idx_d != -1)
xindex = idx_d + 64 * idx_m + 262144 * idx_h + 4194304 * idx_z
tl.store(out_ptr0 + xindex, acc, None)
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Attention Mechanisms",
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Thread-Block Mappings"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/template_attention/triton_attention.py |
2b4627a5-61b5-4d4b-aacb-1b0565738910 | masks.py | drisspg/transformer_nuggets | transformer_nuggets/flash/masks.py | a4c66bbeebaa479ad8b6ed82d7efbafa41b17260 | 0 | @triton.jit
def alibi_attention_triton(score, batch, head, seq_len_q, seq_len_kv, num_heads
):
alibi_scale = tl.math.exp2(-((head + 1) * 8.0 / num_heads))
bias = seq_len_kv - seq_len_q
score = score + alibi_scale * bias
return score
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py |
6b120d1b-0950-4333-ad5e-5a0ff072d2b4 | triton_fused_attention.py | pytorch-labs/tritonbench | tritonbench/kernels/triton_fused_attention.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(list(filter(keep, configsTma)), key=['N_CTX'])
@triton.jit
def _attn_fwd_tma(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o,
stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh,
stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.
constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl.
constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr,
ENABLE_WS: tl.constexpr):
tl.static_assert(BLOCK_N <= HEAD_DIM)
pid = tl.program_id(0)
off_hz = tl.program_id(1)
_attn_fwd_compute(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v,
desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz,
stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk,
stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid,
Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA,
LOOP_SCHEDULE)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Attention Mechanisms"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Cooperative Groups"
],
"Performance Objective": [
"High Throughput"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py |
ffd4e13b-2976-4dd9-bf5e-79d064300709 | 08-grouped-gemm.py | triton-lang/triton | python/tutorials/08-grouped-gemm.py | a2b398e0bb1b120f31cf386d6ae3261c3ab84207 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128,
'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'NUM_SM': 84}), triton.Config(
{'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'NUM_SM':
128}), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32, 'NUM_SM': 84}), triton.Config({'BLOCK_SIZE_M': 64,
'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'NUM_SM': 128})], key=[
'group_size'])
@triton.jit
def grouped_matmul_kernel(group_a_ptrs, group_b_ptrs, group_c_ptrs,
group_gemm_sizes, g_lds, group_size, NUM_SM: tl.constexpr, BLOCK_SIZE_M:
tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr):
tile_idx = tl.program_id(0)
last_problem_end = 0
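    # Persistent-kernel scheduling: each program sweeps the concatenated tile
    # space of every GEMM in the group, advancing by NUM_SM tiles per step
    # until its share of tiles is exhausted.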
for g in range(group_size):
gm = tl.load(group_gemm_sizes + g * 3)
gn = tl.load(group_gemm_sizes + g * 3 + 1)
gk = tl.load(group_gemm_sizes + g * 3 + 2)
num_m_tiles = tl.cdiv(gm, BLOCK_SIZE_M)
num_n_tiles = tl.cdiv(gn, BLOCK_SIZE_N)
num_tiles = num_m_tiles * num_n_tiles
while (tile_idx >= last_problem_end and tile_idx < last_problem_end +
num_tiles):
k = gk
lda = tl.load(g_lds + g * 3)
ldb = tl.load(g_lds + g * 3 + 1)
ldc = tl.load(g_lds + g * 3 + 2)
a_ptr = tl.load(group_a_ptrs + g).to(tl.pointer_type(tl.float16))
b_ptr = tl.load(group_b_ptrs + g).to(tl.pointer_type(tl.float16))
c_ptr = tl.load(group_c_ptrs + g).to(tl.pointer_type(tl.float16))
tile_idx_in_gemm = tile_idx - last_problem_end
tile_m_idx = tile_idx_in_gemm // num_n_tiles
tile_n_idx = tile_idx_in_gemm % num_n_tiles
offs_am = tile_m_idx * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = tile_n_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + offs_am[:, None] * lda + offs_k[None, :]
b_ptrs = b_ptr + offs_k[:, None] * ldb + offs_bn[None, :]
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.
float32)
for kk in range(0, tl.cdiv(k, BLOCK_SIZE_K)):
tl.multiple_of(a_ptrs, [16, 16])
tl.multiple_of(b_ptrs, [16, 16])
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K
b_ptrs += BLOCK_SIZE_K * ldb
c = accumulator.to(tl.float16)
offs_cm = tile_m_idx * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = tile_n_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + ldc * offs_cm[:, None] + offs_cn[None, :]
tl.store(c_ptrs, c)
tile_idx += NUM_SM
last_problem_end = last_problem_end + num_tiles
| {
"Data Type": [
"fp16"
],
"Functionality": [
"Matrix Multiplication"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"High Throughput",
"Compute Bound"
]
} | [
"MIT"
] | https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/08-grouped-gemm.py |
ea89f9e0-302f-4fd2-a737-0aff546ce52f | kernels.py | pytorch-labs/tritonbench | tritonbench/operators/sum/kernels.py | 3a5dccb159834968567a2e45e561dc1aeaa8f8a8 | 0 | @triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': b_n,
'BLOCK_SIZE_K': b_k}, num_warps=w) for b_n, b_k, w in itertools.product
([(4 ** n) for n in range(6)], [(4 ** n) for n in range(4)], [2, 4, 8])
], key=['N'])
@triton.jit
def triton_sum_kernel_2D_result_dim_1_sum_then_buffer(input_ptr, output_ptr,
M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, BLOCK_SIZE_N: tl.
constexpr, BLOCK_SIZE_K: tl.constexpr):
"""
Modification to triton_sum_kernel_2D_result_dim_1() which uses a buffer to store intermediate results,
enabling reducing over a large middle dimension for 3D input tensors
"""
pid = tl.program_id(axis=0)
pid_m = pid // tl.cdiv(K, BLOCK_SIZE_K)
pid_k = pid % tl.cdiv(K, BLOCK_SIZE_K)
buffer = tl.zeros((1, BLOCK_SIZE_K), dtype=tl.float32)
block_start_k = pid_k * BLOCK_SIZE_K
offsets_k = block_start_k + tl.arange(0, BLOCK_SIZE_K)
mask_k = offsets_k < K
for block_start_n in range(0, N, BLOCK_SIZE_N):
offsets_n = block_start_n + tl.arange(0, BLOCK_SIZE_N)
mask_n = offsets_n < N
idxs_base = offsets_n[:, None] * K + offsets_k
idxs = idxs_base + pid_m * N * K
mask = mask_n[:, None] & mask_k
input = tl.load(input_ptr + idxs, mask=mask, other=0)
buffer += tl.sum(input, axis=0)
buffer_view = buffer.reshape((BLOCK_SIZE_K,))
output_offsets = pid_m * K + offsets_k
tl.store(output_ptr + output_offsets, buffer_view, mask=mask_k)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Elementwise Operations"
],
"Memory Access Pattern": [
"Tiled"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Memory-Bound",
"Batch-Oriented"
]
} | [
"BSD"
] | https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py |
0d8babf1-950f-4d42-b5a5-310a8413e616 | fused_moe_fp16.py | AlibabaPAI/FLASHNN | flashnn/triton_kernels/fused_moe_fp16.py | 528a9301587f5fb135b25d973a87ba0a40a703a7 | 0 | @triton.jit
def _fused_moe_kernel(A, B, C, topk_weights_ptr, sorted_token_ids_ptr,
expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens,
stride_am, stride_ak, stride_be, stride_bn, stride_bk, stride_cm,
stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr,
MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr):
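    # Fused MoE GEMM: tokens arrive pre-sorted (and padded) by expert id, so
    # each M-block multiplies against a single expert's weights;
    # offs_token // top_k maps duplicated (token, expert) rows back to the
    # source activation row.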
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + pid % num_pid_in_group % group_size_m
pid_n = pid % num_pid_in_group // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_token = tl.load(sorted_token_ids_ptr + offs_token_id)
token_mask = offs_token < num_valid_tokens
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = A + (offs_token[:, None] // top_k * stride_am + offs_k[None, :
] * stride_ak)
off_experts = tl.load(expert_ids_ptr + pid_m)
b_ptrs = B + off_experts * stride_be + (offs_k[:, None] * stride_bk +
offs_bn[None, :] * stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
_A0 = tl.zeros([1, 1], dtype=a_ptrs.dtype.element_ty)
_B0 = tl.zeros([1, 1], dtype=b_ptrs.dtype.element_ty)
for k in range(tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K -
k * BLOCK_SIZE_K), other=_A0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K,
other=_B0)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask,
other=0)
accumulator = accumulator * moe_weight[:, None]
accumulator = accumulator.to(A.dtype.element_ty)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = C + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
tl.store(c_ptrs, accumulator, mask=c_mask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Matrix Multiplication",
"Top-K Selection"
],
"Memory Access Pattern": [
"Blocked Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput",
"Batch-Oriented"
]
} | [
"Apache"
] | https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_fp16.py |
90a00adc-8b7b-4112-b132-f300bfc18a2a | rwkv_vanilla.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/rwkv_vanilla.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def wkv_triton_vanilla_backward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr,
k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b,
state_s_ab, state_s_t, state_s_c, gwkv_ptr, gwkv_s_b, gwkv_s_t,
gwkv_s_c, gstate_out_ptr, gstate_out_s_b, gstate_out_s_ab,
gstate_out_s_c, gw_ptr, gw_s_c, gu_ptr, gu_s_c, gk_ptr, gk_s_b, gk_s_t,
gk_s_c, gv_ptr, gv_s_b, gv_s_t, gv_s_c, gstate_ptr, gstate_s_b,
gstate_s_ab, gstate_s_c, tsz, chans, BLOCK_SIZE_C: tl.constexpr):
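    # Reverse-time scan: replays the WKV recurrence backwards, writing
    # per-step dk/dv, carrying the gradients of the (alpha, beta) state, and
    # atomically accumulating the shared dw/du across the batch.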
b_idx = tl.program_id(0)
c_idx = tl.program_id(1)
cs = c_idx * BLOCK_SIZE_C + tl.arange(0, BLOCK_SIZE_C)
cmask = cs < chans
k_ptr = k_ptr + b_idx * k_s_b
v_ptr = v_ptr + b_idx * v_s_b
alpha_ptr = state_ptr + b_idx * state_s_b
beta_ptr = state_ptr + b_idx * state_s_b + state_s_ab
gk_ptr = gk_ptr + b_idx * gk_s_b
gv_ptr = gv_ptr + b_idx * gv_s_b
gwkv_ptr = gwkv_ptr + b_idx * gwkv_s_b
galpha_out_ptr = gstate_out_ptr + b_idx * gstate_out_s_b
gbeta_out_ptr = gstate_out_ptr + b_idx * gstate_out_s_b + gstate_out_s_ab
galpha = tl.load(galpha_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl
.float32)
gbeta = tl.load(gbeta_out_ptr + gstate_out_s_c * cs, mask=cmask).to(tl.
float32)
w = tl.load(w_ptr + w_s_c * cs, mask=cmask).to(tl.float32)
u = tl.load(u_ptr + u_s_c * cs, mask=cmask).to(tl.float32)
ew = tl.exp(w)
gw = tl.zeros_like(w)
gu = tl.zeros_like(u)
for t in range(tsz):
tc = tsz - t - 1
kt = tl.load(k_ptr + tc * k_s_t + k_s_c * cs, mask=cmask).to(tl.float32
)
vt = tl.load(v_ptr + tc * v_s_t + v_s_c * cs, mask=cmask).to(tl.float32
)
alpha_prev = tl.load(alpha_ptr + tc * state_s_t + state_s_c * cs,
mask=cmask).to(tl.float32)
beta_prev = tl.load(beta_ptr + tc * state_s_t + state_s_c * cs,
mask=cmask).to(tl.float32)
euk = tl.exp(u + kt)
ek = tl.exp(kt)
denom = beta_prev + euk
denom_sq = denom * denom
gwkvt = tl.load(gwkv_ptr + tc * gwkv_s_t + gwkv_s_c * cs, mask=cmask
).to(tl.float32)
guk = gwkvt * euk * (beta_prev * vt - alpha_prev) / denom_sq
gu += guk
gk = guk
gv = gwkvt * euk / denom
galpha_wkv = gwkvt / denom
gbeta_wkv = -gwkvt * (euk * vt + alpha_prev) / denom_sq
gw += galpha * ew * alpha_prev
gk += galpha * ek * vt
gv += galpha * ek
gw += gbeta * ew * beta_prev
gk += gbeta * ek
tl.store(gk_ptr + tc * gk_s_t + gk_s_c * cs, gk, mask=cmask)
tl.store(gv_ptr + tc * gv_s_t + gv_s_c * cs, gv, mask=cmask)
galpha = galpha * ew + galpha_wkv
gbeta = gbeta * ew + gbeta_wkv
galpha_ptr = gstate_ptr + b_idx * gstate_s_b
gbeta_ptr = gstate_ptr + b_idx * gstate_s_b + gstate_s_ab
tl.store(galpha_ptr + gstate_s_c * cs, galpha, mask=cmask)
tl.store(gbeta_ptr + gstate_s_c * cs, gbeta, mask=cmask)
tl.atomic_add(gw_ptr + gw_s_c * cs, gw, mask=cmask)
tl.atomic_add(gu_ptr + gu_s_c * cs, gu, mask=cmask)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Batch-Oriented"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_vanilla.py |
52b81982-1106-4fa0-9262-3ccc64858017 | gemm_postop_gelu_benchmark.py | intel/intel-xpu-backend-for-triton | benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py | 6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2 | 0 | @triton.jit
def tanh(x):
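    # tanh written in terms of the available sigmoid: tanh(x) = 2*sigmoid(2x) - 1.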
return 2 * tl.sigmoid(2 * x) - 1
| {
"Data Type": [],
"Functionality": [
"Activation Functions",
"Elementwise Operations"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [],
"Performance Objective": [
"Low Latency",
"Single Instance"
]
} | [
"MIT"
] | https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py |
a830e503-24c8-412e-a532-a0dc3f91630f | k_softmax_dropout.py | kimiasa/Experiments | src/ops/triton/k_softmax_dropout.py | c4e73bfefd8290695ec52b6386b6b81838ca94a1 | 0 | @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({},
num_warps=2), triton.Config({}, num_warps=4), triton.Config({},
num_warps=8), triton.Config({}, num_warps=16), triton.Config({},
num_warps=32)], key=['K'])
@triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])})
@triton.heuristics({'IS_FP16': lambda nargs: nargs['GradIn'].dtype == torch
.float16})
@triton.jit
def _softmax_dropout_backward(GradIn, GradOut, Out, DropoutMask,
dropout_prob, stride_bm, stride_bn, stride_gm, stride_gn, stride_om,
stride_on, stride_mm, stride_mn, K, CAUSAL: tl.constexpr, DEPTH: tl.
constexpr, IS_FP16: tl.constexpr):
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
k = tl.arange(0, DEPTH)
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
dropout_mask_ptrs = DropoutMask + m * stride_mm + n * stride_mn + k
io_mask = k < K
if CAUSAL:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0))
o = tl.load(out_ptrs, mask=io_mask, other=float(0))
zero = float(0)
zero = zero.to(g.dtype)
if CAUSAL:
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
dropout_mask = tl.load(dropout_mask_ptrs, mask=io_mask, other=float(0))
g = tl.where(dropout_mask != 0, g / (1 - dropout_prob), zero)
s = tl.sum(g * o, 0)
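    # Softmax backward (vector-Jacobian product): grad_in = y * (g - sum(g*y)),
    # where g has already been rescaled by the inverted-dropout factor above.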
grad_in = o * (g - s)
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K)
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Softmax"
],
"Memory Access Pattern": [
"Strided Access"
],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"High Throughput"
]
} | [
"Apache"
] | https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/ops/triton/k_softmax_dropout.py |
bef108ba-facf-4f6f-b328-e31add9c646e | real_rnn_tie_input_gate.py | berlino/seq_icl | src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py | 9b9223d15348b5a415fb453ed988ed5f7ab9fbdc | 0 | @triton.jit
def bwd_sequential_scan(grad_output, v, f, h, B, L, C, BLOCK_M: tl.constexpr):
offset_b = tl.program_id(0)
if offset_b >= B:
return
offset_n = tl.program_id(1)
ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + (L - 1
) * C + offset_n * BLOCK_M
grad_h = tl.zeros([BLOCK_M], dtype=tl.float32)
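    # Backprop through the recurrence h_t = f_t * h_{t-1} + (1 - f_t) * v_t,
    # walking the sequence in reverse; v and f are overwritten in place with
    # their gradients.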
for time_step in range(L - 1, -1, -1):
grad = tl.load(grad_output + ptr).to(tl.float32)
grad_h += grad
decay = tl.load(f + ptr).to(tl.float32)
input = tl.load(v + ptr).to(tl.float32)
grad_v = (1 - decay) * grad_h
tl.store(v + ptr, grad_v.to(v.dtype.element_ty))
hidden_state = tl.load(h + ptr - C, mask=ptr >= offset_b * L * C +
C, other=0.0).to(tl.float32)
grad_f = grad_h * (hidden_state - input)
tl.store(f + ptr, grad_f.to(f.dtype.element_ty))
grad_h *= decay
ptr -= C
| {
"Data Type": [
"fp32"
],
"Functionality": [
"Backpropagation",
"Recurrent Neural Networks"
],
"Memory Access Pattern": [],
"Parallelization Strategy": [
"Grid-Stride Loops"
],
"Performance Objective": [
"Compute Bound",
"Batch-Oriented"
]
} | [
"Apache"
] | https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py |