file_path | content | repo
---|---|---|
python/tvm/topi/arm_cpu/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
import numpy as np
import tvm
from tvm import te
from ..utils import is_empty_shape
def schedule_injective_from_existing(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
if len(sch[out].op.axis) >= 4:
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
sch[out].parallel(fused)
elif len(sch[out].op.axis) >= 3:
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1])
sch[out].parallel(fused)
elif len(sch[out].op.axis) >= 2:
sch[out].parallel(sch[out].op.axis[0])
return sch
def schedule_injective(outs):
"""ARM CPU schedule for injective op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
x = outs[0]
if list(s[x].op.axis):
# do not vectorize for broadcast
(io, ii) = s[x].split(list(s[x].op.axis)[-1], 16 // np.dtype(x.dtype).itemsize)
s[x].vectorize(ii)
tvm.te.schedule.AutoInlineInjective(s)
if not is_empty_shape(x.shape):
schedule_injective_from_existing(s, x)
return s
def schedule_concatenate(outs):
"""Schedule for concatenate op.
Parameters
----------
outs: Array of Tensor
The computation graph description of concatenate in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
x = outs[0]
tvm.te.schedule.AutoInlineInjective(s)
if len(s[x].op.axis) >= 4:
fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1], s[x].op.axis[2])
s[x].parallel(fused)
elif len(s[x].op.axis) >= 3:
fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1])
s[x].parallel(fused)
elif len(s[x].op.axis) >= 2:
s[x].parallel(s[x].op.axis[0])
return s
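# The following is an illustrative sketch (not part of the original schedule code): a minimal
# example of how schedule_injective might be driven for a simple element-wise compute. The
# shape, dtype, and helper name below are assumptions for illustration only.
def _example_schedule_injective_usage():
    A = te.placeholder((1, 8, 8, 16), name="A", dtype="float32")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    # schedule_injective splits and vectorizes the last axis; the outer spatial axes
    # are then fused and parallelized by schedule_injective_from_existing.
    s = schedule_injective([B])
    return tvm.lower(s, [A, B], simple_mode=True)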
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedules specialized for cortex-m DSP instructions."""
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of conv1d."""
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
from tvm import te
from tvm.topi.utils import simplify, traverse_inline
from tvm.topi.nn.pad import pad
from tvm.topi.nn.utils import get_pad_tuple1d
from tvm.tir.expr import Mul
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
def conv1d_nwc_dsp(*args, **kwargs):
"""Defines the v7e-m DSP instructions of conv1d on NWC layout."""
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
data, kernel = args[:2]
layout = args[-2]
cfg = autotvm.get_config()
args = [cfg] + args
assert layout == "NWC"
conv = conv1d_nwc_dsp_compute(*args)
sched = conv1d_nwc_dsp_schedule(cfg, [data, kernel, conv])
return sched, [data, kernel, conv]
conv1d_nwc_dsp.template_key = "dsp"
conv1d_nwc_dsp.default_data_layout = "NWC"
conv1d_nwc_dsp.default_kernel_layout = "WOI"
def conv1d_nwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute function for v7e-m DSP instructions of conv1d on NWC layout."""
if isinstance(strides, (tuple, list)):
strides = strides[0]
if isinstance(dilation, (tuple, list)):
dilation = dilation[0]
batch_size, data_width, in_channels = data.shape
kernel_size, out_channels, _ = kernel.shape
# Compute the output shape
dilated_kernel_size = (kernel_size - 1) * dilation + 1
pad_left, pad_right = get_pad_tuple1d(padding, (dilated_kernel_size,))
out_channels = simplify(out_channels)
out_width = simplify((data_width - dilated_kernel_size + pad_left + pad_right) // strides + 1)
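    # Worked example (illustrative numbers, not from the original code): with data_width = 32,
    # kernel_size = 3, dilation = 1 (so dilated_kernel_size = 3), pad_left = pad_right = 1,
    # and strides = 1, this gives out_width = (32 - 3 + 1 + 1) // 1 + 1 = 32.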
# Apply padding
pad_before = [0, pad_left, 0]
pad_after = [0, pad_right, 0]
padded_data = pad(data, pad_before, pad_after, name="padded_data")
# Compute graph
rc = te.reduce_axis((0, in_channels), name="rc")
rw = te.reduce_axis((0, kernel_size), name="rw")
conv = te.compute(
(batch_size, out_width, out_channels),
lambda b, w, c: te.sum(
padded_data[b, w * strides + rw * dilation, rc].astype(out_dtype)
* kernel[rw, c, rc].astype(out_dtype),
axis=[rw, rc],
),
name="conv1d",
tag="conv1d_nwc",
)
###########################
# Config Space Definition #
###########################
n, ow, co = (
cfg.axis(batch_size.value),
cfg.axis(out_width.value),
cfg.axis(out_channels.value),
)
kw, ci = (
cfg.reduce_axis(kernel_size.value),
cfg.reduce_axis(in_channels.value),
)
owo, owi = cfg.define_split("tile_ow", ow, policy="factors", num_outputs=2)
cio, cii = cfg.define_split(
"tile_ci",
ci,
policy="factors",
num_outputs=2,
# TODO: check case with in_channels.value % 4 != 0 with AutoTVM
filter=None if cfg.is_fallback else lambda x: x.size[-1] % 4 == 0,
)
coo, coi = cfg.define_split("tile_co", co, policy="factors", num_outputs=2)
cfg.define_reorder(
"reorder_0_simd",
[n, owo, owi, coo, coi, kw, cio, cii],
policy="candidate",
candidate=[
[n, kw, owo, coo, cio, owi, coi, cii],
[n, kw, coo, owo, cio, owi, coi, cii],
[n, kw, owo, coo, cio, owi, coi, cii],
[n, kw, coo, owo, cio, owi, coi, cii],
],
)
cfg.define_knob("auto_unroll_max_step", [0, 2, 4, 8, 16, 32])
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
cfg.fallback_split("tile_ow", [-1, out_width.value])
cfg.fallback_split("tile_ci", [-1, in_channels.value])
cfg.fallback_split("tile_co", [-1, out_channels.value])
return conv
def conv1d_nwc_dsp_schedule(cfg, outs):
"""Schedule function for v7e-m DSP instructions of conv1d on NWC layout."""
sched = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv1d_nwc" not in op.tag:
return
# extract tensors
output = op.output(0)
conv = op
data_vec = conv.input_tensors[0]
source_index_w = output.op.body[0].source[0].a.value.indices[1].a
stride_w = source_index_w.b.value if isinstance(source_index_w, Mul) else 1
# tile reduction axes
n, ow, co = sched[conv].op.axis
kw, ci = sched[conv].op.reduce_axis
M = cfg["tile_ow"].size[-1]
K = cfg["tile_ci"].size[-1]
N = cfg["tile_co"].size[-1]
owo, owi = cfg["tile_ow"].apply(sched, conv, ow)
cio, cii = cfg["tile_ci"].apply(sched, conv, ci)
coo, coi = cfg["tile_co"].apply(sched, conv, co)
cfg["reorder_0_simd"].apply(sched, conv, [n, owo, owi, coo, coi, kw, cio, cii])
gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data_vec.dtype, output.dtype, stride_w)
sched[output].tensorize(owi, gemm)
sched[output].pragma(n, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))
# this is the scope to attach global config inside this kernel
kernel_scope = n
# tune unroll
sched[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
sched[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
traverse_inline(sched, outs[-1].op, _callback)
return sched
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of conv2d."""
from tvm import autotvm
from tvm.autotvm.task import deserialize_args
from tvm import te
from tvm.topi.utils import simplify, traverse_inline
from tvm.topi.nn.pad import pad
from tvm.topi.nn.utils import get_pad_tuple
from tvm.tir.expr import Mul
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
def conv2d_nhwc_dsp(*args, **kwargs):
"""Defines the v7e-m DSP instructions of conv2d."""
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
data, kernel = args[:2]
layout = args[-2]
cfg = autotvm.get_config()
args = [cfg] + args
assert layout == "NHWC"
conv = conv2d_nhwc_dsp_compute(*args)
sched = conv2d_nhwc_dsp_schedule(cfg, [data, kernel, conv])
return sched, [data, kernel, conv]
conv2d_nhwc_dsp.template_key = "dsp"
conv2d_nhwc_dsp.default_data_layout = "NHWC"
conv2d_nhwc_dsp.default_kernel_layout = "HWOI"
def conv2d_nhwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute function for v7e-m DSP instructions of conv2d."""
assert isinstance(strides, int) or len(strides) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(strides, int):
stride_h = stride_w = strides
else:
stride_h, stride_w = strides
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch_size, in_height, in_width, in_channels = data.shape
kernel_h, kernel_w, out_channels, _ = kernel.shape
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
padded_data = pad(data, pad_before, pad_after, name="padded_data")
rc = te.reduce_axis((0, in_channels), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch_size, out_height, out_width, out_channels),
lambda nn, yy, xx, ff: te.sum(
padded_data[
nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
].astype(out_dtype)
* kernel[ry, rx, ff, rc].astype(out_dtype),
axis=[ry, rx, rc],
),
name="conv2d",
tag="conv2d_nhwc",
)
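    # Note: kernel[ry, rx, ff, rc] indexes (height, width, out-channel, in-channel),
    # i.e. the HWOI layout declared as default_kernel_layout above.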
###########################
# Config Space Definition #
###########################
n, oh, ow, co = (
cfg.axis(batch_size.value),
cfg.axis(out_height.value),
cfg.axis(out_width.value),
cfg.axis(out_channels.value),
)
kh, kw, ci = (
cfg.reduce_axis(kernel_h.value),
cfg.reduce_axis(kernel_w.value),
cfg.reduce_axis(in_channels.value),
)
owo, owi = cfg.define_split("tile_ow", ow, policy="factors", num_outputs=2)
cio, cii = cfg.define_split(
"tile_ci",
ci,
policy="factors",
num_outputs=2,
# TODO: check case with in_channels.value % 4 != 0 with AutoTVM
filter=None if cfg.is_fallback else lambda x: x.size[-1] % 4 == 0,
)
coo, coi = cfg.define_split("tile_co", co, policy="factors", num_outputs=2)
cfg.define_reorder(
"reorder_0_simd",
[n, oh, owo, owi, coo, coi, kh, kw, cio, cii],
policy="candidate",
candidate=[
[n, oh, kh, kw, owo, coo, cio, owi, coi, cii],
[n, oh, kh, kw, coo, owo, cio, owi, coi, cii],
[n, kh, kw, oh, owo, coo, cio, owi, coi, cii],
[n, kh, kw, oh, coo, owo, cio, owi, coi, cii],
],
)
cfg.define_knob("auto_unroll_max_step", [0, 2, 4, 8, 16, 32])
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
cfg.fallback_split("tile_ow", [-1, out_width.value])
cfg.fallback_split("tile_ci", [-1, in_channels.value])
cfg.fallback_split("tile_co", [-1, out_channels.value])
return conv
def conv2d_nhwc_dsp_schedule(cfg, outs):
"""Schedule function for v7e-m DSP instructions of conv2d."""
sched = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc" not in op.tag:
return
# extract tensors
output = op.output(0)
conv = op
data_vec = conv.input_tensors[0]
kernel = conv.input_tensors[1] # pylint: disable=unused-variable
last = outs[0] # pylint: disable=unused-variable
source_index_w = output.op.body[0].source[0].a.value.indices[2].a
stride_w = source_index_w.b.value if isinstance(source_index_w, Mul) else 1
# tile reduction axes
n, oh, ow, co = sched[conv].op.axis
kh, kw, ci = sched[conv].op.reduce_axis
M = cfg["tile_ow"].size[-1]
K = cfg["tile_ci"].size[-1]
N = cfg["tile_co"].size[-1]
owo, owi = cfg["tile_ow"].apply(sched, conv, ow)
cio, cii = cfg["tile_ci"].apply(sched, conv, ci)
coo, coi = cfg["tile_co"].apply(sched, conv, co)
cfg["reorder_0_simd"].apply(sched, conv, [n, oh, owo, owi, coo, coi, kh, kw, cio, cii])
gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data_vec.dtype, output.dtype, stride_w)
sched[output].tensorize(owi, gemm)
sched[output].pragma(n, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))
# this is the scope to attach global config inside this kernel
kernel_scope = n
# tune unroll
sched[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
sched[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
traverse_inline(sched, outs[-1].op, _callback)
return sched
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of dense."""
from tvm import te
from tvm.topi.utils import traverse_inline, get_const_tuple
from .micro_kernel.gemm import (
intrin_gemm_MxKxN,
gemm_MxKxN_impl,
)
from .... import tag
def dense_dsp_compute(cfg, data, weight, bias=None, out_dtype=None):
"""Defines the v7e-m DSP instructions of dense."""
M, K = get_const_tuple(data.shape)
N, _ = get_const_tuple(weight.shape)
cfg.define_split("tile_x", M, policy="factors", num_outputs=2)
cfg.define_split("tile_y", N, policy="factors", num_outputs=2)
cfg.define_split("tile_k", K, policy="factors", num_outputs=2)
k = te.reduce_axis((0, K), "k")
C = te.compute(
(M, N),
lambda x, y: te.sum(
data[x, k].astype(out_dtype) * weight[y, k].astype(out_dtype),
axis=k,
),
name="dense",
tag="dense_dsp",
)
if bias is not None:
C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
return C
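# Illustrative sketch (not part of the original file): the compute above is a transposed
# matmul, C[x, y] = sum_k data[x, k] * weight[y, k], i.e. C = data @ weight.T. Below is a
# NumPy-style reference under assumed integer inputs with int32 accumulation, useful for
# checking results; the name and dtypes here are assumptions.
def _dense_reference(data_np, weight_np, bias_np=None):
    out = data_np.astype("int32") @ weight_np.astype("int32").T
    if bias_np is not None:
        out = out + bias_np.astype("int32")
    return out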
def dense_dsp_schedule(cfg, outs):
"""Schedule function for v7e-m DSP instructions of dense."""
sched = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense" not in op.tag:
return
output = op.output(0)
dense = op
data = dense.input_tensors[0]
M = cfg["tile_x"].size[-1]
N = cfg["tile_y"].size[-1]
K = cfg["tile_k"].size[-1]
x, y = sched[dense].op.axis
k = sched[dense].op.reduce_axis[0]
x_o, x_i = cfg["tile_x"].apply(sched, dense, x)
y_o, y_i = cfg["tile_y"].apply(sched, dense, y)
k_o, k_i = cfg["tile_k"].apply(sched, dense, k)
sched[dense].reorder(x_o, y_o, k_o, x_i, y_i, k_i)
gemm, uniq_id = intrin_gemm_MxKxN(M, K, N, data.dtype, output.dtype, stride_w=1)
sched[output].tensorize(x_i, gemm)
sched[output].pragma(x_o, "import_c", gemm_MxKxN_impl(M, K, N, uniq_id))
traverse_inline(sched, outs[-1].op, _callback)
return sched
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""ARM Cortex-M DSP schedule for depthwise_conv2d"""
import random
import string
from tvm import te, topi
from tvm.topi.utils import traverse_inline
from tvm.topi.nn.pad import pad
from .micro_kernel.multi_channel_convolve import (
intrin_multi_channel_convolve,
multi_channel_convolve_impl,
)
from .micro_kernel.common import num_simd_lanes_per_word
def depthwise_conv2d_nhwc_dsp_compute(_cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute function for v7e-m DSP instructions of DepthwiseConv2D. Has a lot of requirements
for use - if not all apply, the fallback implementation will be used instead."""
assert isinstance(strides, int) or len(strides) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(strides, int):
stride_h = stride_w = strides
else:
stride_h, stride_w = strides
# We do not support dilation currently. It would be possible, but it would require
# modifying the way the kernel is packed. Gnarly.
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert dilation_h == dilation_w == 1
batch_size, height, width, channels = data.shape
kernel_h, kernel_w, _, _ = kernel.shape
simd_lanes = num_simd_lanes_per_word(data.dtype)
# We don't support different numbers of input and output channels.
assert channels == kernel.shape[2]
assert kernel.shape[3] == 1
# We take in int8 as our dtype, but we spit out int32. This is because we cannot
# round until we compute activations.
assert out_dtype == "int32"
# Padding the data requires COPYING THE ENTIRE INPUT TENSOR, which
# is slow and bad. We should really implement a strip mining
# routine to avoid this, but TVM has terrible support for that.
if padding == "SAME":
# This assumption makes the logic easier. Could be removed with work.
assert height % stride_h == width % stride_w == 0
output_h = height // stride_h
output_w = width // stride_w
        # This padding behavior is consistent with other TVM depthwise_conv2d schedules. However, it
        # differs from TensorFlow, which only pads the bottom right if stride > 1. This probably
        # brings down accuracy slightly for models imported from TFLite.
pad_down = 1 if stride_h == 1 else 0
pad_right = 1 if stride_w == 1 else 0
padded_data = pad(
data,
[0, kernel_h // 2, kernel_w // 2, 0],
[0, pad_down, pad_right, 0],
name="padded_data",
)
elif padding == "VALID":
assert height > kernel_h and width > kernel_w
output_h = (height - kernel_h) // stride_h + 1
output_w = (width - kernel_w) // stride_w + 1
padded_data = data
elif isinstance(padding, tuple):
if len(padding) == 2:
pad_up, pad_down = padding[0]
pad_left, pad_right = padding[1]
else:
pad_up, pad_left, pad_down, pad_right = padding
output_h = (height - kernel_h + pad_up + pad_down) // stride_h + 1
output_w = (width - kernel_w + pad_left + pad_right) // stride_w + 1
padded_data = pad(
data,
[0, pad_up, pad_left, 0],
[0, pad_down, pad_right, 0],
name="padded_data",
)
else:
        raise RuntimeError(f"Unsupported padding '{padding}'")
_, padded_h, padded_w, _ = padded_data.shape
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
reshaped_kernel = topi.reshape(kernel, (channels // simd_lanes, kernel_h, kernel_w, simd_lanes))
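    # Illustrative note: for int8 data simd_lanes is 4, so e.g. channels = 16 reshapes the
    # kernel to (4, kernel_h, kernel_w, 4); the 4 channels in the last axis are contiguous
    # and can be loaded by the micro-kernel as a single 32-bit word.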
return te.compute(
(batch_size, output_h, output_w, channels),
lambda h, i, j, k: te.sum(
padded_data[h, (i * stride_h) + kh_i, (j * stride_w) + kw_i, k].astype("int32")
* reshaped_kernel[k // simd_lanes, kh_i, kw_i, k % simd_lanes].astype("int32"),
axis=(kh_i, kw_i),
),
name="depthwise_conv2d",
tag=f"depthwise_conv2d_nhwc_{padded_h}_{padded_w}_dsp",
)
def depthwise_conv2d_nhwc_dsp_schedule(_cfg, outs):
"""Schedule function for v7e-m DSP instructions of conv2d."""
schedule = te.create_schedule([x.op for x in outs])
def _callback(operator):
if "depthwise_conv2d_nhwc" not in operator.tag:
return
# extract tensors
output = operator.output(0)
padded_data = output.op.input_tensors[0]
reshaped_kernel = output.op.input_tensors[1]
in_dtype = padded_data.dtype
_, padded_h, padded_w, channels = padded_data.shape
_, kernel_h, kernel_w, _ = reshaped_kernel.shape
suffix = "".join(random.choices(string.ascii_uppercase, k=8))
b_ax, y_ax, x_ax, c_ax = schedule[output].op.axis
ky_ax, kx_ax = schedule[output].op.reduce_axis
simd_lanes = num_simd_lanes_per_word(in_dtype)
c_ax_o, c_ax_i = schedule[output].split(c_ax, factor=simd_lanes)
schedule[output].reorder(b_ax, c_ax_o, y_ax, x_ax, ky_ax, kx_ax, c_ax_i)
multi_channel_convolve = intrin_multi_channel_convolve(
in_dtype, padded_h, padded_w, channels, kernel_h, kernel_w, suffix
)
schedule[output].tensorize(ky_ax, multi_channel_convolve)
schedule[output].pragma(
b_ax,
"import_c",
multi_channel_convolve_impl(
in_dtype, padded_h, padded_w, channels, kernel_h, kernel_w, suffix
),
)
traverse_inline(schedule, outs[-1].op, _callback)
return schedule
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/avg_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines sum intrinsics for sum operation with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
def intrin_sum(shape, in_dtype, out_dtype, reset=False):
"""Defines a v7e-m DSP-accelerated sum operation."""
UNIQ_ID_LEN = 8
uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))
func_prefix = "sum16"
assert in_dtype == "int16"
assert out_dtype == "int16"
width = shape[-1]
x = te.placeholder(shape, name="x", dtype=in_dtype)
k = te.reduce_axis((0, width), name="rc")
def get_slice(indices, k):
s = list(indices)
s[-1] = s[-1] + k
return tuple(s)
z = te.compute(
(1,) * len(shape), lambda *i: te.sum(x[get_slice(i, k)], axis=[k]).astype(out_dtype)
)
def _intrin_func(ins, outs):
aa = ins[0]
cc = outs[0]
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
cc.dtype,
f"{func_prefix}_{width}_{uniq_id}",
aa.access_ptr("r"),
cc.access_ptr("w"),
aa.elem_offset,
1 if reset else 0,
)
)
return ib.get()
def _reduce_reset():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(cc.dtype, f"{func_prefix}_reset_{uniq_id}", cc.access_ptr("w"))
)
return ib.get()
def _reduce_update():
return _body()
return _body(), _reduce_reset(), _reduce_update()
binds = {
t: tvm.tir.decl_buffer(
t.shape,
t.dtype,
t.op.name,
strides=[te.var(f"{t.op.name}_s_{i}") for i in range(0, len(t.shape))],
offset_factor=1,
)
for t in [x, z]
}
intrin_decl = te.decl_tensor_intrin(z.op, _intrin_func, binds=binds)
return intrin_decl, uniq_id
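# Illustrative note (not part of the original file): the generated sum16 kernel below
# multiplies each packed int16x2 word by 0x00010001 (i.e. 1 in both halfword lanes), so a
# single __SMLAD adds two int16 elements to the accumulator per iteration.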
def sum_impl(N, uniq_id):
"""Emit C code for sum impl."""
cc_code = (
common.common_includes
+ f"""
#ifdef __cplusplus
extern "C"
#endif // __cplusplus
__STATIC_FORCEINLINE int32_t sum16_reset_{uniq_id}(
int16_t *res) {{
*res = (int16_t)0;
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t sum16_{N}_{uniq_id}(
int16_t *arr,
int16_t *res16,
long arr_offset,
int reset) {{
int n;
int32_t *p32;
int32_t res = reset ? 0 : *res16;
if ( arr_offset % 4 != 0 ) {{
res += *arr;
p32 = (int32_t *)(&arr[1]);
n = {N} - 1;
}} else {{
p32 = (int32_t *)arr;
n = {N};
}}
for ( int i = 0; i < n / 2; ++ i ) {{
res = __SMLAD(*p32, 0x00010001, res);
++ p32;
}}
if ( n % 2 != 0 )
res += *(int16_t *)p32;
*res16 = res;
return 0;
}}
"""
)
return cc_code
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines common C code for all microkernel operations."""
common_includes = """
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arm_nnsupportfunctions.h>
#include <tvm/runtime/crt/error_codes.h>
"""
MICRO_WORD_LENGTH_BITS = 32
def num_simd_lanes_per_word(dtype: str) -> int:
"""Takes a dtype, and returns how many of that dtype fit into a single microcontroller word.
>>> num_simd_lanes_per_word("int8")
4
>>> num_simd_lanes_per_word("int16")
2
"""
assert dtype.startswith("int")
dtype_width = int(dtype[3:])
return MICRO_WORD_LENGTH_BITS // dtype_width
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines gemm intrinsics for matrix multiplication with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
##########################
# MxKxN MatMul Intrinsic #
##########################
# NOTE this is transposed matmul (A * B^T)
def intrin_gemm_MxKxN(M, K, N, in_dtype, out_dtype, stride_w=1):
"""Defines a v7e-m DSP-accelerated transposed matmul."""
# we generate a unique ID for every intrinsic definition, to prevent name
# collisions in the generated source (e.g., if there are multiple operators
# in the same module that use the same intrinsic)
#
# TODO(weberlo, areusch): to cut down on memory usage, we should cache each intrinsic
# instantiation and include it only once, eliminating the need for unique
# IDs
UNIQ_ID_LEN = 8
uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))
if isinstance(M, tvm.tir.IntImm):
M = M.value
if isinstance(K, tvm.tir.IntImm):
K = K.value
if isinstance(N, tvm.tir.IntImm):
N = N.value
# TODO(weberlo, areusch): support more dtypes?
assert in_dtype in ("int8", "int16")
assert out_dtype == "int32"
A = te.placeholder((M * stride_w - (stride_w - 1), K), name="a", dtype=in_dtype)
B = te.placeholder((N, K), name="b", dtype=in_dtype)
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(M, N),
lambda i, j: te.sum(
A[i * stride_w, k].astype(out_dtype) * B[j, k].astype(out_dtype), axis=k
),
name="c",
)
A_buf = tvm.tir.decl_buffer(
A.shape, A.dtype, name="A", offset_factor=1, strides=[te.var("A_s"), 1]
)
B_buf = tvm.tir.decl_buffer(
B.shape, B.dtype, name="B", offset_factor=1, strides=[te.var("B_s"), 1]
)
C_buf = tvm.tir.decl_buffer(
C.shape, C.dtype, name="C", offset_factor=1, strides=[te.var("C_s"), 1]
)
def intrin_func(ins, outs):
aa, bb = ins
cc = outs[0]
gemm_func_prefix = "gemm" if in_dtype == "int8" else "gemm16"
def _reduce_update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
"int32",
f"{gemm_func_prefix}_{M}x{K}x{N}_update_{uniq_id}",
aa.access_ptr("r"),
bb.access_ptr("r"),
cc.access_ptr("w"),
aa.strides[0] * stride_w,
bb.strides[0],
cc.strides[0],
)
)
return ib.get()
def _reduce_reset():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
"int32", f"gemm_{M}x{K}x{N}_reset_{uniq_id}", cc.access_ptr("w"), cc.strides[0]
)
)
return ib.get()
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
"int32",
f"{gemm_func_prefix}_{M}x{K}x{N}_body_{uniq_id}",
aa.access_ptr("r"),
bb.access_ptr("r"),
cc.access_ptr("w"),
aa.strides[0] * stride_w,
bb.strides[0],
cc.strides[0],
)
)
return ib.get()
return _body(), _reduce_reset(), _reduce_update()
intrin_decl = te.decl_tensor_intrin(C.op, intrin_func, binds={A: A_buf, B: B_buf, C: C_buf})
return intrin_decl, uniq_id
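# Illustrative note (not part of the original file): the generated kernels below rely on
# the Arm __SMLAD intrinsic, which multiplies the two signed halfword lanes of its packed
# operands and adds both products to the accumulator in one instruction:
#     acc = acc + lo16(a) * lo16(b) + hi16(a) * hi16(b)
# read_and_pad() (from CMSIS-NN's arm_nnsupportfunctions.h) widens four int8 values into
# two packed int16x2 words so that __SMLAD can also be applied to int8 inputs.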
def gemm_MxKxN_impl(M, K, N, uniq_id):
"""Emit C code for gemm impl."""
# TODO(weberlo, areusch): are there any SIMD tricks to zero out arrays quickly?
# aa_pad_size = M * K
bb_pad_size = N * K
# code reference: CMSIS-NN paper (https://arxiv.org/abs/1801.06601)
cc_code = (
common.common_includes
+ f"""
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{N}_body_rest_{uniq_id}(
int K,
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 4) * 4;
switch ( K % 4 ) {{
case 1:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
break;
case 2:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1];
}}
}}
break;
case 3:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1]
+ (int32_t) a_ptr[2] * (int32_t) b_ptr[2];
}}
}}
break;
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{K}x{N}_body_loop_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{K}x{N}_body_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int16_t bb_pad[{bb_pad_size}];
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm_{M}x{K}x{N}_body_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {N}; i++)
for (int j = 0; j < {K} / 4; j++)
read_and_pad(&bb[i*B_stride + j*4], (int32_t*) &bb_pad[i*{K} + j*4], (int32_t*) &bb_pad[i*{K} + j*4 + 2]);
for (int i = 0; i < {M}; i++) {{
int16_t aa_pad_line[{K}];
for (int l = 0; l < {K} / 4; l++)
read_and_pad(&aa[i*A_stride + l*4], (int32_t*) &aa_pad_line[l*4], (int32_t*) &aa_pad_line[l*4 + 2]);
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) aa_pad_line;
int32_t *bb_ptr = (int32_t *) &bb_pad[j*{K}];
int32_t sum = 0;
for (int l = 0; l < 2 * ({K} / 4); l++) {{
sum = __SMLAD(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
if ( {K} % 4 != 0 )
gemm_{M}x{N}_body_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{N}_update_rest_{uniq_id}(
int K,
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 4) * 4;
switch ( K % 4 ) {{
case 1:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
break;
case 2:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1];
}}
}}
break;
case 3:
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int8_t *a_ptr = &aa[i * A_stride + k_base];
int8_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0]
+ (int32_t) a_ptr[1] * (int32_t) b_ptr[1]
+ (int32_t) a_ptr[2] * (int32_t) b_ptr[2];
}}
}}
break;
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{K}x{N}_update_loop_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
cc[i*C_stride + j] += sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{K}x{N}_update_{uniq_id}(
int8_t *aa, int8_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int16_t bb_pad[{bb_pad_size}];
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm_{M}x{K}x{N}_update_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {N}; i++)
for (int j = 0; j < {K} / 4; j++)
read_and_pad(&bb[i*B_stride + j*4], (int32_t*) &bb_pad[i*{K} + j*4], (int32_t*) &bb_pad[i*{K} + j*4 + 2]);
for (int i = 0; i < {M}; i++) {{
int16_t aa_pad_line[{K}];
for (int l = 0; l < {K} / 4; l++)
read_and_pad(&aa[i*A_stride + l*4], (int32_t*) &aa_pad_line[l*4], (int32_t*) &aa_pad_line[l*4 + 2]);
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) aa_pad_line;
int32_t *bb_ptr = (int32_t *) &bb_pad[j*{K}];
int32_t sum = 0;
for (int l = 0; l < 2 * ({K} / 4); l++) {{
sum = __SMLAD(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
cc[i*C_stride + j] += sum;
}}
}}
if ( {K} % 4 != 0 )
gemm_{M}x{N}_update_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{N}_body_rest_{uniq_id}(
int K,
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 2) * 2;
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int16_t *a_ptr = &aa[i * A_stride + k_base];
int16_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] = (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{K}x{N}_body_loop_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{K}x{N}_body_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm16_{M}x{K}x{N}_body_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
if(((uint32_t)aa & 0x3) != 0 || ((uint32_t)bb & 0x3) != 0){{
retcode = kTvmErrorFunctionCallInvalidArg;
goto out;
}}
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) &aa[i*A_stride];
int32_t *bb_ptr = (int32_t *) &bb[j*B_stride];
int32_t sum = 0;
for (int l = 0; l < {K} / 2; l++) {{
sum = __SMLAD(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
// NOTE: this is the line where `*_body` differs from `*_update`. here
// we're *setting* the result, instead of accumulating, because we know
// the `i` and `j` itervars span their entire respective axes.
cc[i*C_stride + j] = sum;
}}
}}
if ( {K} % 2 != 0 )
gemm16_{M}x{N}_body_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{N}_update_rest_{uniq_id}(
int K,
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int k_base = (K / 2) * 2;
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int16_t *a_ptr = &aa[i * A_stride + k_base];
int16_t *b_ptr = &bb[j * B_stride + k_base];
cc[i * C_stride + j] += (int32_t) a_ptr[0] * (int32_t) b_ptr[0];
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{K}x{N}_update_loop_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t sum = 0;
for (int l = 0; l < {K}; l++) {{
sum += (int32_t) aa[i*A_stride + l] * (int32_t) bb[j*B_stride + l];
}}
cc[i*C_stride + j] += sum;
}}
}}
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm16_{M}x{K}x{N}_update_{uniq_id}(
int16_t *aa, int16_t *bb, int32_t *cc,
int A_stride, int B_stride, int C_stride) {{
int32_t retcode = 0;
if ( {M} < 2 && {N} < 2 ) {{
retcode = gemm16_{M}x{K}x{N}_update_loop_{uniq_id}(aa, bb, cc, A_stride, B_stride, C_stride);
goto out;
}}
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
int32_t *aa_ptr = (int32_t *) &aa[i*A_stride];
int32_t *bb_ptr = (int32_t *) &bb[j*B_stride];
int32_t sum = 0;
for (int l = 0; l < {K} / 2; l++) {{
sum = __SMLAD(*aa_ptr, *bb_ptr, sum);
++ aa_ptr; ++ bb_ptr;
}}
cc[i*C_stride + j] += sum;
}}
}}
if ( {K} % 2 != 0 )
gemm16_{M}x{N}_update_rest_{uniq_id}({K}, aa, bb, cc, A_stride, B_stride, C_stride);
out:
return retcode;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t gemm_{M}x{K}x{N}_reset_{uniq_id}(int32_t *cc, int C_stride) {{
for (int i = 0; i < {M}; i++) {{
for (int j = 0; j < {N}; j++) {{
cc[i*C_stride + j] = 0;
}}
}}
return 0;
}}
"""
)
return cc_code
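# Illustrative sketch (not part of the original file): how the schedules above pair the
# tensor intrinsic with its generated C implementation. The M, K, N values are assumptions.
def _example_gemm_intrinsic_usage():
    gemm, uniq_id = intrin_gemm_MxKxN(2, 8, 4, "int8", "int32", stride_w=1)
    c_source = gemm_MxKxN_impl(2, 8, 4, uniq_id)
    # `gemm` is what a schedule tensorizes the inner output tile with, while `c_source`
    # is attached to the schedule via the "import_c" pragma.
    return gemm, c_source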
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/max_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Defines max intrinsics for elemwise max operation with v7e-m DSP instructions."""
import random
import string
import tvm
from tvm import te
from . import common
def intrin_max(shape, in_dtype, out_dtype):
"""Defines a v7e-m DSP-accelerated max pool."""
UNIQ_ID_LEN = 8
uniq_id = "".join(random.choices(string.ascii_uppercase, k=UNIQ_ID_LEN))
func_prefix = "max8"
assert in_dtype == "int8"
assert out_dtype == "int8"
x = te.placeholder(shape, name="x", dtype=in_dtype)
k = te.reduce_axis((0, 1), name="rc")
z = te.compute(shape, lambda *i: tvm.tir.max(x[i], axis=[k]).astype(out_dtype))
def _intrin_func(ins, outs):
aa = ins[0]
cc = outs[0]
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
cc.dtype,
f"{func_prefix}_{uniq_id}",
aa.access_ptr("r"),
cc.access_ptr("w"),
cc.strides[0],
)
)
return ib.get()
def _reduce_reset():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
cc.dtype, f"{func_prefix}_reset_{uniq_id}", cc.access_ptr("w"), cc.strides[0]
)
)
return ib.get()
def _reduce_update():
return _body()
return _body(), _reduce_reset(), _reduce_update()
binds = {
t: tvm.tir.decl_buffer(
t.shape,
t.dtype,
t.op.name,
strides=[te.var(f"{t.op.name}_s_{i}") for i in range(0, len(t.shape))],
offset_factor=1,
)
for t in [x, z]
}
intrin_decl = te.decl_tensor_intrin(z.op, _intrin_func, binds=binds)
return intrin_decl, uniq_id
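# Illustrative note (not part of the original file): the generated max8 kernel below uses
# __SSUB8 to subtract the four int8 lanes of `res` from `arg`, which sets the per-byte GE
# flags, and then __SEL to pick, per lane, whichever byte was larger, giving a four-lane
# int8 max per 32-bit word.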
def max_impl(uniq_id):
"""Emit C code for pool impl."""
cc_code = (
common.common_includes
+ f"""
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t max8_reset_{uniq_id}(
int8_t *res,
int N) {{
memset(res, (int8_t)-128, N * sizeof(*res));
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t max8_loop_{uniq_id}(
int8_t *arg,
int8_t *res,
int N) {{
for ( int i = 0; i < N; ++ i )
if ( arg[i] > res[i] )
res[i] = arg[i];
return 0;
}}
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t max8_{uniq_id}(
int8_t *arg,
int8_t *res,
int N) {{
int32_t *parg32, *pres32;
int una_arg = (int32_t)arg & 0x3, una_res = (int32_t)res & 0x3;
int32_t retcode = 0;
if ( N < 4 || ((una_arg || una_res) && una_arg != una_res) ) {{
retcode = max8_loop_{uniq_id}(arg, res, N);
goto out;
}}
if ( una_arg ) {{
int n = (4 - una_arg);
if ( n > N || (N - n) < 4 )
n = N;
retcode = max8_loop_{uniq_id}(arg, res, n);
N -= n;
if ( N == 0 )
goto out;
arg += n; res += n;
}}
parg32 = (int32_t *)arg;
pres32 = (int32_t *)res;
for ( int i = 0; i < N / 4; ++ i ) {{
int32_t arg32 = *parg32 ++;
int32_t res32 = *pres32;
__SSUB8(arg32, res32);
res32 = __SEL(arg32, res32);
*pres32 ++ = res32;
}}
if ( N & 0x3 ) {{
retcode = max8_loop_{uniq_id}((int8_t *)parg32, (int8_t *)pres32, N & 0x3);
goto out;
}}
out:
return retcode;
}}
"""
)
return cc_code
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/multi_channel_convolve.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This is a special intrinsic used for depthwise convolution using Cortex-M DSP instructions
(v7e-m). It takes as inputs an int8 HWC data tensor and an int8 CHWc kernel. This intrinsic "lays"
the kernel on top of the data tensors starting from a given pointer, performs signed sixteen-bit
multiplies on each pair of values, and sums all the products in an int32 accumulator. This process is
repeated four times giving four int32 outputs - one per channel."""
import textwrap
from tvm import te, tir
from .common import num_simd_lanes_per_word
def _get_func_name(in_dtype, tensor_w, channels, kernel_h, kernel_w, suffix):
"""Gets the C function name of the tensorized function."""
return f"kernel_convolve_{in_dtype}_w{tensor_w}_c{channels}_kh{kernel_h}_kw{kernel_w}_{suffix}"
def intrin_multi_channel_convolve(
in_dtype, _tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix
):
"""Defines a v7e-m DSP-accelerated multi-channel convolution. Works on two
channels if in_dtype==int16, and four channels if in_dtype==int8."""
simd_lanes = num_simd_lanes_per_word(in_dtype)
overlap_dims = (kernel_h, kernel_w, simd_lanes)
data_slice = te.placeholder(overlap_dims, name="data_slice", dtype=in_dtype)
kernel_slice = te.placeholder(overlap_dims, name="kernel_slice", dtype=in_dtype)
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
output_slice = te.compute(
(simd_lanes,),
lambda k: te.sum(
data_slice[kh_i, kw_i, k].astype("int32") * kernel_slice[kh_i, kw_i, k].astype("int32"),
axis=(kh_i, kw_i),
),
name="c",
)
data_buf = tir.decl_buffer(
data_slice.shape,
data_slice.dtype,
name="data",
offset_factor=1,
strides=[tensor_w * channels, channels, 1],
)
kernel_buf = tir.decl_buffer(
kernel_slice.shape,
kernel_slice.dtype,
name="kernel",
offset_factor=1,
strides=[kernel_w * simd_lanes, simd_lanes, 1],
)
output_buf = tir.decl_buffer(
output_slice.shape, output_slice.dtype, name="output", offset_factor=1, strides=[1]
)
def intrin_func(ins, outs):
builder = tir.ir_builder.create()
builder.emit(
tir.call_extern(
"int32",
_get_func_name(in_dtype, tensor_w, channels, kernel_h, kernel_w, suffix),
outs[0].access_ptr("w"),
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
)
)
return builder.get()
return te.decl_tensor_intrin(
output_slice.op,
intrin_func,
binds={data_slice: data_buf, kernel_slice: kernel_buf, output_slice: output_buf},
)
def multi_channel_convolve_impl(in_dtype, *args) -> str:
"""Generates C code for a fast multi-channel convolution function for ARM Cortex-M. This is done
    by calling a sub-function depending on the input data type, since v7e-m has no quad multiply
accumulate instruction, the int8 and int16 cases work differently."""
if in_dtype == "int8":
return _quad_int8_channel_convolve_impl(*args)
if in_dtype == "int16":
return _dual_int16_channel_convolve_impl(*args)
raise NotImplementedError(f"No Cortex-M {in_dtype} depthwise_conv2d implementation exists!")
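# Illustrative note (not part of the original file): the int8 variant below splits the four
# int8 channels packed in one 32-bit word using __SXTB16, which sign-extends bytes 0 and 2
# into a packed int16x2 word; combining it with __ROR(x, 8) extracts bytes 1 and 3. The two
# resulting int16x2 words feed smlabb/smlatt multiply-accumulates, one per channel.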
def _quad_int8_channel_convolve_impl(_tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix):
return textwrap.dedent(
(
f"""
#include <stdint.h>
#include <arm_nnsupportfunctions.h>
// __SXTB16(_ROR(X, Y)) is combined into one assembly instruction
#define TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP( \
arranged_kernel, \
tensor_c3210, \
sum_c0, sum_c1, sum_c2, sum_c3) {{ \
\
uint32_t kernel_c3210 = *arranged_kernel++; \
\
uint32_t tensor_c20 = __SXTB16(tensor_c3210); \
uint32_t kernel_c20 = __SXTB16(kernel_c3210); \
sum_c0 = __builtin_arm_smlabb(tensor_c20, kernel_c20, sum_c0); \
sum_c2 = __builtin_arm_smlatt(tensor_c20, kernel_c20, sum_c2); \
\
uint32_t tensor_c31 = __SXTB16(__ROR(tensor_c3210, 8)); \
uint32_t kernel_c31 = __SXTB16(__ROR(kernel_c3210, 8)); \
sum_c1 = __builtin_arm_smlabb(tensor_c31, kernel_c31, sum_c1); \
sum_c3 = __builtin_arm_smlatt(tensor_c31, kernel_c31, sum_c3); \
}}
/* We do four channels at once to get this speed boost. */
#ifdef __cplusplus
extern "C"
#endif
int32_t {_get_func_name("int8", tensor_w, channels, kernel_h, kernel_w, suffix)}(
uint32_t *out,
uint32_t *tensor,
uint32_t *kernel) {{
uint32_t sum_c0 = 0;
uint32_t sum_c1 = 0;
uint32_t sum_c2 = 0;
uint32_t sum_c3 = 0;
#pragma GCC unroll 3
for (int i = 0; i < {kernel_h}; i++) {{
#pragma GCC unroll 3
for (int j = 0; j < {kernel_w}; j++) {{
TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP(
kernel,
*(tensor + j * {channels // 4} + i * {tensor_w * (channels // 4)}),
sum_c0, sum_c1, sum_c2, sum_c3)
}}
}}
out[0] = sum_c0;
out[1] = sum_c1;
out[2] = sum_c2;
out[3] = sum_c3;
return 0;
}}
#undef TVMGEN_QUAD_INT8_CHANNEL_REARRANGE_SUM_DSP
"""
)
)
def _dual_int16_channel_convolve_impl(_tensor_h, tensor_w, channels, kernel_h, kernel_w, suffix):
return textwrap.dedent(
(
f"""
#include <stdint.h>
        /* We do two channels at once to get this speed boost. */
#ifdef __cplusplus
extern "C"
#endif
int32_t {_get_func_name("int16", tensor_w, channels, kernel_h, kernel_w, suffix)}(
uint32_t *out,
uint32_t *tensor,
uint32_t *kernel) {{
uint32_t sum_c0 = 0;
uint32_t sum_c1 = 0;
#pragma GCC unroll 3
for (int i = 0; i < {kernel_h}; i++) {{
#pragma GCC unroll 3
for (int j = 0; j < {kernel_w}; j++) {{
uint32_t tensor_c10 = *(tensor + j * {channels // 2}
+ i * {tensor_w * (channels // 2)});
uint32_t kernel_c10 = *kernel++;
sum_c0 = __builtin_arm_smlabb(tensor_c10, kernel_c10, sum_c0);
sum_c1 = __builtin_arm_smlatt(tensor_c10, kernel_c10, sum_c1);
}}
}}
out[0] = sum_c0;
out[1] = sum_c1;
return 0;
}}
#undef TVMGEN_DUAL_INT16_CHANNEL_REARRANGE_SUM
"""
)
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/micro_kernel/tensordot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Computes a "jumpy tensordot" operator, which can be used to tensorize many common operators
including regular conv2d, depthwise conv2d, and grouped conv2d provided the data and kernel layouts
are the optimal ones. When groups=1, the optimal data layout is NHWC and kernel layout is OHWI. When
this is a depthwise convolution, the optimal data layout is NCHW and kernel layout is OIHW."""
import textwrap
from tvm import te, tir
from .common import num_simd_lanes_per_word
def _get_func_name(in_dtype, tensor_h, jump, tensor_w, suffix):
"""Gets the C function name of the tensordot function."""
return f"tensordot_{in_dtype}_h{tensor_h}_j{jump}_w{tensor_w}_{suffix}"
def make_intrin_tensordot(slices, strides, tensordot_params):
"""Helper function for constructing tensordot intrinsic. We can't construct the whole thing here
    (as multiple schedules use tensordot and each must build the intrinsic differently) but we can
build part here to simplify the code."""
# in_dtype, tensor_h, jump, tensor_w, suffix = tensordot_params
data, kernel, output = slices
data_strides, kernel_strides = strides
data_buf = tir.decl_buffer(
data.shape, data.dtype, name="data", offset_factor=1, strides=data_strides
)
kernel_buf = tir.decl_buffer(
kernel.shape,
kernel.dtype,
name="kernel",
offset_factor=1,
strides=kernel_strides,
)
output_buf = tir.decl_buffer(
output.shape, output.dtype, name="output", offset_factor=1, strides=[1]
)
def intrin_func(ins, outs):
builder = tir.ir_builder.create()
builder.emit(
tir.call_extern(
"int32",
_get_func_name(*tensordot_params),
outs[0].access_ptr("w"),
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
)
)
return builder.get()
return te.decl_tensor_intrin(
output.op,
intrin_func,
binds={data: data_buf, kernel: kernel_buf, output: output_buf},
)
def tensordot_impl(in_dtype: str, tensor_h: int, jump: int, tensor_w: int, suffix: str) -> str:
"""Generates C code for taking the dot products of two `tensor_h` * `tensor_w` tensors. Also has
    a `jump` argument that advances the `tensor` pointer by that many elements after each row. The
`jump` and `tensor_w` values must be word-aligned for the input data type, as non-word-aligned
    memory access is slow on the Cortex-M series. Depending on the input data type, the generated
    C code may contain DSP instructions for Arm v7e-m. See the pseudocode below for reference:
tensordot(out_ptr, dat_ptr, ker_ptr) {
sum = 0;
for (i = 0; i < tensor_h; i++) {
for (j = 0; j < tensor_w; j++) {
sum += (*dat_ptr++) * (*ker_ptr++);
}
dat_ptr += jump;
}
*out_ptr = sum;
}
"""
simd_lanes = num_simd_lanes_per_word(in_dtype)
assert tensor_w % simd_lanes == 0
assert jump % simd_lanes == 0
if in_dtype == "int8":
inner_loop = """
uint32_t tensor_c20 = __SXTB16(tensor_batch);
uint32_t kernel_c20 = __SXTB16(kernel_batch);
sum = __SMLAD(tensor_c20, kernel_c20, sum);
uint32_t tensor_c31 = __SXTB16(__ROR(tensor_batch, 8));
uint32_t kernel_c31 = __SXTB16(__ROR(kernel_batch, 8));
sum = __SMLAD(tensor_c31, kernel_c31, sum);"""
elif in_dtype == "int16":
inner_loop = """
sum = __SMLAD(tensor_batch, kernel_batch, sum);"""
elif in_dtype == "int32":
inner_loop = """
// Compiles to a single MAC instruction
sum += tensor_batch * kernel_batch;"""
else:
raise ValueError(f"No tensordot implementation exists for dtype '{in_dtype}'!")
function_name = _get_func_name(in_dtype, tensor_h, jump, tensor_w, suffix)
return textwrap.dedent(
(
f"""
#include <stdint.h>
#include <arm_nnsupportfunctions.h>
#ifdef __cplusplus
extern "C"
#endif
__STATIC_FORCEINLINE int32_t {function_name}(
uint32_t *out,
uint32_t *tensor,
uint32_t *kernel) {{
uint32_t sum = 0;
#pragma GCC unroll {tensor_h}
for (int i = 0; i < {tensor_h}; i++) {{
#pragma GCC unroll {tensor_w // simd_lanes}
for (int j = 0; j < {tensor_w // simd_lanes}; j++) {{
uint32_t tensor_batch = *tensor++;
uint32_t kernel_batch = *kernel++;
{inner_loop.strip()}
}}
tensor += {jump // simd_lanes};
}}
out[0] = sum;
return 0;
}}
"""
)
)
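# Illustrative sketch (not called by the schedules): emit the C source for a small int16
# micro-kernel. The values below are hypothetical; they only need to satisfy the word-alignment
# asserts above (tensor_w and jump divisible by the SIMD lane count, which is 2 for int16).
def _example_tensordot_source():
    """Return the generated C for a 3x8 int16 tensordot that skips 4 elements between rows."""
    return tensordot_impl("int16", 3, 4, 8, "example")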
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-value-for-parameter
"""Direct implementation of pool."""
import logging
import tvm
from tvm import te
from tvm.topi.utils import traverse_inline
from .micro_kernel.max_pool import (
intrin_max,
max_impl,
)
from .micro_kernel.avg_pool import (
intrin_sum,
sum_impl,
)
logger = logging.getLogger("topi")
def schedule_maxpool_1d_nwc(s, op):
"""Schedule function for v7e-m DSP instructions of maxpool 1d NWC layout."""
output = op.output(0)
data_vec = op.input_tensors[0]
channels = data_vec.shape[-1]
if isinstance(channels, tvm.tir.IntImm):
channels = channels.value
n, w, c = s[op].op.axis
(k,) = s[op].op.reduce_axis
s[op].reorder(n, w, k, c)
max_val, uniq_id = intrin_max((1, 1, channels), data_vec.dtype, output.dtype)
s[op].tensorize(c, max_val)
s[output].pragma(n, "import_c", max_impl(uniq_id))
def schedule_maxpool_2d_nhwc(s, op):
"""Schedule function for v7e-m DSP instructions of maxpool 2d NHWC layout."""
output = op.output(0)
data_vec = op.input_tensors[0]
channels = data_vec.shape[-1]
if isinstance(channels, tvm.tir.IntImm):
channels = channels.value
n, h, w, c = s[op].op.axis
ko, ki = s[op].op.reduce_axis
s[op].reorder(n, h, w, ko, ki, c)
max_val, uniq_id = intrin_max((1, 1, 1, channels), data_vec.dtype, output.dtype)
s[op].tensorize(c, max_val)
s[output].pragma(n, "import_c", max_impl(uniq_id))
def schedule_avgpool_1d_ncw(s, op):
"""Schedule function for v7e-m DSP instructions of avgpool 1d NCW layout."""
output = op.output(0)
data_vec = op.input_tensors[0]
n, _, _ = s[op].op.axis
(k,) = s[op].op.reduce_axis
pool_w = k.dom.extent.value
summary, uniq_id = intrin_sum((1, 1, pool_w), data_vec.dtype, output.dtype, reset=True)
s[op].tensorize(k, summary)
s[output].pragma(n, "import_c", sum_impl(pool_w, uniq_id))
def schedule_avgpool_2d_nchw(s, op):
"""Schedule function for v7e-m DSP instructions of avgpool 2d NCHW layout."""
output = op.output(0)
data_vec = op.input_tensors[0]
n, _, _, _ = s[op].op.axis
_, ki = s[op].op.reduce_axis
pool_w = ki.dom.extent.value
summary, uniq_id = intrin_sum((1, 1, 1, pool_w), data_vec.dtype, output.dtype)
s[op].tensorize(ki, summary)
s[output].pragma(n, "import_c", sum_impl(pool_w, uniq_id))
def pool_dsp_schedule(outs, layout):
"""Schedule function for v7e-m DSP instructions of pooling."""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "pool_max" in op.tag:
in_dtype = op.input_tensors[0].dtype
if in_dtype != "int8":
logger.warning("Does not have micro-kernel for %s maxpool.", in_dtype)
elif layout == "NWC":
schedule_maxpool_1d_nwc(s, op)
elif layout == "NHWC":
schedule_maxpool_2d_nhwc(s, op)
elif "pool_sum" in op.tag:
in_dtype = op.input_tensors[0].dtype
if in_dtype != "int16":
logger.warning("Does not have micro-kernel for %s avgpool.", in_dtype)
elif layout == "NCW":
schedule_avgpool_1d_ncw(s, op)
elif layout == "NCHW":
schedule_avgpool_2d_nchw(s, op)
traverse_inline(s, outs[-1].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/mprofile/dsp/tensordot_conv2ds.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implementations of several conv2d variations, all tensorized using tensordot and optimized for
Cortex-M DSP. Currently contains a standard conv2d and depthwise conv2d implementation, but could be
extended to add a grouped conv2d operator. Due to the way we tensorize, this schedule ONLY works
when the data and kernel layouts are NCHWxc and OIHWxi respectively, where x is the number of
input channels divided by the number of groups."""
import random
import string
from typing import Callable, Tuple, Union
import tvm
from tvm import te
from tvm.tir import indexdiv, indexmod
from tvm.topi.utils import traverse_inline
from tvm.topi.nn.pad import pad
from .micro_kernel.tensordot import (
make_intrin_tensordot,
tensordot_impl,
)
def _unpack_2d_argument(argument: Union[int, Tuple]) -> Tuple:
if isinstance(argument, int):
return (argument, argument)
assert len(argument) == 2
return argument
def _check_no_dilation(dilation: Union[int, Tuple]) -> None:
"""Takes a dilation argument as an integer or tuple, and makes sure both dimensions are 1.
Dilation prevents us from using DSP instructions, so this schedule can't work (aside from the
niche case where dilation_h == stride_h and dilation_w == stride_w, which is rare enough we
probably don't need to support it)."""
dilation_h, dilation_w = _unpack_2d_argument(dilation)
assert dilation_h == dilation_w == 1
def _unpack_padding(padding: Tuple) -> Tuple:
assert isinstance(padding, tuple)
if len(padding) == 2:
(pad_up, pad_down), (pad_left, pad_right) = padding
else:
pad_up, pad_left, pad_down, pad_right = padding
return pad_up, pad_left, pad_down, pad_right
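# For example, both _unpack_padding(((1, 1), (2, 2))) and _unpack_padding((1, 2, 1, 2)) return
# (1, 2, 1, 2), i.e. (pad_up, pad_left, pad_down, pad_right).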
def _pad_if_needed(data: te.tensor.Tensor, layout: str, padding: Tuple) -> te.tensor.Tensor:
"""Performs padding on a te.tensor.Tensor object if necessary. If padding = (0, 0, 0, 0), the
input tensor is returned unmodified. We only care about tuples here - "VALID" and "SAME" padding
    will be converted by the TFLite importer if present."""
pad_up, pad_left, pad_down, pad_right = padding
if not any(padding):
return data
# We want to pad the "H" and "W" columns, and their position depends on the layout
pad_before, pad_after = [0, 0, 0, 0], [0, 0, 0, 0]
pad_before[layout.index("H")] = pad_up
pad_before[layout.index("W")] = pad_left
pad_after[layout.index("H")] = pad_down
pad_after[layout.index("W")] = pad_right
return pad(data, pad_before, pad_after, name="padded_data")
def _compute_output_dim(
data_dim: int, kernel_dim: int, pad_before: int, pad_after: int, stride: int
) -> int:
"""Computes an output dimension of a convolution, given the data dimension, kernel dimension,
padding, and stride along that axis. Note that when stride > 1, this division will often not
be perfectly even."""
return (data_dim + pad_before + pad_after - kernel_dim) // stride + 1
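# For example, a 32-wide input with a 3-wide kernel, one pixel of padding on each side, and a
# stride of 2 gives (32 + 1 + 1 - 3) // 2 + 1 = 16 output pixels.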
def _wrap_te_compute(
shape: Tuple,
fcompute: Callable[[int, int, int, int], tvm.ir.PrimExpr],
desired_out_layout: str,
current_out_layout: str = "NHWC",
**kwargs,
) -> te.tensor.Tensor:
"""Wrapper over te.compute that allows the output layout to be easily changed."""
assert current_out_layout.isalpha() and desired_out_layout.isalpha()
assert sorted(current_out_layout) == sorted(desired_out_layout)
forward_order = (current_out_layout.index(c) for c in desired_out_layout)
reverse_order = (desired_out_layout.index(c) for c in current_out_layout)
return te.compute(
tuple(shape[i] for i in forward_order),
lambda *args: fcompute(*(args[i] for i in reverse_order)),
**kwargs,
)
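# For example, wrapping an NHWC compute with desired_out_layout="NCHW" turns an output shape of
# (1, 16, 16, 8) into (1, 8, 16, 16), while fcompute still receives its indices in NHWC order.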
def _get_suffix() -> str:
"""Returns a random eight-character string to append to C function names. Prevents accidental
re-definition of functions if the same operator appears twice in a Relay graph."""
return "".join(random.choices(string.ascii_uppercase, k=8))
def conv2d_nhwc_ohwi_dsp_compute(
_cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype
):
"""Standard conv2d schedule that can be tensorized using tensordot."""
stride_h, stride_w = _unpack_2d_argument(strides)
pad_up, pad_left, pad_down, pad_right = _unpack_padding(padding)
_check_no_dilation(dilation)
batch_size, data_h, data_w, in_channels = data.shape
output_channels, kernel_h, kernel_w, _ = kernel.shape
assert kernel.shape[3] == in_channels
output_h = _compute_output_dim(data_h, kernel_h, pad_up, pad_down, stride_h)
output_w = _compute_output_dim(data_w, kernel_w, pad_left, pad_right, stride_w)
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
kc_i = te.reduce_axis((0, in_channels), name="rc")
padded_data = _pad_if_needed(data, "NHWC", (pad_up, pad_left, pad_down, pad_right))
return _wrap_te_compute(
(batch_size, output_h, output_w, output_channels),
lambda n, y, x, c: te.sum(
padded_data[n, y * stride_h + kh_i, x * stride_w + kw_i, kc_i].astype(out_dtype)
* kernel[c, kh_i, kw_i, kc_i].astype(out_dtype),
axis=(kh_i, kw_i, kc_i),
),
out_layout,
name="conv2d",
tag="conv2d_nhwc_ohwi_dsp",
)
def _make_conv2d_tensorization(padded_data, kernel):
_, _, padded_w, in_channels = padded_data.shape
_, kernel_h, kernel_w, _ = kernel.shape
in_dtype = padded_data.dtype
suffix = _get_suffix()
assert in_dtype == kernel.dtype
data_slice = te.placeholder((kernel_h, kernel_w, in_channels), name="a", dtype=in_dtype)
kernel_slice = te.placeholder((kernel_h, kernel_w, in_channels), name="b", dtype=in_dtype)
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
kc_i = te.reduce_axis((0, in_channels), name="kc_i")
output_slice = te.compute(
(1,),
lambda k: te.sum(
data_slice[kh_i, kw_i, kc_i].astype("int32")
* kernel_slice[kh_i, kw_i, kc_i].astype("int32"),
axis=[kh_i, kw_i, kc_i],
),
name="c",
)
# TVM has a really strange bug where the outer reduction axis (kh_i) having length 1 causes the
# decl_buffer strides check to fail. height_stride is a dark magic workaround for this.
height_stride = in_channels * padded_w if kernel_h > 1 else in_channels
jump = (padded_w - kernel_w) * in_channels
tensordot_params = (in_dtype, kernel_h, jump, kernel_w * in_channels, suffix)
intrin_tensordot = make_intrin_tensordot(
(data_slice, kernel_slice, output_slice),
([height_stride, in_channels, 1], [kernel_w * in_channels, in_channels, 1]),
tensordot_params,
)
tensordot_code = tensordot_impl(*tensordot_params)
return (intrin_tensordot, tensordot_code)
def depthwise_conv2d_nchw_oihw_dsp_compute(
_cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype
):
"""Depthwise conv2d schedule that can be tensorized using tensordot."""
stride_h, stride_w = _unpack_2d_argument(strides)
pad_up, pad_left, pad_down, pad_right = _unpack_padding(padding)
_check_no_dilation(dilation)
batch_size, in_channels, data_h, data_w = data.shape
_, c_mul, kernel_h, kernel_w = kernel.shape
output_channels = in_channels * c_mul
assert kernel.shape[0] == in_channels
output_h = _compute_output_dim(data_h, kernel_h, pad_up, pad_down, stride_h)
output_w = _compute_output_dim(data_w, kernel_w, pad_left, pad_right, stride_w)
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
padded_data = _pad_if_needed(data, "NCHW", (pad_up, pad_left, pad_down, pad_right))
return _wrap_te_compute(
(batch_size, output_h, output_w, output_channels),
lambda n, y, x, c: te.sum(
padded_data[
n,
indexdiv(c, c_mul),
y * stride_h + kh_i,
x * stride_w + kw_i,
].astype(out_dtype)
* kernel[indexdiv(c, c_mul), indexmod(c, c_mul), kh_i, kw_i].astype(out_dtype),
axis=(kh_i, kw_i),
),
out_layout,
name="depthwise_conv2d",
tag="depthwise_conv2d_nchw_oihw_dsp",
)
def _make_depthwise_conv2d_tensorization(padded_data, kernel):
_, _, _, padded_w = padded_data.shape
_, _, kernel_h, kernel_w = kernel.shape
in_dtype = padded_data.dtype
suffix = _get_suffix()
assert in_dtype == kernel.dtype
data_slice = te.placeholder((kernel_h, kernel_w), name="a", dtype=in_dtype)
kernel_slice = te.placeholder((kernel_h, kernel_w), name="b", dtype=in_dtype)
kh_i = te.reduce_axis((0, kernel_h), name="kh_i")
kw_i = te.reduce_axis((0, kernel_w), name="kw_i")
output_slice = te.compute(
(1,),
lambda k: te.sum(
data_slice[kh_i, kw_i].astype("int32") * kernel_slice[kh_i, kw_i].astype("int32"),
axis=[kh_i, kw_i],
),
name="c",
)
jump = padded_w - kernel_w
tensordot_params = (in_dtype, kernel_h, jump, kernel_w, suffix)
intrin_tensordot = make_intrin_tensordot(
(data_slice, kernel_slice, output_slice),
([padded_w, 1], [kernel_w, 1]),
tensordot_params,
)
tensordot_code = tensordot_impl(*tensordot_params)
return (intrin_tensordot, tensordot_code)
def tensordot_conv2ds_schedule(_cfg, outs):
"""Schedule function using v7e-m DSP instructions for all the conv2d operators in this file. We
use one schedule function for them all, because they are tensorized with the same kernel."""
schedule = te.create_schedule([x.op for x in outs])
def _callback(operator):
if "conv2d" in operator.tag:
output = operator.output(0)
padded_data = output.op.input_tensors[0]
kernel = output.op.input_tensors[1]
if operator.tag == "conv2d_nhwc_ohwi_dsp":
b_ax, y_ax, x_ax, co_ax = schedule[output].op.axis
kh_ax, kw_ax, ci_ax = schedule[output].op.reduce_axis
schedule[output].reorder(b_ax, y_ax, x_ax, co_ax, kh_ax, kw_ax, ci_ax)
intrin, code = _make_conv2d_tensorization(padded_data, kernel)
elif operator.tag == "depthwise_conv2d_nchw_oihw_dsp":
b_ax, y_ax, x_ax, co_ax = schedule[output].op.axis
kh_ax, kw_ax = schedule[output].op.reduce_axis
schedule[output].reorder(b_ax, co_ax, y_ax, x_ax, kh_ax, kw_ax)
intrin, code = _make_depthwise_conv2d_tensorization(padded_data, kernel)
else:
raise ValueError(f"Cannot tensorize {operator.tag} with tensordot!")
schedule[output].tensorize(kh_ax, intrin)
schedule[output].pragma(b_ax, "import_c", code)
traverse_inline(schedule, outs[-1].op, _callback)
return schedule
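# Illustrative sketch (hypothetical shapes, not executed anywhere in this module) of how the
# compute and schedule above are typically paired outside of the Relay strategy:
def _example_conv2d_dsp_schedule():
    """Build a tensorized schedule for a small int16 NHWC conv2d with an OHWI kernel."""
    data = te.placeholder((1, 16, 16, 8), dtype="int16", name="data")
    kernel = te.placeholder((8, 3, 3, 8), dtype="int16", name="kernel")
    output = conv2d_nhwc_ohwi_dsp_compute(
        None, data, kernel, strides=1, padding=(1, 1, 1, 1), dilation=1,
        out_layout="NHWC", out_dtype="int32",
    )
    return tensordot_conv2ds_schedule(None, [output])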
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
from .mprofile.dsp.pool import pool_dsp_schedule
def schedule_pool(outs, layout):
"""Create schedule for avgpool/maxpool with dsp"""
return pool_dsp_schedule(outs, layout)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on ARM"""
import tvm
from tvm import te
from tvm.ir import register_intrin_lowering
def gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type):
"""
Int8 4x4 matrix multiplication and accumulation using a sequence of
umull -> uadalp -> umull2 -> uadalp instructions. This function
takes two arrays of int8 data type A[4][K] and B[4][K], and produces
a 4x4 matrix which is equal to A*B'.
The pseudo code is as follows.
.. code-block:: c
void gemm_4x4_int8_int8_int32(int8 A[4][K], int8 B[4][K], int32 C[4][4]){
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
for (int k = 0; k < K; k++){
C[i][j] += A[i][k] * B[j][k]
}
}
}
Notes:
* The tiling strategy is picked to maximize register usage.
Parameters
----------
M : int
rows of the matrix A
N : int
columns of the matrix B
K : int
columns of matrix A
unroll : bool
Unroll the loop accumulation if True
in_type : str, {'uint8', 'int8'}
Returns
-------
intrin : TensorIntrin
The ARM uint8/int8 TensorIntrin that can be used in tensorizing schedule
"""
assert in_type in ["uint8", "int8"]
A = te.placeholder((K // 16, te.var("m"), 16), dtype=in_type, name="A")
B = te.placeholder((K // 16, te.var("n"), 16), dtype=in_type, name="B")
dtype_vec = in_type + "x16"
idxm = tvm.tir.indexmod
k = te.reduce_axis((0, K), "k")
C = te.compute(
(te.var("m"), te.var("n")),
lambda x, y: te.sum(
A[k // 16, x, idxm(k, 16)].astype("int32") * B[k // 16, y, idxm(k, 16)].astype("int32"),
axis=k,
),
name="C",
)
a_buffer = tvm.tir.decl_buffer(
A.shape,
dtype=in_type,
name="a_buffer",
offset_factor=1,
strides=[te.var("sa_1"), te.var("sa_2"), 1],
)
b_buffer = tvm.tir.decl_buffer(
B.shape,
dtype=in_type,
name="b_buffer",
offset_factor=1,
strides=[te.var("sb_1"), te.var("sb_2"), 1],
)
c_buffer = tvm.tir.decl_buffer(
C.shape, dtype="int32", name="c_buffer", offset_factor=1, strides=[te.var("sc"), 1]
)
# Intrinsics used in the following algorithm
umull_intrin = "llvm.aarch64.neon.umull" if in_type == "uint8" else "llvm.aarch64.neon.smull"
uaddlp_intrin = "llvm.aarch64.neon.uaddlp" if in_type == "uint8" else "llvm.aarch64.neon.saddlp"
addp_intrin = "llvm.aarch64.neon.addp"
def uadalp(a, b):
"""Add pair and accumulate
Parameters:
----------
a: int16x8 vector
b: int16x8 vector
Returns:
--------
return a int32x4 vector
Pseudocode:
----------
a += (b0+b1, b2+b3, b4+b5, b6+b7)
"""
return a + tvm.tir.call_llvm_pure_intrin(
"int32x4", uaddlp_intrin, tvm.tir.const(1, "uint32"), b
)
def umull(a, b):
"""Multiply long (higher part)
Parameters:
----------
a: int8x16 vector
b: int8x16 vector
Returns:
--------
return a int16x8 vector
Pseudocode:
----------
c = (a0*b0, a1*b1, a2*b2, a3*b3, a4*b4, a5*b5, a6*b6, a7*b7)
"""
a_high = tvm.tir.call_intrin("int8x8", "tir.vectorhigh", a)
b_high = tvm.tir.call_intrin("int8x8", "tir.vectorhigh", b)
c = tvm.tir.call_llvm_pure_intrin(
"int16x8", umull_intrin, tvm.tir.const(2, "uint32"), a_high, b_high
)
return c
def umull2(a, b):
"""Multiply long (lower part)
Parameters:
----------
a: int8x16 vector
b: int8x16 vector
Returns:
--------
return a int16x8 vector
Pseudocode:
----------
c = (a8*b8, a9*b9, a10*b10, a11*b11, a12*b12, a13*b13, a14*b14, a15*b15)
"""
a_low = tvm.tir.call_intrin("int8x8", "tir.vectorlow", a)
b_low = tvm.tir.call_intrin("int8x8", "tir.vectorlow", b)
c = tvm.tir.call_llvm_pure_intrin(
"int16x8", umull_intrin, tvm.tir.const(2, "uint32"), a_low, b_low
)
return c
def addp(a, b):
"""Add two vectors in pairs
Parameters:
----------
a: int32x4 vector
b: int32x4 vector
Returns:
--------
return a int32x4 vector
Pseudocode:
----------
            c = (a0+a1, a2+a3, b0+b1, b2+b3)
"""
return tvm.tir.call_llvm_pure_intrin(
"int32x4", addp_intrin, tvm.tir.const(2, "uint32"), a, b
)
def accumulation_loop(M, N, ins, acc, tile_idx):
"""Internal tile accumulation. This function
takes two arrays of int8 data type A[tile_idx][4][16] and B[tile_idx][4][16], produces
a 4x4 matrix which is equal to A*B' and accumulates into C[4][4]
The pseudo code is as follows.
.. code-block:: c
void gemm_4x4_int8_int8_int32(int8 A[tile_idx][4][K],
int8 B[tile_idx][4][K],
int32 C[4][4]){
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
for (int k = 0; k < 16; k++){
C[i][j] += A[tile_idx][i][k] * B[tile_idx][j][k]
}
}
}
Notes:
* The tiling strategy is picked to maximize register usage.
Parameters:
----------
M : int
Number of total rows of the output matrix
N : int
Number of total columns of the output matrix
ins : list of tvm.tir.buffer
Input buffers
acc : tvm.tir.ir_builder.BufferVar
Bank of register accumulators
        tile_idx : int
            Index of a sub-tile of A and B in A[tile_idx][:][:] and B[tile_idx][:][:].
            Please note that 0 <= tile_idx < K//16
"""
a0 = ins[0].vload([tile_idx, 0, 0], dtype_vec)
a1 = tvm.tir.const(0, "int8x16")
if M > 1:
a1 = ins[0].vload([tile_idx, 1, 0], dtype_vec)
a2 = tvm.tir.const(0, "int8x16")
if M > 2:
a2 = ins[0].vload([tile_idx, 2, 0], dtype_vec)
a3 = tvm.tir.const(0, "int8x16")
if M > 3:
a3 = ins[0].vload([tile_idx, 3, 0], dtype_vec)
b0 = ins[1].vload([tile_idx, 0, 0], dtype_vec)
b1 = tvm.tir.const(0, "int8x16")
if N > 1:
b1 = ins[1].vload([tile_idx, 1, 0], dtype_vec)
b2 = tvm.tir.const(0, "int8x16")
if N > 2:
b2 = ins[1].vload([tile_idx, 2, 0], dtype_vec)
b3 = tvm.tir.const(0, "int8x16")
if N > 3:
b3 = ins[1].vload([tile_idx, 3, 0], dtype_vec)
# First half
# Lower part of a0 * {b0,b1,b2,b3}
d00 = umull(a0, b0)
d01 = umull(a0, b1)
d02 = umull(a0, b2)
d03 = umull(a0, b3)
# Lower part of a1 * {b0,b1,b2,b3}
d10 = umull(a1, b0)
d11 = umull(a1, b1)
d12 = umull(a1, b2)
d13 = umull(a1, b3)
# Accumulate
acc[0] = uadalp(acc[0], d00)
acc[1] = uadalp(acc[1], d01)
acc[2] = uadalp(acc[2], d02)
acc[3] = uadalp(acc[3], d03)
acc[4] = uadalp(acc[4], d10)
acc[5] = uadalp(acc[5], d11)
acc[6] = uadalp(acc[6], d12)
acc[7] = uadalp(acc[7], d13)
# Higher part of a0 * {b0,b1,b2,b3}
d00 = umull2(a0, b0)
d01 = umull2(a0, b1)
d02 = umull2(a0, b2)
d03 = umull2(a0, b3)
# Higher part of a1 * {b0,b1,b2,b3}
d10 = umull2(a1, b0)
d11 = umull2(a1, b1)
d12 = umull2(a1, b2)
d13 = umull2(a1, b3)
# Accumulate again
acc[0] = uadalp(acc[0], d00)
acc[1] = uadalp(acc[1], d01)
acc[2] = uadalp(acc[2], d02)
acc[3] = uadalp(acc[3], d03)
acc[4] = uadalp(acc[4], d10)
acc[5] = uadalp(acc[5], d11)
acc[6] = uadalp(acc[6], d12)
acc[7] = uadalp(acc[7], d13)
# Second half
# Lower part of a2 * {b0,b1,b2,b3}
d00 = umull(a2, b0)
d01 = umull(a2, b1)
d02 = umull(a2, b2)
d03 = umull(a2, b3)
# Lower part of a3 * {b0,b1,b2,b3}
d10 = umull(a3, b0)
d11 = umull(a3, b1)
d12 = umull(a3, b2)
d13 = umull(a3, b3)
# Accumulate
acc[8] = uadalp(acc[8], d00)
acc[9] = uadalp(acc[9], d01)
acc[10] = uadalp(acc[10], d02)
acc[11] = uadalp(acc[11], d03)
acc[12] = uadalp(acc[12], d10)
acc[13] = uadalp(acc[13], d11)
acc[14] = uadalp(acc[14], d12)
acc[15] = uadalp(acc[15], d13)
# Higher part of a2 * {b0,b1,b2,b3}
d00 = umull2(a2, b0)
d01 = umull2(a2, b1)
d02 = umull2(a2, b2)
d03 = umull2(a2, b3)
        # Higher part of a3 * {b0,b1,b2,b3}
d10 = umull2(a3, b0)
d11 = umull2(a3, b1)
d12 = umull2(a3, b2)
d13 = umull2(a3, b3)
# Accumulate
acc[8] = uadalp(acc[8], d00)
acc[9] = uadalp(acc[9], d01)
acc[10] = uadalp(acc[10], d02)
acc[11] = uadalp(acc[11], d03)
acc[12] = uadalp(acc[12], d10)
acc[13] = uadalp(acc[13], d11)
acc[14] = uadalp(acc[14], d12)
acc[15] = uadalp(acc[15], d13)
def _intrin_func(ins, outs):
def _instr():
ib = tvm.tir.ir_builder.create()
# Allocate a local buffer (possibly translates to registers)
acc = ib.allocate("int32x4", 16, name="accs", scope="local")
m = outs[0].shape[0]
n = outs[0].shape[1]
# Initialization
for i in range(0, 16):
acc[i] = tvm.tir.const(0, "int32x4")
if unroll:
for i in range(0, int(K // 16)):
accumulation_loop(M, N, ins, acc, i)
else:
with ib.for_range(0, K // 16, name="i") as i:
accumulation_loop(M, N, ins, acc, i)
# Final accumulations
# acc[4*r + c] contains the partial accumulations of element C[r][c]
#
# In particular:
# acc[4*r] contains the partial sums of a[r,0:K].*b[0,0:K] -> (a,b,c,d)
# acc[4*r+1] contains the partial sums of a[r, 0:K].*b[1,0:K] -> (e,f,g,h)
# acc[4*r+2] contains the partial sums of a[r, 0:K].*b[2,0:K] -> (i,j,k,l)
# acc[4*r+3] contains the partial sums of a[r, 0:K].*b[3,0:K] -> (m,n,o,p)
#
# Please note that 0<= r, c < 4
acc[0] = addp(acc[0], acc[1]) # (a+b, c+d, e+f, g+h)
acc[1] = addp(acc[2], acc[3]) # (i+j, k+l, m+n, o+p)
acc[0] = addp(acc[0], acc[1]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
acc[4] = addp(acc[4], acc[5]) # (a+b, c+d, e+f, g+h)
acc[5] = addp(acc[6], acc[7]) # (i+j, k+l, m+n, o+p)
acc[4] = addp(acc[4], acc[5]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
acc[8] = addp(acc[8], acc[9]) # (a+b, c+d, e+f, g+h)
acc[9] = addp(acc[10], acc[11]) # (i+j, k+l, m+n, o+p)
acc[8] = addp(acc[8], acc[9]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
acc[12] = addp(acc[12], acc[13]) # (a+b, c+d, e+f, g+h)
acc[13] = addp(acc[14], acc[15]) # (i+j, k+l, m+n, o+p)
acc[12] = addp(acc[12], acc[13]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)
# Store the result
if N > 3:
out_0 = acc[0]
out_1 = acc[4]
out_2 = acc[8]
out_3 = acc[12]
elif N > 2:
out_0 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[0])
out_1 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[4])
out_2 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[8])
out_3 = tvm.tir.call_intrin("int32x3", "tir.reinterpret", acc[12])
elif N > 1:
out_0 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[0])
out_1 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[4])
out_2 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[8])
out_3 = tvm.tir.call_intrin("int32x2", "tir.reinterpret", acc[12])
else:
out_0 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[0])
out_1 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[4])
out_2 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[8])
out_3 = tvm.tir.call_intrin("int32", "tir.reinterpret", acc[12])
ib.emit(outs[0].vstore([0, 0], out_0))
if M > 1:
ib.emit(outs[0].vstore([1, 0], out_1))
if M > 2:
ib.emit(outs[0].vstore([2, 0], out_2))
if M > 3:
ib.emit(outs[0].vstore([3, 0], out_3))
return ib.get()
# body, reset, update
return _instr()
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={A: a_buffer, B: b_buffer, C: c_buffer},
default_buffer_params=buffer_params,
)
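# Illustrative sketch (hypothetical tile sizes): this intrinsic is meant to be applied with
# s[...].tensorize(...) by the int8 GEMM schedules; a standalone declaration for a full 4x4
# output tile with a K=16 reduction looks like this.
def _example_gemm_4x4_intrin():
    """Declare the unrolled signed-int8 variant of the 4x4 GEMM intrinsic."""
    return gemm_4x4_int8_int8_int32(M=4, N=4, K=16, unroll=True, in_type="int8")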
def dot_int8_int8_int32_neon_82(int32_lanes, dtype="uint"):
"""
Int8 dot product by every 4 elements using ARM v8.2 udot.
This function takes two arrays of int8 datatype -- data[4] and
kernel[int32_lanes][4] -- and computes a dot product of data[4] with every
4 elements of kernels, resulting in output[int32_lanes] of uint32 datatype.
The pseudo code is as follows.
.. code-block:: c
void dot_int8_int8_int32(int8 data[4], int8 kernel[16][4], int32 output[16]){
for (int i = 0; i < int32_lanes; i++){
out[i] = 0;
for (int k = 0; k < 4; k++){
out[i] += data[k] * kernel[i][k]
}
}
}
Physically, the kernel array sits in a vector register and
the data[4] is broadcasted to another vector register. This
function returns a TensorIntrin that can be used to tensorize
a schedule.
Parameters
----------
int32_lanes : int
How many int32/uint32 to produce
dtype : str, optional, {"uint", "int"}
Whether it works on unsigned int or signed int
Returns
-------
intrin : TensorIntrin
The ARM uint8 TensorIntrin that can be used in tensorizing schedule
"""
num_int8_elements = 4 # 4 int8 elements in int32
data = te.placeholder((num_int8_elements,), dtype="%s8" % dtype, name="data")
kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="%s8" % dtype, name="kernel")
k = te.reduce_axis((0, num_int8_elements), name="k")
C = te.compute(
(int32_lanes,),
lambda i: te.sum(
data[k].astype("%s32" % dtype) * kernel[i, k].astype("%s32" % dtype), axis=k
),
name="C",
)
a_buffer = tvm.tir.decl_buffer(
data.shape, dtype="%s8" % dtype, name="a_buffer", offset_factor=1, strides=[1]
)
b_buffer = tvm.tir.decl_buffer(
kernel.shape,
dtype="%s8" % dtype,
name="b_buffer",
offset_factor=1,
strides=[te.var("s"), 1],
)
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
ib.emit(outs[0].vstore(0, tvm.tir.const(0, "%s32x%d" % (dtype, int32_lanes))))
return ib.get()
dtype_a = "%s8x%d" % (dtype, num_int8_elements)
dtype_b = "%s8x%d" % (dtype, int32_lanes * num_int8_elements)
dtype_c = "%s32x%d" % (dtype, int32_lanes)
a_int8 = ins[0].vload([0], dtype_a)
re_int32 = tvm.tir.call_intrin("%s32" % dtype, "tir.reinterpret", a_int8)
# broadcast a
vec_ai32 = re_int32.astype(dtype_c)
vec_a = tvm.tir.call_intrin(dtype_b, "tir.reinterpret", vec_ai32)
vec_b = ins[1].vload([0, 0], dtype_b)
vec_c = outs[0].vload([0], dtype_c)
inst = "udot" if dtype == "uint" else "sdot"
inst = "llvm.aarch64.neon.%s.v%di32.v%di8" % (
inst,
int32_lanes,
int32_lanes * num_int8_elements,
)
vdot = tvm.tir.call_llvm_pure_intrin(
dtype_c, inst, tvm.tir.const(3, "uint32"), vec_c, vec_a, vec_b
)
ib.emit(outs[0].vstore(0, vdot))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={data: a_buffer, kernel: b_buffer},
default_buffer_params=buffer_params,
)
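# Illustrative sketch: int32_lanes is typically 4 (one 128-bit vector of int32 results per
# udot/sdot); a standalone declaration of the unsigned variant would look like this.
def _example_udot_intrin():
    """Declare the Armv8.2-a dot-product intrinsic producing four uint32 lanes."""
    return dot_int8_int8_int32_neon_82(int32_lanes=4, dtype="uint")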
def dot_int8_int8_int32_neon():
"""
    Int8 dot product using smull and saddlp instructions
.. code-block:: c
void dot_int8_int8_int32(int8 data[4], int8 kernel[4][4], int32 output[4]){
for (int i = 0; i < 4; i++){
out[i] = 0;
for (int k = 0; k < 4; k++){
out[i] += data[k] * kernel[i][k]
}
}
}
We use the smull and saddlp instructions to compute the dot product.
    smull : int8x8 -> int8x8 -> int16x8 elementwise multiplication
saddlp: int16x8 -> int32x4 pairwise addition of elements
Data is broadcast across the register
int8 elements
| data | data |
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
smull
int8 elements
| kernel[i] | kernel[i+1] |
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
=
int16 elements
| data * kernel[i] | data * kernel[i+1] |
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
saddlp =
int32 elements
| partial sum(data * kernel[i]) | partial sum(data * kernel[i+1]) |
| 0 | 1 | 2 | 3 |
We apply the above kernel twice and use addp to compute the second set of pairwise additions
    int32 elements (narrowed so they fit on a line)
| psum d*k[i] | psum d*k[i+1] | | psum d*k[i+2] | psum d*k[i+3] |
| 0 | 1 | 2 | 3 | addp | 4 | 5 | 6 | 7 |
=
|sum d*ki |sum d*ki1|sum d*ki2|sum d*ki3|
| 0 | 1 | 2 | 3 |
"""
    int32_lanes = 4  # 4 int32 lanes = 128 bits
num_int8_elements = 4 # 4 int8 elements in int32
data = te.placeholder((num_int8_elements,), dtype="int8", name="data")
kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="int8", name="kernel")
k = te.reduce_axis((0, num_int8_elements), name="k")
C = te.compute(
(int32_lanes,),
lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k),
name="C",
)
a_buffer = tvm.tir.decl_buffer(
data.shape, dtype="int8", name="a_buffer", offset_factor=1, strides=[1]
)
b_buffer = tvm.tir.decl_buffer(
kernel.shape, dtype="int8", name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1]
)
def _intrin_func(ins, outs):
def _instr(index):
int_8xl = "int8x8"
int_32xl = "int32x4"
ib = tvm.tir.ir_builder.create()
if index == 1:
ib.emit(outs[0].vstore(0, tvm.tir.const(0, int_32xl)))
return ib.get()
# this broadcasts data to the vector size
a_int8 = ins[0].vload([0], "int8x4")
re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_int8)
vec_ai32 = re_int32.astype("int32x2")
vec_a = tvm.tir.call_intrin(int_8xl, "tir.reinterpret", vec_ai32)
vec_b = ins[1].vload([0, 0], "int8x16")
def pairwise_add_mul(extract_half):
vec_b_half = tvm.tir.call_intrin("int8x8", extract_half, vec_b)
multiply = tvm.tir.call_llvm_pure_intrin(
"int16x8",
"llvm.aarch64.neon.smull.v8i16", # saturating pairwise multiplication
tvm.tir.const(2, "uint32"),
vec_a,
vec_b_half,
)
pairwise_reduction = tvm.tir.call_llvm_pure_intrin(
"int32x4",
"llvm.aarch64.neon.saddlp.v4i32.v8i16",
tvm.tir.const(1, "uint32"),
multiply,
)
return pairwise_reduction
pair_1 = pairwise_add_mul("tir.vectorlow")
pair_2 = pairwise_add_mul("tir.vectorhigh")
quad_reduction = tvm.tir.call_llvm_pure_intrin(
"int32x4",
"llvm.aarch64.neon.addp.v4i32",
tvm.tir.const(2, "uint32"),
pair_1,
pair_2,
)
if index == 0:
ib.emit(outs[0].vstore(0, quad_reduction))
else:
ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], int_32xl)))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={data: a_buffer, kernel: b_buffer},
default_buffer_params=buffer_params,
)
def select_word(vec, lane, dtype_vec):
"""
Utility function used to select a int8x4 word within a int8x16 vector
and replicate 4 times.
The pseudo-code for this operation is:
v = [x0, ..., x15]
vsub(lane) = v[4*lane:4*lane+3]
replicated_v(lane) = [vsub(lane), vsub(lane), vsub(lane), vsub(lane)]
Note that 0<=lane<4
Parameters
----------
vec : tvm.tir.Expr
int8x16 vector expression
lane : int
vector lane we want to replicate
dtype_vec : str
vector data type (e.g., int8x16)
Returns
----------
output : tvm.tir.Expr
replicated vector
"""
# Reinterpret vec_a as 4 int32 words
vec_int32 = tvm.tir.call_intrin("int32x4", "tir.reinterpret", vec)
# Broadcast the lane-th word
vec_int32_shuffled = tvm.tir.Shuffle([vec_int32], [lane, lane, lane, lane])
    # Convert back to the original vector type (int8x16 or uint8x16)
vec_int8_broadcast = tvm.tir.call_intrin(dtype_vec, "tir.reinterpret", vec_int32_shuffled)
return vec_int8_broadcast
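# For example, with vec = [x0, x1, ..., x15] and lane = 2, the reinterpret/shuffle pair above
# produces [x8, x9, x10, x11] repeated four times across the 16 int8 lanes.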
def gemm_acc_4x4_int8_int8_int32(dtype):
"""
Int8 4x4 matrix multiplication and accumulation using sdot/udot
instructions. This function takes two arrays of int8 datatype
-- A[4][4] and B[4][4] and produces a 4x4 matrix
which is equal to A*B'.
The pseudo code is as follows.
.. code-block:: c
void gemm_acc_4x4_int8_int8_int32(int8 A[4][4], int8 B[4][4], int32 C[4][4]){
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
for (int k = 0; k < 4; k++){
C[i][j] += A[i][k] * B[j][k]
}
}
}
Notes:
* The tiling strategy is picked to maximize register usage.
Parameters
----------
dtype : str, {"uint8", "int8"}
Whether it works on unsigned int or signed int
Returns
-------
intrin : TensorIntrin
The Arm TensorIntrin that can be used in tensorizing schedule
"""
assert dtype in ["uint8", "int8"]
# This needs to be a variable number of "rows" since TVM
# "thinks" I only need to compute one row because of
# padding
A = te.placeholder((te.var("rows"), 4), dtype, name="A")
B = te.placeholder((4, 4), dtype, name="B")
dtype_vec = dtype + "x16"
k = te.reduce_axis((0, 4), name="k")
C = te.compute(
(te.var("rows"), 4),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
aa_buffer = tvm.tir.decl_buffer(
A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
)
bb_buffer = tvm.tir.decl_buffer(
B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb"), 1]
)
cc_buffer = tvm.tir.decl_buffer(
C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
)
llvm_intrin = "llvm.aarch64.neon.sdot" if dtype == "int8" else "llvm.aarch64.neon.udot"
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
for i in range(0, 4):
ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, "int32x4")))
return ib.get()
# Load all the elements of tile A.
# vec_a = [a, b, c, d,
# e, f, g, h,
# l, m, n, o,
# p, q, r, s];
vec_a = ins[0].vload([0, 0], dtype_vec)
# Replicate 4 times the i-th row of A. For instance,
# vec_a[0] = [a, b, c, d,
# a, b, c, d,
# a, b, c, d,
# a, b, c, d,];
vec_aa = [select_word(vec_a, i, dtype_vec) for i in range(0, 4)]
# Load all the elements of B. Remember that B
# is transposed:
# vec_b = [0, 4, 8, 12,
# 1, 5, 9, 13,
# 2, 6, 10, 14,
# 3, 7, 11, 15,];
vec_b = ins[1].vload([0, 0], dtype_vec)
# Execute the dot product
for i in range(0, 4):
vec_c = outs[0].vload([i, 0], "int32x4")
# Compute the product between the i-th row of A
                # and all the rows of B. Remember that sdot/udot
                # subdivide the 16-element input vectors into groups of 4 elements
                # and then take the dot product within each group.
# The result is stored in a int32x4 register
#
# For instance, for i=0, we have:
# sdot(vec_aa[0], vec_b) = [a*0+b*4+c*8+d*12,
# a*1+b*5+c*9+d*13,
# a*2+b*6+c*10+d*14,
# a*3+b*7+c*11+d*15]
vdot = tvm.tir.call_llvm_intrin(
"int32x4",
llvm_intrin,
tvm.tir.const(3, "uint32"),
vec_c,
vec_b,
vec_aa[i],
)
# Store the result
ib.emit(outs[0].vstore([i, 0], vdot))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
default_buffer_params=buffer_params,
)
def gemm_acc_nx16_int8_int8_int32(dtype, rows):
"""
Int8 nx16 matrix multiplication and accumulation using sdot/udot instructions
    This function takes two arrays of int8 datatype -- A[n][16] and
    B[4][16][4] -- and produces a rows x 16 matrix which is equal to A*B'
The pseudo code is as follows.
.. code-block:: c
void mmla_nx16_int8_int8_int32(int8 A[n][16], int8 B[4][16][4], int32 output[n][16]){
for (int i = 0; i < n; i++){
for (int j = 0; j < 16; j++){
for (int k = 0; k < 16; k++){
out[i][j] += A[i][k] * B[k//4][j][k%4]
}
}
}
}
Notes:
* The tile size of B is 16x4. Since the reduction variable k moves between 0 and 16
we need 4 tiles of B to compute a single row of the output. The first 4 values of
k will be fetched from B[0][j][k], the second batch of 4 from B[1][j][k] and so on
* The tiling strategy is picked to maximize register usage.
Parameters
----------
dtype : str, {"uint8", "int8"}
Whether it works on unsigned int or signed int
rows : int
Number of the output rows "n"
Returns
-------
intrin : TensorIntrin
The Arm TensorIntrin that can be used in tensorizing schedule
"""
assert dtype in ["uint8", "int8"]
A = te.placeholder((rows, 16), dtype, name="A")
B = te.placeholder((4, 16, 4), dtype, name="B")
dtype_vec = dtype + "x16"
idxm = tvm.tir.indexmod
k = te.reduce_axis((0, 16), name="k")
C = te.compute(
(rows, 16),
lambda i, j: te.sum(
A[i, k].astype("int32") * B[k // 4, j, idxm(k, 4)].astype("int32"), axis=k
),
name="C",
)
aa_buffer = tvm.tir.decl_buffer(
A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
)
bb_buffer = tvm.tir.decl_buffer(
B.shape,
dtype,
name="bb_buffer",
offset_factor=1,
strides=[te.var("sb0"), te.var("sb1"), 1],
)
cc_buffer = tvm.tir.decl_buffer(
C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
)
llvm_intrin = "llvm.aarch64.neon.sdot" if dtype == "int8" else "llvm.aarch64.neon.udot"
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
for i in range(0, rows):
ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, "int32x16")))
return ib.get()
# Iterate on the number of rows of the output
for k in range(0, rows):
# Load 16 elements of A
# vec_a = [a, b, c, d, e, f, g, h, l, m, n, o, p, q, r, s];
vec_a = ins[0].vload([k, 0], dtype_vec)
# Iterate over each of the 4 rowsx4 tiles of the output
for j in range(0, 4):
# Accumulate over each of the 4 (16x4) tiles contained in B
for i in range(0, 4):
# Replicate a single 4-element group of A (A[k, i:i+4])
vec_aa = select_word(vec_a, i, dtype_vec)
# Load 4 rows (each rows with 4 elements) from B (B[i:i+4, j:j+4])
# vec_b = [0, 16, 32, 48,
# 1, 17, 33, 49,
# 2, 18, 34, 50,
# 3, 19, 35, 51,];
vec_b = ins[1].vload([i, 4 * j, 0], dtype_vec)
# Accumulate in the correct part of the output
vec_c = outs[0].vload([k, 4 * j], "int32x4")
# Compute the dot product between the rowsx4 tile
# from A and the 4x4 tile from B
#
# For instance, for i=0, we have:
# sdot(vec_aa[0], vec_b) = [a*0+b*16+c*32+d*48,
# a*1+b*17+c*33+d*49,
# a*2+b*18+c*34+d*50,
# a*3+b*19+c*35+d*51]
vdot = tvm.tir.call_llvm_intrin(
"int32x4",
llvm_intrin,
tvm.tir.const(3, "uint32"),
vec_c,
vec_b,
vec_aa,
)
ib.emit(outs[0].vstore([k, 4 * j], vdot))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
default_buffer_params=buffer_params,
)
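# For example, gemm_acc_nx16_int8_int8_int32("int8", rows=2) declares an intrinsic that matches
# a 2x16 int32 output tile computed from A[2][16] and the interleaved B[4][16][4] tile above.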
def smlal_int16_int32():
"""
Intrinsic to be used in order to load two int16x8 vectors and multiply
them together through a pair of smlal/smlal2 instructions. The pseudo-code
for the algorithm is as follows:
vec_a = vload(A, "int16x8")
vec_b = vload(B, "int16x8")
vec_c[0:4] += vec_a[0:4]*vec_b[0:4] // -> smlal instruction
vec_c[4:8] += vec_a[4:8]*vec_b[4:8] // -> smlal2 instruction
    So we load each operand as a single int16x8 vector and accumulate its lower (0:4) and
    higher (4:8) parts separately.
"""
int16_lanes = 8
A = te.placeholder((int16_lanes,), dtype="int16", name="A")
B = te.placeholder((int16_lanes, 1), dtype="int16", name="B")
C = te.compute(
(int16_lanes,),
lambda i: A[i].astype("int32") * B[i, 0].astype("int32"),
name="C",
)
a_buffer = tvm.tir.decl_buffer(
A.shape, dtype="int16", name="a_buffer", offset_factor=1, strides=[1]
)
b_buffer = tvm.tir.decl_buffer(
B.shape,
dtype="int16",
name="b_buffer",
offset_factor=1,
strides=[te.var("sb"), 1],
)
c_buffer = tvm.tir.decl_buffer(
C.shape,
dtype="int32",
name="c_buffer",
offset_factor=1,
strides=[1],
)
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
ib.emit(outs[0].vstore(0, tvm.tir.const(0, "int32x8")))
return ib.get()
vec_a = ins[0].vload([0], "int16x8")
vec_b = ins[1].vload([0, 0], "int16x8")
inst = "llvm.aarch64.neon.smull"
# Higher part of the vector
vec_c_h = outs[0].vload([4], "int32x4")
vec_a_h = tvm.tir.call_intrin("int16x4", "tir.vectorhigh", vec_a)
vec_b_h = tvm.tir.call_intrin("int16x4", "tir.vectorhigh", vec_b)
vmull_h = tvm.tir.call_llvm_pure_intrin(
"int32x4", inst, tvm.tir.const(2, "uint32"), vec_a_h, vec_b_h
)
vec_out_h = vec_c_h + vmull_h
# Lower part of the vector
vec_c_l = outs[0].vload([0], "int32x4")
vec_a_l = tvm.tir.call_intrin("int16x4", "tir.vectorlow", vec_a)
vec_b_l = tvm.tir.call_intrin("int16x4", "tir.vectorlow", vec_b)
vmull_l = tvm.tir.call_llvm_pure_intrin(
"int32x4", inst, tvm.tir.const(2, "uint32"), vec_a_l, vec_b_l
)
vec_out_l = vec_c_l + vmull_l
# Combine higher and lower part in a single int32x8 vector to store
            # (this will require two different store instructions, since the
            # length of a NEON vector is fixed at 128 bits)
vec_out = tvm.tir.call_intrin("int32x8", "tir.vectorcombine", vec_out_l, vec_out_h)
ib.emit(outs[0].vstore(0, vec_out))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={A: a_buffer, B: b_buffer, C: c_buffer},
default_buffer_params=buffer_params,
)
def gemm_acc_2x2_int8_int8_int32(dtype):
"""
Int8 2x2 matrix multiplication using smmla/ummla instructions
This function takes two arrays of int8 datatype -- A[2][8] and
B[2][8] and produces a 2x2 matrix which is equal to A*B'
The pseudo code is as follows.
.. code-block:: c
void mmla_2x2_int8_int8_int32(int8 A[2][8], int8 B[2][8], int32 C[2][2]){
for (int i = 0; i < 2; i++){
for (int j = 0; j < 2; j++){
for (int k = 0; k < 8; k++){
C[i][j] += A[i][k] * B[j][k]
}
}
}
Parameters
----------
dtype : str, {"uint8", "int8"}
Whether it works on unsigned int or signed int
Returns
-------
intrin : TensorIntrin
The Arm TensorIntrin that can be used in tensorizing schedule
"""
assert dtype in ["uint8", "int8"]
A = te.placeholder((2, 8), dtype, name="A")
B = te.placeholder((2, 8), dtype, name="B")
dtype_vec = dtype + "x16"
k = te.reduce_axis((0, 8), name="k")
C = te.compute(
(2, 2),
lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
name="C",
)
aa_buffer = tvm.tir.decl_buffer(
A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1]
)
bb_buffer = tvm.tir.decl_buffer(
B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb"), 1]
)
cc_buffer = tvm.tir.decl_buffer(
C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1]
)
llvm_intrin = "llvm.aarch64.neon.smmla" if dtype == "int8" else "llvm.aarch64.neon.ummla"
def _intrin_func(ins, outs):
def _instr(index):
ib = tvm.tir.ir_builder.create()
if index == 1:
ib.emit(outs[0].vstore([0, 0], tvm.tir.const(0, "int32x4")))
return ib.get()
# Load in vec_a the two rows of A
# vec_a = [a, b, c, d, e, f, g, h;
# i, j, k, l, m, n, o, p,]
vec_a = ins[0].vload([0, 0], dtype_vec)
# Load in vec_b the two rows of B
# vec_b = [0, 2, 4, 6, 8, 10, 12, 14;
            #          1, 3, 5, 7, 9, 11, 13, 15,]
vec_b = ins[1].vload([0, 0], dtype_vec)
# Execute the matrix multiplication via (s/u)mmla:
# vec_c = [a*0 + b*2 + c*4 + d*6 +e*8 + f*10 + g*12 + h*14;
# a*1 + b*3 + c*5 + d*7 +e*9 + f*11 + g*13 + h*15;
# i*0 + j*2 + k*4 + l*6 +m*8 + n*10 + o*12 + p*14;
# i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15]
vec_c = outs[0].vload([0, 0], "int32x4")
vmmla = tvm.tir.call_llvm_intrin(
"int32x4",
llvm_intrin,
tvm.tir.const(3, "uint32"),
vec_c,
vec_a,
vec_b,
)
# Store the result
ib.emit(outs[0].vstore([0, 0], vmmla))
return ib.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
C.op,
_intrin_func,
binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},
default_buffer_params=buffer_params,
)
def _q_multiply_shift_arm(op):
"""
Implementation of q_multiply_shift_arm through arm intrinsics
sqrdmulh and srshl when q == 31.
    Please note that this introduces a small round-up error in
    some corner cases. This is because we are rounding twice instead
    of only once. I.e.:
* original q_multiply_shift: round(x*y*2^-s)
* arm q_multiply_shift: round(round(x*y)*2^-s)
"""
x = op.args[0]
y = op.args[1]
q = op.args[2]
s = op.args[3]
# Don't use this intrinsic if we don't have a int32x4 vector
# or if we are not multiplying q31 numbers
if x.dtype != "int32x4" or q.value != 31:
return op
# Case 1, shift is negative
sqrdmulh = tvm.tir.call_llvm_intrin(
op.dtype, "llvm.aarch64.neon.sqrdmulh", tvm.tir.const(2, "uint32"), x, y
)
fixup = (sqrdmulh & (-s)) >> 31
fixed_up_x = sqrdmulh + fixup
out_1 = tvm.tir.call_llvm_intrin(
op.dtype, "llvm.aarch64.neon.srshl", tvm.tir.const(2, "uint32"), sqrdmulh, s
)
# Case 2, shift is positive
x = x * (1 << (s))
out_2 = tvm.tir.call_llvm_intrin(
op.dtype, "llvm.aarch64.neon.sqrdmulh", tvm.tir.const(2, "uint32"), x, y
)
# Select depending on the shift
return tvm.tir.Select(s < 0, out_1, out_2)
register_intrin_lowering(
"tir.q_multiply_shift", target="llvm.aarch64", f=_q_multiply_shift_arm, level=99
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""ARM Mali GPU specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .gemm import *
from .conv2d import *
from .dense import *
from .depthwise_conv2d import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d schedule on ARM Mali (Bifrost) GPU"""
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .gemm import decl_winograd_gemm, schedule_gemm
from .transforms import tile_and_bind, tile_and_bind3d
from ..utils import traverse_inline, get_const_int, get_const_tuple
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
# reuse some compute declarations from ARM CPU
from ..arm_cpu.conv2d_spatial_pack import conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.bifrost")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for conv2d
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width] or
pre-packed 5-D with shape [num_filter_chunk, in_channel, filter_height,
filter_width, num_filter_block]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return conv2d_spatial_pack_nchw(
cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=3
)
@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.bifrost")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
"""TOPI schedule callback for conv2d
Parameters
----------
cfg: ConfigEntity
The configuration of this template
outs: Array of Tensor
The computation graph description of convolution2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d
"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
# schedule conv2d
if "spatial_conv2d_output" in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
s[data_pad].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == "kernel_vec":
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
_schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec)
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
"""schedule the spatial packing for conv2d"""
data = s[data_vec].op.input_tensors[0]
max_unroll = 16
vec_size = [1, 2, 4, 8, 16]
# get tunable parameters (they are defined in compute)
BC, TC, VC = cfg["tile_co"].size
BH, TH, VH = cfg["tile_oh"].size
BW, TW, VW = cfg["tile_ow"].size
# schedule padding
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
s[data_pad].compute_inline()
# schedule data packing
if isinstance(data_vec.op, te.tensor.ComputeOp) and data_vec.op.name == "data_vec_undilated":
_, h, w, ci, _, _, vh, vw = s[data_vec].op.axis
else:
_, h, w, ci, vh, vw = s[data_vec].op.axis
tile_and_bind3d(s, data_vec, h, w, ci, 1)
if vh.dom.extent.value < max_unroll:
s[data_vec].unroll(vh)
if vw.dom.extent.value < max_unroll:
s[data_vec].unroll(vw)
if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
if not autotvm.GLOBAL_SCOPE.in_tuning:
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
co, ci, kh, kw, vc = s[kernel_vec].op.axis
fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
fused, vec = s[kernel_vec].split(fused, VC)
bb, tt = s[kernel_vec].split(fused, max_threads)
s[kernel_vec].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_vec].bind(tt, te.thread_axis("threadIdx.x"))
if VC in vec_size:
s[kernel_vec].vectorize(vec)
# schedule convolution
n, c, h, w, vh, vw, vc = s[conv].op.axis
kc, kh, kw = s[conv].op.reduce_axis
cfg["reorder_0"].apply(s, conv, [n, c, h, w, kc, kh, kw, vh, vw, vc])
tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)
cfg["ann_reduce"].apply(
s,
conv,
[kh, kw],
axis_lens=[get_const_int(kernel_vec.shape[2]), get_const_int(kernel_vec.shape[3])],
max_unroll=max_unroll,
)
cfg["ann_spatial"].apply(
s,
conv,
[vh, vw, vc],
axis_lens=[VH, VW, VC],
max_unroll=max_unroll,
vec_size=vec_size,
cfg=cfg,
)
# schedule output
if output.op not in s.outputs: # has bias
s[output].compute_inline()
output = s.outputs[0]
_, co, oh, ow = s[output].op.axis
tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)
return s
@autotvm.register_topi_compute("conv2d_nchw_winograd.bifrost")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Use Winograd as the convolution method"""
return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.bifrost")
def schedule_conv2d_nchw_winograd(cfg, outs):
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "winograd_conv2d_output" in op.tag:
_schedule_winograd(cfg, s, op)
traverse_inline(s, outs[0].op, _callback)
return s
def _decl_winograd_kernel_transform(kernel, tile_size, G):
"""Declare a Winograd kernel transform
This exists separately to allow for precomputation
The precomputation will most often happen on CPU
Parameters
----------
kernel : tvm.te.Tensor
The kernel to transform
    tile_size : int
        The size of the tile to use for the Winograd filter
    G : tvm.te.Tensor
        The Winograd kernel transform matrix
Returns
-------
U : tvm.te.Tensor
Transformed kernel
"""
CO, CI, KH, KW = [get_const_int(x) for x in kernel.shape]
# Only support 32 bit floats
out_dtype = "float32"
alpha = G.shape[0]
K = CO
C = CI
def upround(x, align):
return (x + align - 1) // align * align
ALIGN = 16
K_round = upround(K, ALIGN)
# Padded Kernel [K_round, C, KH, KW]
# Pad the number of kernels to multiple of ALIGN
padded_kernel = te.compute(
(K_round, C, KH, KW),
lambda k, c, h, w: tvm.tir.if_then_else(
k < K, kernel[k][c][h][w], tvm.tir.const(0, out_dtype)
),
name="padded_kernel",
)
# U [alpha, alpha, K_round, C]
# Perform the kernel transform
r_kh = te.reduce_axis((0, KH), "r_kh")
r_kw = te.reduce_axis((0, KW), "r_kw")
U = te.compute(
(alpha, alpha, K_round, C),
lambda eps, nu, k, c: te.sum(
padded_kernel[k][c][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="U",
)
return U
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size=2):
"""Declare a winograd convolution - only tile_size=2 is currently supported"""
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if int(kernel.shape[2]) == 3:
if dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
pre_computed = False
CO, _, KH, KW = get_const_tuple(kernel.shape)
else:
assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
pre_computed = True
H_CAT, W_CAT, CO, CI = get_const_tuple(kernel.shape)
KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")
r = KW
m = tile_size
alpha = m + r - 1
A, B, G = winograd_transform_matrices(m, r, out_dtype)
K = CO
C = CI
H = (IH + pt + pb - 3) // HSTR + 1
W = (IW + pl + pr - 3) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW
def upround(x, align):
return (x + align - 1) // align * align
ALIGN = 16
P_round = upround(P, ALIGN)
K_round = upround(K, ALIGN)
# CONFIG
cfg.define_knob("data_transform_wgx", [1, 2, 4, 8, 16, 32, 64])
cfg.define_knob("data_transform_wgy", [1, 2, 4, 8, 16, 32, 64])
# Pack input tile
input_tile = te.compute((N, C, H + 2, W + 2), lambda n, c, h, w: data_pad[n][c][h][w], name="d")
if autotvm.GLOBAL_SCOPE.in_tuning:
VC = cfg["tile_k"].size[-1]
kvshape = (KH + tile_size - 1, KW + tile_size - 1, tvm.tir.indexdiv(CO, VC), CI, VC)
U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
else:
if pre_computed:
U = kernel
else:
U = _decl_winograd_kernel_transform(kernel, tile_size, G)
    # V [alpha * alpha, C, P_round]
# Perform the image transform
r_eps = te.reduce_axis((0, alpha), "r_eps")
r_nu = te.reduce_axis((0, alpha), "r_nu")
V = te.compute(
(alpha * alpha, C, P_round),
lambda epsnu, c, b: te.sum(
input_tile[b // (nH * nW)][c][b // nW % nH * m + r_eps][b % nW * m + r_nu]
* B[r_eps][epsnu // alpha]
* B[r_nu][epsnu % alpha],
axis=[r_eps, r_nu],
),
name="V",
)
# Winograd GEMM is a wrapper around batched GEMM to convert U to a 3D Tensor
_, M = decl_winograd_gemm(cfg, U, V)
# Y [K, P, m, m]
# Winograd output transform
r_eps = te.reduce_axis((0, alpha), "r_eps")
r_nu = te.reduce_axis((0, alpha), "r_nu")
Y = te.compute(
(K, P, m, m),
lambda k, b, vh, vw: te.sum(
M[r_eps * alpha + r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw], axis=[r_eps, r_nu]
),
name="Y",
)
# Output [N, K, H, W]
# Unpack back to NCHW format
# The last term ensures alignment is not lost to bound inference
output = te.compute(
(N, K, H, W),
lambda n, k, h, w: Y[k][n * nH * nW + (h // m) * nW + w // m][h % m][w % m]
+ tvm.tir.const(0, out_dtype) * M[(alpha * alpha) - 1][K_round - 1][P_round - 1],
name="output",
tag="winograd_conv2d_output",
)
return output
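# A worked illustration of the tiling arithmetic above (hypothetical sizes, not taken from
# any particular workload): for a 56x56 input with unit padding, tile_size m = 2 and a 3x3
# kernel, alpha = m + r - 1 = 4, H = W = 56, nH = nW = (56 + 2 - 1) // 2 = 28, and for
# N = 1 there are P = 1 * 28 * 28 = 784 tiles (already a multiple of ALIGN = 16) that are
# transformed, multiplied in the alpha * alpha batched GEMM, and inverse-transformed back
# into the 56x56 output.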
def _schedule_winograd(cfg, s, op):
"""Schedule Winograd convolution for Bifrost"""
# Get ops and tensors
output = op.output(0)
Y = op.input_tensors[0]
M, A = s[Y].op.input_tensors
U_3D, V = s[M].op.input_tensors
U = s[U_3D].op.input_tensors[0]
d, B = s[V].op.input_tensors
data_pad = s[d].op.input_tensors[0]
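    # Orientation note for the tensors recovered above: padded_kernel and G produce the
    # kernel transform U, d (packed from data_pad) and B produce the data transform V,
    # U_3D and V feed the batched GEMM M, and M together with A yields the inverse
    # transform Y, which is unpacked into the final NCHW output.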
if isinstance(U.op, tvm.te.ComputeOp):
padded_kernel, G = s[U].op.input_tensors
kernel = s[padded_kernel].op.input_tensors[0]
s[G].compute_inline()
eps, _, _, _ = s[U].op.axis
y, _, _, _ = s[padded_kernel].op.axis
if not autotvm.GLOBAL_SCOPE.in_tuning:
# Pad kernel
y, x, ky, kx = s[padded_kernel].op.axis
s[padded_kernel].unroll(ky)
s[padded_kernel].unroll(kx)
tile_and_bind(s, padded_kernel, y, x, 1, 8)
# Transform kernel
eps, nu, k, c = s[U].op.axis
s[U].reorder(k, c, eps, nu)
r_kh, r_kw = s[U].op.reduce_axis
_ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]
yo, xo, yi, xi = tile_and_bind(s, U, k, c, 1, 4)
# Dilation
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
# Pad data
s[data_pad].compute_inline()
# Pack data
n, c, h, w = s[d].op.axis
w, wi = s[d].split(w, 4)
s[d].unroll(wi)
b = s[d].fuse(n, c)
tile_and_bind3d(s, d, b, h, w, 1, 4, 2)
# Transform data
bIL_d = s.cache_read(d, "local", [V])
s[B].compute_inline()
epsnu, c, b = s[V].op.axis
r_eps, r_nu = s[V].op.reduce_axis
s[V].reorder(b, c, epsnu, r_nu, r_eps)
_ = [s[V].unroll(x) for x in [epsnu, r_eps, r_nu]]
yo, xo, yi, xi = tile_and_bind(
s, V, b, c, cfg["data_transform_wgy"].val, cfg["data_transform_wgx"].val
)
s[bIL_d].compute_at(s[V], xi)
n, c, h, w = s[bIL_d].op.axis
s[bIL_d].unroll(h)
s[bIL_d].vectorize(w)
# Batched GEMM
# Inline the 4D -> 3D tensor transform on the kernel
s[U_3D].compute_inline()
U_transform, V_transform = schedule_gemm(
cfg, s, U_3D, V, M, batched=True, schedule_transforms=True
)
# Inverse transform
CR_M = s.cache_read(M, "local", [Y])
CW_Y = s.cache_write(Y, "local")
s[A].compute_inline()
k, b, vh, vw = s[Y].op.axis
fused = s[Y].fuse(vh, vw)
s[Y].vectorize(fused)
yo, xo, yi, xi = tile_and_bind(s, Y, k, b, 1, 4)
s[CR_M].compute_at(s[Y], xi)
k, b, epsnu = s[CR_M].op.axis
s[CR_M].unroll(k)
s[CW_Y].compute_at(s[Y], xi)
k, b, vh, vw = s[CW_Y].op.axis
r_eps, r_nu = s[CW_Y].op.reduce_axis
_ = [s[CW_Y].unroll(x) for x in [vh, vw, r_eps, r_nu]]
# Schedule output and fusion
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0]
_, k, h, w = s[output].op.axis
tile_and_bind3d(s, output, k, h, w, 1, 2, 2)
##### REGISTER ALTER OP LAYOUT #####
@nn.conv2d_alter_layout.register("bifrost")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
_, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
        # The best implementation is not an AutoTVM template;
        # in that case we assume this op does not need to be altered.
return None
cfg = dispatch_ctx.query(target, workload)
    if cfg.is_fallback:  # on fallback, clear the query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
return None
topi_tmpl = workload[0]
new_attrs = {k: attrs[k] for k in attrs.keys()}
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data, kernel = tinfos
out_dtype = out_type.dtype
idxd = tvm.tir.indexdiv
if topi_tmpl == "conv2d_nchw_spatial_pack.bifrost":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
VC = cfg["tile_co"].size[-1]
new_attrs["kernel_layout"] = "OIHW%do" % VC
new_data = data
new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nchw_spatial_pack.bifrost",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
if topi_tmpl == "conv2d_nchw_winograd.bifrost":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
tile_size = 2
weight_expr = inputs[1]
weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
weight_expr, tile_size=tile_size
)
weight_expr = relay.reshape(
weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, CO, CI)
)
new_attrs["tile_size"] = tile_size
new_data = data
new_kernel = te.placeholder((KH + tile_size - 1, KW + tile_size - 1, CO, CI), kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nchw_winograd.bifrost",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight_expr, **new_attrs
)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""dense schedule on ARM Mali Biforst GPU"""
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("dense.bifrost")
def dense(_, data, weight, bias=None, out_dtype=None):
"""Dense operator on Biforst"""
return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense.bifrost")
def schedule_dense(cfg, outs):
"""Schedule for dense operator.
Parameters
----------
cfg: ConfigEntity
The config entity for this template
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "dense":
vec_size = [1, 2, 4, 8, 16]
max_unroll = 32
dense_out = op.output(0)
output = outs[0]
y, x = s[output].op.axis
c = s[dense_out].op.reduce_axis[0]
##### space definition begin #####
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64)
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.bifrost")
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
if dense_out.op in s.outputs:
dense_out = s.cache_write(output, "local")
by, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
if cfg["tile_y"].size[-1] < max_unroll:
s[output].unroll(yi)
if cfg["tile_x"].size[-1] in vec_size:
s[output].vectorize(xi)
s[dense_out].compute_at(s[output], tx)
k = s[dense_out].op.reduce_axis[0]
y, x = s[dense_out].op.axis
k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k)
s[dense_out].reorder(k, k_unroll, y, x)
s[dense_out].unroll(k_unroll)
if cfg["tile_y"].size[-1] < max_unroll:
s[dense_out].unroll(y)
if cfg["tile_x"].size[-1] in vec_size:
s[dense_out].vectorize(x)
traverse_inline(s, outs[0].op, _callback)
return s
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
"""fuse all the axis and bind to GPU threads"""
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
bx, tx = s[tensor].split(fused, num_thread)
s[tensor].bind(bx, te.thread_axis("blockIdx.x"))
s[tensor].bind(tx, te.thread_axis("threadIdx.x"))
return bx, tx
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""depthwise_conv2d schedule on ARM Mali GPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import utils
from .. import tag
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d nchw forward.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nchw.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(pad_data, kernel, conv):
raw_data = s[pad_data].op.input_tensors[0]
if conv.op not in s.outputs: # has bias or relu
output = outs[0]
else: # no bias or relu
output = conv
def tile_and_bind3d(tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
"""tile and bind 3d"""
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
yo, yi = s[tensor].split(y, y_factor)
xo, xi = s[tensor].split(x, x_factor)
s[tensor].bind(zo, te.thread_axis("blockIdx.z"))
s[tensor].bind(zi, te.thread_axis("threadIdx.z"))
s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
return zo, zi, yo, yi, xo, xi
# set tunable parameters
VH = 1
VW = 1
num_thread = 4
while utils.get_const_int(conv.shape[3]) % (VW * 2) == 0 and VW * 2 <= 4:
VW = VW * 2
while utils.get_const_int(conv.shape[2]) % (VH * 2) == 0 and VH * 2 <= 2:
VH = VH * 2
if raw_data.dtype == "float16":
if utils.get_const_int(conv.shape[3]) % (VW * 2) == 0:
VW *= 2
num_thread *= 2
else:
num_thread *= 2
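        # At this point VH x VW is the per-thread output tile (up to 2 x 4 before the
        # float16 adjustment) and num_thread the threadIdx.z extent used by
        # tile_and_bind3d below; float16 inputs double num_thread, and also VW when
        # the output width allows it.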
# schedule padding
_, c, y, x = s[pad_data].op.axis
tile_and_bind3d(pad_data, c, y, x, num_thread, 1, 1)
# schedule conv
di, dj = s[conv].op.reduce_axis
s[conv].unroll(di)
s[conv].unroll(dj)
_, c, y, x = s[output].op.axis
y, x, yi, xi = s[output].tile(y, x, VH, VW)
s[output].unroll(yi)
s[output].vectorize(xi)
_, _, _, _, _, ji = tile_and_bind3d(output, c, y, x, num_thread, 1, 1)
if conv.op not in s.outputs:
_, c, y, x = s[conv].op.axis
y, x, yi, xi = s[conv].tile(y, x, VH, VW)
s[conv].unroll(yi)
s[conv].vectorize(xi)
s[conv].compute_at(s[output], ji)
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors:
traverse(tensor.op)
# schedule depthwise_conv2d
if op.tag == "depthwise_conv2d_nchw":
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
conv = op.output(0)
_schedule(pad_data, kernel, conv)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""GEMM schedules for Mali Bifrost"""
from tvm import te
from .transforms import tile_and_bind, tile_and_bind3d, interleave_transpose, transpose_interleave
from .. import utils
def decl_gemm(cfg, A, B):
"""Declare a single GEMM computation for Mali Bifrost GPUs
Parameters
----------
cfg : Config
Schedule configuration
A : tvm.te.Tensor
2D Tensor, shape [n, k]
B : tvm.te.Tensor
2D Tensor, shape [k, m]
Returns
-------
C : tvm.te.Tensor
2D Tensor, shape [n, m]
"""
cfg.define_knob("work_group_x", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
cfg.define_knob("work_group_y", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
cfg.define_knob("unroll_k_factor", [1, 2, 4])
cfg.define_knob("A_interleave", [1, 4, 8, 16, 24, 32, 48, 64])
cfg.define_knob("B_interleave", [1, 4, 8, 16, 32])
cfg.define_knob("split_k_factor", [1, 4, 16])
    # The shared k axis must have equal extent in A and B
assert utils.get_const_int(A.shape[1]) == utils.get_const_int(B.shape[0])
n = A.shape[0]
m = B.shape[1]
k_size = utils.get_const_int(A.shape[1])
unroll_gemm = cfg["split_k_factor"].val
if unroll_gemm == 1:
        # The no-unrolling case must produce the same set of tensors to keep scheduling consistent
# Create identity tensors to take the place of A_unrolled, B_unrolled and R
A_unrolled = te.compute((n, k_size), lambda i, j: A[i, j], name="A_unrolled")
B_unrolled = te.compute((k_size, m), lambda i, j: B[i, j], name="B_unrolled")
# Declare standard GEMM
k = te.reduce_axis((0, A.shape[1]), name="k")
C = te.compute(
(n, m), lambda i, j: te.sum(A_unrolled[i, k] * B_unrolled[k, j], axis=k), name="C"
)
R = te.compute((n, m), lambda i, j: C[i, j], name="R")
else:
unrolled_k_size = k_size // unroll_gemm
# Unroll the two input matrices along the shared k axis
A_unrolled = te.compute(
(unroll_gemm, n, unrolled_k_size),
lambda b, i, j: A[i][unrolled_k_size * b + j],
name="A_unrolled",
)
B_unrolled = te.compute(
(unroll_gemm, unrolled_k_size, m),
lambda b, i, j: B[unrolled_k_size * b + i][j],
name="B_unrolled",
)
# Declare a batched GEMM
k = te.reduce_axis((0, unrolled_k_size), name="k")
C = te.compute(
(unroll_gemm, n, m),
lambda b, i, j: te.sum(A_unrolled[b][i][k] * B_unrolled[b][k][j], axis=k),
name="C",
)
# Then declare a reduction to reduce the sub matrices
k = te.reduce_axis((0, unroll_gemm), name="k")
R = te.compute((n, m), lambda i, j: te.sum(C[k][i][j], axis=k), name="R")
return R
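# A minimal usage sketch of decl_gemm (hypothetical shapes; knob values come from a
# fallback config rather than a tuned one, so split_k_factor defaults to 1 and the
# plain 2D path is taken):
#
#     from tvm import te, autotvm
#     A = te.placeholder((64, 128), name="A")
#     B = te.placeholder((128, 32), name="B")
#     cfg = autotvm.task.space.FallbackConfigEntity()
#     R = decl_gemm(cfg, A, B)  # R has shape (64, 32)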
def decl_batched_gemm(cfg, A, B):
"""Declare a batched GEMM computation for Mali Bifrost GPUs
Parameters
----------
cfg : Config
Schedule configuration
A : tvm.te.Tensor
3D Tensor, shape [b, n, k]
B : tvm.te.Tensor
3D Tensor, shape [b, k, m]
Returns
-------
C : tvm.te.Tensor
3D Tensor, shape [b, n, m]
"""
    # The shared b and k axes must have equal extent
assert utils.get_const_int(A.shape[2]) == utils.get_const_int(B.shape[1])
assert utils.get_const_int(A.shape[0]) == utils.get_const_int(B.shape[0])
cfg.define_knob("work_group_x", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
cfg.define_knob("work_group_y", [1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 64])
cfg.define_knob("unroll_k_factor", [1, 2, 4])
cfg.define_knob("A_interleave", [1, 4, 8, 16, 32, 64])
cfg.define_knob("B_interleave", [1, 4, 8, 16, 32])
n = A.shape[1]
m = B.shape[2]
k_size = utils.get_const_int(A.shape[2])
b_size = utils.get_const_int(A.shape[0])
# Declare a batched GEMM
k = te.reduce_axis((0, k_size), name="k")
C = te.compute(
(b_size, n, m), lambda b, i, j: te.sum(A[b][i][k] * B[b][k][j], axis=k), name="C"
)
return C
def decl_winograd_gemm(cfg, A, B):
"""Declare a winograd GEMM for Mali Bifrost GPUs
Winograd uses batched GEMM, however the input tensors are 4D
This wraps decl_batched_gemm to provide it with 3D tensors
Parameters
----------
cfg : Config
Schedule configuration
A : tvm.te.Tensor
4D Tensor, shape [a, a, n, k]
B : tvm.te.Tensor
        3D Tensor, shape [a * a, k, m]
    Returns
    -------
    A_3D : tvm.te.Tensor
        A reshaped to 3D, shape [a * a, n, k]
    C : tvm.te.Tensor
        The batched GEMM result, shape [a * a, n, m]
    """
alpha = utils.get_const_int(A.shape[0])
n = utils.get_const_int(A.shape[2])
k = utils.get_const_int(A.shape[3])
A_3D = te.compute(
(alpha * alpha, n, k), lambda b, i, j: A[b // alpha][b % alpha][i][j], name="A_3D"
)
C = decl_batched_gemm(cfg, A_3D, B)
return A_3D, C
def schedule_gemm(cfg, s, A, B, C, batched=False, schedule_transforms=True):
"""Schedule GEMM, single and batched
Parameters
----------
cfg : Config
Schedule configuration
s : tvm.te.schedule.Schedule
Operator schedule
A : tvm.te.Tensor
2D/3D Tensor, shape [n, k]/[b, n, k]
B : tvm.te.Tensor
2D/3D Tensor, shape [k, m]/[b, k, m]
C : tvm.te.Tensor
2D/3D Tensor, shape [n, m]/[b, n, m]
batched : bool
Whether the GEMM is batched
    Returns
    -------
    trans_inter : tvm.te.Tensor
        The transposed-and-interleaved copy of A created by the schedule
    inter_trans : tvm.te.Tensor
        The interleaved-and-transposed copy of B created by the schedule
    """
block_size_x = 4
block_size_y = 4
warp_size_x = 2
warp_size_y = 2
work_group_x = cfg["work_group_x"].val
work_group_y = cfg["work_group_y"].val
k_unroll = cfg["unroll_k_factor"].val
if not batched:
y_index, x_index = (0, 1)
else:
y_index, x_index = (1, 2)
trans_inter, A_transposed_interleaved = transpose_interleave(
s, A, cfg["A_interleave"].val, y_index, x_index, [C], batched=batched
)
inter_trans, B_interleaved_transposed = interleave_transpose(
s, B, cfg["B_interleave"].val, y_index, x_index, [C], batched=batched
)
if schedule_transforms:
# Schedule A
y, x = s[trans_inter].op.axis
y, x, yi, xi = s[trans_inter].tile(y, x, 1, 8)
s[trans_inter].unroll(yi)
s[trans_inter].unroll(xi)
tile_and_bind(s, trans_inter, y, x, 1, 4)
# Schedule B
y, x = s[inter_trans].op.axis
xo, xi = s[inter_trans].split(x, 4)
s[inter_trans].vectorize(xi)
tile_and_bind(s, inter_trans, y, xo, 4, 4)
# Schedule C
CR_A = s.cache_read(A_transposed_interleaved, "local", [C])
CR_B = s.cache_read(B_interleaved_transposed, "local", [C])
CW_C = s.cache_write(C, "local")
if not batched:
y, x = s[C].op.axis
else:
z, y, x = s[C].op.axis
y, x, yt, xt = s[C].tile(y, x, block_size_y, block_size_x)
s[C].unroll(yt)
s[C].vectorize(xt)
# Tile the global work space to generate 'square' warps -> 2x2 for warp size of 4
y, x, wy, wx = s[C].tile(y, x, warp_size_y, warp_size_x)
x = s[C].fuse(x, wy, wx)
if not batched:
yo, xo, yi, xi = tile_and_bind(s, C, y, x, work_group_y, work_group_x)
else:
# For batched GEMM bind batch to z axis
zo, yo, xo, zi, yi, xi = tile_and_bind3d(s, C, z, y, x, 1, work_group_y, work_group_x)
s[CW_C].compute_at(s[C], xi)
if not batched:
y, x = s[CW_C].op.axis
else:
_, y, x = s[CW_C].op.axis
y, x, yt, xt = s[CW_C].tile(y, x, block_size_y, block_size_x)
k = s[CW_C].op.reduce_axis[0]
s[CW_C].reorder(k, yt, xt)
ko, ki = s[CW_C].split(k, k_unroll)
s[CW_C].unroll(ki)
s[CW_C].unroll(yt)
s[CW_C].unroll(xt)
if not batched:
i, j = s[CR_A].op.axis
else:
_, i, j = s[CR_A].op.axis
s[CR_A].reorder(j, i)
s[CR_A].compute_at(s[CW_C], ki)
s[CR_A].unroll(j)
s[CR_A].vectorize(i)
if not batched:
i, j = s[CR_B].op.axis
else:
_, i, j = s[CR_B].op.axis
s[CR_B].compute_at(s[CW_C], ki)
s[CR_B].unroll(i)
s[CR_B].vectorize(j)
return trans_inter, inter_trans
def schedule_unrollable_gemm(cfg, s, A, B, C, R):
"""Schedule a GEMM that can be unrolled by a constant factor
along its inner dimension
Parameters
----------
cfg : Config
Schedule configuration
s : tvm.te.schedule.Schedule
Operator schedule
A : tvm.te.Tensor
2D/3D Tensor, shape [n, k]/[b, n, k]
B : tvm.te.Tensor
2D/3D Tensor, shape [k, m]/[b, k, m]
C : tvm.te.Tensor
2D/3D Tensor, shape [n, m]/[b, n, m]
R : tvm.te.Tensor
2D Tensor, shape [n, m]
"""
# If the GEMM is 2D, no unrolling has taken place
    # Use the non-batched GEMM schedule and inline the identity copies A, B and R
if len(C.op.axis) == 2:
s[A].compute_inline()
s[B].compute_inline()
schedule_gemm(cfg, s, A, B, C)
s[R].compute_inline()
# GEMM is 3D, use batched GEMM schedule, inline A and B and schedule R
else:
s[A].compute_inline()
s[B].compute_inline()
schedule_gemm(cfg, s, A, B, C, batched=True)
CR_C = s.cache_read(C, "local", [R])
y, x = s[R].op.axis
xo, xi = s[R].split(x, 4)
k = s[R].op.reduce_axis[0]
s[R].reorder(k, xi)
ko, ki = s[R].split(k, 4)
s[R].unroll(xi)
s[R].unroll(ki)
tile_and_bind(s, R, y, xo, 1, 2)
s[CR_C].compute_at(s[R], ko)
_, y, x = s[CR_C].op.axis
s[CR_C].unroll(y)
s[CR_C].vectorize(x)
def get_unrollable_gemm_ops(R):
"""Get all GEMM operators from the final reduction
This is a helper function to more easily get all the GEMM operations
from an operator
Parameters
----------
R : tvm.te.Tensor
Reduced tensor, final stage of GEMM
Returns
-------
A_unrolled : tvm.te.Tensor
Matrix A unrolled along k
B_unrolled: tvm.te.Tensor
Matrix B unrolled along k
C : tvm.te.Tensor
Result of batched GEMM
R : tvm.te.Tensor
Reduction of C, result of unrollable GEMM
"""
C = R.op.input_tensors[0]
A_unrolled, B_unrolled = C.op.input_tensors
return A_unrolled, B_unrolled, C, R
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/bifrost/transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Utility scheduling functions for the Bifrost schedules"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
"""Fuse all the axis and bind to GPU threads"""
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
bx, tx = s[tensor].split(fused, num_thread or max_threads)
s[tensor].bind(bx, te.thread_axis("blockIdx.x"))
s[tensor].bind(tx, te.thread_axis("threadIdx.x"))
return bx, tx
def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):
"""Tile and bind to GPU threads"""
x_factor = x_factor or y_factor
yo, xo, yi, xi = s[tensor].tile(y, x, y_factor, x_factor)
s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
return yo, xo, yi, xi
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
"""Tile and bind 3d"""
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
yo, yi = s[tensor].split(y, y_factor)
xo, xi = s[tensor].split(x, x_factor)
s[tensor].bind(zo, te.thread_axis("blockIdx.z"))
s[tensor].bind(zi, te.thread_axis("threadIdx.z"))
s[tensor].bind(yo, te.thread_axis("blockIdx.y"))
s[tensor].bind(yi, te.thread_axis("threadIdx.y"))
s[tensor].bind(xo, te.thread_axis("blockIdx.x"))
s[tensor].bind(xi, te.thread_axis("threadIdx.x"))
return zo, yo, xo, zi, yi, xi
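# A minimal usage sketch of tile_and_bind3d (hypothetical elementwise stage, shapes chosen
# only for illustration): the outer tiles land on blockIdx.{z,y,x} and the inner tiles on
# threadIdx.{z,y,x}.
#
#     from tvm import te
#     A = te.placeholder((8, 32, 32), name="A")
#     B = te.compute(A.shape, lambda z, y, x: A[z, y, x] * 2.0, name="B")
#     s = te.create_schedule(B.op)
#     z, y, x = s[B].op.axis
#     zo, yo, xo, zi, yi, xi = tile_and_bind3d(s, B, z, y, x, 2, 4, 4)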
def pack_tensor(s, tensor, factor, readers):
"""Do transform X[n, m] -> X[n / factor, m, factor]"""
tmp = s.cache_read(tensor, "global", readers)
y, x = s[tmp].op.axis
yo, yi = s[tmp].split(y, factor)
s[tmp].reorder(yo, x, yi)
s[tmp].compute_inline()
return s.cache_write(tmp, "global"), tmp
def transpose(s, tensor, y_index, x_index, readers):
"""Do transform X[n, m] -> X[m, n]"""
tmp = s.cache_read(tensor, "global", readers)
y, x = s[tmp].op.axis[y_index], s[tmp].op.axis[x_index]
s[tmp].reorder(x, y)
s[tmp].compute_inline()
A_transpose = s.cache_write(tmp, "global")
CR_A = s.cache_read(tensor, "local", [A_transpose])
CW_A_transpose = s.cache_write(A_transpose, "local")
y, x = s[A_transpose].op.axis[y_index], s[A_transpose].op.axis[x_index]
yo, xo, yi, xi = s[A_transpose].tile(y, x, 4, 4)
s[A_transpose].unroll(yi)
s[A_transpose].vectorize(xi)
_, _, _, xi = tile_and_bind(s, A_transpose, yo, xo, 32, 2)
s[CW_A_transpose].compute_at(s[A_transpose], xi)
y, x = s[CW_A_transpose].op.axis[y_index], s[CW_A_transpose].op.axis[x_index]
s[CW_A_transpose].unroll(x)
s[CW_A_transpose].unroll(y)
s[CR_A].compute_at(s[A_transpose], xi)
y, x = s[CR_A].op.axis[y_index], s[CR_A].op.axis[x_index]
s[CR_A].unroll(y)
s[CR_A].vectorize(x)
return tmp
def interleave_transpose(s, tensor, width, y_index, x_index, readers, batched=False):
"""Interleave the tensor, then transpose it"""
tmp = s.cache_read(tensor, "global", readers)
y, x = s[tmp].op.axis[y_index], s[tmp].op.axis[x_index]
xo, xi = s[tmp].split(x, width)
s[tmp].reorder(xo, y, xi)
s[tmp].fuse(y, xi)
if batched:
z = s[tmp].op.axis[0]
s[tmp].fuse(z, xo)
s[tmp].compute_inline()
return s.cache_write(tmp, "global"), tmp
def transpose_interleave(s, tensor, width, y_index, x_index, readers, batched=False):
"""Transpose the tensor, then interleave it"""
tmp = s.cache_read(tensor, "global", readers)
y, x = s[tmp].op.axis[y_index], s[tmp].op.axis[x_index]
yo, yi = s[tmp].split(y, width)
s[tmp].reorder(yo, x, yi)
s[tmp].fuse(x, yi)
if batched:
z = s[tmp].op.axis[0]
s[tmp].fuse(z, yo)
s[tmp].compute_inline()
return s.cache_write(tmp, "global"), tmp
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/broadcast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Broadcast operators"""
from __future__ import absolute_import as _abs
from . import cpp as _cpp
def broadcast_to(data, shape):
"""Broadcast the src to the target shape
    We follow the NumPy broadcasting rule.
See also https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
Parameters
----------
data : tvm.te.Tensor
The input data
shape : list or tuple
The target shape to be broadcasted.
Returns
-------
ret : tvm.te.Tensor
"""
return _cpp.broadcast_to(data, shape)
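# A small usage sketch (hypothetical shapes) of the broadcasting wrappers in this module:
#
#     from tvm import te, topi
#     x = te.placeholder((1, 4), name="x")
#     y = te.placeholder((3, 4), name="y")
#     z = topi.broadcast_to(x, (3, 4))  # explicit broadcast, shape (3, 4)
#     w = topi.add(x, y)                # auto-broadcast add, shape (3, 4)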
def add(lhs, rhs):
"""Addition with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.add(lhs, rhs)
def subtract(lhs, rhs):
"""Subtraction with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.subtract(lhs, rhs)
def multiply(lhs, rhs):
"""Multiplication with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.multiply(lhs, rhs)
def divide(lhs, rhs):
"""Division with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.divide(lhs, rhs)
def floor_divide(lhs, rhs):
"""Floor division with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.floor_divide(lhs, rhs)
def mod(lhs, rhs):
"""Modulus with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.mod(lhs, rhs)
def floor_mod(lhs, rhs):
"""Floor modulus with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.floor_mod(lhs, rhs)
def maximum(lhs, rhs):
"""Take element-wise maximum of two tensors with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.maximum(lhs, rhs)
def minimum(lhs, rhs):
"""Take element-wise maximum of two tensors with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.minimum(lhs, rhs)
def power(lhs, rhs):
"""Power with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.power(lhs, rhs)
def left_shift(lhs, rhs):
"""Left shift with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.left_shift(lhs, rhs)
def right_shift(lhs, rhs):
"""Right shift with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.right_shift(lhs, rhs)
def greater(lhs, rhs):
"""Compute (lhs>rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.greater(lhs, rhs)
def less(lhs, rhs):
"""Compute (lhs<rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.less(lhs, rhs)
def equal(lhs, rhs):
"""Compute (lhs==rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.equal(lhs, rhs)
def not_equal(lhs, rhs):
"""Compute (lhs!=rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.not_equal(lhs, rhs)
def greater_equal(lhs, rhs):
"""Compute (lhs>=rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.greater_equal(lhs, rhs)
def less_equal(lhs, rhs):
"""Compute (lhs<=rhs) with auto-broadcasting
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.less_equal(lhs, rhs)
def logical_and(lhs, rhs):
"""Compute element-wise logical and of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.logical_and(lhs, rhs)
def logical_or(lhs, rhs):
"""Compute element-wise logical or of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.logical_or(lhs, rhs)
def logical_xor(lhs, rhs):
"""Compute element-wise logical xor of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.logical_xor(lhs, rhs)
def bitwise_and(lhs, rhs):
"""Compute element-wise bitwise and of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.bitwise_and(lhs, rhs)
def bitwise_or(lhs, rhs):
"""Compute element-wise bitwise or of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.bitwise_or(lhs, rhs)
def bitwise_xor(lhs, rhs):
"""Compute element-wise bitwise xor of data.
Parameters
----------
lhs : tvm.te.Tensor or Expr
The left operand
rhs : tvm.te.Tensor or Expr
The right operand
Returns
-------
ret : tvm.te.Tensor or Expr
Returns Expr if both operands are Expr.
Otherwise returns Tensor.
"""
return _cpp.bitwise_xor(lhs, rhs)
def logical_not(data):
"""Compute element-wise logical not of data.
Parameters
----------
data : tvm.te.Tensor or Expr
Returns
-------
ret : tvm.te.Tensor or Expr
        Returns Expr if the operand is Expr.
Otherwise returns Tensor.
"""
return _cpp.logical_not(data)
def bitwise_not(data):
"""Compute element-wise bitwise not of data.
Parameters
----------
data : tvm.te.Tensor or Expr
Returns
-------
ret : tvm.te.Tensor or Expr
        Returns Expr if the operand is Expr.
Otherwise returns Tensor.
"""
return _cpp.bitwise_not(data)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for C++ TOPI ops and schedules"""
from .impl import * # pylint: disable=wildcard-import
from . import cuda
from . import nn
from . import vision
from . import x86
from . import generic
from . import rocm
from . import utils
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for CUDA TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.cuda", "tvm.topi.cpp.cuda")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for generic TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.generic", "tvm.topi.cpp.generic")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/impl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Load Lib for C++ TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi", "tvm.topi.cpp")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for NN TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.nn", "tvm.topi.cpp.nn")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for Rocm TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.rocm", "tvm.topi.cpp.rocm")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for TOPI utility functions"""
import tvm._ffi
tvm._ffi._init_api("topi.utils", "tvm.topi.cpp.utils")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/vision/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for vision TOPI ops and schedules"""
import tvm._ffi
from . import yolo
tvm._ffi._init_api("topi.vision", "tvm.topi.cpp.vision")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/vision/yolo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for Yolo TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.vision.yolo", "tvm.topi.cpp.vision.yolo")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cpp/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for x86 TOPI ops and schedules"""
import tvm._ffi
tvm._ffi._init_api("topi.x86", "tvm.topi.cpp.x86")
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""CUDA specific declaration and schedules."""
from .conv1d import *
from .conv1d_transpose_ncw import *
from .conv2d import *
from .conv2d_hwcn import *
from .conv2d_int8 import *
from .conv2d_winograd import *
from .conv2d_nhwc_winograd import *
from .depthwise_conv2d import *
from .group_conv2d_nchw import *
from . import conv2d_alter_op
from .conv2d_transpose import *
from .conv3d_transpose_ncdhw import *
from .deformable_conv2d import *
from .conv3d import *
from .conv3d_winograd import *
from . import conv3d_alter_op
from .reduction import schedule_reduce
from .softmax import *
from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
from .dense import *
from .pooling import *
from .nn import schedule_lrn
from .batch_matmul import *
from .batch_matmul_tensorcore import *
from .vision import *
from .ssd import *
from .nms import get_valid_counts, non_max_suppression, all_class_non_max_suppression
from .rcnn import *
from .scatter import *
from .sort import *
from .conv2d_nhwc_tensorcore import *
from .conv3d_ndhwc_tensorcore import *
from .dense_tensorcore import *
from .conv2d_hwnc_tensorcore import *
from .correlation import *
from .sparse import *
from . import tensorcore_alter_op
from .argwhere import *
from .scan import *
from .sparse_reshape import *
from .transform import *
from .unique import *
from .searchsorted import *
from .stft import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments, invalid-name
"""Argwhere operator"""
import logging
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from .scan import exclusive_scan
from .. import tag
from ..utils import ceil_div, prod
from ..transform import reshape
from ..broadcast import not_equal
from ..math import cast
logger = logging.getLogger("topi")
fdiv = tvm.tir.floordiv
fmod = tvm.tir.floormod
def compact_nonzero_indices_ir(condition, write_indices, out, do_write_func):
"""Copy nonzero indices to the corresponding write locations.
Parameters
----------
condition : Buffer
The input condition.
write_indices : Buffer
The result of exclusive scan on a boolean array, where True indicates that
the condition is non zero at that position.
out : Buffer
The output buffer to copy indices to.
do_write_func : a function
A callback that accepts an output buffer, a dst index to write to, and a src index.
Returns
-------
stmt : Stmt
The result IR statement.
"""
ib = tvm.tir.ir_builder.create()
size_1d = prod(condition.shape)
condition = ib.buffer_ptr(condition)
write_indices = ib.buffer_ptr(write_indices)
out = ib.buffer_ptr(out)
nthread_tx = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_bx = ceil_div(size_1d, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
with ib.new_scope():
idx = bx * nthread_tx + tx
with ib.if_scope(idx < size_1d):
with ib.if_scope(condition[idx] != 0):
do_write_func(out, write_indices[idx], idx)
return ib.get()
def argwhere_common(output_shape, condition, do_write_func):
"""A common compute used by argwhere of various ranks.
Parameters
----------
output_shape : list of int or tvm.tir.Any
Tensor with output shape info.
condition : tvm.te.Tensor
The input condition.
do_write_func : a function
A callback that accepts an output buffer, a dst index to write to, and a src index.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
flags = not_equal(condition, tvm.tir.const(0))
flags_1d = reshape(flags, (prod(flags.shape),))
write_indices = exclusive_scan(cast(flags_1d, dtype="int32"))
condition_buf = tvm.tir.decl_buffer(
condition.shape, condition.dtype, "data_buf", data_alignment=8
)
write_indices_buf = tvm.tir.decl_buffer(
write_indices.shape, write_indices.dtype, "write_indices_buf", data_alignment=8
)
out_buf = tvm.tir.decl_buffer(output_shape, "int32", "out_buf", data_alignment=8)
out = te.extern(
[output_shape],
[condition, write_indices],
lambda ins, outs: compact_nonzero_indices_ir(ins[0], ins[1], outs[0], do_write_func),
dtype=["int32"],
in_buffers=[condition_buf, write_indices_buf],
out_buffers=[out_buf],
name="argwhere",
tag="argwhere_gpu",
)
return out
def argwhere_1d(output_shape, condition):
"""Compute for argwhere 1D
Parameters
----------
    output_shape : list of int or tvm.tir.Any
        The shape of the output tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
"""
def do_write(out, write_index, idx):
out[write_index] = idx
return argwhere_common(output_shape, condition, do_write)
def argwhere_2d(output_shape, condition):
"""Compute for argwhere 2D
Parameters
----------
    output_shape : list of int or tvm.tir.Any
        The shape of the output tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
"""
def do_write(out, write_index, idx):
a1 = condition.shape[1]
out[write_index * 2] = tvm.tir.floordiv(idx, a1)
out[write_index * 2 + 1] = tvm.tir.floormod(idx, a1)
return argwhere_common(output_shape, condition, do_write)
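# A worked illustration of the scan-and-scatter scheme (hypothetical 2x3 input): for
# condition = [[0, 7, 0], [5, 0, 9]] the flattened non-zero flags are [0, 1, 0, 1, 0, 1],
# whose exclusive scan gives write_indices = [0, 0, 1, 1, 2, 2]; the non-zero flat
# positions 1, 3 and 5 are therefore written to output rows 0, 1 and 2, which the 2D
# do_write decomposes into (0, 1), (1, 0) and (1, 2).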
def argwhere_3d(output_shape, condition):
"""Compute for argwhere 3D
Parameters
----------
    output_shape : list of int or tvm.tir.Any
        The shape of the output tensor.
    condition : tvm.te.Tensor
        Tensor with boolean values.
    Returns
    -------
    out : tvm.te.Tensor
        Indices of non-zero elements.
"""
def do_write(out, write_index, idx):
_, a1, a2 = condition.shape
s1 = a1 * a2
out[write_index * 3] = fdiv(idx, s1)
out[write_index * 3 + 1] = fdiv(fmod(idx, s1), a2)
out[write_index * 3 + 2] = fmod(idx, a2)
return argwhere_common(output_shape, condition, do_write)
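# Editor note (illustrative sketch, not part of the original TVM source): the rank-N
# do_write callbacks simply unflatten a row-major linear index with the usual stride
# arithmetic. For example, with condition.shape == (2, 3, 4) the stride is
# s1 = a1 * a2 = 12, so a flat index idx = 17 decomposes as
#   i0 = 17 // 12        = 1
#   i1 = (17 % 12) // 4  = 1
#   i2 = 17 % 4          = 1
# and indeed 1 * 12 + 1 * 4 + 1 == 17. The 4D/5D variants below extend the same
# pattern with one extra stride per dimension.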
def argwhere_4d(output_shape, condition):
"""Compute for argwhere 4D
Parameters
----------
output_shape : list of int or tvm.tir.Any
The output shape.
condition : tvm.te.Tensor
Tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
def do_write(out, write_index, idx):
_, a1, a2, a3 = condition.shape
s1 = a2 * a3
s2 = a1 * s1
out[write_index * 4] = fdiv(idx, s2)
out[write_index * 4 + 1] = fdiv(fmod(idx, s2), s1)
out[write_index * 4 + 2] = fdiv(fmod(idx, s1), a3)
out[write_index * 4 + 3] = fmod(idx, a3)
return argwhere_common(output_shape, condition, do_write)
def argwhere_5d(output_shape, condition):
"""Compute for argwhere 5D
Parameters
----------
output_shape : list of int or tvm.tir.Any
The output shape.
condition : tvm.te.Tensor
Tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
def do_write(out, write_index, idx):
_, a1, a2, a3, a4 = condition.shape
s1 = a3 * a4
s2 = a2 * s1
s3 = a1 * s2
out[write_index * 5] = fdiv(idx, s3)
out[write_index * 5 + 1] = fdiv(fmod(idx, s3), s2)
out[write_index * 5 + 2] = fdiv(fmod(idx, s2), s1)
out[write_index * 5 + 3] = fdiv(fmod(idx, s1), a4)
out[write_index * 5 + 4] = fmod(idx, a4)
return argwhere_common(output_shape, condition, do_write)
def argwhere(output_shape, condition):
"""Find the indices of elements of a tensor that are non-zero.
Parameters
----------
output_shape : tvm.te.Tensor
Tensor with output shape info.
condition : tvm.te.Tensor
Tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
if len(condition.shape) == 1:
return argwhere_1d(output_shape.shape, condition)
if len(condition.shape) == 2:
return argwhere_2d(output_shape.shape, condition)
if len(condition.shape) == 3:
return argwhere_3d(output_shape.shape, condition)
if len(condition.shape) == 4:
return argwhere_4d(output_shape.shape, condition)
if len(condition.shape) == 5:
return argwhere_5d(output_shape.shape, condition)
raise ValueError("Argwhere does not support rank higher than 5")
def schedule_argwhere(outs):
"""Schedule for argwhere on cuda.
Parameters
----------
outs: Array of Tensor
The computation graph description of argwhere
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for argwhere
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_injective(op.tag):
schedule_injective_from_existing(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
for out in outs:
traverse(out.op)
return s
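# The block below is an editor-added reference sketch (an assumption, not part of the
# original file): it models the argwhere pipeline above -- boolean flags, an exclusive
# scan to obtain write positions, then compaction of the surviving indices -- in plain
# NumPy, so the IR built in compact_nonzero_indices_ir can be sanity-checked by eye.
if __name__ == "__main__":
    import numpy as np

    cond = np.array([[0, 3], [5, 0]], dtype="int32")
    flags = (cond.reshape(-1) != 0).astype("int32")
    # exclusive scan: the position each nonzero element will be written to
    write_indices = np.cumsum(flags) - flags
    out = np.zeros((int(flags.sum()), cond.ndim), dtype="int32")
    a1 = cond.shape[1]
    for idx in np.nonzero(flags)[0]:
        out[write_indices[idx]] = (idx // a1, idx % a1)
    # matches np.argwhere(cond): [[0, 1], [1, 0]]
    print(out)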
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable,unused-argument
"""cuda batch_matmul operators"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.contrib import cublas
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn, generic
from ..utils import traverse_inline, get_const_tuple, get_max_power2_factor
from .tensor_intrin import dp4a
@autotvm.register_topi_compute("batch_matmul.cuda")
def batch_matmul(cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True):
"""Compute batch matrix multiplication of `tensor_a` and `tensor_b`.
Both `tensor_a` and `tensor_b` can be transposed. For legacy reason, we use NT format
(transpose_a=False, transpose_b=True) by default.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file.
x : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M].
y : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K].
out_shape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul.
transpose_a : Optional[bool] = False
Whether the first tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the second tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
return nn.batch_matmul(
x,
y,
oshape=out_shape,
out_dtype=out_dtype,
transpose_a=transpose_a,
transpose_b=transpose_b,
)
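# Editor note (hedged sketch, not in the original file): with the default NT format the
# shapes work out as
#   x   : [batch, M, K]   (transpose_a=False)
#   y   : [batch, N, K]   (transpose_b=True)
#   out : [batch, M, N]
# e.g. x = te.placeholder((4, 32, 64)) and y = te.placeholder((4, 16, 64)) would yield a
# (4, 32, 16) output from nn.batch_matmul with the defaults used here.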
@autotvm.register_topi_schedule("batch_matmul.cuda")
def schedule_batch_matmul(cfg, outs):
"""Schedule for batch_matmul
Parameters
----------
outs: Array of Tensor
The computation graph description of batch_matmul
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, op):
C = op.output(0)
A, B = s[C].op.input_tensors
if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
s[B].compute_inline()
_, M, N = get_const_tuple(C.shape)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
if op not in s.outputs:
s[C].compute_inline()
C = s.outputs[0].output(0)
b, y, x = s[C].op.axis
(k,) = s[CC].op.reduce_axis
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("tile_k", k, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [8, 16, 32, 64])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
# llvm-based backends cannot do non-explicit unrolling
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
y_bn = get_max_power2_factor(M, 64)
x_bn = get_max_power2_factor(N, 64)
y_nthreads = min(y_bn, 8)
x_nthreads = min(x_bn, 8)
cfg["tile_x"] = SplitEntity([-1, x_nthreads, x_bn // x_nthreads])
cfg["tile_y"] = SplitEntity([-1, y_nthreads, y_bn // y_nthreads])
cfg["tile_k"] = SplitEntity([-1, 8])
cfg["auto_unroll_max_step"] = OtherOptionEntity(16)
by, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, tx, xi = cfg["tile_x"].apply(s, C, x)
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
s[C].reorder(b, by, bx, ty, tx, yi, xi)
s[C].bind(b, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
s[CC].compute_at(s[C], tx)
_, yi, xi = s[CC].op.axis
ko, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].reorder(ko, ki, yi, xi)
s[CC].pragma(ki, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[CC].pragma(ki, "unroll_explicit", cfg["unroll_explicit"].val)
s[AA].compute_at(s[CC], ko)
s[AL].compute_at(s[CC], ki)
s[BB].compute_at(s[CC], ko)
s[BL].compute_at(s[CC], ki)
_, y, k = s[AA].op.axis
ty, yi = s[AA].split(y, nparts=cfg["tile_y"].size[1])
tx, ki = s[AA].split(k, nparts=cfg["tile_x"].size[1])
s[AA].reorder(ty, tx, yi, ki)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[AA].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
_, x, k = s[BB].op.axis
ty, xi = s[BB].split(x, nparts=cfg["tile_y"].size[1])
tx, ki = s[BB].split(k, nparts=cfg["tile_x"].size[1])
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s[BB].reorder(ty, tx, xi, ki)
s[BB].pragma(xi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[BB].pragma(xi, "unroll_explicit", cfg["unroll_explicit"].val)
def _callback(op):
if "batch_matmul" in op.tag:
_schedule(cfg, op)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("batch_matmul_cublas.cuda")
def batch_matmul_cublas(
cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""Compute batch matrix multiplication of `x` and `y`.
Both `x` and `y` can be transposed. For legacy reasons, we use the NT format
(transpose_a=False, transpose_b=True) by default.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file.
x : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M].
y : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K].
out_shape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul.
transpose_a : Optional[bool] = False
Whether the first tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the second tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
if transpose_a:
b, k, m = get_const_tuple(x.shape)
else:
b, m, k = get_const_tuple(x.shape)
if transpose_b:
b, n, k = get_const_tuple(y.shape)
else:
b, k, n = get_const_tuple(y.shape)
if all([isinstance(s, int) for s in [b, m, n, k]]):
cfg.add_flop(b * m * k * n * 2)
return cublas.batch_matmul(x, y, transa=transpose_a, transb=transpose_b, dtype=out_dtype)
@autotvm.register_topi_schedule("batch_matmul_cublas.cuda")
def schedule_batch_matmul_cublas(_, outs):
"""Schedule batch_matmul operator using CUBLAS"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("batch_matmul_int8.cuda")
def batch_matmul_int8(
cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""Batch Matmul operator for int8 on CUDA.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file.
x : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M].
y : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K].
out_shape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul.
transpose_a : Optional[bool] = False
Whether the first tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the second tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
del out_shape
# TODO(jcf94): Deal with different transpose combinations
assert not transpose_a and transpose_b
if out_dtype is None:
out_dtype = x.dtype
x_shape = get_const_tuple(x.shape)
y_shape = get_const_tuple(y.shape)
assert len(x_shape) == 3 and len(y_shape) == 3, "only support 3-dim batch_matmul"
XB, M, XK = x.shape
YB, N, YK = y.shape
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
assert XK == YK, "shapes of x and y are inconsistent"
nB = tvm.te.max(XB, YB)
nK = ((XK + 3) // 4) * 4
reduce_k = te.reduce_axis((0, nK), name="k")
# pad for _dp4a vectorize
pad_x = te.compute(
(XB, M, nK),
lambda b, i, j: tvm.te.if_then_else(
j >= XK, tvm.runtime.convert(0).astype(x.dtype), x[b, i, j]
),
)
pad_y = te.compute(
(YB, N, nK),
lambda b, i, j: tvm.te.if_then_else(
j >= YK, tvm.runtime.convert(0).astype(y.dtype), y[b, i, j]
),
)
out = te.compute(
(nB, M, N),
lambda b, i, j: te.sum(
pad_x[b if XB != 1 else 0, i, reduce_k].astype(out_dtype)
* pad_y[b if YB != 1 else 0, j, reduce_k].astype(out_dtype),
axis=[reduce_k],
),
tag="batch_matmul_int8",
)
cfg.add_flop(XB * M * N * nK * 2)
return out
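# Editor note (illustrative, not in the original file): nK = ((XK + 3) // 4) * 4 rounds the
# reduction axis up to a multiple of 4 so the dp4a intrinsic can consume groups of four
# int8 values; e.g. XK = 10 gives nK = 12, and the two padded positions contribute only
# zeros to the sum, so the result is unchanged.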
@autotvm.register_topi_schedule("batch_matmul_int8.cuda")
def schedule_batch_matmul_int8(cfg, outs):
"""Batch Matmul schedule for int8 on CUDA"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "batch_matmul_int8" in op.tag:
_schedule_batch_matmul_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_batch_matmul_int8(cfg, s, output):
input_x, input_y = s[output].op.input_tensors
if len(input_y.op.input_tensors) == 1 and input_y.op.input_tensors[0] == input_x:
s[input_y].compute_inline()
B, M, K = get_const_tuple(input_x.shape)
_, N, _ = get_const_tuple(input_y.shape)
k_factor = 4
assert K % k_factor == 0, "Input dimension must be divisible by {}".format(k_factor)
if K % 16 == 0:
k_factor = 16
cfg.define_split("tile_f", B, num_outputs=4)
cfg.define_split("tile_m", M, num_outputs=4)
cfg.define_split("tile_n", N, num_outputs=4)
cfg.define_split("tile_k", K // k_factor, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 256, 512, 1024])
batch_matmul_op = s.outputs[0]
s[input_x].compute_inline()
s[input_y].compute_inline()
x_cache = s.cache_read(input_x, "shared", [batch_matmul_op])
y_cache = s.cache_read(input_y, "shared", [batch_matmul_op])
batch_matmul_cache = s.cache_write(batch_matmul_op.output(0), "local")
# tile reduce axis
ko = batch_matmul_cache.op.reduce_axis[0]
ko, ki = s[batch_matmul_cache].split(ko, factor=4)
ko, kt = cfg["tile_k"].apply(s, batch_matmul_cache, ko)
# dp4a tensorize
target = tvm.target.Target.current(allow_none=False)
do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
if do_tensorize:
dtypes = (input_x.dtype, input_y.dtype)
s[batch_matmul_cache].tensorize(ki, dp4a("shared", "shared", "local", dtypes))
# tile axis
f, m, n = batch_matmul_op.axis
kernel_scope, f = s[batch_matmul_op].split(f, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, batch_matmul_op, f)
bm, vm, tm, mi = cfg["tile_m"].apply(s, batch_matmul_op, m)
bn, vn, tn, ni = cfg["tile_n"].apply(s, batch_matmul_op, n)
s[batch_matmul_op].reorder(bf, bm, bn, vf, vm, vn, tf, tm, tn, fi, mi, ni)
# bind axis
s[batch_matmul_op].bind(bf, tvm.te.thread_axis("blockIdx.z"))
s[batch_matmul_op].bind(bm, tvm.te.thread_axis("blockIdx.y"))
s[batch_matmul_op].bind(bn, tvm.te.thread_axis("blockIdx.x"))
s[batch_matmul_op].bind(vf, tvm.te.thread_axis("vthread"))
s[batch_matmul_op].bind(vm, tvm.te.thread_axis("vthread"))
s[batch_matmul_op].bind(vn, tvm.te.thread_axis("vthread"))
s[batch_matmul_op].bind(tf, tvm.te.thread_axis("threadIdx.z"))
s[batch_matmul_op].bind(tm, tvm.te.thread_axis("threadIdx.y"))
s[batch_matmul_op].bind(tn, tvm.te.thread_axis("threadIdx.x"))
# cache compute at
s[batch_matmul_cache].compute_at(s[batch_matmul_op], tn)
fo, mo, no = batch_matmul_cache.op.axis[:3]
s[batch_matmul_cache].reorder(ko, kt, fo, mo, no, ki)
# for load in [splited_x_op, splited_y_op]
for load in [x_cache, y_cache]:
s[load].compute_at(s[batch_matmul_cache], ko)
outer, inner = s[load].split(s[load].op.axis[-1], factor=k_factor)
s[load].vectorize(inner)
fused = s[load].op.axis[:-1] + [outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=cfg["tile_n"].size[2])
fused, ty = s[load].split(fused, factor=cfg["tile_m"].size[2])
fused, tz = s[load].split(fused, factor=cfg["tile_f"].size[2])
s[load].bind(tz, tvm.te.thread_axis("threadIdx.z"))
s[load].bind(ty, tvm.te.thread_axis("threadIdx.y"))
s[load].bind(tx, tvm.te.thread_axis("threadIdx.x"))
# max unroll
s[batch_matmul_op].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[batch_matmul_op].pragma(kernel_scope, "unroll_explicit", False)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/batch_matmul_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable,unused-argument
"""cuda batch_matmul operators"""
import tvm
from tvm import autotvm
from tvm import te
from ..utils import traverse_inline, get_const_tuple
from .tensor_intrin import (
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
intrin_wmma_gemm,
)
@autotvm.register_topi_compute("batch_matmul_tensorcore.cuda")
def batch_matmul_tensorcore(
cfg, x, y, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""batch matmul tensorcore operator on cuda"""
# TODO(jcf94): Deal with different transpose combinations
assert not transpose_a and transpose_b
# TODO(liuxin.ai): Deal with out_shape for broadcast
del out_shape
return batch_matmul_tensorcore_cuda(x, y, out_dtype)
@autotvm.register_topi_schedule("batch_matmul_tensorcore.cuda")
def schedule_batch_matmul_tensorcore(cfg, outs):
"""Schedule for batch_matmul operator using Tensorcore
Parameters
----------
outs: Array of Tensor
The computation graph description of batch_matmul
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, C):
A, B = s[C].op.input_tensors
if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
s[B].compute_inline()
batch, m_dim, k_dim = get_const_tuple(A.shape)
batch, n_dim, k_dim = get_const_tuple(B.shape)
data_dtype = A.dtype
out_dtype = C.dtype
# Explicit memory access
AS = s.cache_read(A, "shared", [C])
BS = s.cache_read(B, "shared", [C])
AF = s.cache_read(AS, "wmma.matrix_a", [C])
BF = s.cache_read(BS, "wmma.matrix_b", [C])
CF = s.cache_write(C, "wmma.accumulator")
CS = s.cache_read(CF, "shared", [C])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "batch_matmul_tensorcore.cuda"
)
cfg.fallback_with_reference_log(ref_log)
# Deal with op fusion, such as bias/relu and slice after padding
if C.op not in s.outputs and "injective" in s.outputs[0].tag:
s[C].compute_inline()
C = s.outputs[0].output(0)
# create tuning space
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("offsetCS", [0, 8])
cfg.define_knob("vec", [1, 2, 4, 8])
# Ensure that the default parameters are applicable when autotvm is not in use
if data_dtype in ["float16", "uint8", "int8"]:
if m_dim % 32 == 0 and n_dim % 8 == 0:
cfg.define_knob("wmma_m", [32, 16, 8])
elif m_dim % 16 == 0 and n_dim % 16 == 0:
cfg.define_knob("wmma_m", [16, 8, 32])
elif m_dim % 8 == 0 and n_dim % 32 == 0:
cfg.define_knob("wmma_m", [8, 16, 32])
wmma_k = 16
wmma_m = cfg["wmma_m"].val
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
elif data_dtype in ["int4", "uint4"]:
wmma_m = wmma_n = 8
wmma_k = 32
else:
raise ValueError("data dtype %s is not yet supported" % data_dtype)
warp_size = 32
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
offsetCS = cfg["offsetCS"].val
vec = cfg["vec"].val
# Define the stride of intrin functions
AS_align = chunk * wmma_k + offset
BS_align = chunk * wmma_k + offset
CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
AS_stride = [AS_align, 1]
BS_stride = [BS_align, 1]
AF_stride = [wmma_k, 1]
BF_stride = [wmma_k, 1]
CF_stride = [warp_col_tiles * wmma_n, 1]
CS_stride = [CS_align, 1]
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Schedule for dense computation
block_factor_m = wmma_m * warp_row_tiles * block_row_warps
block_factor_n = wmma_n * warp_col_tiles * block_col_warps
b, m, n = C.op.axis
block_i, bc = s[C].split(m, factor=block_factor_m)
block_j, oc = s[C].split(n, factor=block_factor_n)
s[C].reorder(b, block_i, block_j, bc, oc)
t = s[C].fuse(bc, oc)
t, vi = s[C].split(t, factor=vec)
t, tx = s[C].split(t, factor=warp_size)
t, ty = s[C].split(t, factor=block_row_warps)
t, tz = s[C].split(t, factor=block_col_warps)
s[C].bind(block_i, block_x)
s[C].bind(block_j, block_y)
s[C].bind(b, block_z)
s[C].bind(tz, thread_z)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].vectorize(vi)
# Schedule for wmma store
s[CS].compute_at(s[C], block_j)
bs, bb, oo = CS.op.axis
s[CS].storage_align(bb, CS_align - 1, CS_align)
bb, bbi = s[CS].split(bb, factor=wmma_m)
oo, ooi = s[CS].split(oo, factor=wmma_n)
bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
s[CS].reorder(bs, bb, oo, bbii, ooii, bbi, ooi)
s[CS].bind(bb, thread_z)
s[CS].bind(oo, thread_y)
# Schedule for wmma computation
s[CF].compute_at(s[CS], oo)
bs, warp_i, warp_j = CF.op.axis
warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
(k,) = CF.op.reduce_axis
k, _k = s[CF].split(k, factor=wmma_k)
ko, ki = s[CF].split(k, factor=chunk)
s[CF].reorder(bs, ko, ki, warp_i, warp_j, _ii, _jj, _k)
# Schedule for wmma_matrix_a load
s[AF].compute_at(s[CF], ki)
bs, b, i = AF.op.axis
b, b_ii = s[AF].split(b, factor=wmma_m)
i, i_jj = s[AF].split(i, factor=wmma_k)
s[AF].reorder(bs, b, i, b_ii, i_jj)
# Schedule for wmma_matrix_b load
s[BF].compute_at(s[CF], ki)
bs, o, i = BF.op.axis
o, o_ii = s[BF].split(o, factor=wmma_n)
i, i_ii = s[BF].split(i, factor=wmma_k)
s[BF].reorder(bs, o, i, o_ii, i_ii)
# Schedule for A's(B's) shared memory load
def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
bs, xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
t = s[stage].fuse(xo, yo)
t, vi = s[stage].split(t, factor=vec)
t, tx = s[stage].split(t, factor=warp_size)
t, ty = s[stage].split(t, factor=block_row_warps)
_, tz = s[stage].split(t, factor=block_col_warps)
s[stage].bind(ty, thread_y)
s[stage].bind(tz, thread_z)
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)
shared_schedule(AS, AS_align)
shared_schedule(BS, BS_align)
shape = (wmma_m, wmma_n, wmma_k)
AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
BL_gemm = te.placeholder((wmma_n, wmma_k), name="BL_gemm", dtype=data_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
CL_compute = te.compute(
(wmma_m, wmma_n),
lambda ii, jj: te.sum(
AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[jj, k_gemm].astype(out_dtype),
axis=k_gemm,
),
name="CL_compute",
)
# lower the computation loops down to TensorCore hardware intrinsics
# by mapping the dense tensorcore to tensor intrinsics
s[AF].tensorize(
b_ii,
intrin_wmma_load_matrix_A(
AF_stride,
AS_stride,
shape,
"row_major",
(wmma_m, wmma_k),
(wmma_m, wmma_k),
data_dtype,
),
)
s[BF].tensorize(
o_ii,
intrin_wmma_load_matrix_W(
BF_stride,
BS_stride,
shape,
"col_major",
(wmma_n, wmma_k),
(wmma_n, wmma_k),
data_dtype,
),
)
s[CF].tensorize(
_ii,
intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape),
)
s[CS].tensorize(
bbi,
intrin_wmma_store_matrix(
CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
),
)
def _callback(op):
if "batch_matmul_tensorcore" in op.tag:
_schedule(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def batch_matmul_tensorcore_cuda(x, y, out_dtype=None):
"""Computes batch matrix multiplication of `x` and `y` when `x` and `y` are
data in batch.
Parameters
----------
x : tvm.te.Tensor
3-D with shape [batch, M, K]
y : tvm.te.Tensor
3-D with shape [batch, N, K]
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(x.shape) == 3 and len(y.shape) == 3, "only support 3-dim batch_matmul"
x_shape = get_const_tuple(x.shape)
y_shape = get_const_tuple(y.shape)
assert x_shape[0] == y_shape[0], "batch dimension doesn't match"
assert x_shape[2] == y_shape[2], "shapes of x and y are inconsistent"
batch, M, K = x.shape
N = y.shape[1]
if out_dtype is None:
out_dtype = x.dtype
assert x.dtype == y.dtype
assert x.dtype in ["float16", "uint8", "int8", "uint4", "int4"]
if x.dtype in ["float16", "uint8", "int8"]:
assert (
(M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
), "The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)"
else:
assert (
M % 8 == 0 and K % 32 == 0 and N % 8 == 0
), "The shape of (M, K, N) must be multiple of (8, 32, 8)"
k = te.reduce_axis((0, K), name="k")
return te.compute(
(batch, M, N),
lambda b, i, j: te.sum(x[b, i, k].astype(out_dtype) * y[b, j, k].astype(out_dtype), axis=k),
tag="batch_matmul_tensorcore",
)
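# Editor note (illustrative, not in the original file): the shape checks above mirror the
# wmma fragment sizes the schedule can pick, e.g. for float16/int8 inputs
#   (wmma_m, wmma_n, wmma_k) in {(16, 16, 16), (32, 8, 16), (8, 32, 16)}
# so M = 64, N = 64, K = 32 in fp16 is accepted (divisible by 16/16/16), while int4/uint4
# inputs are restricted to the single (8, 8, 32) fragment shape.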
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv1d with cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline, get_const_tuple
@autotvm.register_topi_compute("conv1d_ncw.cuda")
def conv1d_ncw(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
return nn.conv1d_ncw(data, kernel, strides, padding, dilation, out_dtype)
def _schedule_conv1d_ncw(cfg, outs):
"""TOPI schedule callback of conv1d ncw for cuda gpu
Parameters
----------
cfg : ConfigEntity
the config for this template.
outs : Array of Tensor
The computation graph description of conv1d
in the format of an array of tensors.
Returns
-------
s : Schedule
The computation schedule for conv1d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv1d_ncw" or op.tag == "group_conv1d_ncw":
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, f, x = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
s[pad_data].set_scope("shared")
AA = pad_data
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bf, bx, vn, vf, vx, tn, tf, tx, ni, fi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bf, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile reduction axes
n, f, x = s[OL].op.axis
rc, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rcm, rx, rci, n, f, x)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
# cooperative fetching
for load in [AA, WW]:
n, f, x = s[load].op.axis
fused = s[load].fuse(f, x)
tz, fused = s[load].split(fused, nparts=n_tz)
tx, fused = s[load].split(fused, nparts=n_tx)
s[load].bind(tz, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
N, CO, OW = get_const_tuple(output.shape)
_, CI, KW = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OW * CO * KW * CI)
traverse_inline(s, outs[0].op, _callback)
return s
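# Editor note (illustrative, not in the original file): each spatial axis above is split
# four ways into (block, vthread, thread, inner) pieces; e.g. a tile_x entry of
# [-1, 2, 8, 4] maps the width axis to blockIdx.x x 2 vthreads x 8 threads x 4 inner
# iterations, and the cooperative-fetch loop then reuses size[2] of the splits as the
# threadIdx extents (n_tx = 8 in this example).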
@autotvm.register_topi_schedule("conv1d_ncw.cuda")
def schedule_conv1d_ncw(cfg, outs):
return _schedule_conv1d_ncw(cfg, outs)
@autotvm.register_topi_compute("group_conv1d_ncw.cuda")
def group_conv1d_ncw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
return nn.group_conv1d_ncw(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv1d_ncw.cuda")
def schedule_group_conv1d_ncw(cfg, outs):
return _schedule_conv1d_ncw(cfg, outs)
@autotvm.register_topi_compute("conv1d_nwc.cuda")
def conv1d_nwc(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
return nn.conv1d_nwc(data, kernel, strides, padding, dilation, out_dtype)
def _schedule_conv1d_nwc(cfg, outs):
"""TOPI schedule callback of conv1d nwc for cuda gpu
Parameters
----------
cfg : ConfigEntity
the config for this template.
outs : Array of Tensor
The computation graph description of conv1d
in the format of an array of tensors.
Returns
-------
s : Schedule
The computation schedule for conv1d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv1d_nwc" or op.tag == "group_conv1d_nwc":
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, x, f = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
s[pad_data].set_scope("shared")
AA = pad_data
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
s[output].reorder(bn, bx, bf, vn, vx, vf, tn, tx, tf, ni, xi, fi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bx, te.thread_axis("blockIdx.y"))
s[output].bind(bf, te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tf)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_x"].size[2]
n_tx = cfg["tile_f"].size[2]
# tile reduction axes
n, x, f = s[OL].op.axis
rc, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rcm, rx, rci, n, x, f)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
# cooperative fetching
for load in [AA, WW]:
n, x, f = s[load].op.axis
fused = s[load].fuse(x, f)
tz, fused = s[load].split(fused, nparts=n_tz)
tx, fused = s[load].split(fused, nparts=n_tx)
s[load].bind(tz, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
N, OW, CO = get_const_tuple(output.shape)
KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OW * CO * KW * CI)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_schedule("conv1d_nwc.cuda")
def schedule_conv1d_nwc(cfg, outs):
return _schedule_conv1d_nwc(cfg, outs)
@autotvm.register_topi_compute("group_conv1d_nwc.cuda")
def group_conv1d_nwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
return nn.group_conv1d_nwc(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv1d_nwc.cuda")
def schedule_group_conv1d_nwc(cfg, outs):
return _schedule_conv1d_nwc(cfg, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv1d_transpose_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv1d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple, traverse_inline
@autotvm.task.register_topi_compute("conv1d_transpose_nchw.cuda")
def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype, output_padding):
"""Transposed 1D convolution ncw forward operator.
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
3-D with shape [batch, in_channel, inp_width]
kernel : tvm.te.Tensor
3-D with shape [in_channel, num_filter, kernel_size]
stride : tuple of one int
The spatial stride along width
padding : int, tuple, or string
int: padding size
tuple of 2 ints: (pad_left, pad_right) for left and right padding
string: ['VALID', 'SAME']
out_dtype: str
The output type. This is used for mixed precision.
output_padding : int or tuple of one int
Used to disambiguate the output shape.
Returns
-------
Output : tvm.te.Tensor
3-D with shape [batch, out_channel, out_width]
"""
if isinstance(stride, (tuple, list)):
stride = stride[0]
if isinstance(output_padding, (tuple, list)):
output_padding = output_padding[0]
assert output_padding < stride
cfg.stride = stride
cfg.output_padding = output_padding
batch, inp_channels, inp_width = get_const_tuple(data.shape)
_, out_channels, kernel_size = get_const_tuple(kernel.shape)
pad_left, pad_right = nn.get_pad_tuple1d(padding, kernel_size)
out_width = (inp_width - 1) * stride + kernel_size - pad_left - pad_right + output_padding
pad_left = kernel_size - 1 - pad_left
pad_right = kernel_size - 1 - pad_right + output_padding
padded_width = pad_left + inp_width + pad_right
padded_data = te.compute(
(batch, inp_channels, padded_width),
lambda n, c, x: tvm.tir.if_then_else(
tvm.tir.all(x >= pad_left, x < pad_left + inp_width),
data[n, c, x - pad_left],
tvm.tir.const(0.0, "float32"),
),
name="data_pad",
)
padded_kernel = te.compute(
(inp_channels, out_channels, kernel_size + stride - 1),
lambda ci, co, k: tvm.tir.if_then_else(
tvm.tir.all(k < kernel_size),
kernel[ci, co, kernel_size - k - 1],
tvm.tir.const(0.0, "float32"),
),
name="kernel_pad",
)
ci = te.reduce_axis((0, inp_channels), name="ci")
k = te.reduce_axis((0, tvm.tir.indexdiv(kernel_size + stride - 1, stride)), name="k")
border = pad_left * (stride - 1)
# Skip multiplication by 0 values in the input data inserted when stride is greater than 1.
# During multiplication of kernel by padded data:
# Kernel indices are: 0, 1 * stride, 2 * stride, ..., ceil(kernel_size / stride) plus
# data offset mod stride
data_out = te.compute(
(batch, out_channels, out_width),
lambda b, co, w: te.sum(
padded_data[b, ci, tvm.tir.indexdiv(border + w + stride - 1, stride) + k].astype(
out_dtype
)
* padded_kernel[
ci, co, k * stride + tvm.tir.indexmod(stride - w - border, stride)
].astype(out_dtype),
axis=[ci, k],
),
tag="conv1d_transpose_ncw",
)
return data_out
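# Editor note (worked example, not in the original file): with inp_width = 8, stride = 2,
# kernel_size = 3, padding = 1 on both sides and output_padding = 1, the formula above
# gives out_width = (8 - 1) * 2 + 3 - 1 - 1 + 1 = 16, the usual transposed-conv output
# size; the kernel is then zero-padded to kernel_size + stride - 1 = 4 taps so the strided
# gather in data_out never multiplies by the zeros inserted between input samples.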
@autotvm.task.register_topi_schedule("conv1d_transpose_nchw.cuda")
def schedule_conv1d_transpose_ncw(cfg, outs):
"""TOPI Schedule callback for conv1d_transpose operator.
Parameters
----------
cfg: ConfigEntity
The parameters for this template
outs: Array of Tensor
The computation graph description of conv1d transpose
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv1d transpose.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv1d_transpose_ncw":
padded_data = op.input_tensors[0]
padded_kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, f, x = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
cfg.define_split("tile_n", cfg.axis(n if isinstance(n, int) else 1), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
s[padded_kernel].compute_inline()
s[padded_data].compute_inline()
# tile and bind spatial axes
n, f, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bf, bx, vn, vf, vx, tn, tf, tx, ni, fi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bf, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, f, x = s[OL].op.axis
rc, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rcm, rx, rci, n, f, x)
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv2d with cuda backend"""
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import OtherOptionEntity
from tvm.contrib import cudnn
from .. import nn, generic
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from .conv2d_direct import schedule_direct_cuda
@autotvm.register_topi_compute("conv2d_nchw.cuda")
def conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
"""Compute conv2d with NCHW layout"""
return nn.conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nchw.cuda")
def schedule_conv2d_nchw(cfg, outs):
"""Create the schedule for conv2d_nchw"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv2d_nchw":
schedule_direct_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_cudnn.cuda")
def conv2d_cudnn(
cfg, data, kernel, strides, padding, dilation, groups=1, layout="NCHW", out_dtype="float32"
):
"""Compute conv2d using CuDNN library"""
if layout == "NCHW":
tensor_format = 0 # CUDNN_TENSOR_NCHW
N, _, H, W = get_const_tuple(data.shape)
elif layout == "NHWC":
tensor_format = 1 # CUDNN_TENSOR_NHWC
N, H, W, _ = get_const_tuple(data.shape)
else:
raise ValueError("Unsupported layout %s in cudnn" % layout)
CO, CI, KH, KW = get_const_tuple(kernel.shape)
# handle dilation
stride_h, stride_w = (strides, strides) if isinstance(strides, int) else strides
dilation_h, dilation_w = (dilation, dilation) if isinstance(dilation, int) else dilation
KH_dilated = (KH - 1) * dilation_h + 1
KW_dilated = (KW - 1) * dilation_w + 1
pt, pl, pb, pr = get_pad_tuple(padding, (KH_dilated, KW_dilated))
if (pt != pb) or (pl != pr):
raise ValueError("Cudnn doesn't support asymmetric padding.")
OH = (H + pt + pb - KH) // stride_h + 1
OW = (W + pl + pr - KW) // stride_w + 1
if isinstance(N, int):
cfg.add_flop(
groups
* 2
* N
* OH
* OW
* CO
* CI
* ((KH - 1) * dilation_h + 1)
* ((KW - 1) * dilation_w + 1)
)
if data.dtype == "int8" or kernel.dtype == "int8":
if layout == "NCHW":
raise ValueError("NCHW layout do not support int8 in cudnn")
dtype = "int32"
else:
dtype = data.dtype
cfg.define_knob("algo", range(cudnn.algo_to_index("fwd", "CUDNN_CONVOLUTION_FWD_ALGO_COUNT")))
if cfg.is_fallback:
if cudnn.exists():
# Let CUDNN choose the best algo, based on benchmarks run
# on the local machine. In the future, this should be
# based on parameters stored in the Target.
cfg["algo"] = OtherOptionEntity(-1)
else:
cfg["algo"] = OtherOptionEntity(0)
return cudnn.conv_forward(
data,
kernel,
[pt, pl], # cudnn padding pt, pl on both sides of input
[stride_h, stride_w],
[dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
algo=cfg["algo"].val,
conv_dtype=dtype,
groups=groups,
)
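# Editor note (worked example, not in the original file): for a 56x56 NCHW input with a
# 3x3 kernel, stride 1, dilation 1 and symmetric padding 1, the code above computes
# OH = (56 + 1 + 1 - 3) // 1 + 1 = 56 (and OW likewise), which only feeds the FLOP
# estimate; with dilation 2 the effective kernel passed to get_pad_tuple grows to
# KH_dilated = (3 - 1) * 2 + 1 = 5.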
@autotvm.register_topi_schedule("conv2d_cudnn.cuda")
def schedule_conv2d_cudnn(cfg, outs):
"""Create the schedule for conv2d_cudnn"""
return generic.schedule_extern(outs)
def conv2d_backward_weight_cudnn(
dy, x, kernel_size, padding, stride, dilation, groups, layout, output_dtype
):
"""Compute conv2d wgrad using CuDNN library"""
assert layout in ["NCHW", "NHWC"]
if dy.dtype == "float16":
# cuDNN does not seem to support other combination.
assert output_dtype == "float16", "Only supports fp16 output for cuDNN fp16 wgrad."
conv_dtype = "float32" # Accumulation is always fp32
return cudnn.conv_backward_filter(
dy,
x,
kernel_size,
padding,
stride,
dilation,
conv_mode=1,
tensor_format=0 if layout == "NCHW" else 1,
conv_dtype=conv_dtype,
groups=groups,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv2D alter op and legalize functions for cuda backend"""
import logging
import tvm
from tvm import autotvm, relay, te
from .. import nn
from ..nn import conv2d_legalize
from ..utils import get_const_tuple, is_target
from .conv2d_winograd import _infer_tile_size
from .tensorcore_alter_op import pad_to_tensorcore
logger = logging.getLogger("topi")
@nn.conv2d_alter_layout.register(["cuda", "gpu"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
if not is_target(["vulkan", "rocm", "cuda"]):
return None
dispatch_ctx = autotvm.task.DispatchContext.current
new_attrs = {k: attrs[k] for k in attrs.keys()}
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data, kernel = tinfos
out_dtype = out_type.dtype
impl, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
# The best implementation is not an AutoTVM template.
# It may be from the auto-scheduler
if impl.name.find("winograd") != -1:
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
if data_layout == "NHWC" and kernel_layout == "HWIO":
N, H, W, CI = get_const_tuple(data.shape)
KH, KW, _, CO = get_const_tuple(kernel.shape)
# Pre-compute weight transformation in winograd
tile_size = _infer_tile_size(tinfos[0], tinfos[1], layout="NHWC")
# HWIO -> OIHW
kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
# alpha, alpha, CO, CI
weight = relay.nn.contrib_conv2d_winograd_weight_transform(
kernel_transform, tile_size=tile_size
)
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
elif data_layout == "NCHW" and kernel_layout == "OIHW":
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
# Pre-compute weight transformation in winograd
tile_size = _infer_tile_size(tinfos[0], tinfos[1], layout="NCHW")
# alpha, alpha, CO, CI
weight = relay.nn.contrib_conv2d_winograd_weight_transform(
inputs[1], tile_size=tile_size
)
# alpha, alpha, CI, CO
weight = relay.transpose(weight, axes=[0, 1, 3, 2])
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
return None
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
do_new_layout = False
if is_target(["vulkan", "rocm"]):
do_new_layout = "+dotprod" in target.mattr or target.supports_integer_dot_product
if not do_new_layout:
return None
topi_tmpl = workload[0]
if topi_tmpl == "conv2d_NCHWc_int8.cuda":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
assert CO % 4 == 0, "Number of output channels should be a multiple of 4"
new_layout = "NCHW4c"
new_attrs["channels"] = CO
new_attrs["data_layout"] = new_layout
new_attrs["out_layout"] = new_layout
new_attrs["kernel_layout"] = "OIHW4o4i"
ic_block_factor = oc_block_factor = 4
# Store the same config for the altered operator (workload)
new_data = te.placeholder(
(N, CI // ic_block_factor, H, W, ic_block_factor), dtype=data.dtype
)
new_kernel = te.placeholder(
(
CO // oc_block_factor,
CI // ic_block_factor,
KH,
KW,
oc_block_factor,
ic_block_factor,
),
dtype=kernel.dtype,
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, new_layout, out_dtype],
"conv2d_NCHWc_int8.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
if topi_tmpl == "conv2d_nchw_winograd.cuda":
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
# pre-compute weight transformation in winograd
tile_size = _infer_tile_size(tinfos[0], tinfos[1])
weight = relay.nn.contrib_conv2d_winograd_weight_transform(inputs[1], tile_size=tile_size)
weight = relay.transpose(weight, axes=[0, 1, 3, 2])
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
# Store the same config for the altered operator (workload)
new_data = data
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel.dtype
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
"conv2d_nchw_winograd_without_weight_transform.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
if topi_tmpl in ("conv2d_nhwc_winograd_direct.cuda", "conv2d_nhwc_winograd_tensorcore.cuda"):
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
assert data_layout == "NHWC" and kernel_layout == "HWIO"
N, H, W, CI = get_const_tuple(data.shape)
KH, KW, _, CO = get_const_tuple(kernel.shape)
# Pre-compute weight transformation in winograd
tile_size = _infer_tile_size(data, kernel, layout="NHWC")
kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
weight = relay.nn.contrib_conv2d_winograd_weight_transform(
kernel_transform, tile_size=tile_size
)
weight = relay.transpose(weight, axes=[0, 1, 3, 2])
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
# Store the same config for the altered operator (workload)
new_data = data
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel.dtype
)
if topi_tmpl == "conv2d_nhwc_winograd_direct.cuda":
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
"conv2d_nhwc_winograd_direct_without_weight_transform.cuda",
)
elif topi_tmpl == "conv2d_nhwc_winograd_tensorcore.cuda":
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
"conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
if topi_tmpl == "group_conv2d_NCHWc_int8.cuda":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
new_layout = "NCHW4c"
new_attrs["channels"] = CO
new_attrs["data_layout"] = new_layout
new_attrs["out_layout"] = new_layout
new_attrs["kernel_layout"] = "OIHW4o4i"
ic_block_factor = oc_block_factor = 4
# Store the same config for the altered operator (workload)
new_data = te.placeholder(
(N, CI // ic_block_factor, H, W, ic_block_factor), dtype=data.dtype
)
new_kernel = te.placeholder(
(
CO // oc_block_factor,
CI // ic_block_factor // groups,
KH,
KW,
oc_block_factor,
ic_block_factor,
),
dtype=kernel.dtype,
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, groups, out_dtype],
"group_conv2d_NCHWc_int8.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
if topi_tmpl == "conv2d_HWNCnc_tensorcore.cuda":
assert data_layout == "HWNC" and kernel_layout == "HWOI"
assert float(tvm.cuda(0).compute_version) >= 7.5
H, W, N, CI = get_const_tuple(data.shape)
KH, KW, CO, _ = get_const_tuple(kernel.shape)
if (
kernel.dtype in ["int4", "uint4"]
and (CI % 32 != 0 or CO % 8 != 0)
or kernel.dtype in ["int8", "uint8"]
and (CI % 16 != 0 or CO % 32 != 0)
):
return relay.nn.conv2d(*inputs, **new_attrs)
new_attrs["channels"] = CO
if kernel.dtype in ["int4", "uint4"]:
new_attrs["kernel_layout"] = "HWOI8o32i"
ic_block_factor = 32
oc_block_factor = 8
else:
new_attrs["kernel_layout"] = "HWOI32o16i"
ic_block_factor = 16
oc_block_factor = 32
new_kernel = te.placeholder(
(
KH,
KW,
CO // oc_block_factor,
CI // ic_block_factor,
oc_block_factor,
ic_block_factor,
),
dtype=kernel.dtype,
)
new_workload = autotvm.task.args_to_workload(
[data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_HWNCnc_tensorcore.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
return None
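# Editor note (illustrative, not in the original file): the NCHW -> NCHW4c / OIHW ->
# OIHW4o4i relayout above packs channels in blocks of 4 for the int8 schedule; e.g. a
# (1, 64, 56, 56) int8 input becomes (1, 16, 56, 56, 4) and a (128, 64, 3, 3) kernel
# becomes (32, 16, 3, 3, 4, 4), which is exactly the placeholder shape registered for
# the new "conv2d_NCHWc_int8.cuda" workload.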
def _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor):
# Pad batch size
if db != 0:
data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, db), (0, 0)))
# Pad input channel
if di != 0:
data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
# Pad output channel
if do != 0:
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, do), (0, 0)))
if do != 0:
new_out_channel = out_channel + do
new_attrs["channels"] = new_out_channel
out = relay.nn.conv2d(data, kernel, **new_attrs)
if db != 0 or do != 0:
original_out_shape = [x.value for x in output_tensor.shape]
out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
return out
def _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor):
# Pad batch size
if db != 0:
data = relay.nn.pad(data, pad_width=((0, db), (0, 0), (0, 0), (0, 0)))
# Pad input channel
if di != 0:
data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, di)))
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, di), (0, 0)))
# Pad output channel
if do != 0:
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, do)))
if do != 0:
new_out_channel = out_channel + do
new_attrs["channels"] = new_out_channel
out = relay.nn.conv2d(data, kernel, **new_attrs)
if db != 0 or do != 0:
original_out_shape = [x.value for x in output_tensor.shape]
out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
return out
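# Editor note (illustrative, not in the original file): the helpers above pad the NHWC /
# HWNC tensors up to the nearest tensor-core-friendly (batch, in_channel, out_channel)
# combination and then slice the result back; e.g. with candidates (16, 16, 16),
# (32, 16, 8) and (8, 16, 32), a workload with batch = 30, CI = 64, CO = 64 only needs
# the batch padded to 32 (db = 2, di = do = 0), after which strided_slice restores the
# original output shape.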
@conv2d_legalize.register(["cuda", "gpu"])
def _conv2d_legalize(attrs, inputs, arg_types):
"""Legalizes Conv2D op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
    arg_types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
if not is_target(["vulkan", "rocm", "cuda"]):
return None
# Dilation not supported yet. Return None if dilation is not (1, 1)
dilation = attrs.get_int_tuple("dilation")
if not (dilation[0] == 1 and dilation[1] == 1):
return None
# No legalization for depthwise convolutions yet.
groups = attrs.get_int("groups")
if groups != 1:
return None
# Collect the input tensors.
data_tensor, kernel_tensor = arg_types[0], arg_types[1]
data_dtype = data_tensor.dtype
# Collect the output tensor.
output_tensor = arg_types[2]
# Collect the input exprs.
data, kernel = inputs
# Get the conv attrs
new_attrs = {k: attrs[k] for k in attrs.keys()}
# Get data layout. Return None if not NCHW
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
# Pad input and output channels to use int8 schedule.
if data_dtype in ["int8", "uint8"]:
if data_layout == "NCHW" and kernel_layout == "OIHW":
oc_modified = False
in_channel = data_tensor.shape[1].value
out_channel = kernel_tensor.shape[0].value
# Pad input channel
if in_channel % 4 != 0:
new_in_channel = ((in_channel + 4) // 4) * 4
diff = new_in_channel - in_channel
pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
data = relay.nn.pad(data, pad_width=pad_width)
kernel = relay.nn.pad(kernel, pad_width=pad_width)
# Pad output channel
new_out_channel = out_channel
if out_channel % 4 != 0:
new_out_channel = ((out_channel + 4) // 4) * 4
diff = new_out_channel - out_channel
kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
oc_modified = True
if oc_modified:
new_attrs["channels"] = new_out_channel
out = tvm.relay.nn.conv2d(data, kernel, **new_attrs)
original_out_shape = [x.value for x in output_tensor.shape]
out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
else:
out = relay.nn.conv2d(data, kernel, **new_attrs)
return out
if data_layout == "NHWC" and kernel_layout == "HWIO":
batch = data_tensor.shape[0].value
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[3].value
if (
(batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
):
# no need to pad
return None
candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
(db, di, do), extra_flops = pad_to_tensorcore(
batch, in_channel, out_channel, candidates
)
if extra_flops > 2:
logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)
return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
if data_layout == "HWNC" and kernel_layout == "HWOI":
batch = data_tensor.shape[2].value
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[2].value
if batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0:
return None
candidates = [(8, 16, 32)]
(db, di, do), extra_flops = pad_to_tensorcore(
batch, in_channel, out_channel, candidates
)
if extra_flops > 2:
logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)
return _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
elif data_dtype in ["float16"]:
if data_layout == "NHWC" and kernel_layout == "HWIO":
if isinstance(data_tensor.shape[0], tvm.tir.expr.Any):
# Skip legalize when the batch size is dynamic
return None
batch = data_tensor.shape[0].value
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[3].value
if (
(batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
):
# no need to pad
return None
candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
(db, di, do), extra_flops = pad_to_tensorcore(
batch, in_channel, out_channel, candidates
)
if extra_flops > 2:
logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)
return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
elif data_dtype in ["int4", "uint4"]:
if data_layout == "NHWC" and kernel_layout == "HWIO":
batch = data_tensor.shape[0].value
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[3].value
if (
(batch % 8 == 0 and in_channel % 16 == 0 and out_channel % 32 == 0)
or (batch % 16 == 0 and in_channel % 16 == 0 and out_channel % 16 == 0)
or (batch % 32 == 0 and in_channel % 16 == 0 and out_channel % 8 == 0)
):
# no need to pad
return None
candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
(db, di, do), extra_flops = pad_to_tensorcore(
batch, in_channel, out_channel, candidates
)
if extra_flops > 2:
logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)
return _pad_conv2d_NHWC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
if data_layout == "HWNC" and kernel_layout == "HWOI":
batch = data_tensor.shape[2].value
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[2].value
if batch % 8 == 0 and in_channel % 32 == 0 and out_channel % 8 == 0:
return None
candidates = [(8, 32, 8)]
(db, di, do), extra_flops = pad_to_tensorcore(
batch, in_channel, out_channel, candidates
)
if extra_flops > 2:
logger.info("conv2d pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("conv2d pad_to_tensorcore, extra_flops %s", extra_flops)
return _pad_conv2d_HWNC(db, di, do, data, kernel, out_channel, new_attrs, output_tensor)
return None
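# Worked example for the int8 NCHW/OIHW branch above (illustrative only): with
# in_channel=3 and out_channel=30, both channel counts are rounded up to a multiple of 4:
#
#   new_in_channel  = ((3 + 4) // 4) * 4  = 4    # data and kernel padded by (0, 1) on the C/I axis
#   new_out_channel = ((30 + 4) // 4) * 4 = 32   # kernel padded by (0, 2) on the O axis
#
# so the convolution runs on 4 input / 32 output channels and the extra output channels
# are removed afterwards with relay.strided_slice.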
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_direct.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The templates for cuda conv2d operators"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_direct_cuda(cfg, s, conv):
"""schedule optimized for batch size = 1"""
##### space definition begin #####
n, f, y, x = s[conv].op.axis
rc, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "conv2d_nchw.cuda"
)
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AA = s.cache_read(pad_data, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, f, y, x = s[OL].op.axis
rc, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
n, f, y, x = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
N, CO, OH, OW = get_const_tuple(output.shape)
_, KH, KW, CI = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
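# Rough sanity check of the FLOP estimate above (illustrative only): for N=1, CO=CI=64,
# OH=OW=56 and KH=KW=3 it records
#   2 * 1 * 56 * 56 * 64 * 64 * 3 * 3 = 231,211,008
# operations, i.e. about 0.23 GFLOP, with every multiply-accumulate counted as two ops.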
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_hwcn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity
from .. import nn, tag
@autotvm.register_topi_compute("conv2d_hwcn.cuda")
def conv2d_hwcn(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
"""Compute conv2d with HWCN layout on CUDA"""
return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_hwcn.cuda")
def schedule_conv2d_hwcn(cfg, outs):
"""Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
sch = te.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
"""Schedule conv2d_hwcn"""
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, "shared", [B])
WW = sch.cache_read(W, "shared", [B])
AL = sch.cache_read(AA, "local", [B])
WL = sch.cache_read(WW, "local", [B])
if B.op in sch.outputs:
Out = B
BL = sch.cache_write(Out, "local")
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope("local")
BL = B
hi, wi, fi, ni = sch[Out].op.axis
# Create tuning space
n_thread_cand = [1, 2, 4, 8, 16, 32]
vthread_cand = [1, 2, 4, 8]
cfg.define_split(
"tile_fi",
fi,
num_outputs=4,
filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
)
cfg.define_split(
"tile_ni",
ni,
num_outputs=4,
filter=lambda x: (x.size[1] in vthread_cand and x.size[2] in n_thread_cand),
)
if cfg.is_fallback:
cfg["tile_fi"] = SplitEntity([-1, 2, 8, 4])
cfg["tile_ni"] = SplitEntity([-1, 2, 8, 4])
# Scheduling
step = 8
bz = sch[Out].fuse(hi, wi)
by, tyz, ty, fi = cfg["tile_fi"].apply(sch, Out, fi)
bx, txz, tx, ni = cfg["tile_ni"].apply(sch, Out, ni)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, te.thread_axis("blockIdx.z"))
sch[Out].bind(by, te.thread_axis("blockIdx.y"))
sch[Out].bind(bx, te.thread_axis("blockIdx.x"))
sch[Out].bind(tyz, te.thread_axis("vthread"))
sch[Out].bind(txz, te.thread_axis("vthread"))
sch[Out].bind(ty, te.thread_axis("threadIdx.y"))
sch[Out].bind(tx, te.thread_axis("threadIdx.x"))
# Schedule BL local write
sch[BL].compute_at(sch[Out], tx)
yi, xi, fi, ni = sch[BL].op.axis
ry, rx, rc = sch[BL].op.reduce_axis
rco, rci = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = sch[AA].op.axis
ty, ci = sch[AA].split(ci, nparts=cfg["tile_fi"].size[2])
tx, ni = sch[AA].split(ni, nparts=cfg["tile_ni"].size[2])
_, ni = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, te.thread_axis("threadIdx.y"))
sch[AA].bind(tx, te.thread_axis("threadIdx.x"))
sch[AA].vectorize(ni)
# Schedule for W's shared memory load
yi, xi, ci, fi = sch[WW].op.axis
ty, ci = sch[WW].split(ci, nparts=cfg["tile_fi"].size[2])
tx, fi = sch[WW].split(fi, nparts=cfg["tile_ni"].size[2])
_, fi = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, te.thread_axis("threadIdx.y"))
sch[WW].bind(tx, te.thread_axis("threadIdx.x"))
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
"""Traverse operators from computation graph"""
if tag.is_broadcast(operator.tag):
if operator not in sch.outputs:
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
elif operator.tag == "conv2d_hwcn":
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if isinstance(W.op, tvm.te.ComputeOp) and "dilate" in W.op.tag:
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch
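# Minimal usage sketch (untested, illustrative only; shapes and variable names are
# assumptions, and outside an AutoTVM session the fallback config defined above is used):
#
#   data   = te.placeholder((56, 56, 64, 16), name="data")    # HWCN
#   kernel = te.placeholder((3, 3, 64, 64), name="kernel")    # (KH, KW, CI, CO)
#   with tvm.target.Target("cuda"):
#       out = conv2d_hwcn(data, kernel, strides=1, padding=1, dilation=1)
#       s = schedule_conv2d_hwcn([out])
#       mod = tvm.build(s, [data, kernel, out])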
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.target import Target
from tvm.topi.cuda.injective import schedule_injective_from_existing
from ..utils import get_const_tuple, traverse_inline, simplify, tag
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def unpack_HWNCnc_to_hwnc(packed_out, out_dtype):
"""Unpack conv2d_hwnc output from layout hwncnc to hwnc
Parameters
-----------
packed_out : tvm.te.Tensor
The output tensor of conv2d_hwnc.
out_dtype : str
The output dtype.
Returns
-------
unpacked_out : tvm.te.Tensor
The unpacked output tensor in hwnc layout.
"""
H, W, N, O, wmma_m, wmma_n = get_const_tuple(packed_out.shape)
idxmod = tvm.tir.indexmod
idxdiv = tvm.tir.indexdiv
oshape = (H, W, N * wmma_m, O * wmma_n)
unpacked_out = te.compute(
oshape,
lambda h, w, n, o: packed_out[
h, w, idxdiv(n, wmma_m), idxdiv(o, wmma_n), idxmod(n, wmma_m), idxmod(o, wmma_n)
].astype(out_dtype),
name="output_unpack",
tag=tag.INJECTIVE + ",unpack_hwncc",
)
return unpacked_out
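# Illustrative index mapping (assumed block sizes, not part of the upstream code): with
# wmma_m = 8 and wmma_n = 32, element (h, w, n=11, o=70) of the unpacked hwnc tensor reads
#   packed_out[h, w, 11 // 8, 70 // 32, 11 % 8, 70 % 32] == packed_out[h, w, 1, 2, 3, 6]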
def conv2d_hwnc_tensorcore(data, kernel, strides, padding, dilation, in_dtype, out_dtype="int32"):
""" "Compute conv2d with tensorcore for HWNC layout with int8/int4"""
assert data.dtype in ("int4", "uint4", "int8", "uint8")
assert kernel.dtype in ("int4", "uint4", "int8", "uint8")
packed_out = hwnc_tensorcore_cuda(data, kernel, strides, padding, dilation, out_dtype)
return unpack_HWNCnc_to_hwnc(packed_out, out_dtype)
@autotvm.register_topi_compute("conv2d_HWNCnc_tensorcore.cuda")
def hwnc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype="int32"):
"""Compute declaration for tensorcore"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
in_dtype = Input.dtype
if in_dtype in ["int4", "uint4"]:
wmma_n = wmma_m = 8
wmma_k = 32
else:
wmma_m = 8
wmma_n = 32
wmma_k = 16
pre_computed = len(Filter.shape) == 6
in_height, in_width, batch, in_channels = get_const_tuple(Input.shape)
if pre_computed:
kernel_h, kernel_w, oc_chunk, _, oc_block_factor, _ = get_const_tuple(Filter.shape)
num_filter = oc_block_factor * oc_chunk
else:
kernel_h, kernel_w, num_filter, _ = get_const_tuple(Filter.shape)
if in_dtype in ["int4", "uint4"]:
assert batch % 8 == 0 and in_channels % 32 == 0 and num_filter % 8 == 0
else:
assert batch % 8 == 0 and in_channels % 16 == 0 and num_filter % 32 == 0, (
"The shape of (batch, in_channels, num_filter) "
"must be multiple of (8, 16, 32) for int8, "
"and (8, 32, 8) for int4"
)
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channels = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
cfg.add_flop(
2 * batch * out_height * out_width * out_channels * in_channels * kernel_h * kernel_w
)
# Input feature map: (H, W, N, IC, n, ic)
data_shape = (in_height, in_width, batch // wmma_m, in_channels // wmma_k, wmma_m, wmma_k)
# Kernel: (H, W, OC, IC, oc, ic)
kernel_shape = (
kernel_h,
kernel_w,
out_channels // wmma_n,
in_channels // wmma_k,
wmma_n,
wmma_k,
)
# Reduction axes
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // wmma_k), name="ic")
ii = te.reduce_axis((0, wmma_k), name="ii")
if pre_computed:
packed_kernel = Filter
else:
packed_kernel = te.compute(
kernel_shape,
lambda kh, kw, o, i, oo, ii: Filter[kh, kw, o * wmma_n + oo, i * wmma_k + ii],
name="packed_kernel",
)
packed_data = te.compute(
data_shape, lambda h, w, n, i, nn, ii: Input[h, w, n * wmma_m + nn, i * wmma_k + ii]
)
pad_before = [pad_top, pad_left, 0, 0, 0, 0]
pad_after = [pad_down, pad_right, 0, 0, 0, 0]
pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
Conv = te.compute(
(out_height, out_width, batch // wmma_m, out_channels // wmma_n, wmma_m, wmma_n),
lambda h, w, n, o, nn, oo: te.sum(
(
pad_data[h * stride_h + kh, w * stride_w + kw, n, ic, nn, ii].astype("int32")
* packed_kernel[kh, kw, o, ic, oo, ii].astype("int32")
),
axis=[ic, kh, kw, ii],
),
name="Conv",
tag="conv2d_HWNCnc_tensorcore",
)
return Conv
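# Layout example (assumed int8 shapes, illustrative only): with wmma_m=8, wmma_n=32 and
# wmma_k=16, an HWNC input of shape (56, 56, 8, 64) and an HWOI filter of shape
# (3, 3, 128, 64) are packed to
#   packed_data:   (56, 56, 8 // 8, 64 // 16, 8, 16)   == (56, 56, 1, 4, 8, 16)
#   packed_kernel: (3, 3, 128 // 32, 64 // 16, 32, 16) == (3, 3, 4, 4, 32, 16)
# and Conv accumulates int32 fragments of shape (wmma_m, wmma_n) over (ic, kh, kw, ii).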
def schedule_hwnc_tensorcore_cuda(cfg, s, Conv):
"""Schedule tensorcore template"""
pad_data, packed_kernel = s[Conv].op.input_tensors
ic, kh, kw, ii = s[Conv].op.reduce_axis
packed_data = s[pad_data].op.input_tensors[0]
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Designate the memory hierarchy
AS = s.cache_read(pad_data, "shared", [Conv])
WS = s.cache_read(packed_kernel, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, "shared", [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope("shared")
OL = Conv
out_dtype = Conv.dtype
if isinstance(packed_kernel.op, te.tensor.ComputeOp) and packed_kernel.name == "packed_kernel":
if autotvm.GLOBAL_SCOPE.in_tuning:
s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
else:
with Target("cuda"):
schedule_injective_from_existing(s, packed_kernel)
if isinstance(pad_data.op, te.tensor.ComputeOp) and "pad" in pad_data.op.tag:
s[pad_data].compute_inline()
data = pad_data.op.input_tensors[0]
if autotvm.GLOBAL_SCOPE.in_tuning:
            # skip this part during tuning to make records accurate
# this part will be pre-computed during NNVM's pre-compute optimization pass
s[pad_data].pragma(s[pad_data].op.axis[0], "debug_skip_region")
else:
data = pad_data
s[data].compute_inline()
data_dtype = data.dtype
kernel_dtype = packed_kernel.dtype
# Schedule for autotvm
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4, 8, 16])
cfg.define_knob("warp_col_tiles", [1, 2, 4, 8, 16])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("split_block_k_nums", [1, 2, 4, 8, 16, 32])
cfg.define_knob("vector_ws", [1, 8])
cfg.define_knob("vector_as", [1, 8, 16])
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
vector_as = cfg["vector_as"].val
vector_ws = cfg["vector_ws"].val
split_block_k_nums = cfg["split_block_k_nums"].val
s[packed_data].compute_inline()
if data_dtype in ["int4", "uint4"]:
wmma_m = wmma_n = 8
wmma_k = 32
else:
wmma_m = 8
wmma_n = 32
wmma_k = 16
warp_size = 32
# Schedule for output
if len(s[output].op.axis) == 4:
(
hc,
wc,
nc,
oc,
) = output.op.axis
nc, nnc = s[output].split(nc, factor=wmma_m)
oc, ooc = s[output].split(oc, factor=wmma_n)
else:
hc, wc, nc, oc, nnc, ooc = output.op.axis
kernel_scope, hc = s[output].split(hc, nparts=1)
block_k = s[output].fuse(hc, wc)
block_k, split_block_k = s[output].split(block_k, factor=split_block_k_nums)
nc, nci = s[output].split(nc, factor=warp_row_tiles)
block_i, nc = s[output].split(nc, factor=block_row_warps)
oc, oci = s[output].split(oc, factor=warp_col_tiles)
block_j, oc = s[output].split(oc, factor=block_col_warps)
s[output].reorder(block_k, split_block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
t = s[output].fuse(nnc, ooc)
_, tx = s[output].split(t, factor=warp_size)
s[output].bind(block_k, block_z)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tx, thread_x)
s[output].bind(nc, thread_y)
s[output].bind(oc, thread_z)
# Schedule wmma store
s[OL].compute_at(s[output], block_j)
hc, wc, nc, oc, nnc, ooc = OL.op.axis
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
# Schedule local computation
s[ConvF].compute_at(s[OL], oc)
_, _, n, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
cfg.define_reorder("reorder_inner", [ko, kh], policy="all")
cfg["reorder_inner"].apply(s, ConvF, [ko, kh])
cfg["reorder_inner"].apply(s, ConvF, [ki, kw])
# Move intermediate computation into each output compute tile
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
    # Schedule for A's shared memory
s[AS].compute_at(s[ConvF], ko)
_, _, n, _, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, _ = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, nparts=warp_size)
ti, _t = s[AS].split(ti, factor=vector_as)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(to, thread_x)
s[AS].vectorize(_t)
    # Schedule for W's shared memory
s[WS].compute_at(s[ConvF], kw)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, _ = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
ti, _t = s[WS].split(ti, factor=vector_ws)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
# double buffer
cfg.define_knob("AS_double_buffer", [0, 1])
cfg.define_knob("WS_double_buffer", [0, 1])
if cfg["AS_double_buffer"].val:
s[AS].double_buffer()
if cfg["WS_double_buffer"].val:
s[WS].double_buffer()
# unroll
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", False)
shape = (wmma_m, wmma_n, wmma_k)
AS_shape = (wmma_m, wmma_k)
AL_shape = (wmma_m, wmma_k)
WS_shape = (wmma_n, wmma_k)
WL_shape = (wmma_n, wmma_k)
CL_shape = (wmma_m, wmma_n)
CS_shape = (wmma_m, wmma_n)
AL_gemm = te.placeholder(AL_shape, name="A", dtype=data_dtype)
WL_gemm = te.placeholder(WL_shape, name="B", dtype=kernel_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(
CL_shape,
lambda ii, jj: te.sum(
(AL_gemm[ii, k_gemm].astype("int32") * WL_gemm[jj, k_gemm].astype("int32")), axis=k_gemm
),
name="C",
)
AL_strides = [wmma_k, 1]
AS_strides = [wmma_k, 1]
WL_strides = [wmma_k, 1]
WS_strides = [wmma_k, 1]
CL_strides = [wmma_n, 1]
CS_strides = [wmma_n, 1]
s[AF].tensorize(
AF.op.axis[-2],
intrin_wmma_load_matrix_A(
AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, data_dtype
),
)
s[WF].tensorize(
WF.op.axis[-2],
intrin_wmma_load_matrix_W(
WL_strides, WS_strides, shape, "col_major", WS_shape, WL_shape, kernel_dtype
),
)
s[OL].tensorize(
nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
)
s[ConvF].tensorize(
nnf,
intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
)
return s
@autotvm.register_topi_schedule("conv2d_HWNCnc_tensorcore.cuda")
def schedule_conv2d_hwnc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_HWNCnc_tensorcore" in op.tag:
schedule_hwnc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=no-value-for-parameter
"""Int8 conv2d in NCHWc layout"""
import tvm
from tvm import te
from tvm import autotvm
from .injective import schedule_injective_from_existing
from .tensor_intrin import dp4a
from ..nn.pad import pad
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype="int32"):
"""Compute conv2d internally using conv2d_nchwc layout for int8 dtype"""
assert data.dtype in ("int8", "uint8")
assert kernel.dtype in ("int8", "uint8")
assert data.dtype == kernel.dtype
packed_out = conv2d_NCHWc_int8(data, kernel, strides, padding, dilation, "NCHW", out_dtype)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
def schedule_conv2d_nchw_int8(outs):
"""Create schedule for tensors"""
return schedule_conv2d_NCHWc_int8(outs)
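# Minimal usage sketch (untested, illustrative only; shapes are assumptions):
#
#   data   = te.placeholder((1, 64, 56, 56), name="data", dtype="int8")
#   kernel = te.placeholder((64, 64, 3, 3), name="kernel", dtype="int8")
#   with tvm.target.Target("cuda"):
#       out = conv2d_nchw_int8(data, kernel, strides=1, padding=1, dilation=1)
#       s = schedule_conv2d_nchw_int8([out])
#       mod = tvm.build(s, [data, kernel, out])
#
# Both channel counts must be multiples of 4 so the NCHW4c / OIHW4o4i packing below applies.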
@autotvm.register_topi_compute("conv2d_NCHWc_int8.cuda")
def conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, layout, out_dtype):
"""Convolution operator in NCHW[x]c layout for int8.
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width] or
5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
kernel : tvm.te.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width] or
6-D with shape [num_filter_chunk, in_channel_chunk, filter_height,
filter_width, num_filter_block, in_channel_block]
stride : int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding: int or a list/tuple of two ints
padding size, or [pad_height, pad_width]
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
layout : str
layout of data
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
"""
assert layout in ["NCHW", "NCHW4c"]
ic_block_factor = 4
oc_block_factor = 4
pre_computed = len(kernel.shape) == 6
if not pre_computed:
batch, channels, height, width = get_const_tuple(data.shape)
assert (
channels % ic_block_factor == 0
), "Number of input channels should be multiple of {}".format(ic_block_factor)
packed_data = te.compute(
(batch, channels // ic_block_factor, height, width, ic_block_factor),
lambda n, c, h, w, vc: data[n, c * ic_block_factor + vc, h, w],
name="packed_data",
)
out_channels, in_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
assert (
out_channels % oc_block_factor == 0
), "Number of output channels should be multiple of {}".format(oc_block_factor)
packed_kernel = te.compute(
(
out_channels // oc_block_factor,
in_channels // ic_block_factor,
kernel_h,
kernel_w,
oc_block_factor,
ic_block_factor,
),
lambda oc_chunk, ic_chunk, kh, kw, oc_block, ic_block: kernel[
oc_chunk * oc_block_factor + oc_block, ic_chunk * ic_block_factor + ic_block, kh, kw
],
name="packed_kernel",
)
else:
packed_data = data
packed_kernel = kernel
batch, ic_chunk, in_height, in_width, ic_block = get_const_tuple(packed_data.shape)
oc_chunk, ic_chunk, kernel_h, kernel_w, oc_block, ic_block = get_const_tuple(
packed_kernel.shape
)
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
# compute graph
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
oshape = (batch, oc_chunk, out_height, out_width, oc_block)
icc = te.reduce_axis((0, ic_chunk), name="ic_chunk")
icb = te.reduce_axis((0, ic_block), name="ic_block")
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
packed_kernel_dtype = packed_kernel.dtype
packed_dtype = "int32" if packed_kernel_dtype == "int8" else "uint32"
conv = te.compute(
oshape,
lambda n, oc_chunk, oh, ow, oc_block: te.sum(
pad_data[
n, icc, oh * stride_h + kh * dilation_h, ow * stride_w + kw * dilation_w, icb
].astype(packed_dtype)
* packed_kernel[oc_chunk, icc, kh, kw, oc_block, icb].astype(packed_dtype),
axis=[icc, kh, kw, icb],
),
)
output = te.compute(
oshape,
lambda n, oc_chunk, oh, ow, oc_block: conv[n, oc_chunk, oh, ow, oc_block].astype(out_dtype),
tag="conv2d_NCHWc_int8",
)
# num flop
num_flop = (
batch
* oc_chunk
* oc_block
* out_height
* out_width
* ic_chunk
* ic_block
* kernel_h
* kernel_w
* 2
)
cfg.add_flop(num_flop)
return output
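# Packing example (illustrative only): for data (1, 64, 56, 56) in NCHW and kernel
# (128, 64, 3, 3) in OIHW with ic_block_factor = oc_block_factor = 4, the compute above uses
#   packed_data:   (1, 16, 56, 56, 4)      # NCHW4c
#   packed_kernel: (32, 16, 3, 3, 4, 4)    # OIHW4o4i
# and the innermost ic_block reduction of length 4 is what dp4a tensorizes in the schedule.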
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.cuda")
def schedule_conv2d_NCHWc_int8(cfg, outs):
"""Schedule conv2d int8 NCHWc template"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv2d_NCHWc_int8":
_schedule_conv2d_NCHWc_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_conv2d_NCHWc_int8(cfg, s, output):
conv = output.op.input_tensors[0]
packed_data, packed_kernel = conv.op.input_tensors
if isinstance(packed_data.op, tvm.te.ComputeOp) and "pad" in packed_data.op.tag:
pad_data = packed_data
packed_data = pad_data.op.input_tensors[0]
else:
pad_data = packed_data
if autotvm.GLOBAL_SCOPE.in_tuning:
        # skip this part during tuning to make records accurate
# this part will be pre-computed during NNVM's pre-compute optimization pass
s[packed_data].pragma(s[packed_data].op.axis[0], "debug_skip_region")
s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
else:
if isinstance(packed_kernel.op, tvm.te.ComputeOp) and packed_kernel.name == "packed_kernel":
# data and kernel are not pre-computed, schedule layout transform here
schedule_injective_from_existing(s, packed_data)
schedule_injective_from_existing(s, packed_kernel)
if pad_data != packed_data:
s[pad_data].compute_inline()
# create cache stage
AA = s.cache_read(pad_data, "shared", [conv])
WW = s.cache_read(packed_kernel, "shared", [conv])
s[conv].set_scope("local")
# handle bias
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0].output(0)
# tile and bind spatial axes
if len(s[output].op.axis) == 5:
n, f, y, x, c = s[output].op.axis
else:
        # For task extraction during auto-tuning, the expected output is 4D. Since auto-tuning
        # tasks are created from scratch, the real auto-tuning will still happen on the 5D output.
n, f, y, x = s[output].op.axis
cfg.define_split("tile_n", cfg.axis(n), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
# this is the scope to attach global config inside this kernel
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bf, by, bx, vn, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bf, te.thread_axis("blockIdx.y"))
s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf
if cfg["fuse_yx"].val:
s[output].bind(tn, te.thread_axis("threadIdx.z"))
s[output].bind(tf, te.thread_axis("threadIdx.y"))
tyx = s[output].fuse(ty, tx)
s[output].bind(tyx, te.thread_axis("threadIdx.x"))
s[conv].compute_at(s[output], tyx)
# number of threads
n_tz = cfg["tile_n"].size[2]
n_ty = cfg["tile_f"].size[2]
n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
else:
s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[conv].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile and bind reduction axes
n, f, y, x, c = s[conv].op.axis
rc, ry, rx, rc_block = s[conv].op.reduce_axis
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=2)
cfg.define_split("tile_ry", cfg.axis(ry), num_outputs=2)
cfg.define_split("tile_rx", cfg.axis(rx), num_outputs=2)
rco, rci = cfg["tile_rc"].apply(s, conv, rc)
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x, c, rc_block)
cfg.define_reorder("reorder_inner", [rco, ryo, rxo], policy="all")
cfg["reorder_inner"].apply(s, conv, [rco, ryo, rxo])
cfg["reorder_inner"].apply(s, conv, [rci, ryi, rxi])
_, rc_block = s[conv].split(rc_block, factor=4)
target = tvm.target.Target.current(allow_none=False)
do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
if do_tensorize:
dtypes = (pad_data.dtype, packed_kernel.dtype)
s[conv].tensorize(rc_block, dp4a("shared", "shared", "local", dtypes))
cache_loc = [rco, ryo, rxo][cfg["reorder_inner"].perm[-1]]
s[AA].compute_at(s[conv], cache_loc)
s[WW].compute_at(s[conv], cache_loc)
# cooperative fetching
for load in [AA, WW]:
c = s[load].op.axis[-1]
c_outer, c = s[load].split(c, factor=4)
s[load].vectorize(c)
fused = s[load].op.axis[:-1] + [c_outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
fused, tz = s[load].split(fused, factor=n_tz)
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# double buffer
cfg.define_knob("AA_double_buffer", [0, 1])
cfg.define_knob("WW_double_buffer", [0, 1])
if cfg["AA_double_buffer"].val:
s[AA].double_buffer()
if cfg["WW_double_buffer"].val:
s[WW].double_buffer()
# unroll
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", False)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_nhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""Compute declaration for tensorcore"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
assert (
(batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0)
or (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0)
or (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0)
), (
"The shape of (batch, in_channel, num_filter) "
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
)
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
# convert data type of input feature maps and weights
# TODO: add checking here, datatype casting may cause precision loss
TransPaddedInput = te.compute(
PaddedInput.shape, lambda n, h, w, c: PaddedInput[n, h, w, c].astype("float16")
)
TransFilter = te.compute(Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype("float16"))
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
TransPaddedInput[
nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
].astype(out_dtype)
* TransFilter[ry, rx, rc, ff].astype(out_dtype),
axis=[ry, rx, rc],
),
name="Conv2dOutput",
tag="conv2d_nhwc_tensorcore",
)
return Output
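# Output-shape example (illustrative only): for a 56x56 NHWC input, a 3x3 kernel, stride 1,
# dilation 1 and padding 1 on every side, the expressions above give
#   out_height = out_width = (56 - 3 + 1 + 1) // 1 + 1 = 56
# i.e. a "same"-style convolution; the float16 casts feed the wmma fragments while the
# accumulation happens in out_dtype.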
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
"""Schedule tensorcore template"""
kh, kw, ic = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
trans_paddata, kernel = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
batch, _, _, _ = get_const_tuple(Conv.shape)
_, _, _, out_channels = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
# inline the pad and dtype transform
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
# Designate the memory hierarchy
AS = s.cache_read(trans_paddata, "shared", [Conv])
WS = s.cache_read(kernel, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, "shared", [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope("shared")
OL = Conv
# Schedule for autotvm
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("vector_width", [1, 2, 4, 8])
if batch % 16 == 0 and out_channels % 16 == 0:
cfg.define_knob("wmma_m", [16, 8, 32])
elif batch % 8 == 0 and out_channels % 32 == 0:
cfg.define_knob("wmma_m", [8, 16, 32])
elif batch % 32 == 0 and out_channels % 8 == 0:
cfg.define_knob("wmma_m", [32, 16, 8])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "conv2d_nhwc_tensorcore.cuda"
)
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
wmma_m = cfg["wmma_m"].val
vector_width = cfg["vector_width"].val
wmma_k = 16
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
warp_size = 32
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Define the intrin strides
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
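    # Example (illustrative only): get_strides([1, 1, 40, 1]) == [40, 40, 40, 1]; each entry
    # is the product of the extents from that position to the end, which is the stride layout
    # handed to the wmma load/store/gemm intrinsics below.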
AS_align = chunk * wmma_k + offset
WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
block_factor_n = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
CS_align = block_factor_o + offset
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
CS_strides = get_strides([1, 1, CS_align, 1])
# Schedule for output
nc, hc, wc, oc = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
block_i, nc = s[output].split(nc, factor=block_factor_n)
block_j, oc = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
t, ti = s[output].split(t, factor=vector_width)
t, tx = s[output].split(t, factor=warp_size)
t, ty = s[output].split(t, factor=block_row_warps)
t, tz = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
# Schedule wmma store
s[OL].compute_at(s[output], block_j)
nc, hc, wc, oc = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, CS_align - 1, CS_align)
oc, ooc = s[OL].split(oc, factor=wmma_n)
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nnc = s[OL].split(nc, factor=wmma_m)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
# Schedule wmma computation
s[ConvF].compute_at(s[OL], oc)
n, h, w, o = ConvF.op.axis
n, nnf = s[ConvF].split(n, factor=wmma_m)
o, oof = s[ConvF].split(o, factor=wmma_n)
ic, ii = s[ConvF].split(ic, factor=wmma_k)
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
# Schedule wmma load
n, h, w, i = AF.op.axis
n, nn = s[AF].split(n, factor=wmma_m)
i, ii = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
kh, kw, i, o = WF.op.axis
i, ii = s[WF].split(i, factor=wmma_k)
o, oo = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
    # Schedule for data's shared memory
n, h, w, i = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, AS_align - 1, AS_align)
t = s[AS].fuse(n, i)
t, ti = s[AS].split(t, factor=vector_width)
t, tx = s[AS].split(t, factor=warp_size)
t, ty = s[AS].split(t, factor=block_row_warps)
_, tz = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
    # Schedule for kernel's shared memory
kh, kw, ic, o = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, WS_align - 1, WS_align)
t, ti = s[WS].split(t, factor=vector_width)
t, tx = s[WS].split(t, factor=warp_size)
t, ty = s[WS].split(t, factor=block_row_warps)
_, tz = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
# tensorize the wmma process
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name="A", dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name="B", dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(
CL_shape,
lambda ii, t0, t1, jj: te.sum(
AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * WL_gemm[k_gemm, jj].astype(out_dtype),
axis=k_gemm,
),
name="C",
)
s[AF].tensorize(
nn,
intrin_wmma_load_matrix_A(
AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, in_dtype
),
)
s[WF].tensorize(
ii,
intrin_wmma_load_matrix_W(
WL_strides, WS_strides, shape, "row_major", WS_shape, WL_shape, in_dtype
),
)
s[OL].tensorize(
nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
)
s[ConvF].tensorize(
nnf,
intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
)
N, OH, OW, CO = get_const_tuple(output.shape)
KH, KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with tensorcore for NCHW layout"""
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_tensorcore" in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
# pylint: disable=too-many-arguments,too-many-locals
# pylint: disable=too-many-statements
"""Winograd template for cuda backend"""
import tvm
from tvm import autotvm, te
from .. import nn
from ..nn.winograd_util import winograd_transform_matrices
from ..utils import get_const_int, get_const_tuple, traverse_inline
from .tensor_intrin import (
intrin_wmma_gemm,
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
)
def _infer_tile_size(data, kernel):
"""Compute the tile size"""
N, H, W, CI = get_const_tuple(data.shape)
if H % 8 == 0:
return 4
return 2
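# Example (illustrative only): H = 56 satisfies H % 8 == 0, so tile_size = 4 and, for a 3x3
# kernel, the Winograd transform uses alpha = 4 + 3 - 1 = 6, i.e. 6x6 input tiles producing
# 4x4 output tiles; an input with H = 30 would fall back to tile_size = 2 (alpha = 4).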
def schedule_bgemm_tensorcore(cfg, s, bgemm, data_pack, kernel_pack):
"""Schedule for bgemm tensorcore"""
A = data_pack
B = kernel_pack
C = bgemm
_, _, P, out_dim = get_const_tuple(C.shape)
out_dtype = C.dtype
# Explicit memory access
AS = s.cache_read(A, "shared", [C])
BS = s.cache_read(B, "shared", [C])
AF = s.cache_read(AS, "wmma.matrix_a", [C])
BF = s.cache_read(BS, "wmma.matrix_b", [C])
CF = s.cache_write(C, "wmma.accumulator")
CS = s.cache_read(CF, "shared", [C])
# Create tuning space
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4, 8])
cfg.define_knob("warp_col_tiles", [1, 2, 4, 8])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 1, 2, 4, 8])
cfg.define_knob("offsetCS", [0, 1, 2, 4, 8])
cfg.define_knob("vec", [1, 2, 4, 8])
# Ensure that the default parameters are applicable when autotvm is not in use
if P % 16 == 0 and out_dim % 16 == 0:
cfg.define_knob("wmma_m", [16, 8, 32])
elif P % 32 == 0 and out_dim % 8 == 0:
cfg.define_knob("wmma_m", [32, 16, 8])
elif P % 8 == 0 and out_dim % 32 == 0:
cfg.define_knob("wmma_m", [8, 16, 32])
warp_size = 32
wmma_k = 16
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offsetAB = cfg["offset"].val
offsetCS = cfg["offsetCS"].val
wmma_m = cfg["wmma_m"].val
vec = cfg["vec"].val
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
# Define the stride of intrin functions
AS_align = chunk * wmma_k + offsetAB
BS_align = warp_col_tiles * block_col_warps * wmma_n + offsetAB
CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
AS_stride = [AS_align, 1]
BS_stride = [BS_align, 1]
AF_stride = [wmma_k, 1]
BF_stride = [wmma_n * warp_col_tiles, 1]
CF_stride = [warp_col_tiles * wmma_n, 1]
CS_stride = [CS_align, 1]
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Schedule for computation
block_factor_b = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
alpha_1, alpha_2, b, o = C.op.axis
block_k = s[C].fuse(alpha_1, alpha_2)
block_i, bc = s[C].split(b, factor=block_factor_b)
block_j, oc = s[C].split(o, factor=block_factor_o)
s[C].reorder(block_k, block_i, block_j, bc, oc)
t = s[C].fuse(bc, oc)
t, vi = s[C].split(t, factor=vec)
t, tx = s[C].split(t, factor=warp_size)
t, ty = s[C].split(t, factor=block_row_warps)
t, tz = s[C].split(t, factor=block_col_warps)
s[C].bind(block_k, block_z)
s[C].bind(block_i, block_x)
s[C].bind(block_j, block_y)
s[C].bind(tz, thread_z)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].vectorize(vi)
# Schedule for wmma store
s[CS].compute_at(s[C], block_j)
_, _, bb, oo = CS.op.axis
s[CS].storage_align(bb, CS_align - 1, CS_align)
bb, bbi = s[CS].split(bb, factor=wmma_m)
oo, ooi = s[CS].split(oo, factor=wmma_n)
bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
s[CS].reorder(bb, oo, bbii, ooii, bbi, ooi)
# Schedule for wmma computation
s[CF].compute_at(s[CS], oo)
_, _, warp_i, warp_j = CF.op.axis
warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
(k,) = CF.op.reduce_axis
k, _k = s[CF].split(k, factor=wmma_k)
ko, ki = s[CF].split(k, factor=chunk)
s[CF].reorder(ko, ki, warp_i, warp_j, _ii, _jj, _k)
# Schedule for wmma_matrix_a load
s[AF].compute_at(s[CF], ki)
_, _, b, i = AF.op.axis
b, b_ii = s[AF].split(b, factor=wmma_m)
i, i_jj = s[AF].split(i, factor=wmma_k)
s[AF].reorder(b, i, b_ii, i_jj)
# Schedule for wmma_matrix_b load
s[BF].compute_at(s[CF], ki)
_, _, i, o = BF.op.axis
o, o_ii = s[BF].split(o, factor=wmma_n)
i, i_ii = s[BF].split(i, factor=wmma_k)
s[BF].reorder(i, o, i_ii, o_ii)
# Schedule for A's(B's) shared memory load
def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
_, _, xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
t = s[stage].fuse(xo, yo)
t, vi = s[stage].split(t, factor=vec)
t, tx = s[stage].split(t, factor=warp_size)
t, ty = s[stage].split(t, factor=block_row_warps)
_, tz = s[stage].split(t, factor=block_col_warps)
s[stage].bind(ty, thread_y)
s[stage].bind(tz, thread_z)
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)
shared_schedule(AS, AS_align)
shared_schedule(BS, BS_align)
shape = (wmma_m, wmma_n, wmma_k)
in_dtype = "float16"
AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=in_dtype)
BL_gemm = te.placeholder((wmma_k, wmma_n), name="BL_gemm", dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
CL_compute = te.compute(
(wmma_m, wmma_n),
lambda ii, jj: te.sum(
AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[k_gemm, jj].astype(out_dtype),
axis=k_gemm,
),
name="CL_compute",
)
# Lower the computation loops down to TensorCore hardware intrinsics
# by mapping the tensorcore to tensor intrinsics
s[AF].tensorize(
b_ii,
intrin_wmma_load_matrix_A(
AF_stride, AS_stride, shape, "row_major", (wmma_m, wmma_k), (wmma_m, wmma_k), "float16"
),
)
s[BF].tensorize(
i_ii,
intrin_wmma_load_matrix_W(
BF_stride, BS_stride, shape, "row_major", (wmma_k, wmma_n), (wmma_k, wmma_n), "float16"
),
)
s[CF].tensorize(
_ii, intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape)
)
s[CS].tensorize(
bbi,
intrin_wmma_store_matrix(
CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
),
)
def schedule_bgemm_direct(cfg, s, bgemm, data_pack, kernel_pack):
"""Schedule for bgemm direct"""
b1, b2, y, x = s[bgemm].op.axis
rc = s[bgemm].op.reduce_axis[0]
alpha = get_const_int(b1.dom.extent)
# Create tuning space
cfg.define_split(
"tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_knob("offset_bgemm", [0, 1, 2, 4, 8])
cfg.define_knob("vector_bgemm", [1, 2, 4, 8])
offset_bgemm = cfg["offset_bgemm"].val
vector_bgemm = cfg["vector_bgemm"].val
C = bgemm
A0, B0 = kernel_pack, data_pack
# Designate the memory hierarchy
OL = s.cache_write(C, "local")
AA = s.cache_read(A0, "shared", [OL])
BB = s.cache_read(B0, "shared", [OL])
# Tile and bind spatial axes
b = s[bgemm].fuse(b1, b2)
bgemm_scope, b = s[bgemm].split(b, nparts=1)
bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(bz, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(vz, te.thread_axis("vthread"))
s[C].bind(vy, te.thread_axis("vthread"))
s[C].bind(vx, te.thread_axis("vthread"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
# Tile reduction axes
s[OL].compute_at(s[C], tx)
b1, b2, y, x = s[OL].op.axis
b = s[OL].fuse(b1, b2)
(rc,) = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, b, y, x, rci)
s[AA].compute_at(s[OL], rco)
_, _, k, n = s[AA].op.axis
AA_align = offset_bgemm + cfg["tile_x"].size[1] * cfg["tile_x"].size[2] * cfg["tile_x"].size[3]
s[AA].storage_align(k, AA_align - 1, AA_align)
s[BB].compute_at(s[OL], rco)
_, _, m, k = s[BB].op.axis
BB_align = offset_bgemm + cfg["tile_rc"].size[1]
s[BB].storage_align(m, BB_align - 1, BB_align)
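    # Editorial note: the extra offset_bgemm columns added by storage_align pad the
    # innermost dimension of the shared buffers so consecutive rows start at
    # different shared-memory banks, reducing bank conflicts. For example, with
    # tile_x factors (vthread, thread, inner) = (1, 16, 2) and offset_bgemm = 8,
    # AA_align = 8 + 1 * 16 * 2 = 40.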
# Schedule for A and B shared memory load
for load in [AA, BB]:
fused = s[load].fuse(*list(s[load].op.axis))
fused, ti = s[load].split(fused, factor=vector_bgemm)
fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(ti)
def nhwc_winograd_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, use_tensorcore, pre_computed
):
"""Compute declaration for winograd"""
tile_size = _infer_tile_size(data, kernel)
N, H, W, CI = get_const_tuple(data.shape)
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(H, int) or not isinstance(W, int):
raise RuntimeError(
"cuda winograd nhwc conv2d doesn't support dynamic \
input height or width."
)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
if not pre_computed: # Kernel tensor is raw tensor, do strict check
if dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (dilation_h, dilation_w, 1, 1))
KH, KW, CI, CO = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert HSTR == 1 and WSTR == 1 and KH == KW
else:
        # Kernel tensor is pre-transformed. This op is created by conv2d_alter_op.
# Dilation is not supported
alpha, _, CI, CO = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
data_pad = nn.pad(
data,
(0, pt, pl, 0),
(0, pb, pr, 0),
name="data_pad",
attrs={"schedule_rule": "None"},
)
r = KW
m = tile_size
H = (H + pt + pb - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW if isinstance(N, int) else nH * nW
# Determine whether the shape is available with tensorcore
shape_judge = (
(P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
if shape_judge and use_tensorcore:
trans_type = "float16"
else:
trans_type = data.dtype
# Compute transform matrix
A, _, _ = winograd_transform_matrices(m, r, out_dtype)
_, B, G = winograd_transform_matrices(m, r, data.dtype)
# Transform kernel
if not pre_computed:
# Check if we are currently tuning, if so we want to avoid counting
# prepacking in time costs. Just use a placeholder with the packed shape instead.
if autotvm.GLOBAL_SCOPE.in_tuning:
kernel_pack = te.placeholder(
(alpha, alpha, CI, CO), dtype=kernel.dtype, name="kernel_pack"
)
else:
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
kernel_pack = te.compute(
(alpha, alpha, CI, CO),
lambda eps, nu, ci, co: te.sum(
(kernel[r_kh][r_kw][ci][co]) * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
# Pack input tile
input_tile = te.compute(
(P, CI, alpha, alpha),
lambda p, c, eps, nu: data_pad[
idxdiv(p, (nH * nW)), idxmod(idxdiv(p, nW), nH) * m + eps, idxmod(p, nW) * m + nu, c
],
name="d",
attrs={"schedule_rule": "None"},
)
# Transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
data_pack = te.compute(
(alpha, alpha, P, CI),
lambda eps, nu, p, ci: te.sum(
input_tile[p][ci][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
)
# Convert data type of input feature maps and weights for tensorcore
Transdata = te.compute(
data_pack.shape, lambda eps, nu, p, ci: data_pack[eps, nu, p, ci].astype(trans_type)
)
TransFilter = te.compute(
kernel_pack.shape, lambda eps, nu, ci, co: kernel_pack[eps, nu, ci, co].astype(trans_type)
)
# Do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
bgemm = te.compute(
(alpha, alpha, P, CO),
lambda eps, nu, p, co: te.sum(
(Transdata[eps][nu][p][ci]).astype(out_dtype)
* (TransFilter[eps][nu][ci][co]).astype(out_dtype),
axis=[ci],
),
name="bgemm",
)
# Inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
inverse = te.compute(
(P, CO, m, m),
lambda p, co, vh, vw: te.sum(
bgemm[r_a][r_b][p][co] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
),
name="inverse",
)
# Output
output = te.compute(
(N, H, W, CO),
lambda n, h, w, co: inverse[
n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), co, idxmod(h, m), idxmod(w, m)
],
name="output",
tag="conv2d_nhwc_winograd",
)
if isinstance(N, int):
cfg.add_flop(2 * N * CO * H * W * CI * KH * KW)
return output
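# Editorial note on the compute above: P counts the m x m output tiles. For example,
# a batch of 4 with a 56 x 56 output and tile_size m = 4 gives nH = nW = 14 and
# P = 4 * 14 * 14 = 784; since 784 % 16 == 0, such a workload can satisfy
# shape_judge (together with CI % 16 == 0 and CO % 16 == 0) and take the
# TensorCore bgemm path.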
def data_weight_transform(s, data_trans, input_tile, thread_num_trans, offset_trans, trans_tag):
"""Schedule for data or kernel transform"""
kernel_align = thread_num_trans + offset_trans
indata_s = s.cache_read(input_tile, "shared", [data_trans])
data_l = s.cache_write(data_trans, "local")
# Schedule for data or kernel transform
eps, nu, p, c = s[data_trans].op.axis
block_x, thread_x = s[data_trans].split(c, thread_num_trans)
block_x = s[data_trans].fuse(p, block_x)
s[data_trans].reorder(block_x, thread_x, eps, nu)
s[data_trans].bind(thread_x, te.thread_axis("threadIdx.x"))
s[data_trans].bind(block_x, te.thread_axis("blockIdx.x"))
s[data_l].compute_at(s[data_trans], thread_x)
eps_l, nu_l, p_l, c_l = s[data_l].op.axis
r_a, r_b = s[data_l].op.reduce_axis
block_x_l, thread_x_l = s[data_l].split(c_l, thread_num_trans)
block_x_l = s[data_l].fuse(p_l, block_x_l)
s[data_l].reorder(block_x_l, thread_x_l, eps_l, nu_l, r_a, r_b)
for axis in [eps_l, nu_l, r_a, r_b]:
s[data_l].unroll(axis)
# Schedule for share memory load
s[indata_s].compute_at(s[data_l], block_x_l)
if trans_tag == "data":
p_is, c_is, eps_is, nu_is = s[indata_s].op.axis
data_align = (
get_const_int(eps_is.dom.extent) * get_const_int(nu_is.dom.extent) + offset_trans
)
s[indata_s].storage_align(c_is, data_align - 1, data_align)
block_x_is, thread_x_is = s[indata_s].split(c_is, thread_num_trans)
s[indata_s].bind(thread_x_is, te.thread_axis("threadIdx.x"))
else:
eps_is, nu_is, ci_is, co_is = s[indata_s].op.axis
s[indata_s].storage_align(nu_is, kernel_align - 1, kernel_align)
block_x_is, thread_x_is = s[indata_s].split(co_is, thread_num_trans)
s[indata_s].reorder(ci_is, block_x_is, eps_is, nu_is, thread_x_is)
s[indata_s].bind(thread_x_is, te.thread_axis("threadIdx.x"))
def schedule_nhwc_winograd_cuda(cfg, s, output, use_tensorcore, pre_computed):
"""Schedule winograd template"""
# Get stages
inverse = s[output].op.input_tensors[0]
bgemm, A = s[inverse].op.input_tensors
Transdata, TransFilter = s[bgemm].op.input_tensors
data_pack = s[Transdata].op.input_tensors[0]
kernel_pack = s[TransFilter].op.input_tensors[0]
s[Transdata].compute_inline()
s[TransFilter].compute_inline()
input_tile, B = s[data_pack].op.input_tensors
pad_data = s[input_tile].op.input_tensors[0]
# Define the stride of intrin functions
cfg.define_knob("thread_num_inverse", [1, 32, 64, 128, 256])
cfg.define_knob("thread_num_data", [1, 32, 64, 128, 256])
cfg.define_knob("thread_num_kernel", [1, 32, 64, 128, 256])
cfg.define_knob("offset_inverse", [0, 2, 4])
cfg.define_knob("offset_data", [0, 1, 2, 4])
cfg.define_knob("offset_kernel", [0, 1, 2, 4])
cfg.define_knob("inverse_in_vector", [1, 2, 4])
thread_num_data = cfg["thread_num_data"].val
thread_num_kernel = cfg["thread_num_kernel"].val
thread_num_inverse = cfg["thread_num_inverse"].val
offset_data = cfg["offset_data"].val
offset_kernel = cfg["offset_kernel"].val
offset_inverse = cfg["offset_inverse"].val
inverse_in_vector = cfg["inverse_in_vector"].val
# Data transform
s[B].compute_inline()
data_weight_transform(s, data_pack, input_tile, thread_num_data, offset_data, trans_tag="data")
s[input_tile].compute_inline()
s[pad_data].compute_inline()
# Kernel transform
if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
kernel, G = s[kernel_pack].op.input_tensors
s[G].compute_inline()
data_weight_transform(
s, kernel_pack, kernel, thread_num_kernel, offset_kernel, trans_tag="kernel"
)
else:
kernel = kernel_pack
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
b1, b2, y, x = s[bgemm].op.axis
alpha = get_const_int(b1.dom.extent)
_, _, P, CI = get_const_tuple(Transdata.shape)
_, _, _, CO = get_const_tuple(TransFilter.shape)
# Determine whether the shape is available with tensorcore
shape_judge = (
(P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
if shape_judge and use_tensorcore:
schedule_bgemm_tensorcore(cfg, s, bgemm, Transdata, TransFilter)
else:
schedule_bgemm_direct(cfg, s, bgemm, Transdata, TransFilter)
# Schedule inverse, output and fusion
if output.op in s.outputs:
OL = None
else:
OL = output
s[OL].set_scope("local")
output = s.outputs[0]
s[A].compute_inline()
inverse_s = s.cache_read(bgemm, "shared", [inverse])
    m = alpha - 3 + 1  # output tile size m = alpha - r + 1, with r = 3 (3x3 kernel) assumed here
offset_inverse_in = offset_inverse
vector_width_inverse_in = inverse_in_vector
# Schedule for output
n, h, w, co = s[output].op.axis
ho, wo, hi, wi = s[output].tile(h, w, m, m)
s[output].reorder(n, ho, wo, co, hi, wi)
fused = s[output].fuse(n, ho, wo)
block_x_s, thread_x_s = s[output].split(co, thread_num_inverse)
block_x_s = s[output].fuse(fused, block_x_s)
s[output].reorder(block_x_s, thread_x_s, hi, wi)
if OL is not None:
s[OL].compute_inline()
# Schedule for inverse
s[inverse].compute_at(s[output], thread_x_s)
p_inv, co_inv, eps_inv, nu_inv = s[inverse].op.axis
block_x_inv, thread_x_inv = s[inverse].split(co_inv, thread_num_inverse)
r_a, r_b = s[inverse].op.reduce_axis
for axis in [eps_inv, nu_inv, r_a, r_b]:
s[inverse].unroll(axis)
# Schedule for share memory load
s[inverse_s].compute_at(s[output], block_x_s)
eps_inv_s, nu_inv_s, p_inv_s, co_inv_s = s[inverse_s].op.axis
inverse_in_align = offset_inverse_in + thread_num_inverse
s[inverse_s].storage_align(p_inv_s, inverse_in_align - 1, inverse_in_align)
block_x_inv_s, thread_x_inv_s = s[inverse_s].split(co_inv_s, thread_num_inverse)
block_x_inv_s = s[inverse_s].fuse(p_inv_s, block_x_inv_s)
s[inverse_s].reorder(block_x_inv_s, eps_inv_s, nu_inv_s, thread_x_inv_s)
t = s[inverse_s].fuse(eps_inv_s, nu_inv_s, thread_x_inv_s)
t, ti = s[inverse_s].split(t, factor=vector_width_inverse_in)
t, tx = s[inverse_s].split(t, factor=thread_num_inverse)
s[inverse_s].bind(tx, te.thread_axis("threadIdx.x"))
s[inverse_s].vectorize(ti)
s[output].bind(thread_x_s, te.thread_axis("threadIdx.x"))
s[output].bind(block_x_s, te.thread_axis("blockIdx.x"))
return s
@autotvm.register_topi_compute("conv2d_nhwc_winograd_direct.cuda")
def conv2d_nhwc_winograd_direct(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with winograd for NHWC layout"""
return nhwc_winograd_cuda(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
use_tensorcore=False,
pre_computed=False,
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_direct.cuda")
def schedule_conv2d_nhwc_winograd_direct(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_winograd" in op.tag:
schedule_nhwc_winograd_cuda(
cfg, s, op.output(0), use_tensorcore=False, pre_computed=False
)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc_winograd_tensorcore.cuda")
def conv2d_nhwc_winograd_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with winograd for NHWC layout"""
return nhwc_winograd_cuda(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
use_tensorcore=True,
pre_computed=False,
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_tensorcore.cuda")
def schedule_conv2d_nhwc_winograd_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_winograd" in op.tag:
schedule_nhwc_winograd_cuda(
cfg, s, op.output(0), use_tensorcore=True, pre_computed=False
)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc_winograd_direct_without_weight_transform.cuda")
def conv2d_nhwc_winograd_direct_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
"""Compute conv2d with winograd for NHWC layout"""
return nhwc_winograd_cuda(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
use_tensorcore=False,
pre_computed=True,
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_direct_without_weight_transform.cuda")
def schedule_conv2d_nhwc_winograd_direct_without_weight_transform(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_winograd" in op.tag:
schedule_nhwc_winograd_cuda(
cfg, s, op.output(0), use_tensorcore=False, pre_computed=True
)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda")
def conv2d_nhwc_winograd_tensorcore_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
"""Compute conv2d with winograd for NHWC layout"""
return nhwc_winograd_cuda(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
use_tensorcore=True,
pre_computed=True,
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda")
def schedule_conv2d_nhwc_winograd_tensorcore_without_weight_transform(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_winograd" in op.tag:
schedule_nhwc_winograd_cuda(
cfg, s, op.output(0), use_tensorcore=True, pre_computed=True
)
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv2d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm.contrib import cudnn
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from ..utils import get_const_tuple, traverse_inline
@autotvm.register_topi_compute("conv2d_transpose_nchw.cuda")
def conv2d_transpose_nchw(cfg, data, kernel, stride, padding, out_dtype, output_padding, groups=1):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
cfg: ConfigEntity
The config for this template
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype: str
The output type. This is used in mixed precision
output_padding : tuple of two ints
Used to disambiguate output shape.
groups : int
number of groups
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, inp_channels, inp_height, inp_width = get_const_tuple(data.shape)
_, out_channels, kernel_height, kernel_width = get_const_tuple(kernel.shape)
stride_height, stride_width = stride
outpad_height, outpad_width = output_padding
assert outpad_height < stride_height and outpad_width < stride_width
assert (
inp_channels % groups == 0
), f"input channels {inp_channels} must divide group size {groups}"
cfg.stride = stride
pad_top, pad_left, pad_bottom, pad_right = nn.get_pad_tuple(
padding, (kernel_height, kernel_width)
)
out_width = (inp_width - 1) * stride_width + kernel_width - pad_left - pad_right + outpad_width
pad_left = kernel_width - 1 - pad_left
pad_right = kernel_width - 1 - pad_right + outpad_width
dilated_width = stride_width * (inp_width - 1) + 1
out_height = (
(inp_height - 1) * stride_height + kernel_height - pad_top - pad_bottom + outpad_height
)
pad_top = kernel_height - 1 - pad_top
pad_bottom = kernel_height - 1 - pad_bottom + outpad_height
dilated_height = stride_height * (inp_height - 1) + 1
# compute pad
data = te.compute(
(
batch,
inp_channels,
pad_top + dilated_height + pad_bottom,
pad_left + dilated_width + pad_right,
),
lambda n, c, y, x: tvm.tir.if_then_else(
tvm.tir.all(
x >= pad_left,
x < pad_left + dilated_width,
tvm.tir.indexmod(x - pad_left, stride_width).equal(0),
y >= pad_top,
y < pad_top + dilated_height,
tvm.tir.indexmod(y - pad_top, stride_height).equal(0),
),
data[
n,
c,
tvm.tir.indexdiv(y - pad_top, stride_height),
tvm.tir.indexdiv(x - pad_left, stride_width),
],
tvm.tir.const(0.0, data.dtype),
),
name="data_pad",
)
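    # Editorial note: the pad stage above rewrites the transposed convolution as an
    # ordinary convolution: stride - 1 zeros are inserted between input elements
    # ("dilating" the input), kernel - 1 - pad zeros are added on each border, and
    # the kernel is then applied spatially flipped (see the
    # kernel_height - 1 - dh / kernel_width - 1 - dw indexing below).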
# compute transposed conv
dc = te.reduce_axis((0, inp_channels // groups), name="dc")
dh = te.reduce_axis((0, kernel_height), name="dh")
dw = te.reduce_axis((0, kernel_width), name="dw")
data_out = te.compute(
(batch, out_channels * groups, out_height, out_width),
lambda b, c, h, w: te.sum(
data[b, c // out_channels * (inp_channels // groups) + dc, h + dh, w + dw].astype(
out_dtype
)
* kernel[
c // out_channels * (inp_channels // groups) + dc,
c % out_channels,
kernel_height - 1 - dh,
kernel_width - 1 - dw,
].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return data_out
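# Shape example (editorial, illustrative only): for data (1, 32, 8, 8), kernel
# (32, 64, 4, 4), stride (2, 2), padding (1, 1, 1, 1), output_padding (0, 0) and
# groups = 1, the result has shape (1, 64, 16, 16), since
# out_width = (8 - 1) * 2 + 4 - 1 - 1 + 0 = 16 (and likewise for the height).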
@autotvm.register_topi_schedule("conv2d_transpose_nchw.cuda")
def schedule_conv2d_transpose_nchw(cfg, outs):
"""TOPI Schedule callback for conv2d transpose operator.
Parameters
----------
cfg: ConfigEntity
The parameters for this template
outs: Array of Tensor
The computation graph description of conv2d transpose
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d transpose.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _fallback_schedule(N, F, Y, X):
# pylint: disable=unused-argument
# split N (batch dimension)
if N > 1:
cfg["tile_n"] = SplitEntity([-1, 1, 1, 4])
else:
cfg["tile_n"] = SplitEntity([1, 1, 1, 1])
# split F (output channel dimension)
if F > 1:
cfg["tile_f"] = SplitEntity([-1, 1, 4, 1])
# split Y (height dimension)
y_split_factor = 1
for candidate in range(5, 17):
if Y % candidate == 0:
y_split_factor = candidate
break
cfg["tile_y"] = SplitEntity([-1, 1, 1, y_split_factor])
# split X (width dimension)
x_split_factor = 1
for candidate in range(5, 17):
if X % candidate == 0:
x_split_factor = candidate
break
cfg["tile_x"] = SplitEntity([-1, x_split_factor, 1, 1])
# split RC (input channel dimension, which is a reduction axis)
cfg["tile_rc"] = SplitEntity([-1, 1, 16])
# other configurations
cfg["fuse_yx"] = OtherOptionEntity(False)
cfg["unroll_explicit"] = OtherOptionEntity(True)
cfg["auto_unroll_max_step"] = OtherOptionEntity(1500)
def _callback(op):
if op.tag == "conv2d_transpose_nchw":
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, f, y, x = s[conv].op.axis
rc = s[conv].op.reduce_axis[0]
# TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
bs = pad_data.shape[0]
n_tuning_axis = n if isinstance(bs, tvm.tir.IntImm) else 1
cfg.define_split("tile_n", cfg.axis(n_tuning_axis), num_outputs=4)
cfg.define_split("tile_f", cfg.axis(f), num_outputs=4)
cfg.define_split("tile_y", cfg.axis(y), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(x), num_outputs=4)
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=3)
cfg.define_knob("auto_unroll_max_step", [64, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
N, F, Y, X = get_const_tuple(conv.shape)
if not isinstance(N, int):
N = 1
_fallback_schedule(N, F, Y, X)
##### space definition end #####
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
s[pad_data].set_scope("shared")
AA = pad_data
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bf, by, bx, vn, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(bf, te.thread_axis("blockIdx.y"))
s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf
if cfg["fuse_yx"].val:
s[output].bind(tn, te.thread_axis("threadIdx.z"))
s[output].bind(tf, te.thread_axis("threadIdx.y"))
tyx = s[output].fuse(ty, tx)
                s[output].bind(tyx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tyx)
# number of threads
n_tz = cfg["tile_n"].size[2]
n_ty = cfg["tile_f"].size[2]
n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
else:
s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile reduction axes
n, f, y, x = s[OL].op.axis
rc, ry, rx = s[OL].op.reduce_axis
rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rcm, ry, rx, rci, n, f, y, x)
s[AA].compute_at(s[OL], rx)
s[WW].compute_at(s[OL], rx)
# cooperative fetching
for load in [AA, WW]:
n, f, y, x = s[load].op.axis
fused = s[load].fuse(f, y, x)
tz, fused = s[load].split(fused, nparts=n_tz)
ty, fused = s[load].split(fused, nparts=n_ty)
tx, fused = s[load].split(fused, nparts=n_tx)
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
traverse_inline(s, outs[0].op, _callback)
return s
def conv2d_transpose_cudnn(
x, w, stride, padding, out_dtype, output_padding=(0, 0), layout="NCHW", groups=1
):
"""Compute conv2d_tranpose using cudnn dgrad kernel"""
tensor_format = 0 if layout == "NCHW" else 1
return cudnn.conv_backward_data(
x,
w,
padding,
stride,
(1, 1),
1,
tensor_format,
out_dtype,
groups=groups,
output_padding=output_padding,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv2d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd template for cuda backend"""
import logging
import tvm
from tvm import autotvm, te
from .. import nn
from ..nn.conv2d import (
_conv2d_winograd_nchw_impl,
_conv2d_winograd_nhwc_impl,
conv2d_winograd_nchw,
conv2d_winograd_nhwc,
)
from ..nn.winograd_util import winograd_transform_matrices
from ..utils import get_const_int, get_const_tuple, traverse_inline
logger = logging.getLogger("conv2d_winograd")
def _infer_tile_size(data, kernel, layout="NCHW"):
if layout == "NCHW":
N, CI, H, W = get_const_tuple(data.shape)
else:
assert layout == "NHWC"
N, H, W, CI = get_const_tuple(data.shape)
if H % 8 == 0:
return 4
return 2
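# For example (editorial): a 56 x 56 input picks tile_size = 4, which with a 3x3
# kernel gives alpha = 3 + 4 - 1 = 6, i.e. the F(4x4, 3x3) Winograd variant; inputs
# whose height is not a multiple of 8 fall back to tile_size = 2 (F(2x2, 3x3)).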
def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed):
"""Compute declaration for winograd"""
tile_size = _infer_tile_size(data, kernel)
N, CI, H, W = get_const_tuple(data.shape)
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(H, int) or not isinstance(W, int):
raise RuntimeError(
"cuda winograd conv2d doesn't support dynamic input\
height or width."
)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
if not pre_computed: # kernel tensor is raw tensor, do strict check
if dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
CO, CI, KH, KW = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert HSTR == 1 and WSTR == 1 and KH == KW
else:
        # kernel tensor is pre-transformed. this op is created by alter op layout.
# dilation is not supported
alpha, _, CI, CO = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
data_pad = nn.pad(
data,
(0, 0, pt, pl),
(0, 0, pb, pr),
name="data_pad",
)
r = KW
m = tile_size
A, B, G = winograd_transform_matrices(m, r, out_dtype)
H = (H + pt + pb - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW if isinstance(N, int) else nH * nW
# transform kernel
if not pre_computed:
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
kernel_pack = te.compute(
(alpha, alpha, CI, CO),
lambda eps, nu, ci, co: te.sum(
kernel[co][ci][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
# pack input tile
input_tile = te.compute(
(CI, P, alpha, alpha),
lambda c, p, eps, nu: data_pad[idxdiv(p, (nH * nW))][c][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu],
name="d",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_a")
data_pack = te.compute(
(alpha, alpha, CI, P),
lambda eps, nu, ci, p: te.sum(
input_tile[ci][p][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
bgemm = te.compute(
(alpha, alpha, CO, P),
lambda eps, nu, co, p: te.sum(
kernel_pack[eps][nu][ci][co] * data_pack[eps][nu][ci][p], axis=[ci]
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_a")
inverse = te.compute(
(CO, P, m, m),
lambda co, p, vh, vw: te.sum(
bgemm[r_a][r_b][co][p] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
),
name="inverse",
)
# output
output = te.compute(
(N, CO, H, W),
lambda n, co, h, w: inverse[
co, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), idxmod(h, m), idxmod(w, m)
],
name="output",
tag="conv2d_nchw_winograd",
)
if isinstance(N, int):
cfg.add_flop(2 * N * CO * H * W * CI * KH * KW)
return output
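# Editorial summary of the compute above: it follows the standard Winograd
# decomposition Y = A^T [ (G g G^T) * (B^T d B) ] A (elementwise product in the
# transform domain), realized here as kernel_pack (G g G^T), data_pack (B^T d B),
# bgemm (the elementwise products accumulated over input channels) and inverse
# (the final A^T ... A transform), with A, B, G produced by
# winograd_transform_matrices(m, r, out_dtype).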
def schedule_winograd_cuda(cfg, s, output, pre_computed):
"""Schedule winograd template"""
# get stages
inverse = s[output].op.input_tensors[0]
bgemm, A = s[inverse].op.input_tensors
kernel_pack, data_pack = s[bgemm].op.input_tensors
input_tile, B = s[data_pack].op.input_tensors
pad_data = s[input_tile].op.input_tensors[0]
# data transform
s[B].compute_inline()
data_l = s.cache_write(data_pack, "local")
eps, nu, c, p = s[data_l].op.axis
r_a, r_b = s[data_l].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[data_l].unroll(axis)
eps, nu, c, p = s[data_pack].op.axis
p, pi = s[data_pack].split(p, 1)
fused = s[data_pack].fuse(c, p)
bb, tt = s[data_pack].split(fused, 128)
s[data_pack].reorder(bb, tt, pi, eps, nu)
s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
s[data_l].compute_at(s[data_pack], pi)
s[input_tile].compute_at(s[data_pack], pi)
s[pad_data].compute_inline()
# transform kernel
if not pre_computed:
kernel, G = s[kernel_pack].op.input_tensors
eps, nu, ci, co = s[kernel_pack].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
            # skip this part during tuning to make records accurate
# this part will be pre-computed during pre-compute optimization pass
s[G].pragma(s[G].op.axis[0], "debug_skip_region")
s[kernel_pack].pragma(eps, "debug_skip_region")
else:
s[G].compute_inline()
r_a, r_b = s[kernel_pack].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[kernel_pack].unroll(axis)
fused = s[kernel_pack].fuse(ci, co)
bb, tt = s[kernel_pack].split(fused, 128)
s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b)
s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
else:
kernel = kernel_pack
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
##### space definition begin #####
b1, b2, y, x = s[bgemm].op.axis
rc = s[bgemm].op.reduce_axis[0]
alpha = get_const_int(b1.dom.extent)
cfg.define_split(
"tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
# batch gemm
C = bgemm
A0, B0 = kernel_pack, data_pack
OL = s.cache_write(C, "local")
AA = s.cache_read(A0, "shared", [OL])
BB = s.cache_read(B0, "shared", [OL])
b = s[bgemm].fuse(b1, b2)
# tile and bind spatial axes
bgemm_scope, b = s[bgemm].split(b, nparts=1)
bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(bz, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(vz, te.thread_axis("vthread"))
s[C].bind(vy, te.thread_axis("vthread"))
s[C].bind(vx, te.thread_axis("vthread"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
# tile reduction axes
s[OL].compute_at(s[C], tx)
b1, b2, y, x = s[OL].op.axis
b = s[OL].fuse(b1, b2)
(rc,) = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rci, b, y, x)
s[AA].compute_at(s[OL], rco)
s[BB].compute_at(s[OL], rco)
# cooperative fetching
for load in [AA, BB]:
fused = s[load].fuse(*list(s[load].op.axis))
fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
# schedule inverse, output and fusion
if output.op in s.outputs:
OL = None
else:
OL = output
s[OL].set_scope("local")
output = s.outputs[0]
    m = alpha - 3 + 1  # output tile size m = alpha - r + 1, with r = 3 (3x3 kernel) assumed here
n, co, h, w = s[output].op.axis
ho, wo, hi, wi = s[output].tile(h, w, m, m)
inverse_scope, n = s[output].split(n, nparts=1)
fused = s[output].fuse(n, co, ho, wo)
bb, tt = s[output].split(fused, 128)
s[output].bind(bb, te.thread_axis("blockIdx.x"))
s[output].bind(tt, te.thread_axis("threadIdx.x"))
if OL is not None:
s[OL].compute_at(s[output], tt)
s[A].compute_inline()
co, p, vh, vw = s[inverse].op.axis
r_a, r_b = s[inverse].op.reduce_axis
for axis in [vh, vw, r_a, r_b]:
s[inverse].unroll(axis)
s[inverse].compute_at(s[output], tt)
return s
@autotvm.register_topi_compute("conv2d_nchw_winograd.cuda")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
return winograd_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.cuda")
def schedule_conv2d_nchw_winograd(cfg, outs):
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nchw_winograd" in op.tag:
schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=False)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nchw_winograd_without_weight_transform.cuda")
def conv2d_nchw_winograd_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
return winograd_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
)
@autotvm.register_topi_schedule("conv2d_nchw_winograd_without_weight_transform.cuda")
def schedule_conv2d_nchw_winograd_without_weight_transform(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nchw_winograd" in op.tag:
schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=True)
traverse_inline(s, outs[0].op, _callback)
return s
@conv2d_winograd_nhwc.register(["cuda", "gpu"])
def conv2d_winograd_nhwc_cuda(
data,
weight,
strides,
padding,
dilation,
out_dtype,
pre_computed=False,
auto_scheduler_rewritten_layout="",
meta_schedule_original_shape=None,
):
"""Conv2D Winograd in NHWC layout.
This is a clean version to be used by the auto-scheduler for both CPU and GPU.
"""
tile_size = _infer_tile_size(data, weight, layout="NHWC")
return _conv2d_winograd_nhwc_impl(
data, weight, strides, padding, dilation, out_dtype, tile_size, pre_computed
)
@conv2d_winograd_nchw.register(["cuda", "gpu"])
def conv2d_winograd_nchw_cuda(
data,
weight,
strides,
padding,
dilation,
out_dtype,
pre_computed=False,
auto_scheduler_rewritten_layout="",
meta_schedule_original_shape=None,
):
"""Conv2D Winograd in NCHW layout.
This is a clean version to be used by the auto-scheduler for both CPU and GPU.
"""
tile_size = _infer_tile_size(data, weight, layout="NCHW")
return _conv2d_winograd_nchw_impl(
data, weight, strides, padding, dilation, out_dtype, tile_size, pre_computed
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv3d with cuda backend"""
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import OtherOptionEntity
from tvm.contrib import cudnn
from .. import nn, generic
from ..utils import get_const_tuple, traverse_inline
from .conv3d_direct import schedule_direct_conv3d_cuda
@autotvm.register_topi_compute("conv3d_ncdhw.cuda")
def conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
"""Conv3D operator in NCDHW layout for cuda backend.
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
kernel : tvm.te.Tensor
5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of three ints
padding size, or [pad_depth, pad_height, pad_width]
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
groups: int
Number of groups
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
return nn.conv3d_ncdhw(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("conv3d_ncdhw.cuda")
def schedule_conv3d_ncdhw(cfg, outs):
"""TOPI schedule callback of conv3d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv3d_ncdhw" in op.tag:
schedule_direct_conv3d_cuda(cfg, s, op.output(0), "NCDHW", "conv3d_ncdhw.cuda")
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv3d_ndhwc.cuda")
def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
"""Conv3d operator in NDHWC layout for cuda backend.
Parameters
----------
Input : tvm.te.Tensor
5-D with shape [batch, in_depth, in_height, in_width, in_channel]
Filter : tvm.te.Tensor
5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
groups: int
Number of groups
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_depth, out_height, out_width, out_channel]
"""
return nn.conv3d_ndhwc(data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("conv3d_ndhwc.cuda")
def schedule_conv3d_ndhwc(cfg, outs):
"""TOPI schedule callback of conv3d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv3d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv3d_ndhwc" in op.tag:
schedule_direct_conv3d_cuda(cfg, s, op.output(0), "NDHWC", "conv3d_ndhwc.cuda")
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv3d_cudnn.cuda")
def conv3d_cudnn(
cfg, data, kernel, strides, padding, dilation, groups, layout="NCDHW", out_dtype="float32"
):
"""Conv3D operator for cuda backend.
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
kernel : tvm.te.Tensor
5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of three ints
padding size, or [pad_depth, pad_height, pad_width]
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
layout : str
layout of data
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
if layout == "NCDHW":
tensor_format = 0 # CUDNN_TENSOR_NCHW
N, _, D, H, W = get_const_tuple(data.shape)
elif layout == "NDHWC":
tensor_format = 1 # CUDNN_TENSOR_NHWC
N, D, H, W, _ = get_const_tuple(data.shape)
else:
raise ValueError("Unsupported layout %s in cudnn" % layout)
CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
assert groups == 1, "conv3d_cudnn does not support groups"
# handle dilation
stride_d, stride_h, stride_w = (
(strides, strides, strides) if isinstance(strides, int) else strides
)
pad_d, pad_h, pad_w = (padding, padding, padding) if isinstance(padding, int) else padding
dilation_d, dilation_h, dilation_w = (
(dilation, dilation, dilation) if isinstance(dilation, int) else dilation
)
OD = (D + 2 * pad_d - KD) // stride_d + 1
OH = (H + 2 * pad_h - KH) // stride_h + 1
OW = (W + 2 * pad_w - KW) // stride_w + 1
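    # Worked example (editorial): with D = H = W = 16, KD = KH = KW = 3, unit
    # stride, unit dilation and padding 1, OD = OH = OW = (16 + 2 - 3) // 1 + 1 = 16,
    # i.e. a "same"-sized output; these extents only feed the FLOP estimate below.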
if isinstance(N, int):
cfg.add_flop(
2
* N
* OD
* OH
* OW
* CO
* CI
* ((KD - 1) * dilation_d + 1)
* ((KH - 1) * dilation_h + 1)
* ((KW - 1) * dilation_w + 1)
)
cfg.define_knob("algo", range(cudnn.algo_to_index("fwd", "CUDNN_CONVOLUTION_FWD_ALGO_COUNT")))
if cfg.is_fallback:
if cudnn.exists():
# Let CUDNN choose the best algo, based on benchmarks run
# on the local machine. In the future, this should be
# based on parameters stored in the Target.
cfg["algo"] = OtherOptionEntity(-1)
else:
cfg["algo"] = OtherOptionEntity(0)
return cudnn.conv_forward(
data,
kernel,
[pad_d, pad_h, pad_w],
[stride_d, stride_h, stride_w],
[dilation_d, dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
algo=cfg["algo"].val,
        conv_dtype=out_dtype,
)
@autotvm.register_topi_schedule("conv3d_cudnn.cuda")
def schedule_conv3d_cudnn(_, outs):
"""TOPI schedule callback of conv3d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.
"""
return generic.schedule_extern(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv3D alter op and legalize functions for cuda backend"""
import logging
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple
from .conv3d_winograd import _infer_tile_size
logger = logging.getLogger("topi")
@nn.conv3d_alter_layout.register(["cuda", "gpu"])
def _alter_conv3d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
_, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv3d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
# The best implementation is not an AutoTVM template,
# we then assume it's not necessary to alter this op.
return None
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
return None
topi_tmpl = workload[0]
new_attrs = {k: attrs[k] for k in attrs.keys()}
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data, kernel = tinfos
out_dtype = out_type.dtype
if topi_tmpl == "conv3d_ncdhw_winograd.cuda":
if dilation != (1, 1, 1):
logger.warning("Does not support weight pre-transform for dilated 3D convolution.")
return None
assert data_layout == "NCDHW" and kernel_layout == "OIDHW"
N, CI, D, H, W = get_const_tuple(data.shape)
CO, _, KD, KH, KW = get_const_tuple(kernel.shape)
# Pre-compute weight transformation in winograd
tile_size = _infer_tile_size(tinfos[0], tinfos[1])
weight = relay.nn.contrib_conv3d_winograd_weight_transform(inputs[1], tile_size=tile_size)
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
# Store the same config for the altered operators (workload)
new_data = data
# Check if depth is transformed or not
if 2 < KD < 8 and KD == KH:
new_weight = te.placeholder(
(KD + tile_size - 1, KH + tile_size - 1, KW + tile_size - 1, CO, CI),
dtype=kernel.dtype,
)
else:
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, KD, CO, CI), dtype=kernel.dtype
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
"conv3d_ncdhw_winograd_without_weight_transform.cuda",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv3d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d_direct.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The templates for cuda conv3d operators"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_direct_conv3d_cuda(cfg, s, conv, layout, workload_name):
"""schedule optimized for batch size = 1"""
##### space definition begin #####
if layout == "NCDHW":
n, f, d, y, x = s[conv].op.axis
elif layout == "NDHWC":
n, d, y, x, f = s[conv].op.axis
else:
raise ValueError("not support this layout {} yet".format(layout))
rc, rd, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_d", d, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_rd", ry, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(target.kind.name, target.model, workload_name)
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AA = s.cache_read(pad_data, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, d, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
bd, vd, td, di = cfg["tile_d"].apply(s, output, d)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].reorder(bf, bd, by, bx, vf, vd, vy, vx, tf, td, ty, tx, fi, di, yi, xi)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(s[output].fuse(bd, by), te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vd, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(s[output].fuse(td, tf), te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, f, d, y, x = s[OL].op.axis
rc, rd, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
rdo, rdi = cfg["tile_rd"].apply(s, OL, rd)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(rco, rdo, ryo, rxo, rci, rdi, ryi, rxi, n, f, d, y, x)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
n, f, d, y, x = s[load].op.axis
fused = s[load].fuse(n, f, d, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
td, fused = s[load].split(fused, nparts=cfg["tile_d"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(s[load].fuse(td, ty), te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
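    # Editorial note: the loop above implements cooperative fetching: the fused copy
    # loop is split by the same thread extents used for the compute
    # (tile_f/tile_d/tile_y/tile_x size[2]), so every thread of the block helps load
    # the shared tiles it later reads.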
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
N, CO, OD, OH, OW = get_const_tuple(output.shape)
_, KD, KH, KW, CI = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OD * OH * OW * CO * CI * KD * KH * KW)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d_ndhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple3d
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def ndhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""Compute declaration for conv3d tensorcore function"""
assert isinstance(stride, int) or len(stride) == 3
assert isinstance(dilation, int) or len(dilation) == 3
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_d = dilation_h = dilation_w = dilation
else:
dilation_d, dilation_h, dilation_w = dilation
batch, in_depth, in_height, in_width, in_channel = get_const_tuple(Input.shape)
kernel_d, kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
assert (
(batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0)
or (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0)
or (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0)
), (
"The shape of (batch, in_channel, num_filter) "
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
)
# compute the output shape
dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_depth = simplify((in_depth - dilated_kernel_d + pad_front + pad_back) // stride_d + 1)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
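    # Worked example of the output-size arithmetic above (assumed values):
    # with in_depth = 8, kernel_d = 3, dilation_d = 1, stride_d = 1 and
    # pad_front = pad_back = 1, dilated_kernel_d = (3 - 1) * 1 + 1 = 3 and
    # out_depth = (8 - 3 + 1 + 1) // 1 + 1 = 8, i.e. the depth is preserved.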
pad_before = [0, pad_front, pad_top, pad_left, 0]
pad_after = [0, pad_back, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
rc = te.reduce_axis((0, in_channel), name="rc")
rz = te.reduce_axis((0, kernel_d), name="rz")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
# convert data type of input feature maps and weights
# TODO: add checking here, datatype casting may cause precision loss
TransPaddedInput = te.compute(
PaddedInput.shape, lambda n, d, h, w, c: PaddedInput[n, d, h, w, c].astype("float16")
)
TransFilter = te.compute(
Filter.shape, lambda d, h, w, i, o: Filter[d, h, w, i, o].astype("float16")
)
Output = te.compute(
(batch, out_depth, out_height, out_width, out_channel),
lambda nn, zz, yy, xx, ff: te.sum(
TransPaddedInput[
nn,
zz * stride_d + rz * dilation_d,
yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w,
rc,
].astype(out_dtype)
* TransFilter[rz, ry, rx, rc, ff].astype(out_dtype),
axis=[rz, ry, rx, rc],
),
name="Conv3dOutput",
tag="conv3d_ndhwc_tensorcore",
)
return Output
def schedule_ndhwc_tensorcore_cuda(cfg, s, Conv):
"""Schedule tensorcore template"""
kd, kh, kw, ic = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
trans_paddata, kernel = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
batch, _, _, _, _ = get_const_tuple(Conv.shape)
_, _, _, _, out_channels = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
# inline the pad and dtype transform
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
# Designate the memory hierarchy
AS = s.cache_read(trans_paddata, "shared", [Conv])
WS = s.cache_read(kernel, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
if Conv.op in s.outputs:
output = Conv
ConvS = s.cache_read(ConvF, "shared", [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope("shared")
OL = Conv
# Schedule for autotvm
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("vector_width", [1, 2, 4, 8])
if batch % 16 == 0 and out_channels % 16 == 0:
cfg.define_knob("wmma_m", [16, 8, 32])
elif batch % 8 == 0 and out_channels % 32 == 0:
cfg.define_knob("wmma_m", [8, 16, 32])
elif batch % 32 == 0 and out_channels % 8 == 0:
cfg.define_knob("wmma_m", [32, 16, 8])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "conv3d_ndhwc_tensorcore.cuda"
)
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
wmma_m = cfg["wmma_m"].val
vector_width = cfg["vector_width"].val
wmma_k = 16
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
warp_size = 32
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Define the intrin strides
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
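    # Hedged illustration of the helper above (the extents are assumptions):
    #   get_strides([1, 1, 1, 16, 1]) -> [16, 16, 16, 16, 1]
    # i.e. entry i is the product of extents[i:], which is the form the wmma
    # load/store/gemm intrinsics below expect for their stride arguments.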
AS_align = chunk * wmma_k + offset
WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
block_factor_n = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
CS_align = block_factor_o + offset
AS_strides = get_strides([1, 1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
CL_strides = get_strides([1, 1, 1, wmma_n * warp_col_tiles, 1])
CS_strides = get_strides([1, 1, 1, CS_align, 1])
# Schedule for output
nc, dc, hc, wc, oc = output.op.axis
block_k = s[output].fuse(dc, hc, wc)
s[output].bind(block_k, block_z)
block_i, nc = s[output].split(nc, factor=block_factor_n)
block_j, oc = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
t, ti = s[output].split(t, factor=vector_width)
t, tx = s[output].split(t, factor=warp_size)
t, ty = s[output].split(t, factor=block_row_warps)
t, tz = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
# Schedule wmma store
s[OL].compute_at(s[output], block_j)
nc, dc, hc, wc, oc = OL.op.axis
s[OL].reorder(dc, hc, wc, nc, oc)
s[OL].storage_align(wc, CS_align - 1, CS_align)
oc, ooc = s[OL].split(oc, factor=wmma_n)
oc, oci = s[OL].split(oc, factor=warp_col_tiles)
_, oc = s[OL].split(oc, factor=block_col_warps)
nc, nnc = s[OL].split(nc, factor=wmma_m)
nc, nci = s[OL].split(nc, factor=warp_row_tiles)
_, nc = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
# Schedule wmma computation
s[ConvF].compute_at(s[OL], oc)
n, d, h, w, o = ConvF.op.axis
n, nnf = s[ConvF].split(n, factor=wmma_m)
o, oof = s[ConvF].split(o, factor=wmma_n)
ic, ii = s[ConvF].split(ic, factor=wmma_k)
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kd, kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
# Schedule wmma load
n, d, h, w, i = AF.op.axis
n, nn = s[AF].split(n, factor=wmma_m)
i, ii = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
kd, kh, kw, i, o = WF.op.axis
i, ii = s[WF].split(i, factor=wmma_k)
o, oo = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
# Schedule for data's share memory
n, d, h, w, i = AS.op.axis
s[AS].reorder(d, h, w, n, i)
s[AS].storage_align(w, AS_align - 1, AS_align)
t = s[AS].fuse(n, i)
t, ti = s[AS].split(t, factor=vector_width)
t, tx = s[AS].split(t, factor=warp_size)
t, ty = s[AS].split(t, factor=block_row_warps)
_, tz = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
# Schedule for kernel's share memory
kd, kh, kw, ic, o = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, WS_align - 1, WS_align)
t, ti = s[WS].split(t, factor=vector_width)
t, tx = s[WS].split(t, factor=warp_size)
t, ty = s[WS].split(t, factor=block_row_warps)
_, tz = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
# tensorize the wmma process
AS_shape = (wmma_m, 1, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name="A", dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name="B", dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k")
CL_compute = te.compute(
CL_shape,
lambda ii, t0, t1, t2, jj: te.sum(
AL_gemm[ii, t0, t1, t2, k_gemm].astype(out_dtype)
* WL_gemm[k_gemm, jj].astype(out_dtype),
axis=k_gemm,
),
name="C",
)
s[AF].tensorize(
nn,
intrin_wmma_load_matrix_A(
AL_strides, AS_strides, shape, "row_major", AS_shape, AL_shape, in_dtype
),
)
s[WF].tensorize(
ii,
intrin_wmma_load_matrix_W(
WL_strides, WS_strides, shape, "row_major", WS_shape, WL_shape, in_dtype
),
)
s[OL].tensorize(
nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape)
)
s[ConvF].tensorize(
nnf,
intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape),
)
N, OD, OH, OW, CO = get_const_tuple(output.shape)
KD, KH, KW, CI, _ = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OD * OH * OW * CO * CI * KD * KH * KW)
@autotvm.register_topi_compute("conv3d_ndhwc_tensorcore.cuda")
def conv3d_ndhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
"""Compute conv3d with tensorcore for NDHWC layout"""
assert groups == 1, "tensorcore conv3d does not support groups"
return ndhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv3d_ndhwc_tensorcore.cuda")
def schedule_conv3d_ndhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv3d_ndhwc_tensorcore" in op.tag:
schedule_ndhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
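# --- Hedged usage sketch (illustration only, not part of the upstream file) ---
# The compute/schedule pair registered above is normally reached through the
# relay op strategy. A minimal direct invocation, mirroring the topi unit-test
# pattern, could look like the sketch below; the shapes, dtypes and helper name
# are assumptions chosen to satisfy the divisibility asserts. Without tuning
# records the autotvm dispatcher falls back to a default config, and actually
# building the schedule would additionally require a tensor-core capable CUDA
# toolchain.
def _example_conv3d_ndhwc_tensorcore():
    data = te.placeholder((16, 8, 8, 8, 16), name="data", dtype="float16")
    kernel = te.placeholder((3, 3, 3, 16, 16), name="kernel", dtype="float16")
    with tvm.target.Target("cuda"):
        # The autotvm wrappers query `cfg` from the dispatch context, so no
        # explicit config object is passed here.
        out = conv3d_ndhwc_tensorcore(data, kernel, 1, 1, 1, 1, "float32")
        sch = schedule_conv3d_ndhwc_tensorcore([out])
    return sch, [data, kernel, out]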
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d_transpose_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Conv3d transpose template for cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple, traverse_inline
from .conv3d_direct import schedule_direct_conv3d_cuda
@autotvm.register_topi_compute("conv3d_transpose_ncdhw.cuda")
def conv3d_transpose_ncdhw(cfg, data, kernel, stride, padding, out_dtype, output_padding):
"""Transposed 3D convolution ncdhw forward operator.
Parameters
----------
cfg: ConfigEntity
The config for this template
Input : tvm.te.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
Filter : tvm.te.Tensor
5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
        The spatial stride along depth, height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype: str
The output type. This is used in mixed precision
output_padding : tuple of three ints
Used to disambiguate output shape
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
batch, inp_channels, inp_depth, inp_height, inp_width = get_const_tuple(data.shape)
_, out_channels, kernel_depth, kernel_height, kernel_width = get_const_tuple(kernel.shape)
stride_depth, stride_height, stride_width = stride
outpad_depth, outpad_height, outpad_width = output_padding
assert (
outpad_height < stride_height
and outpad_width < stride_width
and outpad_depth < stride_depth
)
cfg.stride = stride
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = nn.get_pad_tuple3d(
padding, (kernel_depth, kernel_height, kernel_width)
)
out_depth = (inp_depth - 1) * stride_depth + kernel_depth - pad_front - pad_back + outpad_depth
pad_front = kernel_depth - 1 - pad_front
pad_back = kernel_depth - 1 - pad_back
dilated_depth = stride_depth * (inp_depth - 1) + 1
out_width = (inp_width - 1) * stride_width + kernel_width - pad_left - pad_right + outpad_width
pad_left = kernel_width - 1 - pad_left
pad_right = kernel_width - 1 - pad_right
dilated_width = stride_width * (inp_width - 1) + 1
out_height = (
(inp_height - 1) * stride_height + kernel_height - pad_top - pad_bottom + outpad_height
)
pad_top = kernel_height - 1 - pad_top
pad_bottom = kernel_height - 1 - pad_bottom
dilated_height = stride_height * (inp_height - 1) + 1
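    # Worked example of the padding rewrite above (assumed values): with
    # inp_depth = 4, stride_depth = 2, kernel_depth = 3, outpad_depth = 0 and
    # pad_front = pad_back = 1, out_depth = (4 - 1) * 2 + 3 - 1 - 1 + 0 = 7.
    # The rewritten pads become kernel_depth - 1 - pad = 1 on each side and
    # dilated_depth = 2 * (4 - 1) + 1 = 7, so the padded, dilated input has
    # extent 1 + 7 + 1 = 9 and the stride-1 convolution below produces
    # 9 - 3 + 1 = 7 = out_depth, as expected.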
# compute pad
data = te.compute(
(
batch,
inp_channels,
pad_front + dilated_depth + pad_back,
pad_top + dilated_height + pad_bottom,
pad_left + dilated_width + pad_right,
),
lambda n, c, d, y, x: tvm.tir.if_then_else(
tvm.tir.all(
x >= pad_left,
x < pad_left + dilated_width,
tvm.tir.indexmod(x - pad_left, stride_width).equal(0),
y >= pad_top,
y < pad_top + dilated_height,
tvm.tir.indexmod(y - pad_top, stride_height).equal(0),
d >= pad_front,
d < pad_front + dilated_depth,
tvm.tir.indexmod(d - pad_front, stride_depth).equal(0),
),
data[
n,
c,
tvm.tir.indexdiv(d - pad_front, stride_depth),
tvm.tir.indexdiv(y - pad_top, stride_height),
tvm.tir.indexdiv(x - pad_left, stride_width),
],
tvm.tir.const(0.0, "float32"),
),
name="data_pad",
)
# compute transposed conv
dc = te.reduce_axis((0, inp_channels), name="dc")
dd = te.reduce_axis((0, kernel_depth), name="dd")
dh = te.reduce_axis((0, kernel_height), name="dh")
dw = te.reduce_axis((0, kernel_width), name="dw")
data_out = te.compute(
(batch, out_channels, out_depth, out_height, out_width),
lambda b, c, d, h, w: te.sum(
data[b, dc, d + dd, h + dh, w + dw].astype(out_dtype)
* kernel[
dc, c, kernel_depth - 1 - dd, kernel_height - 1 - dh, kernel_width - 1 - dw
].astype(out_dtype),
axis=[dc, dd, dh, dw],
),
tag="conv3d_transpose_ncdhw",
)
return data_out
@autotvm.register_topi_schedule("conv3d_transpose_ncdhw.cuda")
def schedule_conv3d_transpose_ncdhw(cfg, outs):
"""TOPI Schedule callback for conv3d transpose operator.
Parameters
----------
cfg: ConfigEntity
The parameters for this template
outs: Array of Tensor
The computation graph description of conv3d transpose
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv3d transpose.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv3d_transpose_ncdhw":
schedule_direct_conv3d_cuda(
cfg, s, op.output(0), "NCDHW", "conv3d_transpose_ncdhw.cuda"
)
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/conv3d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd template for cuda backend"""
import logging
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_int, get_const_tuple, traverse_inline, simplify
from ..nn.winograd_util import winograd_transform_matrices
logger = logging.getLogger("conv3d_winograd")
def _infer_tile_size(data, kernel):
N, CI, D, H, W = get_const_tuple(data.shape)
if H % 8 == 0:
return 4
return 2
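# Hedged illustration (assumed shapes): for an input with H = 64 the tile size
# is 4, so a 3x3x3 kernel gives alpha = 3 + 4 - 1 = 6 transform points per
# dimension; for H = 30 the tile size falls back to 2 and alpha = 4.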
def winograd_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed):
"""Compute declaration for winograd"""
tile_size = _infer_tile_size(data, kernel)
N, CI, D, H, W = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_d = dilation_h = dilation_w = dilation
else:
dilation_d, dilation_h, dilation_w = dilation
DSTR, HSTR, WSTR = (strides, strides, strides) if isinstance(strides, int) else strides
if not pre_computed: # kernel tensor is raw tensor, do strict check
if dilation_d != 1 or dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_d, dilation_h, dilation_w))
CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert DSTR == 1 and HSTR == 1 and WSTR == 1 and KD == KH and KH == KW
else:
# kernel tensor is pre-transformed. this op is created by alter op layout.
# dilation is not supported
alpha, _, _, CO, CI = get_const_tuple(kernel.shape)
KD = KH = KW = alpha + 1 - tile_size
assert (
DSTR == 1
and HSTR == 1
and WSTR == 1
and dilation_d == 1
and dilation_h == 1
and dilation_w == 1
)
pf, pt, pl, pb, pd, pr = nn.get_pad_tuple3d(padding, (KD, KH, KW))
data_pad = nn.pad(data, (0, 0, pf, pt, pl), (0, 0, pb, pd, pr), name="data_pad")
r = KW
m = tile_size
A, B, G = winograd_transform_matrices(m, r, out_dtype)
D = (D + pf + pb - KD) // DSTR + 1
H = (H + pt + pd - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nD, nH, nW = (D + m - 1) // m, (H + m - 1) // m, (W + m - 1) // m
P = N * nD * nH * nW
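    # Worked example of the tiling bookkeeping above (assumed values): with
    # m = tile_size = 4 and an output of D = H = W = 16, each of nD, nH, nW is
    # (16 + 3) // 4 = 4, so every sample contributes 4 * 4 * 4 = 64 tiles and
    # the batched GEMM below has P = N * 64 independent columns.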
# transform kernel
if not pre_computed:
# Check if we are currently tuning, if so we want to avoid counting
# prepacking in time costs. Just use a placeholder with the packed shape instead.
if autotvm.GLOBAL_SCOPE.in_tuning:
kernel_pack = te.placeholder(
(alpha, alpha, alpha, CO, CI), dtype=kernel.dtype, name="kernel_pack"
)
else:
r_kd = te.reduce_axis((0, KD), name="r_kd")
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
kernel_pack = te.compute(
(alpha, alpha, alpha, CO, CI),
lambda omg, eps, nu, co, ci: te.sum(
kernel[co][ci][r_kd][r_kh][r_kw] * G[omg][r_kd] * G[eps][r_kh] * G[nu][r_kw],
axis=[r_kd, r_kh, r_kw],
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
# pack input tile
input_tile = te.compute(
(CI, P, alpha, alpha, alpha),
lambda c, p, omg, eps, nu: data_pad[idxdiv(p, (nD * nH * nW))][c][
idxmod(idxdiv(p, nH * nW), nD) * m + omg
][idxmod(idxdiv(p, nW), nH) * m + eps][idxmod(p, nW) * m + nu],
name="d",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
r_c = te.reduce_axis((0, alpha), "r_c")
data_pack = te.compute(
(alpha, alpha, alpha, CI, P),
lambda omg, eps, nu, ci, p: te.sum(
input_tile[ci][p][r_a][r_b][r_c] * B[r_a][omg] * B[r_b][eps] * B[r_c][nu],
axis=[r_a, r_b, r_c],
),
name="data_pack",
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
bgemm = te.compute(
(alpha, alpha, alpha, CO, P),
lambda omg, eps, nu, co, p: te.sum(
kernel_pack[omg][eps][nu][co][ci] * data_pack[omg][eps][nu][ci][p], axis=[ci]
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
r_c = te.reduce_axis((0, alpha), "r_c")
inverse = te.compute(
(CO, P, m, m, m),
lambda co, p, vd, vh, vw: te.sum(
bgemm[r_a][r_b][r_c][co][p] * A[r_a][vd] * A[r_b][vh] * A[r_c][vw], axis=[r_a, r_b, r_c]
),
name="inverse",
)
# output
output = te.compute(
(N, CO, D, H, W),
lambda n, co, d, h, w: inverse[
co,
n * nD * nH * nW + idxdiv(d, m) * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m),
idxmod(d, m),
idxmod(h, m),
idxmod(w, m),
],
name="output",
tag="conv3d_ncdhw_winograd",
)
cfg.add_flop(2 * N * CO * D * H * W * CI * KD * KH * KW)
return output
def winograd_without_depth_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
"""Compute declaration for winograd without transforming depth"""
tile_size = _infer_tile_size(data, kernel)
N, CI, D, H, W = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_d = dilation_h = dilation_w = dilation
else:
dilation_d, dilation_h, dilation_w = dilation
DSTR, HSTR, WSTR = (strides, strides, strides) if isinstance(strides, int) else strides
if not pre_computed: # kernel tensor is raw tensor, do strict check
if dilation_d != 1 or dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_d, dilation_h, dilation_w))
CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert HSTR == 1 and WSTR == 1 and KH == KW
else:
        # kernel tensor is pre-transformed. this op is created by alter op layout.
# dilation is not supported
alpha, _, KD, CO, CI = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
pf, pt, pl, pb, pd, pr = nn.get_pad_tuple3d(padding, (KD, KH, KW))
data_pad = nn.pad(data, (0, 0, pf, pt, pl), (0, 0, pb, pd, pr), name="data_pad")
out_depth = simplify((D - KD + pf + pb) // DSTR + 1)
D += pf + pb
r = KW
m = tile_size
A, B, G = winograd_transform_matrices(m, r, out_dtype)
H = (H + pt + pd - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW
# transform kernel
if not pre_computed:
        # During autotuning don't count kernel packing as a time cost
# as it will later be removed via alter_op_layout.
if autotvm.GLOBAL_SCOPE.in_tuning:
kernel_pack = te.placeholder(
(alpha, alpha, KD, CO, CI), dtype=kernel.dtype, name="kernel_pack"
)
else:
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
kernel_pack = te.compute(
(alpha, alpha, KD, CO, CI),
lambda eps, nu, d, co, ci: te.sum(
kernel[co][ci][d][r_kh][r_kw] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
# pack input tile
input_tile = te.compute(
(CI, D, P, alpha, alpha),
lambda c, d, p, eps, nu: data_pad[idxdiv(p, (nH * nW))][c][d][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu],
name="d",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
data_pack = te.compute(
(alpha, alpha, CI, D, P),
lambda eps, nu, ci, d, p: te.sum(
input_tile[ci][d][p][r_a][r_b] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
rz = te.reduce_axis((0, KD), name="rz")
bgemm = te.compute(
(alpha, alpha, CO, out_depth, P),
lambda eps, nu, co, d, p: te.sum(
kernel_pack[eps][nu][rz][co][ci] * data_pack[eps][nu][ci][d * DSTR + rz][p],
axis=[ci, rz],
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
inverse = te.compute(
(CO, out_depth, P, m, m),
lambda co, d, p, vh, vw: te.sum(
bgemm[r_a][r_b][co][d][p] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
),
name="inverse",
)
# output
output = te.compute(
(N, CO, out_depth, H, W),
lambda n, co, d, h, w: inverse[
co, d, n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), idxmod(h, m), idxmod(w, m)
],
name="output",
tag="conv3d_ncdhw_winograd_without_depth",
)
cfg.add_flop(2 * N * CO * D * H * W * CI * KD * KH * KW)
return output
def schedule_winograd_cuda(cfg, s, output, pre_computed):
"""Schedule winograd template"""
# get stages
inverse = s[output].op.input_tensors[0]
bgemm, A = s[inverse].op.input_tensors
kernel_pack, data_pack = s[bgemm].op.input_tensors
input_tile, B = s[data_pack].op.input_tensors
pad_data = s[input_tile].op.input_tensors[0]
# data transform
s[B].compute_inline()
data_l = s.cache_write(data_pack, "local")
omg, eps, nu, c, p = s[data_l].op.axis
r_a, r_b, r_c = s[data_l].op.reduce_axis
# TODO unrolling by omg, eps, nu may improve performance but
# in some cases causes extremely long build times due to imperfect tiling.
for axis in [r_a, r_b, r_c]:
s[data_l].unroll(axis)
omg, eps, nu, c, p = s[data_pack].op.axis
p, pi = s[data_pack].split(p, 1)
fused = s[data_pack].fuse(c, p)
bb, tt = s[data_pack].split(fused, 128)
s[data_pack].reorder(bb, tt, pi, omg, eps, nu)
s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
s[data_l].compute_at(s[data_pack], pi)
s[input_tile].compute_at(s[data_pack], pi)
s[pad_data].compute_inline()
# transform kernel
if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
kernel, G = s[kernel_pack].op.input_tensors
omg, eps, nu, co, ci = s[kernel_pack].op.axis
s[G].compute_inline()
r_a, r_b, r_c = s[kernel_pack].op.reduce_axis
# Could add additional unrolling by omg, eps, nu in the future.
for axis in [r_a, r_b, r_c]:
s[kernel_pack].unroll(axis)
fused = s[kernel_pack].fuse(co, ci)
bb, tt = s[kernel_pack].split(fused, 128)
s[kernel_pack].reorder(bb, tt, omg, eps, nu, r_a, r_b, r_c)
s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
else:
kernel = kernel_pack
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
##### space definition begin #####
b1, b2, b3, y, x = s[bgemm].op.axis
rc = s[bgemm].op.reduce_axis[0]
alpha = get_const_int(b1.dom.extent)
cfg.define_split(
"tile_b",
cfg.axis(alpha * alpha * alpha),
num_outputs=4,
filter=lambda x: x.size[-3:] == [1, 1, 1],
)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
# batch gemm
C = bgemm
A0, B0 = kernel_pack, data_pack
OL = s.cache_write(C, "local")
AA = s.cache_read(A0, "shared", [OL])
BB = s.cache_read(B0, "shared", [OL])
b = s[bgemm].fuse(b1, b2, b3)
# tile and bind spatial axes
bgemm_scope, b = s[bgemm].split(b, nparts=1)
bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
by, vy, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(bz, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(vz, te.thread_axis("vthread"))
s[C].bind(vy, te.thread_axis("vthread"))
s[C].bind(vx, te.thread_axis("vthread"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
# tile reduction axes
s[OL].compute_at(s[C], tx)
b1, b2, b3, y, x = s[OL].op.axis
b = s[OL].fuse(b1, b2, b3)
(rc,) = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
s[OL].reorder(rco, rci, b, y, x)
s[AA].compute_at(s[OL], rco)
s[BB].compute_at(s[OL], rco)
# cooperative fetching
for load in [AA, BB]:
fused = s[load].fuse(*list(s[load].op.axis))
fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
# schedule inverse, output and fusion
if output.op in s.outputs:
OL = None
else:
OL = output
s[OL].set_scope("local")
output = s.outputs[0]
m = alpha - 3 + 1
n, co, d, h, w = s[output].op.axis
do, di = s[output].split(d, m)
ho, hi = s[output].split(h, m)
wo, wi = s[output].split(w, m)
s[output].reorder(n, co, do, ho, wo, di, hi, wi)
inverse_scope, n = s[output].split(n, nparts=1)
fused = s[output].fuse(n, co, do, ho, wo)
bb, tt = s[output].split(fused, 128)
s[output].bind(bb, te.thread_axis("blockIdx.x"))
s[output].bind(tt, te.thread_axis("threadIdx.x"))
if OL is not None:
s[OL].compute_at(s[output], tt)
s[A].compute_inline()
co, p, vd, vh, vw = s[inverse].op.axis
r_a, r_b, r_c = s[inverse].op.reduce_axis
    # Could add additional unrolling of vd, vh, vw in the future
for axis in [r_a, r_b, r_c]:
s[inverse].unroll(axis)
s[inverse].compute_at(s[output], tt)
return s
def schedule_winograd_no_depth_cuda(cfg, s, output, pre_computed):
"""Schedule winograd template"""
# get stages
inverse = s[output].op.input_tensors[0]
bgemm, A = s[inverse].op.input_tensors
kernel_pack, data_pack = s[bgemm].op.input_tensors
input_tile, B = s[data_pack].op.input_tensors
pad_data = s[input_tile].op.input_tensors[0]
# data transform
s[B].compute_inline()
data_l = s.cache_write(data_pack, "local")
eps, nu, c, d, p = s[data_l].op.axis
r_a, r_b = s[data_l].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[data_l].unroll(axis)
eps, nu, c, d, p = s[data_pack].op.axis
p, pi = s[data_pack].split(p, 1)
fused = s[data_pack].fuse(c, d, p)
bb, tt = s[data_pack].split(fused, 128)
s[data_pack].reorder(bb, tt, pi, eps, nu)
s[data_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[data_pack].bind(tt, te.thread_axis("threadIdx.x"))
s[data_l].compute_at(s[data_pack], pi)
s[input_tile].compute_at(s[data_pack], pi)
s[pad_data].compute_inline()
# transform kernel
if not pre_computed and not autotvm.GLOBAL_SCOPE.in_tuning:
kernel, G = s[kernel_pack].op.input_tensors
eps, nu, kd, co, ci = s[kernel_pack].op.axis
s[G].compute_inline()
r_a, r_b = s[kernel_pack].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[kernel_pack].unroll(axis)
fused = s[kernel_pack].fuse(kd, co, ci)
bb, tt = s[kernel_pack].split(fused, 128)
s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b)
s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
else:
kernel = kernel_pack
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
##### space definition begin #####
b1, b2, z, y, x = s[bgemm].op.axis
# Combine channel and depth axes.
rc = s[bgemm].op.reduce_axis[0]
rz = s[bgemm].op.reduce_axis[1]
alpha = get_const_int(b1.dom.extent)
cfg.define_split(
"tile_b", cfg.axis(alpha * alpha), num_outputs=4, filter=lambda x: x.size[-3:] == [1, 1, 1]
)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_rz", rz, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 128, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
# batch gemm
C = bgemm
A0, B0 = kernel_pack, data_pack
OL = s.cache_write(C, "local")
AA = s.cache_read(A0, "shared", [OL])
BB = s.cache_read(B0, "shared", [OL])
b = s[bgemm].fuse(b1, b2)
# Allow two different tiling strategies as both seem
# to work best in different cases.
cfg.define_knob("unroll_axis", [0, 1])
# tile and bind spatial axes
bgemm_scope, b = s[bgemm].split(b, nparts=1)
bz, vz, tz, zi = cfg["tile_b"].apply(s, C, b)
by, vy, ty, yi = cfg["tile_y"].apply(s, C, z)
if cfg["unroll_axis"].val:
bx, vx, tx, xi = cfg["tile_x"].apply(s, C, y)
else:
bx, vx, tx, xi = cfg["tile_x"].apply(s, C, x)
s[C].bind(bz, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(vz, te.thread_axis("vthread"))
s[C].bind(vy, te.thread_axis("vthread"))
s[C].bind(vx, te.thread_axis("vthread"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].reorder(bgemm_scope, bz, by, bx, vz, vy, vx, tz, ty, tx, zi, yi, xi)
if cfg["unroll_axis"].val:
s[C].unroll(x)
else:
s[C].unroll(y)
# tile reduction axes
s[OL].compute_at(s[C], tx)
b1, b2, y1, y2, x = s[OL].op.axis
y = s[OL].fuse(y1, y2)
b = s[OL].fuse(b1, b2)
rc, rz = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
rzo, rzi = cfg["tile_rz"].apply(s, OL, rz)
s[OL].reorder(rco, rzo, rci, rzi, b, y, x)
s[AA].compute_at(s[OL], rco)
s[BB].compute_at(s[OL], rco)
# cooperative fetching
for load in [AA, BB]:
fused = s[load].fuse(*list(s[load].op.axis))
fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
fused, tz = s[load].split(fused, cfg["tile_b"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[C].pragma(bgemm_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(bgemm_scope, "unroll_explicit", cfg["unroll_explicit"].val)
# schedule inverse, output and fusion
if output.op in s.outputs:
OL = None
else:
OL = output
s[OL].set_scope("local")
output = s.outputs[0]
m = alpha - 3 + 1
n, co, d, h, w = s[output].op.axis
do, di = s[output].split(d, m)
ho, hi = s[output].split(h, m)
wo, wi = s[output].split(w, m)
s[output].reorder(n, co, do, ho, wo, di, hi, wi)
inverse_scope, n = s[output].split(n, nparts=1)
fused = s[output].fuse(n, co, do, ho, wo)
bb, tt = s[output].split(fused, 128)
s[output].bind(bb, te.thread_axis("blockIdx.x"))
s[output].bind(tt, te.thread_axis("threadIdx.x"))
if OL is not None:
s[OL].compute_at(s[output], tt)
s[A].compute_inline()
co, d, p, vh, vw = s[inverse].op.axis
r_a, r_b = s[inverse].op.reduce_axis
for axis in [vh, vw, r_a, r_b]:
s[inverse].unroll(axis)
s[inverse].compute_at(s[output], tt)
return s
@autotvm.register_topi_compute("conv3d_ncdhw_winograd.cuda")
def conv3d_ncdhw_winograd(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
"""Conv3d NCDHW using winograd optimization"""
assert groups == 1, "conv3d_ncdhw_winograd only supports a single group"
CO, CI, KD, KH, KW = get_const_tuple(kernel.shape)
# Check if we can transform depth.
if 2 < KD < 8 and KD == KH:
return winograd_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
)
return winograd_without_depth_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
)
@autotvm.register_topi_schedule("conv3d_ncdhw_winograd.cuda")
def schedule_conv3d_ncdhw_winograd(cfg, outs):
"""Dispatch to schedule approriate for conv3d winograd algorithm used."""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv3d_ncdhw_winograd_without_depth" in op.tag:
schedule_winograd_no_depth_cuda(cfg, s, op.output(0), pre_computed=False)
elif "conv3d_ncdhw_winograd" in op.tag:
schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=False)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv3d_ncdhw_winograd_without_weight_transform.cuda")
def conv3d_ncdhw_winograd_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, groups, out_dtype
):
"""Conv3d NCDHW winograd without weight transform."""
assert (
groups == 1
), "conv3d_ncdhw_winograd_without_weight_transform does not support more than one group"
A, B, C, _, _ = get_const_tuple(kernel.shape)
# Check if we can transform depth.
if A == B == C:
return winograd_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
)
return winograd_without_depth_cuda(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
)
@autotvm.register_topi_schedule("conv3d_ncdhw_winograd_without_weight_transform.cuda")
def schedule_conv3d_ncdhw_winograd_without_weight_transform(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv3d_ncdhw_winograd_without_depth" in op.tag:
schedule_winograd_no_depth_cuda(cfg, s, op.output(0), pre_computed=True)
elif "conv3d_ncdhw_winograd" in op.tag:
schedule_winograd_cuda(cfg, s, op.output(0), pre_computed=True)
traverse_inline(s, outs[0].op, _callback)
return s
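# --- Hedged usage sketch (illustration only, not part of the upstream file) ---
# A minimal direct invocation of the winograd compute/schedule pair registered
# above, following the topi unit-test pattern. The shapes, dtypes and helper
# name are assumptions; a 3x3x3 kernel with unit stride and dilation takes the
# depth-transforming winograd_cuda path. Without tuning records the autotvm
# dispatcher falls back to a default config.
def _example_conv3d_ncdhw_winograd():
    data = te.placeholder((1, 16, 16, 16, 16), name="data", dtype="float32")
    kernel = te.placeholder((32, 16, 3, 3, 3), name="kernel", dtype="float32")
    with tvm.target.Target("cuda"):
        out = conv3d_ncdhw_winograd(data, kernel, 1, 1, 1, 1, "float32")
        sch = schedule_conv3d_ncdhw_winograd([out])
    return sch, [data, kernel, out]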
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/correlation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Correlation operators on CUDA"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("correlation_nchw.cuda")
def correlation_nchw(
cfg, data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
):
"""Correlation operator in NCHW layout.
Parameters
----------
data1 : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
data2 : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
kernel_size: int
Kernel size for correlation, must be an odd number
max_displacement: int
Max displacement of Correlation
stride1: int
Stride for data1
stride2: int
        Stride for data2 within the neighborhood centered around data1
padding : int or a list/tuple of 2 or 4 ints
Padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    is_multiply: bool
        Operation type is either multiplication or subtraction
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
# pylint: disable=unused-argument
return nn.correlation_nchw(
data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
)
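# Hedged illustration (assumed values): under `with tvm.target.Target("cuda")`
# the autotvm wrapper supplies `cfg`, so a caller would invoke this as, e.g.,
#   correlation_nchw(data1, data2, 1, 4, 1, 1, 4, True)
# i.e. a 1x1 kernel, max displacement 4, unit strides, padding 4 and the
# multiplicative correlation variant.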
def _schedule_correlation_nchw(cfg, s, correlation):
"""Schedule correlation_nchw direct template"""
# pylint: disable=invalid-name
##### space definition begin #####
n, f, y, x = s[correlation].op.axis
rc, ry, rx = s[correlation].op.reduce_axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
##### space definition end #####
padded_data1, padded_data2 = s[correlation].op.input_tensors
s[padded_data1].compute_inline()
s[padded_data2].compute_inline()
# create cache stage
s[correlation].set_scope("local")
AA = s.cache_read(padded_data1, "shared", [correlation])
BB = s.cache_read(padded_data2, "shared", [correlation])
output = s.outputs[0].output(0)
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
s[correlation].compute_at(s[output], tx)
# tile reduction axes
n, f, y, x = s[correlation].op.axis
rc, ry, rx = s[correlation].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, correlation, rc)
ryo, ryi = cfg["tile_ry"].apply(s, correlation, ry)
rxo, rxi = cfg["tile_rx"].apply(s, correlation, rx)
s[correlation].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
s[AA].compute_at(s[correlation], rxo)
s[BB].compute_at(s[correlation], rxo)
# cooperative fetching
for load in [AA, BB]:
n, f, y, x = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
@autotvm.register_topi_schedule("correlation_nchw.cuda")
def schedule_correlation_nchw(cfg, outs):
"""schedule of correlation_nchw for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of correlation
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for correlation.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "correlation_nchw":
_schedule_correlation_nchw(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/deformable_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Schedule template of deformable conv2d with cuda backend"""
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import traverse_inline
@autotvm.register_topi_compute("deformable_conv2d_nchw.cuda")
def deformable_conv2d_nchw(
cfg, data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
):
"""Deformable Conv2d."""
return nn.deformable_conv2d_nchw(
data, offset, kernel, strides, padding, dilation, deformable_groups, groups, out_dtype
)
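# Hedged illustration (assumed shapes): for data (1, 32, 64, 64), a 3x3 kernel
# (64, 32, 3, 3), unit stride/dilation, padding 1 and deformable_groups = 1,
# the offset tensor is expected to have shape
# (1, deformable_groups * 3 * 3 * 2, 64, 64) = (1, 18, 64, 64),
# i.e. one (dy, dx) offset pair per deformable group and kernel position.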
@autotvm.register_topi_schedule("deformable_conv2d_nchw.cuda")
def schedule_deformable_conv2d_nchw(cfg, outs):
"""TOPI schedule callback of deformable conv2d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "deformable_conv2d_nchw":
_schedule_direct_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_direct_cuda(cfg, s, conv):
"""Schedule template of deformable conv2d"""
n, f, y, x = s[conv].op.axis
rc, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
data_deform, kernel = s[conv].op.input_tensors
s[data_deform].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AA = s.cache_read(data_deform, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, f, y, x = s[OL].op.axis
rc, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
cfg.define_reorder("reorder_inner", [rco, ryo, rxo], "all")
cfg["reorder_inner"].apply(s, OL, [rco, ryo, rxo])
cfg["reorder_inner"].apply(s, OL, [rci, ryi, rxi])
cache_loc = [rco, ryo, rxo][cfg["reorder_inner"].perm[-1]]
s[AA].compute_at(s[OL], cache_loc)
s[WW].compute_at(s[OL], cache_loc)
# cooperative fetching
for load in [AA, WW]:
fused = s[load].fuse(*s[load].op.axis)
tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
import logging
import tvm
from tvm import te
import tvm.autotvm as autotvm
from tvm.contrib import cublas
from .tensor_intrin import dp4a
from .. import tag
from .. import generic
from ..utils import traverse_inline, get_const_tuple
logger = logging.getLogger("topi")
def _matmul_cublas_common(
cfg,
tensor_a,
tensor_b,
bias=None,
out_dtype=None,
transpose_a=False,
transpose_b=False,
):
assert len(tensor_a.shape) == 2 and len(tensor_b.shape) == 2, "only support 2-dim matmul"
if bias is not None:
assert len(bias.shape) == 1
if out_dtype is None:
out_dtype = tensor_a.dtype
if out_dtype not in [tensor_a.dtype, "int32"]:
assert out_dtype == tensor_a.dtype, "Mixed precision other than int8 + int32 not supported."
batch, in_dim = get_const_tuple(tensor_a.shape)
out_dim, _ = get_const_tuple(tensor_b.shape)
matmul = cublas.matmul(tensor_a, tensor_b, transpose_a, transpose_b, dtype=out_dtype)
if all(isinstance(d, int) for d in [batch, in_dim, out_dim]):
cfg.add_flop(batch * in_dim * out_dim * 2)
if bias is not None:
matmul = te.compute(
(batch, out_dim), lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST
)
return matmul
@autotvm.register_topi_compute("matmul_cublas.cuda")
def matmul_cublas(
cfg,
tensor_a,
tensor_b,
bias=None,
out_dtype=None,
transpose_a=False,
transpose_b=False,
):
"""Matmul operator on CUDA with CUBLAS"""
return _matmul_cublas_common(cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b)
@autotvm.register_topi_schedule("matmul_cublas.cuda")
def schedule_matmul_cublas(_, outs):
"""Schedule matmul operator using CUBLAS"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_cublas.cuda")
def dense_cublas(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator on CUDA with CUBLAS. This is an alias of matmul_nt operator."""
return _matmul_cublas_common(cfg, data, weight, bias, out_dtype, False, True)
@autotvm.register_topi_schedule("dense_cublas.cuda")
def schedule_dense_cublas(_, outs):
"""Schedule dense operator using CUBLAS"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_int8.cuda")
def dense_int8(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator for int8 on CUDA"""
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
k = te.reduce_axis((0, in_dim), name="k")
matmul = te.compute(
(batch, out_dim),
lambda i, j: te.sum(
data[i, k].astype(out_dtype) * weight[j, k].astype(out_dtype), axis=[k]
),
tag="dense_int8",
)
cfg.add_flop(batch * in_dim * out_dim * 2)
if bias is not None:
matmul = te.compute(
(batch, out_dim),
lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
tag=tag.BROADCAST,
)
cfg.add_flop(batch * out_dim)
return matmul
@autotvm.register_topi_schedule("dense_int8.cuda")
def schedule_dense_int8(cfg, outs):
"""Dense schedule for int8 on CUDA"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_int8" in op.tag:
_schedule_dense_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_dense_int8(cfg, s, output):
data, weight = s[output].op.input_tensors
if len(weight.op.input_tensors) == 1 and weight.op.input_tensors[0] == data:
s[weight].compute_inline()
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
in_dim_factor = 4
    assert (
        in_dim % in_dim_factor == 0
    ), "Input dimension must be divisible by {}".format(in_dim_factor)
if in_dim % 16 == 0:
in_dim_factor = 16
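    # Hedged illustration (assumed values): in_dim = 256 satisfies
    # 256 % 16 == 0, so the shared-memory loads below vectorize over 16 int8
    # elements (one 128-bit transaction); in_dim = 12 still passes the
    # divisibility check but keeps the narrower factor of 4.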
# create tuning space
cfg.define_split("tile_y", batch, num_outputs=4)
cfg.define_split("tile_x", out_dim, num_outputs=4)
cfg.define_split("tile_k", in_dim // in_dim_factor, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
# create cache stage
AA = s.cache_read(data, "shared", [output])
WW = s.cache_read(weight, "shared", [output])
CC = s.cache_write(output, "local")
# handle bias
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0].output(0)
n, x = s[output].op.axis
# this is the scope to attach global config inside this kernel
kernel_scope, n = s[output].split(n, nparts=1)
ko = CC.op.reduce_axis[0]
ko, ki = s[CC].split(ko, factor=4)
ko, kt = cfg["tile_k"].apply(s, CC, ko)
target = tvm.target.Target.current(allow_none=False)
do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
if do_tensorize:
dtypes = (data.dtype, weight.dtype)
s[CC].tensorize(ki, dp4a("shared", "shared", "local", dtypes))
by, vy, ty, yi = cfg["tile_y"].apply(s, output, n)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(by, bx, vy, vx, ty, tx, yi, xi)
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
s[CC].compute_at(s[output], tx)
yo, xo = CC.op.axis[:2]
s[CC].reorder(ko, kt, yo, xo, ki)
for load in [AA, WW]:
s[load].compute_at(s[CC], ko)
outer, inner = s[load].split(s[load].op.axis[-1], factor=in_dim_factor)
s[load].vectorize(inner)
fused = s[load].op.axis[:-1] + [outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", False)
return s
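# --- Hedged usage sketch (illustration only, not part of the upstream file) ---
# Direct construction of the int8 dense compute and its schedule, following the
# topi unit-test pattern. The shapes, the explicit sm_80 arch and the helper
# name are assumptions; the dp4a tensorization above only applies when the
# current target reports integer dot-product support, and without tuning
# records the autotvm dispatcher falls back to a default config.
def _example_dense_int8():
    data = te.placeholder((16, 64), name="data", dtype="int8")
    weight = te.placeholder((32, 64), name="weight", dtype="int8")
    with tvm.target.Target("cuda -arch=sm_80"):
        out = dense_int8(data, weight, None, "int32")
        sch = schedule_dense_int8([out])
    return sch, [data, weight, out]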
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/dense_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Compute and Schedule definition for dense tensorcore with cuda backend"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
import tvm.autotvm as autotvm
from .. import tag
from ..utils import traverse_inline, get_const_tuple
from .tensor_intrin import (
intrin_wmma_load_matrix_A,
intrin_wmma_load_matrix_W,
intrin_wmma_store_matrix,
intrin_wmma_gemm,
)
@autotvm.register_topi_compute("dense_tensorcore.cuda")
def dense_tensorcore(cfg, data, weight, bias=None, out_dtype=None):
"""Dense tensorcore operator on CUDA"""
matmul = dense_tensorcore_cuda(data, weight, bias, out_dtype)
return matmul
@autotvm.register_topi_schedule("dense_tensorcore.cuda")
def schedule_dense_tensorcore(cfg, outs):
"""Schedule dense operator using Tensorcore"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "dense_tensorcore":
_schedule_dense_tensorcore(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def dense_tensorcore_cuda(data, weight, bias=None, out_dtype=None):
"""Dense tensorcore operator on CUDA"""
assert len(data.shape) == 2 and len(weight.shape) == 2, "only support 2-dim dense"
if bias is not None:
assert len(bias.shape) == 1
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
assert data.dtype == weight.dtype
assert data.dtype in ["float16", "int8", "uint8", "int4", "uint4"]
if data.dtype in ["float16", "int8", "uint8"]:
assert (
(batch % 8 == 0 and in_dim % 16 == 0 and out_dim % 32 == 0)
or (batch % 16 == 0 and in_dim % 16 == 0 and out_dim % 16 == 0)
or (batch % 32 == 0 and in_dim % 16 == 0 and out_dim % 8 == 0)
), (
"The shape of (batch, in_dim, out_dim) "
"must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
)
else:
assert (
batch % 8 == 0 and in_dim % 32 == 0 and out_dim % 8 == 0
), "The shape of (batch, in_dim, out_dim) must be multiple of (8, 32, 8)"
k = te.reduce_axis((0, in_dim), name="k")
matmul = te.compute(
(batch, out_dim),
lambda i, j: te.sum(data[i, k].astype(out_dtype) * weight[j, k].astype(out_dtype), axis=k),
name="T_dense",
tag="dense_tensorcore",
)
if bias is not None:
matmul = te.compute(
(batch, out_dim),
lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
tag=tag.BROADCAST,
)
return matmul
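# Usage sketch (illustrative only, not part of the library): building the tensorcore dense
# compute for a float16 workload whose shape satisfies the (16, 16, 16) constraint checked
# above. The shapes below are assumptions chosen for the example.
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((16, 64), name="data", dtype="float16")       # (batch, in_dim)
#     weight = te.placeholder((256, 64), name="weight", dtype="float16")  # (out_dim, in_dim)
#     out = dense_tensorcore_cuda(data, weight, out_dtype="float32")
#     # out has shape (16, 256); batch % 16 == 0, in_dim % 16 == 0 and out_dim % 16 == 0.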
def _schedule_dense_tensorcore(cfg, s, C):
"""Schedule dense operator using Tensorcore"""
A, B = s[C].op.input_tensors
if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
s[B].compute_inline()
batch, out_dim = get_const_tuple(C.shape)
data_dtype = A.dtype
out_dtype = C.dtype
# Explicit memory access
AS = s.cache_read(A, "shared", [C])
BS = s.cache_read(B, "shared", [C])
AF = s.cache_read(AS, "wmma.matrix_a", [C])
BF = s.cache_read(BS, "wmma.matrix_b", [C])
CF = s.cache_write(C, "wmma.accumulator")
CS = s.cache_read(CF, "shared", [C])
# fallback support
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "dense_tensorcore.cuda"
)
cfg.fallback_with_reference_log(ref_log)
# Deal with op fusion, such as bias and relu
if C.op not in s.outputs:
s[C].compute_inline()
C = s.outputs[0].output(0)
# create tuning space
cfg.define_knob("block_row_warps", [1, 2, 4])
cfg.define_knob("block_col_warps", [1, 2, 4])
cfg.define_knob("warp_row_tiles", [1, 2, 4])
cfg.define_knob("warp_col_tiles", [1, 2, 4])
cfg.define_knob("chunk", [1, 2, 4, 8])
cfg.define_knob("offset", [0, 8])
cfg.define_knob("offsetCS", [0, 8])
cfg.define_knob("vec", [1, 2, 4, 8])
if data_dtype in ["float16", "int8", "uint8"]:
# Ensure that the default parameters are applicable when autotvm is not in use
if batch % 32 == 0 and out_dim % 8 == 0:
cfg.define_knob("wmma_m", [32, 16, 8])
elif batch % 16 == 0 and out_dim % 16 == 0:
cfg.define_knob("wmma_m", [16, 8, 32])
elif batch % 8 == 0 and out_dim % 32 == 0:
cfg.define_knob("wmma_m", [8, 16, 32])
wmma_k = 16
wmma_m = cfg["wmma_m"].val
if wmma_m == 16:
wmma_n = 16
elif wmma_m == 8:
wmma_n = 32
elif wmma_m == 32:
wmma_n = 8
elif data_dtype in ["int4", "uint4"]:
wmma_m = wmma_n = 8
wmma_k = 32
else:
raise ValueError("data dtype %s is not yet supported" % data_dtype)
warp_size = 32
block_row_warps = cfg["block_row_warps"].val
block_col_warps = cfg["block_col_warps"].val
warp_row_tiles = cfg["warp_row_tiles"].val
warp_col_tiles = cfg["warp_col_tiles"].val
chunk = cfg["chunk"].val
offset = cfg["offset"].val
offsetCS = cfg["offsetCS"].val
vec = cfg["vec"].val
# Define the stride of intrin functions
AS_align = chunk * wmma_k + offset
BS_align = chunk * wmma_k + offset
CS_align = warp_col_tiles * block_col_warps * wmma_n + offsetCS
AS_stride = [AS_align, 1]
BS_stride = [BS_align, 1]
AF_stride = [wmma_k, 1]
BF_stride = [wmma_k, 1]
CF_stride = [warp_col_tiles * wmma_n, 1]
CS_stride = [CS_align, 1]
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Schedule for dense computation
block_factor_b = wmma_m * warp_row_tiles * block_row_warps
block_factor_o = wmma_n * warp_col_tiles * block_col_warps
b, o = C.op.axis
block_i, bc = s[C].split(b, factor=block_factor_b)
block_j, oc = s[C].split(o, factor=block_factor_o)
s[C].reorder(block_i, block_j, bc, oc)
t = s[C].fuse(bc, oc)
t, vi = s[C].split(t, factor=vec)
t, tx = s[C].split(t, factor=warp_size)
t, ty = s[C].split(t, factor=block_row_warps)
t, tz = s[C].split(t, factor=block_col_warps)
s[C].bind(block_i, block_x)
s[C].bind(block_j, block_y)
s[C].bind(tz, thread_z)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].vectorize(vi)
# Schedule for wmma store
s[CS].compute_at(s[C], block_j)
bb, oo = CS.op.axis
s[CS].storage_align(bb, CS_align - 1, CS_align)
bb, bbi = s[CS].split(bb, factor=wmma_m)
oo, ooi = s[CS].split(oo, factor=wmma_n)
bb, bbii = s[CS].split(bb, factor=warp_row_tiles)
oo, ooii = s[CS].split(oo, factor=warp_col_tiles)
s[CS].reorder(bb, oo, bbii, ooii, bbi, ooi)
s[CS].bind(bb, thread_y)
s[CS].bind(oo, thread_z)
# Schedule for wmma computation
s[CF].compute_at(s[CS], oo)
warp_i, warp_j = CF.op.axis
warp_i, _ii = s[CF].split(warp_i, factor=wmma_m)
warp_j, _jj = s[CF].split(warp_j, factor=wmma_n)
(k,) = CF.op.reduce_axis
k, _k = s[CF].split(k, factor=wmma_k)
ko, ki = s[CF].split(k, factor=chunk)
s[CF].reorder(ko, ki, warp_i, warp_j, _ii, _jj, _k)
# Schedule for wmma_matrix_a load
s[AF].compute_at(s[CF], ki)
b, i = AF.op.axis
b, b_ii = s[AF].split(b, factor=wmma_m)
i, i_jj = s[AF].split(i, factor=wmma_k)
s[AF].reorder(b, i, b_ii, i_jj)
# Schedule for wmma_matrix_b load
s[BF].compute_at(s[CF], ki)
o, i = BF.op.axis
o, o_ii = s[BF].split(o, factor=wmma_n)
i, i_ii = s[BF].split(i, factor=wmma_k)
s[BF].reorder(o, i, o_ii, i_ii)
# Schedule for A's(B's) shared memory load
def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
t = s[stage].fuse(xo, yo)
t, vi = s[stage].split(t, factor=vec)
t, tx = s[stage].split(t, factor=warp_size)
t, ty = s[stage].split(t, factor=block_row_warps)
_, tz = s[stage].split(t, factor=block_col_warps)
s[stage].bind(ty, thread_y)
s[stage].bind(tz, thread_z)
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)
shared_schedule(AS, AS_align)
shared_schedule(BS, BS_align)
shape = (wmma_m, wmma_n, wmma_k)
AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
BL_gemm = te.placeholder((wmma_n, wmma_k), name="BL_gemm", dtype=data_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name="k_gemm")
CL_compute = te.compute(
(wmma_m, wmma_n),
lambda ii, jj: te.sum(
AL_gemm[ii, k_gemm].astype(out_dtype) * BL_gemm[jj, k_gemm].astype(out_dtype),
axis=k_gemm,
),
name="CL_compute",
)
# lower the computation loops down to TensorCore hardware intrinsics
# by mapping the dense tensorcore to tensor intrinsics
s[AF].tensorize(
b_ii,
intrin_wmma_load_matrix_A(
AF_stride, AS_stride, shape, "row_major", (wmma_m, wmma_k), (wmma_m, wmma_k), data_dtype
),
)
s[BF].tensorize(
o_ii,
intrin_wmma_load_matrix_W(
BF_stride, BS_stride, shape, "col_major", (wmma_n, wmma_k), (wmma_n, wmma_k), data_dtype
),
)
s[CF].tensorize(
_ii, intrin_wmma_gemm(AL_gemm, BL_gemm, CL_compute, AF_stride, BF_stride, CF_stride, shape)
)
s[CS].tensorize(
bbi,
intrin_wmma_store_matrix(
CS_stride, CF_stride, shape, out_dtype, (wmma_m, wmma_n), (wmma_m, wmma_n)
),
)
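# End-to-end sketch (illustrative; assumes TVM is built with CUDA and that the fallback
# config loaded from tophub is acceptable). It drives the registered compute/schedule pair
# above under a plain "cuda" target:
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((16, 64), name="data", dtype="float16")
#     weight = te.placeholder((256, 64), name="weight", dtype="float16")
#     with tvm.target.Target("cuda"):
#         out = dense_tensorcore(data, weight, None, "float32")
#         s = schedule_dense_tensorcore([out])
#         func = tvm.build(s, [data, weight, out], "cuda")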
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for depthwise_conv2d with auto fusion"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import traverse_inline
from .. import tag
from .. import nn
# register original implementation of depthwise_conv2d_nchw since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nchw.cuda")
def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise_conv2d with NCHW layout."""
return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.cuda")
def schedule_depthwise_conv2d_nchw(cfg, outs):
"""Schedule for depthwise_conv2d nchw forward.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nchw.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "depthwise_conv2d_nchw":
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
conv = op.output(0)
##### space definition begin #####
n, f, y, x = s[conv].op.axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_knob("auto_unroll_max_step", [0, 256, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
target.kind.name, target.model, "depthwise_conv2d_nchw.cuda"
)
cfg.fallback_with_reference_log(ref_log)
# TODO(lmzheng): A bug here, set unroll_explicit to False as workaround
cfg["unroll_explicit"].val = 0
##### space definition end #####
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AA = s.cache_read(pad_data, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
AL = s.cache_read(AA, "local", [OL])
WL = s.cache_read(WW, "local", [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
kernel_scope, n = s[output].split(n, nparts=1)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
s[OL].compute_at(s[output], tx)
# cooperative fetching
s[AA].compute_at(s[output], bx)
s[WW].compute_at(s[output], bx)
s[AL].compute_at(s[output], tx)
s[WL].compute_at(s[output], tx)
for load in [AA, WW]:
fused = s[load].fuse(*list(s[load].op.axis))
fused, tx = s[load].split(fused, cfg["tile_x"].size[2])
fused, ty = s[load].split(fused, cfg["tile_y"].size[2])
fused, tz = s[load].split(fused, cfg["tile_f"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
traverse_inline(s, outs[0].op, _callback)
return s
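# Usage sketch (illustrative; the shapes and the bare "cuda" target are assumptions):
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((1, 32, 56, 56), name="data")      # NCHW
#     kernel = te.placeholder((32, 1, 3, 3), name="kernel")    # (channel, multiplier, KH, KW)
#     with tvm.target.Target("cuda"):
#         conv = depthwise_conv2d_nchw(
#             data, kernel, strides=(1, 1), padding=(1, 1), dilation=(1, 1), out_dtype="float32"
#         )
#         s = schedule_depthwise_conv2d_nchw([conv])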
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d nhwc forward.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nhwc.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(temp, Filter, DepthwiseConv2d):
s[temp].compute_inline()
FS = s.cache_read(Filter, "shared", [DepthwiseConv2d])
if DepthwiseConv2d.op in s.outputs:
Output = DepthwiseConv2d
CL = s.cache_write(DepthwiseConv2d, "local")
else:
Output = outs[0].op.output(0)
s[DepthwiseConv2d].set_scope("local")
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
b, h, w, c = s[Output].op.axis
        # make sure the amount of parallelism does not exceed the target's maximum thread count
num_thread = min(
tvm.arith.Analyzer().simplify(temp.shape[3]).value,
tvm.target.Target.current().max_num_threads,
)
xoc, xic = s[Output].split(c, factor=num_thread)
s[Output].reorder(xoc, b, h, w, xic)
xo, yo, _, _ = s[Output].tile(h, w, x_factor=2, y_factor=2)
fused = s[Output].fuse(yo, xo)
fused = s[Output].fuse(fused, b)
fused = s[Output].fuse(fused, xoc)
s[Output].bind(fused, block_x)
s[Output].bind(xic, thread_x)
if DepthwiseConv2d.op in s.outputs:
s[CL].compute_at(s[Output], xic)
else:
s[DepthwiseConv2d].compute_at(s[Output], xic)
_, _, ci, fi = s[FS].op.axis
s[FS].compute_at(s[Output], fused)
fused = s[FS].fuse(fi, ci)
s[FS].bind(fused, thread_x)
scheduled_ops = []
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule depthwise_conv2d
if OP.tag == "depthwise_conv2d_nhwc":
PaddedInput = OP.input_tensors[0]
Filter = OP.input_tensors[1]
if isinstance(Filter.op, tvm.te.ComputeOp) and "dilate" in Filter.op.tag:
s[Filter].compute_inline()
DepthwiseConv2d = OP.output(0)
_schedule(PaddedInput, Filter, DepthwiseConv2d)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
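# Usage sketch for the NHWC path (illustrative; this schedule is not an autotvm template, so
# it is applied directly to the generic topi.nn compute; the (KH, KW, channel, multiplier)
# filter layout shown here is an assumption taken from the topi.nn.depthwise_conv2d_nhwc
# docstring):
#
#     import tvm
#     from tvm import te, topi
#
#     data = te.placeholder((1, 56, 56, 32), name="data")      # NHWC
#     kernel = te.placeholder((3, 3, 32, 1), name="kernel")    # (KH, KW, channel, multiplier)
#     conv = topi.nn.depthwise_conv2d_nhwc(data, kernel, (1, 1), (1, 1), (1, 1))
#     with tvm.target.Target("cuda"):
#         s = schedule_depthwise_conv2d_nhwc([conv])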
def schedule_depthwise_conv2d_backward_input_nhwc(outs):
"""Schedule for depthwise_conv2d nhwc backward wrt input.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
backward wrt input in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d backward
wrt input with layout nhwc.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(Padded_out_grad, In_grad):
s[Padded_out_grad].compute_inline()
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
_, h, w, c = In_grad.op.axis
fused_hwc = s[In_grad].fuse(h, w, c)
xoc, xic = s[In_grad].split(fused_hwc, factor=128)
s[In_grad].bind(xoc, block_x)
s[In_grad].bind(xic, thread_x)
def traverse(OP):
# inline all one-to-one-mapping operators except the last stage (output)
if OP.tag == "depthwise_conv2d_backward_input_nhwc":
Padded_out_grad = OP.input_tensors[0]
Dilated_out_grad = Padded_out_grad.op.input_tensors[0]
s[Dilated_out_grad].compute_inline()
In_grad = OP.output(0)
_schedule(Padded_out_grad, In_grad)
else:
raise ValueError("Depthwise conv backward wrt input for non-NHWC is not supported.")
traverse(outs[0].op)
return s
def schedule_depthwise_conv2d_backward_weight_nhwc(outs):
"""Schedule for depthwise_conv2d nhwc backward wrt weight.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
backward wrt weight in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d backward
wrt weight with layout nhwc.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(Weight_grad):
block_x = te.thread_axis("blockIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_x = te.thread_axis("threadIdx.x")
db, dh, dw = Weight_grad.op.reduce_axis
fused_dbdhdw = s[Weight_grad].fuse(db, dh, dw)
_, ki = s[Weight_grad].split(fused_dbdhdw, factor=8)
BF = s.rfactor(Weight_grad, ki)
fused_fwcm = s[Weight_grad].fuse(*s[Weight_grad].op.axis)
xo, xi = s[Weight_grad].split(fused_fwcm, factor=32)
s[Weight_grad].bind(xi, thread_x)
s[Weight_grad].bind(xo, block_x)
s[Weight_grad].bind(s[Weight_grad].op.reduce_axis[0], thread_y)
s[BF].compute_at(s[Weight_grad], s[Weight_grad].op.reduce_axis[0])
def traverse(OP):
# inline all one-to-one-mapping operators except the last stage (output)
if OP.tag == "depthwise_conv2d_backward_weight_nhwc":
Padded_in = OP.input_tensors[1]
s[Padded_in].compute_inline()
Weight_grad = OP.output(0)
_schedule(Weight_grad)
else:
raise ValueError("Depthwise conv backward wrt weight for non-NHWC is not supported.")
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/group_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=no-value-for-parameter
"""The template for cuda group_conv2d_nchw"""
import tvm
from tvm import te
from tvm import autotvm
from .injective import schedule_injective_from_existing
from .tensor_intrin import dp4a
from ..nn.pad import pad
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.utils import get_pad_tuple
from ..utils import traverse_inline, get_const_tuple, get_const_int
from .. import nn
def group_conv2d_nchw_int8(data, kernel, strides, padding, dilation, groups, out_dtype="float32"):
"""Compute group_conv2d internally using group_conv2d_nchwc layout for int8 dtype"""
assert data.dtype in ("int8", "uint8")
assert kernel.dtype in ("int8", "uint8")
assert data.dtype == kernel.dtype
packed_out = group_conv2d_NCHWc_int8(
data, kernel, strides, padding, dilation, groups, out_dtype
)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
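# Usage sketch (illustrative; the shapes are assumptions chosen to satisfy the packing
# constraints checked in group_conv2d_NCHWc_int8 below):
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((1, 32, 56, 56), name="data", dtype="int8")    # NCHW
#     kernel = te.placeholder((64, 8, 3, 3), name="kernel", dtype="int8")  # groups = 32 // 8 = 4
#     with tvm.target.Target("cuda"):
#         out = group_conv2d_nchw_int8(data, kernel, (1, 1), (1, 1), (1, 1), groups=4)
#     # "out" is unpacked back to plain NCHW layout, with float32 dtype by default.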
def schedule_group_conv2d_nchw_int8(outs):
"""Create schedule for tensors"""
return schedule_group_conv2d_NCHWc_int8(outs)
@autotvm.register_topi_compute("group_conv2d_nchw.cuda")
def group_conv2d_nchw(_, data, kernel, stride, padding, dilation, groups, out_dtype="float32"):
return nn.group_conv2d_nchw(data, kernel, stride, padding, dilation, groups, out_dtype)
@autotvm.register_topi_schedule("group_conv2d_nchw.cuda")
def schedule_group_conv2d_nchw(cfg, outs):
"""TOPI schedule callback of group conv2d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for group conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "group_conv2d_nchw":
_schedule_group_conv2d_nchw_direct(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_group_conv2d_nchw_direct(cfg, s, conv):
"""Schedule group conv2d NCHW direct template"""
workload = conv.op.attrs["workload"]
groups = get_const_int(workload[6])
num_filters = get_const_int(conv.shape[1])
##### space definition begin #####
n, f, y, x = s[conv].op.axis
rc, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_n", n, num_outputs=4)
cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2)
cfg.define_split("tile_f", cfg.axis(num_filters // groups), num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
pad_data, kernel = s[conv].op.input_tensors
s[pad_data].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AA = s.cache_read(pad_data, "shared", [OL])
WW = s.cache_read(kernel, "shared", [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
g, f = s[output].split(f, nparts=groups)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bg, vg = cfg["tile_g"].apply(s, output, g)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(s[output].fuse(bg, bf), te.thread_axis("blockIdx.y"))
s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vg, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf
if cfg["fuse_yx"].val:
s[output].bind(tn, te.thread_axis("threadIdx.z"))
s[output].bind(tf, te.thread_axis("threadIdx.y"))
tyx = s[output].fuse(ty, tx)
s[output].bind(tyx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tyx)
# number of threads
n_tz = cfg["tile_n"].size[2]
n_ty = cfg["tile_f"].size[2]
n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
else:
s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[OL].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile reduction axes
n, f, y, x = s[OL].op.axis
rc, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rc"].apply(s, OL, rc)
ryo, ryi = cfg["tile_rx"].apply(s, OL, ry)
rxo, rxi = cfg["tile_ry"].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
n, f, y, x = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
fused, tz = s[load].split(fused, factor=n_tz)
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
N, CO, OH, OW = get_const_tuple(output.shape)
_, CI_div_groups, KH, KW = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI_div_groups * KH * KW)
@autotvm.register_topi_compute("group_conv2d_NCHWc_int8.cuda")
def group_conv2d_NCHWc_int8(
cfg, data, kernel, stride, padding, dilation, groups, out_dtype="float32"
):
"""Group convolution operator for 'group_conv2d_NCHWc_int8'.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width] or
5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
kernel : tvm.te.Tensor
4-D with shape [num_filter, in_channel // groups, filter_height, filter_width] or
6-D with shape [num_filter_chunk, in_channel_chunk // groups, filter_height,
filter_width, num_filter_block, in_channel_block]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
dilation : int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
groups : int
number of groups
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_height, out_width, out_channel_block]
"""
ic_block_factor = 4
oc_block_factor = 4
pre_computed = len(kernel.shape) == 6
if not pre_computed:
batch, channels, height, width = get_const_tuple(data.shape)
out_channels, in_channels, kernel_h, kernel_w = get_const_tuple(kernel.shape)
assert channels % groups == 0, "input channels must divide group size"
assert out_channels % groups == 0, "output channels must divide group size"
        assert (
            channels % ic_block_factor == 0
        ), "Number of input channels must be divisible by {}".format(ic_block_factor)
        assert (
            out_channels % oc_block_factor == 0
        ), "Number of output channels must be divisible by {}".format(oc_block_factor)
packed_data = te.compute(
(batch, channels // ic_block_factor, height, width, ic_block_factor),
lambda n, c, h, w, vc: data[n, c * ic_block_factor + vc, h, w],
name="packed_data",
)
packed_kernel = te.compute(
(
out_channels // oc_block_factor,
in_channels // ic_block_factor,
kernel_h,
kernel_w,
oc_block_factor,
ic_block_factor,
),
lambda oc_chunk, ic_chunk, kh, kw, oc_block, ic_block: kernel[
oc_chunk * oc_block_factor + oc_block, ic_chunk * ic_block_factor + ic_block, kh, kw
],
name="packed_kernel",
)
else:
packed_data = data
packed_kernel = kernel
batch, ic_chunk, in_height, in_width, _ = get_const_tuple(packed_data.shape)
oc_chunk, _, kernel_h, kernel_w, oc_block, ic_block = get_const_tuple(packed_kernel.shape)
# TODO(kumasento): these assertions ensure that the number of groups
# should be smaller or equal to the number of blocks, so that each
# group will have at least one block.
# Shall we pad the channels to avoid raising assertions?
    assert (
        groups <= oc_chunk
    ), "Number of groups {} should not exceed " "output channel chunk size {}".format(
        groups, oc_chunk
    )
    assert (
        groups <= ic_chunk
    ), "Number of groups {} should not exceed " "input channel chunk size {}".format(
        groups, ic_chunk
    )
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
# pad the input data
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
pad_data = pad(packed_data, pad_before, pad_after, name="pad_data")
# compute the output shape
out_height = (in_height - (kernel_h - 1) * dilation_h - 1 + pad_top + pad_down) // stride_h + 1
out_width = (in_width - (kernel_w - 1) * dilation_w - 1 + pad_left + pad_right) // stride_w + 1
oshape = (batch, oc_chunk, out_height, out_width, oc_block)
icc = te.reduce_axis((0, ic_chunk // groups), name="ic_chunk")
icb = te.reduce_axis((0, ic_block_factor), name="ic_block")
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
# NOTE(kumasento): explanation of this snippet -
# oc_chunk//groups and ic_chunk//groups give you the number of blocks,
# i.e., chunk, per group.
# occ is the ID of the output channel block, so that occ//(oc_chunk//groups)
# produces the ID of the group.
    # Multiplying that result by ic_chunk//groups gives the ID
    # of the first block of the corresponding input group.
# Adding the block offset (icc) will give you the exact block ID.
#
# Compared with a normal convolution, group convolution only sums
# input channels from the group that an output channel resides in.
conv = te.compute(
oshape,
lambda n, occ, oh, ow, ocb: te.sum(
pad_data[
n,
occ // (oc_chunk // groups) * (ic_chunk // groups) + icc,
oh * stride_h + kh * dilation_h,
ow * stride_w + kw * dilation_w,
icb,
].astype("int32")
* packed_kernel[occ, icc, kh, kw, ocb, icb].astype("int32"),
axis=[icc, kh, kw, icb],
),
)
# Type conversion
output = te.compute(
oshape, lambda *index: conv(*index).astype(out_dtype), tag="group_conv2d_NCHWc_int8"
)
num_flop = (
batch
* oc_chunk
* oc_block
* out_height
* out_width
* ic_chunk
* ic_block
* kernel_h
* kernel_w
* 2
// groups
)
cfg.add_flop(num_flop)
return output
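# Layout sketch (illustrative numbers): with data (1, 32, 56, 56), kernel (64, 8, 3, 3),
# groups=4 and ic/oc block factors of 4, the packing above produces
#     packed_data:   (1, 32 // 4, 56, 56, 4) = (1, 8, 56, 56, 4)        # NCHW4c
#     packed_kernel: (64 // 4, 8 // 4, 3, 3, 4, 4) = (16, 2, 3, 3, 4, 4)
# so ic_chunk = 8 and oc_chunk = 16, and the reduction axis icc runs over
# ic_chunk // groups = 2, matching the packed kernel. For an output chunk occ, the group id
# is occ // (oc_chunk // groups) and that group's first input chunk is
# group_id * (ic_chunk // groups), which is exactly the index expression in the te.compute
# above.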
@autotvm.register_topi_schedule("group_conv2d_NCHWc_int8.cuda")
def schedule_group_conv2d_NCHWc_int8(cfg, outs):
"""TOPI schedule callback of group conv2d for cuda gpu
Parameters
----------
cfg: ConfigEntity
The config for this template
outs: Array of Tensor
The computation graph description of conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for group conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "group_conv2d_NCHWc_int8":
_schedule_group_conv2d_NCHWc_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_group_conv2d_NCHWc_int8(cfg, s, output):
"""Schedule group conv2d int8 NCHWc template"""
workload = output.op.attrs["workload"]
groups = get_const_int(workload[6])
conv = output.op.input_tensors[0]
packed_data, packed_kernel = conv.op.input_tensors
if isinstance(packed_data.op, tvm.te.ComputeOp) and "pad" in packed_data.op.tag:
pad_data = packed_data
packed_data = pad_data.op.input_tensors[0]
else:
pad_data = packed_data
if autotvm.GLOBAL_SCOPE.in_tuning:
# skip this part during tuning to make records accurate
# this part will be pre-computed during NNVM's pre-compute optimization pass
s[packed_data].pragma(s[packed_data].op.axis[0], "debug_skip_region")
s[packed_kernel].pragma(s[packed_kernel].op.axis[0], "debug_skip_region")
else:
if isinstance(packed_kernel.op, tvm.te.ComputeOp) and packed_kernel.name == "packed_kernel":
# data and kernel are not pre-computed, schedule layout transform here
schedule_injective_from_existing(s, packed_data)
schedule_injective_from_existing(s, packed_kernel)
if pad_data != packed_data:
s[pad_data].compute_inline()
# create cache stage
AA = s.cache_read(pad_data, "shared", [conv])
WW = s.cache_read(packed_kernel, "shared", [conv])
s[conv].set_scope("local")
# handle bias
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0].output(0)
oc_chunk = get_const_int(output.shape[1])
# tile and bind spatial axes
if len(s[output].op.axis) == 5:
n, f, y, x, c = s[output].op.axis
else:
        # For task extraction during auto-tuning, the expected output is 4D. Since auto-tuning
        # tasks are created from scratch, the real auto-tuning will still happen on the 5D output.
n, f, y, x = s[output].op.axis
cfg.define_split("tile_n", n, num_outputs=4)
cfg.define_split("tile_g", cfg.axis(groups), num_outputs=2)
cfg.define_split("tile_f", cfg.axis(oc_chunk // groups), num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
# this is the scope to attach global config inside this kernel
kernel_scope, n = s[output].split(n, nparts=1)
g, f = s[output].split(f, nparts=groups)
bn, vn, tn, ni = cfg["tile_n"].apply(s, output, n)
bg, vg = cfg["tile_g"].apply(s, output, g)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(bn, bg, bf, by, bx, vn, vg, vf, vy, vx, tn, tf, ty, tx, ni, fi, yi, xi)
s[output].bind(bn, te.thread_axis("blockIdx.z"))
s[output].bind(s[output].fuse(bg, bf), te.thread_axis("blockIdx.y"))
s[output].bind(s[output].fuse(by, bx), te.thread_axis("blockIdx.x"))
s[output].bind(vn, te.thread_axis("vthread"))
s[output].bind(vg, te.thread_axis("vthread"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
cfg.define_knob("fuse_yx", [0, 1]) # fuse ty,tx or tn,tf
if cfg["fuse_yx"].val:
s[output].bind(tn, te.thread_axis("threadIdx.z"))
s[output].bind(tf, te.thread_axis("threadIdx.y"))
tyx = s[output].fuse(ty, tx)
s[output].bind(tyx, te.thread_axis("threadIdx.x"))
s[conv].compute_at(s[output], tyx)
# number of threads
n_tz = cfg["tile_n"].size[2]
n_ty = cfg["tile_f"].size[2]
n_tx = cfg["tile_y"].size[2] * cfg["tile_x"].size[2]
else:
        s[output].bind(s[output].fuse(tn, tf), te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[conv].compute_at(s[output], tx)
# number of threads
n_tz = cfg["tile_n"].size[2] * cfg["tile_f"].size[2]
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
# tile and bind reduction axes
n, f, y, x, c = s[conv].op.axis
rc, ry, rx, rc_block = s[conv].op.reduce_axis
cfg.define_split("tile_rc", cfg.axis(rc), num_outputs=2)
cfg.define_split("tile_ry", cfg.axis(ry), num_outputs=2)
cfg.define_split("tile_rx", cfg.axis(rx), num_outputs=2)
rco, rci = cfg["tile_rc"].apply(s, conv, rc)
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x, c, rc_block)
_, rc_block = s[conv].split(rc_block, factor=4)
target = tvm.target.Target.current(allow_none=False)
do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
if do_tensorize:
dtypes = (pad_data.dtype, packed_kernel.dtype)
s[conv].tensorize(rc_block, dp4a("shared", "shared", "local", dtypes))
s[AA].compute_at(s[conv], rxo)
s[WW].compute_at(s[conv], rxo)
# cooperative fetching
for load in [AA, WW]:
c = s[load].op.axis[-1]
c_outer, c = s[load].split(c, factor=4)
s[load].vectorize(c)
fused = s[load].op.axis[:-1] + [c_outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
fused, tz = s[load].split(fused, factor=n_tz)
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
# double buffer
cfg.define_knob("AA_double_buffer", [0, 1])
cfg.define_knob("WW_double_buffer", [0, 1])
if cfg["AA_double_buffer"].val:
s[AA].double_buffer()
if cfg["WW_double_buffer"].val:
s[WW].double_buffer()
# unroll
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", False)
return s
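# Usage sketch for the fp32 direct path (illustrative; the shapes and the bare "cuda" target
# are assumptions):
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((1, 32, 56, 56), name="data")
#     kernel = te.placeholder((64, 8, 3, 3), name="kernel")   # groups = 32 // 8 = 4
#     with tvm.target.Target("cuda"):
#         conv = group_conv2d_nchw(data, kernel, (1, 1), (1, 1), (1, 1), 4)
#         s = schedule_group_conv2d_nchw([conv])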
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for composition of injective operator"""
import numpy as np
import tvm
from tvm import te
from .. import utils
def schedule_injective_from_existing(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
def find_nearest_small_factor(num, target):
"""Find the nearest factor of the given number that is smaller than the target."""
for i in range(target, 0, -1):
if num % i == 0:
return i
# Unreachable because i=1 must hold.
return -1
fused = sch[out].fuse(*sch[out].op.axis)
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
max_block = 256
# Vectorize on fp16 data type to enable half2 for better memory bandwidth utilization.
vector_width = 2 if out.dtype == "float16" else 1
is_dynamic_output = False
for dim in out.shape:
if not isinstance(dim, tvm.tir.IntImm):
is_dynamic_output = True
break
out_len = utils.prod(out.shape)
try:
const_size = utils.get_const_int(out_len)
# Adjust block and thread to make sure they are dividable so that vectorize can be
# correctly applied.
if vector_width > 1 and const_size % vector_width == 0:
remain_total_size = const_size // vector_width
cand_sizes = []
for max_size in [num_thread, max_block]:
cand_sizes.append(
max_size
if remain_total_size % max_size == 0
else find_nearest_small_factor(remain_total_size, max_size)
)
remain_total_size //= cand_sizes[-1]
            # If the product of the candidate divisible sizes (block * thread) is too small,
            # performance may get worse even though half2 is enabled. Note that 0.7
            # is just a heuristic ratio and may not be optimal for all workloads.
if np.prod(cand_sizes) / (max_block * num_thread) >= 0.7:
num_thread, max_block = cand_sizes
need_block_split = const_size > max_block * num_thread * vector_width
except ValueError:
need_block_split = False
const_size = 0
if vector_width > 1:
fused, v = sch[out].split(fused, vector_width)
sch[out].vectorize(v)
if need_block_split:
xo, xi = sch[out].split(fused, factor=num_thread * max_block)
bx, tx = sch[out].split(xi, factor=num_thread)
sch[out].reorder(bx, tx, xo)
sch[out].bind(bx, te.thread_axis("blockIdx.x"))
sch[out].bind(tx, te.thread_axis("threadIdx.x"))
else:
# Use less threads for dynamic shape ops to avoid runtime error.
if is_dynamic_output:
num_thread //= 2
if const_size != 0 and const_size < num_thread:
bx, tx = sch[out].split(fused, factor=const_size)
else:
bx, tx = sch[out].split(fused, factor=num_thread)
sch[out].bind(tx, te.thread_axis("threadIdx.x"))
sch[out].bind(bx, te.thread_axis("blockIdx.x"))
return sch
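# Worked example of the split heuristic above (illustrative numbers): for a static float16
# output with 2**19 elements on a target whose max_num_threads is 1024,
#     vector_width = 2, remain_total_size = 2**18 = 262144
#     262144 % 1024 == 0 and then 256 % 256 == 0, so cand_sizes = [1024, 256]
#     np.prod(cand_sizes) / (256 * 1024) = 1.0 >= 0.7  ->  num_thread, max_block = 1024, 256
#     const_size (2**19) > 256 * 1024 * 2 is False      ->  no extra block split
# so the fused axis ends up as vectorize(2) x threadIdx.x(1024) x blockIdx.x(256).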
def schedule_injective(outs):
"""Schedule for injective op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
tvm.te.schedule.AutoInlineInjective(s)
for out in outs:
if not utils.is_empty_shape(out.shape):
schedule_injective_from_existing(s, out)
return s
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
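# Usage sketch (illustrative): scheduling a simple elementwise op with this generic schedule.
#
#     import tvm
#     from tvm import te
#
#     A = te.placeholder((1024, 1024), name="A", dtype="float16")
#     B = te.compute(A.shape, lambda i, j: A[i, j] + tvm.tir.const(1, "float16"), name="B")
#     with tvm.target.Target("cuda"):
#         s = schedule_injective([B])
#         mod = tvm.build(s, [A, B], "cuda")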
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/nms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison
# pylint: disable=bad-continuation, unused-argument
"""Non-maximum suppression operator"""
import tvm
from tvm import te
from tvm.contrib import nvcc
from tvm.contrib.thrust import can_use_thrust, can_use_rocthrust
from tvm.ir import register_intrin_lowering
from tvm.tir import if_then_else
from .sort import argsort, argsort_thrust
from .scan import exclusive_scan
from ..utils import ceil_div
from ..math import cast
from ..transform import reshape
from ..vision.nms_util import (
calculate_overlap,
binary_search,
collect_selected_indices,
collect_selected_indices_and_scores,
run_all_class_nms,
)
def cuda_atomic_add_rule(op):
if op.dtype == "float32":
return tvm.tir.call_pure_extern("float32", "atomicAdd", op.args[0], op.args[1])
if op.dtype == "float64":
return tvm.tir.call_pure_extern("float64", "atomicAdd", op.args[0], op.args[1])
if op.dtype == "int32":
return tvm.tir.call_pure_extern("int32", "atomicAdd", op.args[0], op.args[1])
raise RuntimeError("only support int32, float32 and float64")
def opencl_atomic_add_rule(op):
if op.dtype == "int32":
return tvm.tir.call_pure_extern("int32", "atomic_add", op.args[0], op.args[1])
raise RuntimeError("only support int32")
register_intrin_lowering("tir.atomic_add", target="cuda", f=cuda_atomic_add_rule, level=99)
register_intrin_lowering("tir.atomic_add", target="opencl", f=opencl_atomic_add_rule, level=99)
def atomic_add(x, y):
return tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
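# Usage sketch (illustrative): inside an ir_builder kernel, atomic_add lowers to atomicAdd
# through the intrinsic rules registered above. "counter" and "idx" are assumed to have been
# declared earlier in the kernel (e.g. counter = ib.buffer_ptr(counter_buf) and
# idx = ib.allocate("int32", (1,), name="idx", scope="local")):
#
#     idx[0] = atomic_add(
#         tvm.tir.call_intrin("handle", "tir.address_of", counter[0]),
#         tvm.tir.const(1, "int32"),
#     )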
def get_valid_boxes_ir(data, valid_boxes, score_threshold, id_index, score_index):
"""Low level IR to identify bounding boxes given a score threshold.
Parameters
----------
data : Buffer
Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
score_threshold : Buffer or float32
Lower limit of score for valid bounding boxes.
id_index : optional, int
index of the class categories, -1 to disable.
score_index: optional, int
Index of the scores/confidence of boxes.
Returns
-------
valid_boxes: Buffer
2D Buffer indicating valid boxes with shape [batch_size, num_anchors].
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
valid_boxes = ib.buffer_ptr(valid_boxes)
if isinstance(score_threshold, float):
score_threshold = tvm.tir.FloatImm("float32", score_threshold)
id_index = tvm.tir.IntImm("int32", id_index)
score_index = tvm.tir.IntImm("int32", score_index)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, max_threads)
nthread_by = batch_size
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
score = data[(i * num_anchors + j) * elem_length + score_index]
with ib.if_scope(
tvm.tir.all(
score > score_threshold,
tvm.tir.any(
id_index < 0, data[(i * num_anchors + j) * elem_length + id_index] >= 0
),
)
):
valid_boxes[i * num_anchors + j] = 1
with ib.else_scope():
valid_boxes[i * num_anchors + j] = 0
return ib.get()
def get_valid_counts_ir(data, valid_indices, valid_boxes, out, out_indices):
"""Low level IR to get valid count of bounding boxes
given a score threshold. Also prepares to move valid boxes to the
top of input data.
Parameters
----------
data : Buffer
Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length].
    valid_indices: Buffer
        2D Buffer holding the destination position of each valid box (the exclusive scan of
        valid_boxes) with shape [batch_size, num_anchors].
    valid_boxes: Buffer
        2D Buffer of flags indicating valid boxes with shape [batch_size, num_anchors].
    Returns
    -------
    out : Buffer
        Sorted valid boxes
    out_indices : Buffer
        Indices of valid boxes in original data
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
valid_indices = ib.buffer_ptr(valid_indices)
valid_boxes = ib.buffer_ptr(valid_boxes)
out = ib.buffer_ptr(out)
out_indices = ib.buffer_ptr(out_indices)
one = tvm.tir.const(1, dtype=out.dtype)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = num_anchors // max_threads + 1
nthread_by = batch_size
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
with ib.for_range(0, elem_length) as k:
out[(i * num_anchors + j) * elem_length + k] = -one
out_indices[i * num_anchors + j] = -1
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_anchors):
i = by
j = tid
with ib.if_scope(valid_boxes[i, tid] > 0):
with ib.for_range(0, elem_length) as k:
out[(i * num_anchors + valid_indices[i, tid]) * elem_length + k] = data[
(i * num_anchors + j) * elem_length + k
]
out_indices[i * num_anchors + valid_indices[i, tid]] = j
return ib.get()
def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1):
"""Get valid count of bounding boxes given a score threshold.
Also moves valid boxes to the top of input data.
Parameters
----------
data : tvm.te.Tensor
Input data. 3-D tensor with shape [batch_size, num_anchors, elem_length].
score_threshold : optional, tvm.te.Tensor or float
Lower limit of score for valid bounding boxes.
id_index : optional, int
index of the class categories, -1 to disable.
score_index: optional, int
Index of the scores/confidence of boxes.
Returns
-------
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes.
out_tensor : tvm.te.Tensor
Rearranged data tensor.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
valid_boxes_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "valid_boxes_buf", data_alignment=8
)
valid_boxes = te.extern(
[(batch_size, num_anchors)],
[data],
lambda ins, outs: get_valid_boxes_ir(
ins[0], outs[0], score_threshold, id_index, score_index
),
dtype=["int32"],
in_buffers=[data_buf],
out_buffers=[valid_boxes_buf],
name="get_valid_boxes",
tag="get_valid_boxes_gpu",
)
valid_indices_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "valid_indices_buf", data_alignment=8
)
valid_indices, valid_count = exclusive_scan(valid_boxes, axis=1, return_reduction=True)
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
out_indices_buf = tvm.tir.decl_buffer(
(batch_size, num_anchors), "int32", "out_buf", data_alignment=8
)
out, out_indices = te.extern(
[data.shape, (batch_size, num_anchors)],
[data, valid_indices, valid_boxes],
lambda ins, outs: get_valid_counts_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
dtype=["int32", data.dtype],
in_buffers=[data_buf, valid_indices_buf, valid_boxes_buf],
out_buffers=[out_buf, out_indices_buf],
name="get_valid_counts",
tag="get_valid_counts_gpu",
)
return [valid_count, out, out_indices]
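# Usage sketch (illustrative; the (batch, num_anchors, 6) layout with the class id at index 0
# and the score at index 1 is the conventional SSD-style packing assumed here):
#
#     import tvm
#     from tvm import te
#
#     data = te.placeholder((1, 2500, 6), name="data", dtype="float32")
#     with tvm.target.Target("cuda"):
#         valid_count, out, out_indices = get_valid_counts(
#             data, score_threshold=0.0, id_index=0, score_index=1
#         )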
def _nms_loop(
ib,
batch_size,
top_k,
iou_threshold,
max_output_size,
valid_count,
on_new_valid_box_func,
on_new_invalidated_box_func,
needs_bbox_check_func,
calc_overlap_func,
out_scores,
num_valid_boxes,
):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_by = batch_size
nthread_tx = max_threads
        # Some CUDA architectures have a smaller limit of 32K for cudaDevAttrMaxRegistersPerBlock
        # vs 64K for most GPUs. Since this kernel uses many registers (around 35), the limit would
        # be exceeded with 1024 threads.
target = tvm.target.Target.current(allow_none=False)
if target.kind.name == "cuda":
if nvcc.get_target_compute_version(target) in ["3.2", "5.3", "6.2"]:
nthread_tx = 512
by = te.thread_axis("blockIdx.y")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(by, "thread_extent", nthread_by)
ib.scope_attr(tx, "thread_extent", nthread_tx)
num_valid_boxes_local = ib.allocate(
"int32", (1,), name="num_valid_boxes_local", scope="local"
)
num_valid_boxes_local[0] = 0
def nms_inner_loop(ib, i, j, nkeep):
# The box j is valid, invalidate other boxes that overlap with j above iou_threshold
on_new_valid_box_func(ib, tx, num_valid_boxes_local[0], i, j)
num_valid_boxes_local[0] += 1
num_iter_per_thread = ceil_div(nkeep - (j + 1), nthread_tx)
with ib.for_range(0, num_iter_per_thread, name="_k") as _k:
k = j + 1 + _k * nthread_tx + tx
with ib.if_scope(
tvm.tir.all(
k < nkeep,
out_scores[i, k] > 0, # is the box k still valid?
needs_bbox_check_func(i, j, k),
)
):
iou = calc_overlap_func(i, j, k)
with ib.if_scope(iou >= iou_threshold):
# invalidate the box k
out_scores[i, k] = -1.0
on_new_invalidated_box_func(i, k)
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
i = by
nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i])
max_output_size = if_then_else(max_output_size > 0, max_output_size, nkeep)
with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
# Apply nms
# No need to do more iteration if we have already reached max_output_size boxes
box_idx = ib.allocate("int32", (1,), name="box_idx", scope="local")
box_idx[0] = 0
with ib.while_loop(
tvm.tir.all(box_idx[0] < nkeep, num_valid_boxes_local[0] < max_output_size)
):
# Proceed to the inner loop if the box with id box_idx is still valid
with ib.if_scope(out_scores[i, box_idx[0]] > -1.0):
nms_inner_loop(ib, i, box_idx[0], nkeep)
box_idx[0] += 1
with ib.if_scope(tx + 0 == 0):
num_valid_boxes[i] = num_valid_boxes_local[0]
with ib.else_scope():
num_valid_boxes[i] = 0
return ib.get()
def nms_ir(
data,
sorted_index,
valid_count,
indices,
out_bboxes,
out_scores,
out_class_ids,
out_features,
box_indices,
num_valid_boxes,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
):
"""Low level IR routing for transform location in multibox_detection operator.
Parameters
----------
data : Buffer
Buffer of output boxes with class and score.
sorted_index : Buffer
Buffer of output box indexes sorted by score.
valid_count : Buffer
Buffer of number of valid output boxes.
indices : Buffer
        Indices in the original tensor, with shape [batch_size, num_anchors],
        representing the index of each box in the original data. It could be the third
        output out_indices of get_valid_counts. The values in the second
        dimension are like the output of arange(num_anchors) if get_valid_counts
        is not used before non_max_suppression.
out_bboxes : Buffer
Output buffer, to be filled with sorted box coordinates.
out_scores : Buffer
Output buffer, to be filled with sorted scores.
out_class_ids : Buffer
Output buffer, to be filled with sorted class ids.
box_indices : Buffer
        An indices tensor mapping sorted indices to original indices.
This is the first output of NMS when return_indices=True.
num_valid_boxes : Buffer
Record the number of boxes that have survived IOU tests.
This is the second output of NMS when return_indices=True.
max_output_size : int
Max number of output valid boxes for each instance.
By default all valid boxes are returned.
iou_threshold : float
Overlapping(IoU) threshold to suppress object with smaller score.
force_suppress : boolean
Whether to suppress all detections regardless of class_id.
top_k : int
Keep maximum top k detections before nms, -1 for no limit.
coord_start : int
Start index of the consecutive 4 coordinates.
id_index : int
index of the class categories, -1 to disable.
score_index : optional, int
Index of the scores/confidence of boxes.
return_indices : boolean
Whether to return box indices in input data.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
box_data_length = data.shape[2]
num_features = out_features.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
sorted_index = ib.buffer_ptr(sorted_index)
valid_count = ib.buffer_ptr(valid_count)
indices = ib.buffer_ptr(indices)
# outputs
out_bboxes = ib.buffer_ptr(out_bboxes)
out_scores = ib.buffer_ptr(out_scores)
out_class_ids = ib.buffer_ptr(out_class_ids)
out_features = ib.buffer_ptr(out_features)
box_indices = ib.buffer_ptr(box_indices)
num_valid_boxes = ib.buffer_ptr(num_valid_boxes)
if isinstance(iou_threshold, float):
iou_threshold = tvm.tir.FloatImm("float32", iou_threshold)
top_k = tvm.tir.IntImm("int32", top_k)
coord_start = tvm.tir.IntImm("int32", coord_start)
id_index = tvm.tir.IntImm("int32", id_index)
score_index = tvm.tir.IntImm("int32", score_index)
force_suppress = tvm.tir.IntImm("int32", 1 if force_suppress else 0)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, max_threads)
nthread_by = batch_size
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(by, "thread_extent", nthread_by)
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
i = by
base_src_idx = i * num_anchors * box_data_length
base_bbox_idx = i * num_anchors * 4
base_features_idx = i * num_anchors * num_features
with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
# Reorder output
nkeep = if_then_else(
tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i]
)
j = bx * max_threads + tx
with ib.if_scope(j < nkeep):
src_idx = base_src_idx + sorted_index[i * num_anchors + j] * box_data_length
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[(base_bbox_idx + j * 4 + k)] = data[src_idx + coord_start + k]
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = data[
src_idx + coord_start + 4 + k
]
out_scores[i * num_anchors + j] = data[src_idx + score_index]
if id_index >= 0:
out_class_ids[i * num_anchors + j] = data[src_idx + id_index]
with ib.else_scope():
# Indices > nkeep are discarded
# Only needed for return_indices = False case
if return_indices is False:
with ib.if_scope(j < num_anchors):
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[(base_bbox_idx + j * 4 + k)] = -1.0
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = -1.0
out_scores[i, j] = -1.0
if id_index >= 0:
out_class_ids[i, j] = -1.0
if return_indices:
with ib.if_scope(j < num_anchors):
box_indices[i * num_anchors + j] = -1
with ib.else_scope():
# Need to copy all boxes if not using return_indices
bounds = valid_count[i] if return_indices else num_anchors
with ib.if_scope(j < bounds):
src_offset = base_src_idx + j * box_data_length
with ib.for_range(0, 4, kind="unroll") as k:
out_bboxes[base_bbox_idx + j * 4 + k] = data[src_offset + coord_start + k]
with ib.for_range(0, num_features, kind="unroll") as k:
out_features[(base_features_idx + j * num_features + k)] = data[
src_offset + coord_start + 4 + k
]
out_scores[i * num_anchors + j] = data[src_offset + score_index]
if id_index >= 0:
out_class_ids[i * num_anchors + j] = data[src_offset + id_index]
box_indices[i * num_anchors + j] = j
if isinstance(max_output_size, int):
max_output_size = tvm.tir.const(max_output_size)
def calc_overlap(i, j, k):
offset_j = j * 4
offset_k = k * 4
base_bbox_idx = i * num_anchors * 4
return calculate_overlap(
out_bboxes,
base_bbox_idx + offset_j,
base_bbox_idx + offset_k,
)
def on_new_valid_box(ib, tid, num_current_valid_box, i, j):
# When return_indices is False, no need to populate box_indices
if return_indices:
with ib.if_scope(tid + 0 == 0):
orig_idx = sorted_index[i * num_anchors + j]
box_indices[i, num_current_valid_box] = indices[i, orig_idx]
def on_new_invalidated_box(i, k):
if return_indices is False and id_index >= 0:
out_class_ids[i, k] = -1.0
def needs_bbox_check(i, j, k):
return tvm.tir.any(
force_suppress > 0,
id_index < 0,
out_class_ids[i, k] == out_class_ids[i, j],
)
return _nms_loop(
ib,
batch_size,
top_k,
iou_threshold,
max_output_size,
valid_count,
on_new_valid_box,
on_new_invalidated_box,
needs_bbox_check,
calc_overlap,
out_scores,
num_valid_boxes,
)
def _fetch_score_ir(data, score, axis):
"""
Fetch score from data.
This routine is required for dynamic shape nms.
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
elem_length = data.shape[2]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
score = ib.buffer_ptr(score)
with ib.if_scope(num_anchors > 0):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = batch_size * num_anchors // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size * num_anchors):
score[tid] = data[tid * elem_length + axis]
return ib.get()
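

# Editor's sketch (not part of the original module, never called): for a fixed-shape
# input, the kernel above is just a strided gather of the score column. A NumPy
# reference of the same computation, kept here only for illustration:
def _fetch_score_reference(data, axis):
    """Flatten (batch, num_anchors, elem_length) box data into a 1D score vector."""
    import numpy as np  # local import; the GPU kernel above does not need NumPy

    return np.asarray(data)[:, :, axis].reshape(-1)
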
def _dispatch_sort(scores, ret_type="indices"):
target = tvm.target.Target.current()
if target and (
can_use_thrust(target, "tvm.contrib.thrust.sort")
or can_use_rocthrust(target, "tvm.contrib.thrust.sort")
):
return argsort_thrust(scores, axis=1, is_ascend=False, dtype="int32", ret_type=ret_type)
return argsort(scores, axis=1, is_ascend=False, dtype="int32", ret_type=ret_type)
def _get_sorted_indices(data, data_buf, score_index, score_shape):
"""Extract a 1D score tensor from the packed input and do argsort on it."""
score_buf = tvm.tir.decl_buffer(score_shape, data.dtype, "score_buf", data_alignment=8)
score_tensor = te.extern(
[score_shape],
[data],
lambda ins, outs: _fetch_score_ir(
ins[0],
outs[0],
score_index,
),
dtype=[data.dtype],
in_buffers=[data_buf],
out_buffers=[score_buf],
name="fetch_score",
tag="fetch_score",
)
return _dispatch_sort(score_tensor)
def _run_nms(
data,
data_buf,
sort_tensor,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
):
"""Run NMS using sorted scores."""
sort_tensor_buf = tvm.tir.decl_buffer(
sort_tensor.shape, sort_tensor.dtype, "sort_tensor_buf", data_alignment=8
)
valid_count_dtype = "int32"
valid_count_buf = tvm.tir.decl_buffer(
valid_count.shape, valid_count_dtype, "valid_count_buf", data_alignment=4
)
indices_buf = tvm.tir.decl_buffer(indices.shape, indices.dtype, "indices_buf", data_alignment=8)
batch_size = data.shape[0]
num_anchors = data.shape[1]
# Number of extra features per box beyond coords, score, and id.
num_features = data.shape[2] - 6 if id_index >= 0 else data.shape[2] - 5
# output shapes
bbox_shape = (batch_size, num_anchors, 4)
score_shape = (batch_size, num_anchors)
class_id_shape = score_shape
out_features_shape = (batch_size, num_anchors, num_features)
box_indices_shape = score_shape
num_valid_boxes_shape = (batch_size, 1)
return te.extern(
[
bbox_shape,
score_shape,
class_id_shape,
out_features_shape,
box_indices_shape,
num_valid_boxes_shape,
],
[data, sort_tensor, valid_count, indices],
lambda ins, outs: nms_ir(
ins[0],
ins[1],
ins[2],
ins[3],
outs[0], # sorted bbox
outs[1], # sorted scores
outs[2], # sorted class ids
outs[3], # sorted box feats
outs[4], # box_indices
outs[5], # num_valid_boxes
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
),
dtype=[data.dtype, "float32", "float32", "float32", "int32", "int32"],
in_buffers=[data_buf, sort_tensor_buf, valid_count_buf, indices_buf],
name="nms",
tag="nms",
)
def _concatenate_outputs(
out_bboxes,
out_scores,
out_class_ids,
out_features,
out_shape,
coord_start,
score_index,
id_index,
):
"""Pack the results from NMS into a single 5D or 6D tensor."""
batch_size = out_bboxes.shape[0]
num_anchors = out_bboxes.shape[1]
num_features = out_features.shape[2]
def ir(out_bboxes, out_scores, out_class_ids, out):
ib = tvm.tir.ir_builder.create()
out_bboxes = ib.buffer_ptr(out_bboxes)
out_scores = ib.buffer_ptr(out_scores)
out_class_ids = ib.buffer_ptr(out_class_ids)
out = ib.buffer_ptr(out)
with ib.if_scope(num_anchors > 0):
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_anchors, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", batch_size)
tid = bx * nthread_tx + tx
i = by
with ib.if_scope(tid < num_anchors):
with ib.for_range(0, 4, kind="unroll") as j:
out[i, tid, coord_start + j] = out_bboxes[i, tid, j]
with ib.for_range(0, num_features, kind="unroll") as j:
out[i, tid, coord_start + 4 + j] = out_features[i, tid, j]
out[i, tid, score_index] = out_scores[i, tid]
if id_index >= 0:
out[i, tid, id_index] = out_class_ids[i, tid]
return ib.get()
return te.extern(
[out_shape],
[out_bboxes, out_scores, out_class_ids],
lambda ins, outs: ir(ins[0], ins[1], ins[2], outs[0]),
dtype=["float32"],
name="nms_output_concat",
tag="nms_output_concat",
)
def non_max_suppression(
data,
valid_count,
indices,
max_output_size=-1,
iou_threshold=0.5,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
):
"""Non-maximum suppression operator for object detection.
Parameters
----------
data : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, elem_length].
The last dimension should be in format of
[class_id, score, box_left, box_top, box_right, box_bottom].
It could be the second output out_tensor of get_valid_counts.
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes. It could be the output
valid_count of get_valid_counts.
indices : tvm.te.Tensor
2-D tensor with shape [batch_size, num_anchors], represents
the index of box in original data. It could be the third
output out_indices of get_valid_counts. The values in the
second dimension are like the output of arange(num_anchors)
if get_valid_counts is not used before non_max_suppression.
max_output_size : optional, tvm.te.Tensor or int
Max number of output valid boxes for each instance.
By default all valid boxes are returned.
iou_threshold : optional, tvm.te.Tensor or float
Non-maximum suppression threshold.
force_suppress : optional, boolean
Whether to suppress all detections regardless of class_id.
top_k : optional, int
Keep maximum top k detections before nms, -1 for no limit.
coord_start : required, int
Start index of the consecutive 4 coordinates.
score_index : optional, int
Index of the scores/confidence of boxes.
id_index : optional, int
        Index of the class categories, -1 to disable.
return_indices : boolean
Whether to return box indices in input data.
invalid_to_bottom : optional, boolean
Whether to move all valid bounding boxes to the top.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, elem_length].
Example
--------
.. code-block:: python
# An example to use nms
dshape = (1, 5, 6)
data = te.placeholder(dshape, name="data")
valid_count = te.placeholder((dshape[0],), dtype="int32", name="valid_count")
iou_threshold = 0.7
force_suppress = True
top_k = -1
out = non_max_suppression(data=data, valid_count=valid_count, iou_threshold=iou_threshold,
                                  force_suppress=force_suppress, top_k=top_k, return_indices=False)
        np_data = np.random.uniform(size=dshape).astype(data.dtype)
np_valid_count = np.array([4])
s = topi.generic.schedule_nms(out)
f = tvm.build(s, [data, valid_count, out], "cuda")
dev = tvm.cuda(0)
tvm_data = tvm.nd.array(np_data, dev)
tvm_valid_count = tvm.nd.array(np_valid_count, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
f(tvm_data, tvm_valid_count, tvm_out)
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
sort_tensor = _get_sorted_indices(data, data_buf, score_index, (data.shape[0], data.shape[1]))
out_bboxes, out_scores, out_class_ids, out_features, box_indices, num_valid_boxes = _run_nms(
data,
data_buf,
sort_tensor,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
id_index,
score_index,
return_indices,
)
if return_indices:
return [box_indices, num_valid_boxes]
return _concatenate_outputs(
out_bboxes,
out_scores,
out_class_ids,
out_features,
data.shape,
coord_start,
score_index,
id_index,
)
def _get_valid_box_count(scores, score_threshold):
batch_classes, num_boxes = scores.shape
def searchsorted_ir(scores, valid_count):
ib = tvm.tir.ir_builder.create()
scores = ib.buffer_ptr(scores)
valid_count = ib.buffer_ptr(valid_count)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
ib.scope_attr(bx, "thread_extent", ceil_div(batch_classes, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_classes):
binary_search(ib, tid, num_boxes, scores, score_threshold, valid_count)
return ib.get()
scores_buf = tvm.tir.decl_buffer(scores.shape, scores.dtype, "scores_buf", data_alignment=8)
return te.extern(
[(batch_classes,)],
[scores],
lambda ins, outs: searchsorted_ir(ins[0], outs[0]),
dtype=["int32"],
in_buffers=[scores_buf],
name="searchsorted",
tag="searchsorted",
)
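

# Editor's sketch (illustration only, never called): per (batch, class) row of
# descending-sorted scores, the searchsorted kernel above counts how many leading
# entries pass the score threshold. The exact boundary behaviour (strict vs.
# non-strict comparison) is defined by binary_search; a strict comparison is assumed.
def _valid_box_count_reference(sorted_scores, score_threshold):
    import numpy as np

    # sorted_scores: (batch * num_classes, num_boxes), each row sorted descending.
    return (np.asarray(sorted_scores) > score_threshold).sum(axis=1).astype("int32")
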
def _collect_selected_indices_ir(num_class, selected_indices, num_detections, row_offsets, out):
batch_classes, num_boxes = selected_indices.shape
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
out = ib.buffer_ptr(out)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_boxes, nthread_tx)
nthread_by = batch_classes
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
with ib.new_scope():
idx = bx * nthread_tx + tx
idy = cast(by, "int64")
batch_id = idy // num_class
class_id = idy % num_class
with ib.if_scope(idx < num_detections[idy]):
out[row_offsets[idy] + idx, 0] = batch_id
out[row_offsets[idy] + idx, 1] = class_id
out[row_offsets[idy] + idx, 2] = cast(selected_indices[idy, idx], "int64")
return ib.get()
def _collect_selected_indices_and_scores_ir(
selected_indices,
selected_scores,
num_detections,
row_offsets,
num_total_detections,
collected_indices,
collected_scores,
):
batch_size, num_class = row_offsets.shape
num_boxes = selected_indices.shape[1]
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
selected_scores = ib.buffer_ptr(selected_scores)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
num_total_detections = ib.buffer_ptr(num_total_detections)
collected_indices = ib.buffer_ptr(collected_indices)
collected_scores = ib.buffer_ptr(collected_scores)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(num_boxes, nthread_tx)
nthread_by = batch_size * num_class
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
zero = cast(0, "int64")
with ib.new_scope():
idx = bx * nthread_tx + tx
idy = cast(by, "int64")
batch_id = idy // num_class
class_id = idy % num_class
with ib.if_scope(idx < num_detections[batch_id, class_id]):
offset = row_offsets[batch_id, class_id] + idx
collected_indices[batch_id, offset, 0] = class_id
collected_indices[batch_id, offset, 1] = cast(selected_indices[idy, idx], "int64")
collected_scores[batch_id, offset] = selected_scores[idy, idx]
with ib.else_scope():
with ib.if_scope(idx < num_boxes):
offset = (
num_total_detections[batch_id]
+ class_id * num_boxes
- row_offsets[batch_id, class_id]
+ idx
- num_detections[batch_id, class_id]
)
collected_indices[batch_id, offset, 0] = zero
collected_indices[batch_id, offset, 1] = zero
collected_scores[batch_id, offset] = 0.0
return ib.get()
def all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
output_format="onnx",
):
"""Non-maximum suppression operator for object detection, corresponding to ONNX
NonMaxSuppression and TensorFlow combined_non_max_suppression.
NMS is performed for each class separately.
Parameters
----------
boxes : tvm.te.Tensor
3-D tensor with shape (batch_size, num_boxes, 4)
scores: tvm.te.Tensor
3-D tensor with shape (batch_size, num_classes, num_boxes)
max_output_boxes_per_class : int or tvm.te.Tensor, optional
        The maximum number of output selected boxes per class
    iou_threshold : float or tvm.te.Tensor, optional
IoU test threshold
score_threshold : float or tvm.te.Tensor, optional
Score threshold to filter out low score boxes early
output_format : str, optional
"onnx" or "tensorflow", see below
Returns
-------
out : list of tvm.te.Tensor
        If `output_format` is "onnx", the output is two tensors. The first is `indices` of size
        `(batch_size * num_class * num_boxes, 3)` and the second is a scalar tensor
        `num_total_detection` of shape `(1,)` representing the total number of selected
        boxes. The three values in `indices` encode batch, class, and box indices.
        Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come
        first, in descending order of scores, followed by boxes from batch 0, class 1, etc. Out
        of the `batch_size * num_class * num_boxes` rows of `indices`, only the first
        `num_total_detection` rows are valid.
        If `output_format` is "tensorflow", the output is three tensors: the first
        is `indices` of size `(batch_size, num_class * num_boxes, 2)`, the second is `scores` of
        size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of
        size `(batch_size,)` representing the total number of selected boxes per batch. The two
        values in `indices` encode class and box indices. Of the `num_class * num_boxes` boxes
        in `indices` at batch b, only the first `num_total_detection[b]` entries are valid. The
        second axis of `indices` and `scores` is sorted within each class by box scores, but not
        across classes. So the box indices and scores for class 0 come first in sorted order,
        followed by class 1, etc.
"""
batch, num_class, num_boxes = scores.shape
scores = reshape(scores, (batch * num_class, num_boxes))
sorted_scores, sorted_indices = _dispatch_sort(scores, ret_type="both")
valid_count = _get_valid_box_count(sorted_scores, score_threshold)
selected_indices, selected_scores, num_detections = run_all_class_nms(
boxes,
sorted_scores,
sorted_indices,
valid_count,
max_output_boxes_per_class,
iou_threshold,
_nms_loop,
return_scores=(output_format == "tensorflow"),
)
if output_format == "onnx":
row_offsets, num_total_detections = exclusive_scan(
num_detections, return_reduction=True, output_dtype="int64"
)
selected_indices = collect_selected_indices(
num_class, selected_indices, num_detections, row_offsets, _collect_selected_indices_ir
)
return [selected_indices, num_total_detections]
num_detections_per_batch = reshape(num_detections, (batch, num_class))
row_offsets, num_total_detections = exclusive_scan(
num_detections_per_batch, return_reduction=True, output_dtype="int64", axis=1
)
selected_indices, selected_scores = collect_selected_indices_and_scores(
selected_indices,
selected_scores,
num_detections_per_batch,
row_offsets,
num_total_detections,
_collect_selected_indices_and_scores_ir,
)
return [selected_indices, selected_scores, num_total_detections]
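

# Editor's usage sketch (not part of the original module, never called): builds the
# TE graph for ONNX-style all-class NMS. Shapes and thresholds are illustrative
# assumptions, and a GPU target context is required because the extern IR queries the
# current target. Compiling and running additionally needs the matching topi.cuda
# vision schedule.
def _all_class_nms_usage_sketch():
    batch, num_boxes, num_classes = 1, 64, 4
    boxes = te.placeholder((batch, num_boxes, 4), name="boxes", dtype="float32")
    scores = te.placeholder((batch, num_classes, num_boxes), name="scores", dtype="float32")
    with tvm.target.Target("cuda"):
        # Returns [selected_indices, num_total_detections] in the "onnx" format.
        return all_class_non_max_suppression(
            boxes,
            scores,
            max_output_boxes_per_class=10,
            iou_threshold=0.5,
            score_threshold=0.0,
            output_format="onnx",
        )
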
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""scheduler functions for cuda backend"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from ..utils import traverse_inline
def schedule_lrn(outs):
"""Schedule for LRN
Parameters
----------
outs: Array of Tensor
The computation graph description of LRN
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
def _callback(op):
if "sqr_sum" in op.tag:
pad = op.input_tensors[0]
s[pad].compute_inline()
fused_axis = s[outs[0]].fuse(*s[outs[0]].op.axis)
bx, tx = s[outs[0]].split(fused_axis, factor=max_threads)
s[outs[0]].bind(bx, te.thread_axis("blockIdx.x"))
s[outs[0]].bind(tx, te.thread_axis("threadIdx.x"))
s[op].compute_at(s[outs[0]], tx)
traverse_inline(s, outs[0].op, _callback)
return s
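

# Editor's usage sketch (not part of the original module, never called): schedule_lrn
# expects the LRN compute from topi.nn. Sizes and LRN parameters below are
# illustrative assumptions; a GPU target context is required by the scheduler.
def _schedule_lrn_usage_sketch():
    from .. import nn  # topi.nn provides the LRN compute

    data = te.placeholder((1, 16, 32, 32), name="data", dtype="float32")
    out = nn.lrn(data, size=5, axis=1, alpha=1e-4, beta=0.75, bias=2.0)
    with tvm.target.Target("cuda"):
        return schedule_lrn([out])
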
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for pooling operators"""
import tvm
from tvm import te
from .. import tag
from ..utils import traverse_inline
from .reduction import _schedule_reduce
from .injective import schedule_injective_from_existing
def schedule_adaptive_pool(outs, layout="NCHW"):
"""Schedule for adaptive_pool.
Parameters
----------
outs: Array of Tensor
The computation graph description of adaptive_pool
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for adaptive_pool.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule_non_global(Pool):
if Pool.op in s.outputs:
Out = Pool
OL = s.cache_write(Pool, "local")
else:
Out = outs[0].op.output(0)
s[Pool].set_scope("local")
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
fused_axis = s[Out].fuse(*s[Out].op.axis)
bx, tx = s[Out].split(fused_axis, factor=max_threads)
s[Out].bind(bx, te.thread_axis("blockIdx.x"))
s[Out].bind(tx, te.thread_axis("threadIdx.x"))
if Pool.op in s.outputs:
s[OL].compute_at(s[Out], tx)
else:
s[Pool].compute_at(s[Out], tx)
scheduled_ops = []
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_injective(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule global_pool
elif OP.tag.startswith("adaptive_pool"):
Pool = OP.output(0)
oshape = Pool.shape
if (layout == "NCHW" and oshape[2] == 1 and oshape[3] == 1) or (
layout == "NHWC" and oshape[1] == 1 and oshape[2] == 1
):
_schedule_reduce(OP, s)
if OP != outs[0].op:
# the final division for adaptive pool or fused elemwise ops
schedule_injective_from_existing(s, outs[0])
else:
_schedule_non_global(Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
def schedule_pool(outs, layout):
"""Schedule for pool.
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
s: Schedule
The computation schedule for pool.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(PaddedInput, Pool):
if isinstance(PaddedInput.op, tvm.te.ComputeOp):
s[PaddedInput].compute_inline()
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
if Pool.op in s.outputs:
Out = Pool
OL = s.cache_write(Pool, "local")
else:
Out = outs[0].op.output(0)
s[Pool].set_scope("local")
fused = s[Out].fuse(*s[Out].op.axis)
bx, tx = s[Out].split(fused, factor=num_thread)
s[Out].bind(bx, te.thread_axis("blockIdx.x"))
s[Out].bind(tx, te.thread_axis("threadIdx.x"))
if Pool.op in s.outputs:
s[OL].compute_at(s[Out], tx)
else:
s[Pool].compute_at(s[Out], tx)
scheduled_ops = []
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_injective(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule pool
elif OP.tag.startswith("pool"):
PaddedInput = OP.input_tensors[0]
Pool = OP.output(0)
_schedule(PaddedInput, Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
def schedule_pool_grad(outs):
"""Schedule for pool_grad on CUDA
Parameters
----------
outs: Array of Tensor
The computation graph description of pool_grad
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for pool_grad.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule_pool_grad(op):
if op in s.outputs:
out = op
else:
out = outs[0].op.output(0)
fused = s[out].fuse(*s[out].op.axis)
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
bx, tx = s[out].split(fused, factor=num_thread)
s[out].bind(bx, te.thread_axis("blockIdx.x"))
s[out].bind(tx, te.thread_axis("threadIdx.x"))
if tag.COMM_REDUCE_IDX in op.input_tensors[0].op.tag:
max_pool_index = op.input_tensors[0]
s[max_pool_index].compute_at(s[out], tx)
pool_input = max_pool_index.op.input_tensors[0]
if isinstance(pool_input.op, tvm.te.ComputeOp):
# handle padding
s[pool_input].compute_inline()
if op not in s.outputs:
s[op].compute_at(s[out], tx)
def _callback(op):
if op.tag.startswith("pool_grad"):
_schedule_pool_grad(op)
traverse_inline(s, outs[0].op, _callback)
return s
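

# Editor's usage sketch (not part of the original module, never called): schedule_pool
# consumes a pooling compute from topi.nn. The pool2d argument order below follows the
# upstream signature at the time of writing and is an assumption here, as are the shapes.
def _schedule_pool_usage_sketch():
    from .. import nn  # topi.nn provides the pooling compute

    data = te.placeholder((1, 16, 32, 32), name="data", dtype="float32")
    out = nn.pool2d(
        data,
        kernel=(2, 2),
        stride=(2, 2),
        dilation=(1, 1),
        padding=(0, 0, 0, 0),
        pool_type="max",
        layout="NCHW",
    )
    with tvm.target.Target("cuda"):
        return schedule_pool([out], layout="NCHW")
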
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/rcnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Faster R-CNN and Mask R-CNN operators"""
from .proposal import proposal
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/rcnn/proposal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
from tvm import te
from ...vision.rcnn import generate_anchor, reg_bbox, reg_iou
from ...utils import get_const_tuple, get_const_int
def predict_bbox_ir(
cls_prob_buf,
bbox_pred_buf,
im_info_buf,
out_buf,
scales,
ratios,
feature_stride,
rpn_min_size,
iou_loss,
):
"""Predict bounding boxes based on anchors, scores and deltas.
Parameters
----------
cls_prob_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 4 * num_anchors, height, width]
im_info_buf : tvm.te.schedule.Buffer
2-D with shape [batch, 3]
out_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]
The last dimension is in format of [w_start, h_start, w_end, h_end, score]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
feature_stride : int
        The size of the receptive field of each unit in the convolution layer of the RPN, for
        example the product of all strides prior to this layer.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_anchors, height, width = get_const_tuple(cls_prob_buf.shape)
num_anchors //= 2
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = (batch * height * width) // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
tid = bx * max_threads + tx
ib = tvm.tir.ir_builder.create()
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
p_score = ib.buffer_ptr(cls_prob_buf)
p_delta = ib.buffer_ptr(bbox_pred_buf)
p_im_info = ib.buffer_ptr(im_info_buf)
p_out = ib.buffer_ptr(out_buf)
idxm = tvm.tir.indexmod
idxd = tvm.tir.indexdiv
with ib.if_scope(tid < batch * height * width):
w = idxm(tid, width)
h = idxm(idxd(tid, width), height)
b = idxd(idxd(tid, width), height)
for k in range(num_anchors):
out_index = tid * num_anchors + k
ratio = ratios[k // len(scales)]
scale = scales[k % len(scales)]
anchor = generate_anchor(ratio, scale, feature_stride)
im_height = p_im_info[b * 3]
im_width = p_im_info[b * 3 + 1]
x1 = anchor[0] + w * feature_stride
y1 = anchor[1] + h * feature_stride
x2 = anchor[2] + w * feature_stride
y2 = anchor[3] + h * feature_stride
delta = [
p_delta[((((b * num_anchors + k) * 4 + i) * height + h) * width + w)]
for i in range(4)
]
regression_func = reg_iou if iou_loss else reg_bbox
pred_x1, pred_y1, pred_x2, pred_y2 = regression_func(x1, y1, x2, y2, *delta)
pred_x1 = tvm.te.max(tvm.te.min(pred_x1, im_width - 1.0), 0.0)
pred_y1 = tvm.te.max(tvm.te.min(pred_y1, im_height - 1.0), 0.0)
pred_x2 = tvm.te.max(tvm.te.min(pred_x2, im_width - 1.0), 0.0)
pred_y2 = tvm.te.max(tvm.te.min(pred_y2, im_height - 1.0), 0.0)
real_height = (im_height / feature_stride).astype("int32")
real_width = (im_width / feature_stride).astype("int32")
bbox_w = pred_x2 - pred_x1 + 1.0
bbox_h = pred_y2 - pred_y1 + 1.0
min_size = p_im_info[b * 3 + 2] * rpn_min_size
pred_score = p_score[((b * num_anchors * 2 + num_anchors + k) * height + h) * width + w]
pred_score = tvm.tir.Select(
tvm.tir.any(h >= real_height, w >= real_width), -1.0, pred_score
)
p_out[out_index * 5 + 0] = pred_x1
p_out[out_index * 5 + 1] = pred_y1
p_out[out_index * 5 + 2] = pred_x2
p_out[out_index * 5 + 3] = pred_y2
p_out[out_index * 5 + 4] = pred_score
with ib.if_scope(tvm.tir.any(bbox_w < min_size, bbox_h < min_size)):
p_out[out_index * 5 + 0] -= min_size / 2.0
p_out[out_index * 5 + 1] -= min_size / 2.0
p_out[out_index * 5 + 2] += min_size / 2.0
p_out[out_index * 5 + 3] += min_size / 2.0
p_out[out_index * 5 + 4] = -1.0
return ib.get()
def argsort_ir(data_buf, out_index_buf):
"""Batched odd-even transposition sort.
Parameters
----------
data_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]
out_index_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Indices of data in sorted order.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox = get_const_tuple(data_buf.shape)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(data_buf)
index_out = ib.buffer_ptr(out_index_buf)
nthread_tx = max_threads
nthread_bx = (num_bbox + 1) // 2 // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("vthread")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "virtual_thread", nthread_bx)
tid = bx * nthread_tx + tx
temp_data = ib.allocate("float32", (1,), name="temp_data", scope="local")
temp_index = ib.allocate("int32", (1,), name="temp_index", scope="local")
idxm = tvm.tir.indexmod
with ib.for_range(0, batch, kind="unroll") as b:
start = b * num_bbox
for i in range(2):
bbox_id = tid * 2 + i
with ib.if_scope(bbox_id < num_bbox):
index_out[start + bbox_id] = bbox_id
with ib.for_range(0, num_bbox) as k:
offset = start + 2 * tid + idxm(k, 2)
with ib.if_scope(
tvm.tir.all(offset + 1 < num_bbox, p_data[offset] < p_data[offset + 1])
):
temp_data[0] = p_data[offset]
p_data[offset] = p_data[offset + 1]
p_data[offset + 1] = temp_data[0]
temp_index[0] = index_out[offset]
index_out[offset] = index_out[offset + 1]
index_out[offset + 1] = temp_index[0]
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
return ib.get()
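

# Editor's reference sketch (illustration only, never called): the same alternating
# compare-and-swap pattern as the IR above, on one plain Python row, sorting scores
# in descending order while tracking the permutation. n passes of odd-even
# transposition are sufficient to fully sort n elements.
def _odd_even_argsort_reference(values):
    data = list(values)
    index = list(range(len(data)))
    for k in range(len(data)):
        # Even passes compare pairs starting at 0, odd passes at 1 (idxm(k, 2) above).
        for offset in range(k % 2, len(data) - 1, 2):
            if data[offset] < data[offset + 1]:
                data[offset], data[offset + 1] = data[offset + 1], data[offset]
                index[offset], index[offset + 1] = index[offset + 1], index[offset]
    return data, index
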
def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
"""Non-maximum suppression.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
nms_threshold : float
Non-maximum suppression threshold.
Returns
-------
stmt : Stmt
The result IR statement.
"""
def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
"""Calculate overlap of two boxes."""
w = tvm.te.max(
0.0,
tvm.te.min(out_tensor[box_a_idx + 2], out_tensor[box_b_idx + 2])
- tvm.te.max(out_tensor[box_a_idx], out_tensor[box_b_idx])
+ 1.0,
)
h = tvm.te.max(
0.0,
tvm.te.min(out_tensor[box_a_idx + 3], out_tensor[box_b_idx + 3])
- tvm.te.max(out_tensor[box_a_idx + 1], out_tensor[box_b_idx + 1])
+ 1.0,
)
i = w * h
u = (
(out_tensor[box_a_idx + 2] - out_tensor[box_a_idx] + 1.0)
* (out_tensor[box_a_idx + 3] - out_tensor[box_a_idx + 1] + 1.0)
+ (out_tensor[box_b_idx + 2] - out_tensor[box_b_idx] + 1.0)
* (out_tensor[box_b_idx + 3] - out_tensor[box_b_idx + 1] + 1.0)
- i
)
return i / u
batch, num_bbox = get_const_tuple(out_buf.shape)
max_threads = int(math.sqrt(tvm.target.Target.current(allow_none=False).max_num_threads))
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(sorted_bbox_buf)
p_out = ib.buffer_ptr(out_buf)
nthread_tx = max_threads
nthread_bx = num_bbox // max_threads + 1
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
i = bx * max_threads + tx
with ib.for_range(0, batch, kind="unroll", name="n") as b:
base_idx = b * num_bbox
with ib.if_scope(i < num_bbox):
p_out[base_idx + i] = False
with ib.for_range(0, num_bbox - 1) as l:
with ib.if_scope(tvm.tir.all(i < num_bbox, i > l, p_out[base_idx + l] == False)):
iou = calculate_overlap(p_data, (base_idx + l) * 5, (base_idx + i) * 5)
with ib.if_scope(iou > nms_threshold):
p_out[base_idx + i] = True
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
return ib.get()
def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
"""Copy output after applying nms to continuous memory.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
remove_mask_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox, _ = get_const_tuple(sorted_bbox_buf.shape)
rpn_post_nms_top_n = get_const_int(out_buf.shape[0]) // batch
nthread_tx = batch
tx = te.thread_axis("threadIdx.x")
ib = tvm.tir.ir_builder.create()
ib.scope_attr(tx, "thread_extent", nthread_tx)
i = ib.allocate("int32", (1,), "i", scope="local")
i[0] = 0
p_sorted_bbox = ib.buffer_ptr(sorted_bbox_buf)
p_remove = ib.buffer_ptr(remove_mask_buf)
p_out = ib.buffer_ptr(out_buf)
b = tx
nkeep = ib.allocate("int32", (1,), "nkeep", scope="local")
nkeep[0] = 0 # number of bbox after nms
with ib.for_range(0, num_bbox) as j:
with ib.if_scope(p_remove[b * num_bbox + j] == False):
nkeep[0] += 1
with ib.if_scope(nkeep[0] > 0):
with ib.for_range(
0, te.ceil(tvm.tir.const(rpn_post_nms_top_n, "float32") / nkeep[0]).astype("int32")
):
with ib.for_range(0, num_bbox) as j:
offset_j = (b * num_bbox + j) * 5
offset_i = (b * rpn_post_nms_top_n + i[0]) * 5
with ib.if_scope(
tvm.tir.all(i[0] < rpn_post_nms_top_n, p_remove[(b * num_bbox + j)] == False)
):
p_out[offset_i] = tvm.tir.Cast("float32", b)
with ib.for_range(0, 4, kind="unroll") as k:
p_out[offset_i + k + 1] = p_sorted_bbox[offset_j + k]
i[0] = i[0] + 1
body = ib.get()
return body
def proposal(
cls_prob,
bbox_pred,
im_info,
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
):
"""Proposal operator.
Parameters
----------
cls_prob : tvm.te.Tensor
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred : tvm.te.Tensor
4-D with shape [batch, 4 * num_anchors, height, width]
im_info : tvm.te.Tensor
2-D with shape [batch, 3]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
feature_stride : int
        The size of the receptive field of each unit in the convolution layer of the RPN, for
        example the product of all strides prior to this layer.
threshold : float
Non-maximum suppression threshold.
rpn_pre_nms_top_n : int
Number of top scoring boxes to apply NMS. -1 to use all boxes.
rpn_post_nms_top_n : int
Number of top scoring boxes to keep after applying NMS to RPN proposals.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
out : tvm.te.Tensor
2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
"""
batch, _, height, width = get_const_tuple(cls_prob.shape)
num_anchors = len(scales) * len(ratios)
num_bbox = height * width * num_anchors
rpn_pre_nms_top_n = min(rpn_pre_nms_top_n, num_bbox) if rpn_pre_nms_top_n > 0 else num_bbox
bbox = te.extern(
(batch, num_bbox, 5),
[cls_prob, bbox_pred, im_info],
lambda ins, outs: predict_bbox_ir(
ins[0], ins[1], ins[2], outs[0], scales, ratios, feature_stride, rpn_min_size, iou_loss
),
dtype=bbox_pred.dtype,
)
score = te.compute((batch, num_bbox), lambda b, i: bbox[b, i, 4], tag="bbox_score")
sorted_index = te.extern(
[score.shape], [score], lambda ins, outs: argsort_ir(ins[0], outs[0]), dtype="int32"
)
sorted_bbox = te.compute(
(batch, rpn_pre_nms_top_n, 5),
lambda b, i, j: bbox[b, sorted_index[b, i], j],
tag="sorted_bbox",
)
nms_remove_mask = te.extern(
(batch, rpn_pre_nms_top_n),
[sorted_bbox],
lambda ins, outs: nms_ir(ins[0], outs[0], threshold),
dtype="bool",
)
nms_out = te.extern(
(batch * rpn_post_nms_top_n, 5),
[sorted_bbox, nms_remove_mask],
lambda ins, outs: prepare_output_ir(ins[0], ins[1], outs[0]),
dtype=sorted_bbox.dtype,
)
return nms_out
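

# Editor's usage sketch (not part of the original module, never called): a minimal
# Proposal graph with three anchors per location. All sizes and thresholds are
# illustrative assumptions; the target context is needed because the extern IR above
# queries the current target while the graph is being constructed.
def _proposal_usage_sketch():
    cls_prob = te.placeholder((1, 2 * 3, 16, 16), name="cls_prob", dtype="float32")
    bbox_pred = te.placeholder((1, 4 * 3, 16, 16), name="bbox_pred", dtype="float32")
    im_info = te.placeholder((1, 3), name="im_info", dtype="float32")
    with tvm.target.Target("cuda"):
        return proposal(
            cls_prob,
            bbox_pred,
            im_info,
            scales=(8.0,),
            ratios=(0.5, 1.0, 2.0),
            feature_stride=16,
            threshold=0.7,
            rpn_pre_nms_top_n=200,
            rpn_post_nms_top_n=100,
            rpn_min_size=16,
            iou_loss=False,
        )
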
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,too-many-locals,len-as-condition
"""Schedule for reduce operators"""
from __future__ import absolute_import as _abs
from operator import mul
from functools import reduce
import tvm
from tvm import te
from .. import tag
from .injective import schedule_injective_from_existing
def _schedule_reduce(op, sch, is_idx_reduce=False):
if is_idx_reduce:
data_out = op.input_tensors[0]
else:
data_in = op.input_tensors[0]
data_out = op.output(0)
if not sch[data_out].op.reduce_axis:
return schedule_injective_from_existing(sch, op.output(0))
if len(sch[data_out].op.axis) > 0:
all_reduce = False
num_thread = 32
target = tvm.target.Target.current()
if target and (target.kind.name == "opencl" or target.kind.name == "metal"):
# without it, CL_INVALID_WORK_GROUP_SIZE occurred when running test_topi_reduce.py
# don't know why
num_thread = 16
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
else:
all_reduce = True
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
# Fuse and refactor the reduce axis
fused_reduce = sch[data_out].fuse(
*[sch[data_out].op.reduce_axis[i] for i in range(len(sch[data_out].op.reduce_axis))]
)
ko, ki = sch[data_out].split(fused_reduce, factor=num_thread)
if is_idx_reduce:
data_out_rf, _ = sch.rfactor(data_out, ki)
else:
data_out_rf = sch.rfactor(data_out, ki)
tx = sch[data_out].op.reduce_axis[0]
sch[data_out].bind(tx, thread_x)
sch[data_out_rf].compute_at(sch[data_out], tx)
if is_idx_reduce:
real_output = op.output(0)
temp_idx_input = data_out.op.output(0)
temp_val_input = data_out.op.output(1)
else:
real_output = data_out
if not all_reduce:
# Fuse and split the axis
fused_outer = sch[real_output].fuse(
*[sch[real_output].op.axis[i] for i in range(len(sch[real_output].op.axis))]
)
bx, outer_in = sch[real_output].split(fused_outer, factor=num_thread)
# Bind the axes to threads and blocks
sch[real_output].bind(outer_in, thread_y)
sch[real_output].bind(bx, block_x)
if is_idx_reduce:
sch[temp_idx_input].compute_at(sch[real_output], outer_in)
sch[temp_val_input].compute_at(sch[real_output], outer_in)
sch[real_output].set_store_predicate(
tvm.tir.all(
thread_x.equal(0), block_x * num_thread + thread_y < reduce(mul, real_output.shape)
)
)
else:
if is_idx_reduce:
spatial_axis = sch[real_output].fuse(*(sch[real_output].op.axis))
sch[real_output].bind(spatial_axis, te.thread_axis("blockIdx.x"))
sch[temp_idx_input].compute_at(sch[real_output], spatial_axis)
sch[temp_val_input].compute_at(sch[real_output], spatial_axis)
sch[real_output].set_store_predicate(thread_x.equal(0))
return sch
def _enable_auto_inline(sch):
def is_scheduled(stage):
# auto inline requires the attach type is AttachType.kGroupRoot
conds = [
len(stage.relations) == 0,
stage.attach_type == 1,
stage.all_iter_vars == stage.leaf_iter_vars,
]
if not all(conds):
return True
return False
for s in sch.stages:
if not s.is_output and isinstance(s.op, tvm.te.ComputeOp):
if is_scheduled(s) or len(s.op.reduce_axis) != 0:
return False
return True
def schedule_reduce_impl(outs, schedule_reduce_stage, schedule_injective_stage):
"""Schedule for inject->reduce->bcast ops.
Traverse over the stages in the schedule and schedule separate stages depending
    on the position of the stage. Injective post-ops of reduction will be scheduled using
    the injective schedule, injective pre-ops of reduction will be inlined, and the reduction
    stage will be scheduled using the reduction schedule.
Parameters
----------
outs: Array of Tensor
The computation graph description of reduce in the format
of an array of tensors.
schedule_reduce_stage: Function responsible for scheduling the reduction
stage
schedule_injective_stage: Function responsible for scheduling the
standalone injection stage
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
sch = te.create_schedule([x.op for x in outs])
scheduled_ops = []
enable_auto_inline = _enable_auto_inline(sch)
def traverse_before_reduce(operator):
"""Internal traverse function"""
if isinstance(operator, tvm.te.PlaceholderOp):
return
if tag.is_injective(operator.tag):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
def traverse_after_reduce(operator):
"""Internal traverse function"""
if tag.is_broadcast(operator.tag):
if operator not in scheduled_ops:
schedule_injective_stage(sch, operator.output(0))
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
if enable_auto_inline:
traverse_before_reduce(tensor.op)
else:
traverse_after_reduce(tensor.op)
elif operator.tag == "comm_reduce":
if operator not in scheduled_ops:
schedule_reduce_stage(operator, sch, is_idx_reduce=False)
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif operator.tag == "comm_reduce_idx":
if operator not in scheduled_ops:
schedule_reduce_stage(operator, sch, is_idx_reduce=True)
input_tensors = operator.input_tensors[0].op.input_tensors
for tensor in input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif isinstance(operator, tvm.te.PlaceholderOp):
pass
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
for out in outs:
traverse_after_reduce(out.op)
return sch
def schedule_reduce(outs):
return schedule_reduce_impl(outs, _schedule_reduce, schedule_injective_from_existing)
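

# Editor's usage sketch (not part of the original module, never called):
# schedule_reduce handles the inject->reduce->broadcast pattern produced by topi
# reductions. The shape and reduced axis are illustrative assumptions; a GPU target
# context is required by the scheduler.
def _schedule_reduce_usage_sketch():
    from .. import sum as topi_sum  # topi.sum builds a comm_reduce compute

    data = te.placeholder((128, 256), name="data", dtype="float32")
    out = topi_sum(data, axis=1)
    with tvm.target.Target("cuda"):
        return schedule_reduce(out)
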
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
"Scan related operators"
from typing import Callable, Optional, Union
import tvm
from tvm import te
from tvm.contrib.thrust import can_use_rocthrust, can_use_thrust
from .. import tag
from ..math import cast, ceil_log2
from ..transform import expand_dims, reshape, squeeze, transpose
from ..utils import ceil_div, get_const_int, prod, swap
from .injective import schedule_injective_from_existing
def _get_thrust_func_name(tvmop):
tvmop_to_thrust_func_name = {tvm.tir.generic.add: "tvm.contrib.thrust.sum_scan"}
assert tvmop in tvmop_to_thrust_func_name, "{} not supported by thrust".format(tvmop)
return tvmop_to_thrust_func_name[tvmop]
def exclusive_scan_ir(data, output, reduction=None, binop=tvm.tir.generic.add, identity_value=0):
"""Low level IR to do exclusive sum scan along rows of 2D input.
Parameters
----------
data : Buffer
Input N-D Buffer. Scan is done over the innermost axis.
output: Buffer
A buffer to store the output scan, of the same shape as data
reduction: Buffer, optional
(N-1)-D Buffer, to store the sum of each scan axis.
binop: function, optional
A binary associative op to use for scan. The function takes two TIR expressions
and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute
prefix sum.
identity_value: int or float
A value for the binary operation which provides the identity property. E.g. if * is
your operator and i is the identity_value then a * i = a for all a in the domain of
your operation.
"""
batch_size = prod(data.shape[:-1])
scan_axis_size = data.shape[-1]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
output = ib.buffer_ptr(output)
out_dtype = output.dtype
if reduction is not None:
reduction = ib.buffer_ptr(reduction)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.if_scope(scan_axis_size == 0):
with ib.new_scope():
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(bx, "thread_extent", batch_size)
with ib.if_scope(bx < batch_size):
if reduction is not None:
reduction[bx] = cast(identity_value, out_dtype)
with ib.else_scope():
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(scan_axis_size, max_threads)
nthread_by = batch_size
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
tid = bx * nthread_tx + tx
with ib.if_scope(tid < scan_axis_size):
output[by * scan_axis_size + tid] = cast(data[by * scan_axis_size + tid], out_dtype)
nthread_tx = max_threads
nthread_bx = ceil_div(scan_axis_size, max_threads)
nthread_by = batch_size
# The following algorithm performs parallel exclusive scan
# Up Sweep of exclusive scan
lim = ceil_log2(scan_axis_size)
with ib.for_range(0, cast(lim, "int64"), dtype="int64") as l2_width:
width = 2 << l2_width
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(
bx,
"thread_extent",
tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"),
)
tid = bx * nthread_tx + tx
by = te.thread_axis("blockIdx.y")
ib.scope_attr(by, "thread_extent", nthread_by)
start = ib.allocate("int64", (1,), name="start", scope="local")
middle = ib.allocate("int64", (1,), name="middle", scope="local")
end = ib.allocate("int64", (1,), name="end", scope="local")
start[0] = width * tid
with ib.if_scope(start[0] < scan_axis_size):
middle[0] = start[0] + tvm.tir.indexdiv(width, 2)
end[0] = tvm.te.min(start[0] + width, scan_axis_size)
with ib.if_scope(middle[0] < scan_axis_size):
output[by * scan_axis_size + end[0] - 1] = binop(
output[by * scan_axis_size + end[0] - 1],
output[by * scan_axis_size + middle[0] - 1],
)
# Down Sweep of exclusive scan
with ib.new_scope():
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(bx, "thread_extent", batch_size)
with ib.if_scope(bx < batch_size):
if reduction is not None:
reduction[bx] = output[(bx + 1) * scan_axis_size - 1]
output[(bx + 1) * scan_axis_size - 1] = cast(identity_value, out_dtype)
with ib.for_range(0, cast(lim, "int64"), dtype="int64") as l2_width:
width = 2 << (lim - l2_width - 1)
with ib.new_scope():
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(
bx,
"thread_extent",
tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"),
)
tid = bx * nthread_tx + tx
by = te.thread_axis("blockIdx.y")
ib.scope_attr(by, "thread_extent", nthread_by)
start = ib.allocate("int64", (1,), name="start", scope="local")
middle = ib.allocate("int64", (1,), name="middle", scope="local")
end = ib.allocate("int64", (1,), name="end", scope="local")
                tmp = ib.allocate(out_dtype, (1,), name="tmp", scope="local")
start[0] = width * tid
with ib.if_scope(tvm.tir.all(start[0] < scan_axis_size)):
middle[0] = start[0] + tvm.tir.indexdiv(width, 2)
end[0] = tvm.tir.min(start[0] + width, scan_axis_size)
with ib.if_scope(middle[0] < scan_axis_size):
tmp[0] = output[by * scan_axis_size + middle[0] - 1]
output[by * scan_axis_size + middle[0] - 1] = output[
by * scan_axis_size + end[0] - 1
]
output[by * scan_axis_size + end[0] - 1] = binop(
output[by * scan_axis_size + end[0] - 1], tmp[0]
)
return ib.get()
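

# Editor's reference sketch (illustration only, never called): the same up-sweep /
# down-sweep (Blelloch) structure as the IR above, applied to a single row held in a
# Python list. `binop` and `identity` mirror the binop / identity_value parameters.
# For example, [1, 2, 3, 4] yields the scan [0, 1, 3, 6] with reduction 10.
def _exclusive_scan_row_reference(values, binop=lambda a, b: a + b, identity=0):
    import math

    out = list(values)
    n = len(out)
    if n == 0:
        return out, identity
    lim = math.ceil(math.log2(n)) if n > 1 else 0
    # Up-sweep: build partial reductions in place; out[-1] ends up holding the total.
    for l2_width in range(lim):
        width = 2 << l2_width
        for start in range(0, n, width):
            middle = start + width // 2
            end = min(start + width, n)
            if middle < n:
                out[end - 1] = binop(out[end - 1], out[middle - 1])
    reduction = out[n - 1]  # what the IR writes into `reduction` before clearing it
    out[n - 1] = identity
    # Down-sweep: rotate the partial sums down into exclusive prefix results.
    for l2_width in range(lim):
        width = 2 << (lim - l2_width - 1)
        for start in range(0, n, width):
            middle = start + width // 2
            end = min(start + width, n)
            if middle < n:
                tmp = out[middle - 1]
                out[middle - 1] = out[end - 1]
                out[end - 1] = binop(out[end - 1], tmp)
    return out, reduction
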
def get_reduction_from_exclusive_scan(data, ex_scan_output, binop=tvm.tir.generic.add):
"""Return the sum of the last element of data and the exclusive scan output.
    This is the reduction of data along each row (for the 2-D case).
Parameters
----------
data : tvm.te.Tensor
Input data of any shape
ex_scan_output : tvm.te.Tensor
The output of exclusive scan on data
binop: function, optional
A binary associative op to use for scan. The function takes two TIR expressions
and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute
prefix sum.
Returns
-------
reduction : tvm.te.Tensor
(N-1)-D tensor storing the reduction of each scan axis.
"""
ndim = len(data.shape)
if ndim == 1:
data = expand_dims(data, axis=0)
ex_scan_output = expand_dims(ex_scan_output, axis=0)
def ir(data, data_ex_scan, reduction):
batch_size = prod(data.shape[:-1])
scan_axis_size = data.shape[-1]
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data)
data_ex_scan = ib.buffer_ptr(data_ex_scan)
reduction = ib.buffer_ptr(reduction)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(scan_axis_size > 0):
reduction[tid] = binop(
data_ex_scan[tid * scan_axis_size + scan_axis_size - 1],
data[tid * scan_axis_size + scan_axis_size - 1],
)
with ib.else_scope():
reduction[tid] = cast(0, reduction.dtype)
return ib.get()
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "valid_indices_buf", data_alignment=8)
ex_scan_output_buf = tvm.tir.decl_buffer(
ex_scan_output.shape, ex_scan_output.dtype, "ex_scan_output_buf", data_alignment=8
)
reduction = te.extern(
[data.shape[:-1]],
[data, ex_scan_output],
lambda ins, outs: ir(ins[0], ins[1], outs[0]),
dtype=[ex_scan_output.dtype],
in_buffers=[data_buf, ex_scan_output_buf],
name="ex_scan_reduction",
tag="ex_scan_reduction_gpu",
)
if ndim == 1:
return squeeze(reduction, 0)
return reduction
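

# Editor's note (hedged): for a single row this amounts to
# reduction = binop(last exclusive-scan value, last data value); e.g. data [1, 2, 3]
# with exclusive scan [0, 1, 3] gives 3 + 3 = 6 under addition.
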
def scan_thrust(
data, output_dtype, exclusive=True, return_reduction=False, binop=tvm.tir.generic.add
):
"""Do exclusive or inclusive scan on 1D or multidimensional input, using thrust.
Parameters
----------
data : tvm.te.Tensor
Input data of any shape. The scan is done over the innermost axis.
output_dtype: string
The dtype of the output scan tensor.
exclusive: bool, optional
Whether or not do exclusive or inclusive scan.
return_reduction: bool, optional
Whether or not return a (N-1)-D tensor storing the reduction of each scan axis.
Reductions are computed as part of the upsweep pass, so there is no extra cost.
If False, reductions are ignored. It must be False when exclusive is False.
binop: function, optional
A binary associative op to use for scan. Since we need to lookup the corresponding
        thrust function, arbitrary callables are not supported. Currently only
tvm.tir.generic.add can be passed in.
Returns
-------
output : tvm.te.Tensor
A N-D tensor of the same rank N and shape as the input data.
reduction : tvm.te.Tensor, optional
(N-1)-D tensor storing the reduction of each scan axis.
Returned if return_reduction is True.
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
output_buf = tvm.tir.decl_buffer(data.shape, output_dtype, "output_buf", data_alignment=8)
output = te.extern(
[data.shape],
[data],
lambda ins, outs: tvm.tir.call_packed(
_get_thrust_func_name(binop), ins[0], outs[0], exclusive
),
dtype=[output_dtype],
in_buffers=[data_buf],
out_buffers=[output_buf],
name="exclusive_scan_thrust",
tag="exclusive_scan_thrust_gpu",
)
if return_reduction:
assert exclusive, "return_reduction should be False for inclusive scan"
reduction = get_reduction_from_exclusive_scan(data, output, binop)
return output, reduction
return output
def exclusive_scan(
data,
axis=-1,
return_reduction=False,
output_dtype=None,
binop=tvm.tir.generic.add,
identity_value=0,
):
"""Do exclusive scan on 1D or multidimensional input.
Parameters
----------
data : tvm.te.Tensor
Input data of any shape.
axis: int, optional
The axis to do scan on. By default, scan is done on the innermost axis.
return_reduction: bool, optional
Whether or not return a tensor storing the reduction over each scan axis.
If the input rank is N, this tensor is of rank N - 1.
Reductions are computed as part of the upsweep pass, so there is no extra cost.
If False, reductions are ignored.
output_dtype: string, optional
The dtype of the output scan tensor. If not provided, the dtype of the input is used.
binop: function, optional
A binary associative op to use for scan. The function takes two TIR expressions
and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute
prefix sum.
identity_value: int or float
A value for the binary operation which provides the identity property. E.g. if * is
your operator and i is the identity_value then a * i = a for all a in the domain of
your operation.
Returns
-------
output : tvm.te.Tensor
A N-D tensor of the same rank N and shape as the input data.
reduction : tvm.te.Tensor, optional
(N-1)-D tensor storing the reduction of each scan axis.
Returned if return_reduction is True.
"""
def do_scan(data, output_dtype):
target = tvm.target.Target.current()
# TODO: add support for a prod_scan
if (
target
and binop == tvm.tir.generic.add
and (
can_use_thrust(target, "tvm.contrib.thrust.sum_scan")
or can_use_rocthrust(target, "tvm.contrib.thrust.sum_scan")
)
):
return scan_thrust(
data, output_dtype, exclusive=True, return_reduction=return_reduction, binop=binop
)
if ndim == 1:
# TIR exclusive scan accepts only 2D or higher-rank inputs.
data = expand_dims(data, axis=0)
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
output_buf = tvm.tir.decl_buffer(data.shape, output_dtype, "output_buf", data_alignment=8)
if return_reduction:
output, reduction = te.extern(
[data.shape, data.shape[:-1]],
[data],
lambda ins, outs: exclusive_scan_ir(
ins[0], outs[0], outs[1], binop=binop, identity_value=identity_value
),
dtype=[output_dtype, output_dtype],
in_buffers=[data_buf],
name="exclusive_scan",
tag="exclusive_scan_gpu",
)
else:
output = te.extern(
[data.shape],
[data],
lambda ins, outs: exclusive_scan_ir(
ins[0], outs[0], binop=binop, identity_value=identity_value
),
dtype=[output_dtype],
in_buffers=[data_buf],
out_buffers=[output_buf],
name="exclusive_scan",
tag="exclusive_scan_gpu",
)
reduction = None
if ndim == 1:
output = squeeze(output, 0)
if return_reduction:
reduction = squeeze(reduction, 0)
if return_reduction:
return output, reduction
return output
if output_dtype is None or output_dtype == "":
output_dtype = data.dtype
ndim = len(data.shape)
if axis < 0:
axis += ndim
# If scan axis is not the innermost one, swap the scan and the innermost axes
# Scan is always done on the innermost axis, for performance reason.
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
if return_reduction:
output, reduction = do_scan(data, output_dtype)
else:
output = do_scan(data, output_dtype)
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
output = transpose(output, axes)
if return_reduction:
return output, reduction
return output
def inclusive_scan(data, axis=-1, output_dtype=None, binop=tvm.tir.generic.add, identity_value=0):
"""Do inclusive scan on 1D or multidimensional input.
Parameters
----------
data : tvm.te.Tensor
Input data of any shape.
axis: int, optional
The axis to do scan on. By default, scan is done on the innermost axis.
output_dtype: string, optional
The dtype of the output scan tensor. If not provided, the dtype of the input is used.
binop: function, optional
A binary associative op to use for scan. The function takes two TIR expressions
        and produces a new TIR expression. By default it uses tvm.tir.generic.add to compute
prefix sum.
identity_value: int or float
A value for the binary operation which provides the identity property. E.g. if * is
your operator and i is the identity_value then a * i = a for all a in the domain of
your operation.
Returns
-------
output : tvm.te.Tensor
        An N-D tensor of the same rank N as the input data.
"""
ex_scan = exclusive_scan(
data, axis, output_dtype=output_dtype, binop=binop, identity_value=identity_value
)
if output_dtype is not None and data.dtype != output_dtype and output_dtype != "":
data = cast(data, output_dtype)
return binop(data, ex_scan)
def schedule_scan(outs):
"""Schedule for scan operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of scan
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_injective(op.tag):
schedule_injective_from_existing(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
for out in outs:
traverse(out.op)
return s
def scanop(
data: tvm.te.Tensor,
binop: Callable[["tvm.Expr", "tvm.Expr"], "tvm.Expr"],
identity_value: Union[float, int],
axis: Optional[int] = None,
dtype: Optional[str] = None,
exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
"""Cumulative binary operator (scan) with similar axis behavior as np.cumsum and np.cumprod.
See cumprod and cumsum for an example of use.
E.g. if * is your binary operator and the input tensor is [1, 2, 3, 4] the output may be
[1, 1 * 2, 1 * 2 * 3, 1 * 2 * 3 * 4]
Parameters
----------
data : tvm.te.Tensor
The input data to the operator.
binop: Callable (tvm.Expr, tvm.Expr) -> tvm.Expr
A binary operator which should be associative and commutative. E.g. if * is your
operator then a * (b * c) = (a * b) * c and a * b = b * a
identity_value: int or float
A value for the binary operation which provides the identity property. E.g. if * is
your operator and i is the identity_value then a * i = a for all a in the domain of
your operation.
axis : int, optional
Axis along which the operation is computed. The default (None) is to compute
the cumulative operation over the flattened array.
dtype : string, optional
Type of the returned array and of the accumulator in which the elements are computed.
If dtype is not specified, it defaults to the dtype of data.
exclusive : bool, optional
        If True, will return the exclusive cumulative operation, in which the first element is not
included. In other terms, if true, the j-th output element would be
the cumulative operation of the first (j-1) elements. Otherwise, it would be the
cumulative operation of the first j elements.
Returns
-------
result : tvm.te.Tensor
The result has the same size as data, and the same shape as data if axis is not None.
If axis is None, the result is a 1-d array.
"""
if axis is None:
axis = 0
data = reshape(data, (prod(data.shape),))
axis = get_const_int(axis)
if exclusive is not None and exclusive:
return exclusive_scan(
data, axis, output_dtype=dtype, binop=binop, identity_value=identity_value
)
return inclusive_scan(
data, axis, output_dtype=dtype, binop=binop, identity_value=identity_value
)
def cumsum(
data: tvm.te.Tensor,
axis: Optional[int] = None,
    dtype: Optional[str] = None,
exclusive: Optional[bool] = None,
) -> tvm.te.Tensor:
"""Numpy style cumsum op. Return the cumulative sum of the elements along a given axis.
Parameters
----------
data : tvm.te.Tensor
The input data to the operator.
axis : int, optional
Axis along which the cumulative sum is computed. The default (None) is to compute
the cumsum over the flattened array.
dtype : string, optional
Type of the returned array and of the accumulator in which the elements are summed.
If dtype is not specified, it defaults to the dtype of data.
exclusive : bool, optional
        If True, will return the exclusive sum, in which the first element is not
included. In other terms, if true, the j-th output element would be
the sum of the first (j-1) elements. Otherwise, it would be the sum of
the first j elements.
Returns
-------
result : tvm.te.Tensor
The result has the same size as data, and the same shape as data if axis is not None.
If axis is None, the result is a 1-d array.
"""
return scanop(
data=data,
binop=tvm.tir.generic.add,
identity_value=0,
axis=axis,
dtype=dtype,
exclusive=exclusive,
)
def cumprod(
data: tvm.te.Tensor,
axis: Optional[int] = None,
    dtype: Optional[str] = None,
exclusive: Optional[bool] = None,
):
"""Numpy style cumprod op. Return the cumulative product of the elements along a given axis.
Parameters
----------
data : tvm.te.Tensor
The input data to the operator.
axis : int, optional
Axis along which the cumulative product is computed. The default (None) is to compute
the cumproduct over the flattened array.
dtype : string, optional
Type of the returned array and of the accumulator in which the elements are multiplied.
If dtype is not specified, it defaults to the dtype of data.
exclusive : bool, optional
If True, will return exclusive product in which the first element is not
included. In other terms, if True, the j-th output element would be
the product of the first (j-1) elements. Otherwise, it would be the product of
the first j elements.
Returns
-------
result : tvm.te.Tensor
The result has the same size as data, and the same shape as data if axis is not None.
If axis is None, the result is a 1-d array.
"""
return scanop(
data=data,
binop=tvm.tir.generic.multiply,
identity_value=1,
axis=axis,
dtype=dtype,
exclusive=exclusive,
)
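# Usage sketch (an illustration only, not part of the public API of this module): how the
# exclusive cumsum above might be wired into a TE graph and scheduled. The shape, dtype,
# and the "cuda" target below are assumptions made for the sake of the example.
def _example_exclusive_cumsum_graph():
    x = te.placeholder((4, 128), name="x", dtype="float32")
    with tvm.target.Target("cuda"):
        # exclusive=True routes through exclusive_scan and returns a te.extern tensor
        y = cumsum(x, axis=1, exclusive=True)
        s = schedule_scan([y])
    return s, [x, y]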
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison, unused-argument
"""Scatter operator """
import tvm
from tvm import te, autotvm
from ..scatter import _verify_scatter_nd_inputs
from ..generic import schedule_extern
from .nms import atomic_add
from .sort import stable_sort_by_key_thrust
from ..utils import prod, ceil_div
def _memcpy_ir(ib, out_ptr, data_ptr, shape):
fused = prod(shape)
with ib.new_scope():
num_thread = int(tvm.target.Target.current(allow_none=False).max_num_threads)
num_blocks = ceil_div(fused, num_thread)
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(bx, "thread_extent", num_blocks)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", num_thread)
tid = bx * num_thread + tx
with ib.if_scope(tid < fused):
out_ptr[tid] = data_ptr[tid]
def gen_ir_1d(data, indices, updates, axis, out, update_func):
"""Generate scatter ir for 1d inputs
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices : tir.Tensor
The index locations to update.
updates : tir.Tensor
The values to update.
axis : int
The axis to scatter on
out : tir.Tensor
The output tensor.
update_func: function
The function to be applied to a destination and the corresponding update.
Returns
-------
ret : tir
The computational ir.
"""
assert axis == 0
n = data.shape[0]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
_memcpy_ir(ib, out_ptr, data_ptr, data.shape)
indices_ptr = ib.buffer_ptr(indices)
updates_ptr = ib.buffer_ptr(updates)
ni = indices.shape[0]
with ib.new_scope():
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(bx, "thread_extent", 1)
with ib.for_range(0, ni, name="i") as i:
index = indices_ptr[i]
with ib.if_scope(index < 0):
update_func(out_ptr, index + n, updates_ptr[i])
with ib.else_scope():
update_func(out_ptr, index, updates_ptr[i])
return ib.get()
def gen_ir_2d(data, indices, updates, axis, out, update_func):
"""Generate scatter ir for 2d inputs
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices : tir.Tensor
The index locations to update.
updates : tir.Tensor
The values to update.
axis : int
The axis to scatter on
out : tir.Tensor
The output tensor.
update_func: function
The function to be applied to a destination and the corresponding update
Returns
-------
ret : tir
The computational ir.
"""
n = data.shape[0]
c = data.shape[1]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
_memcpy_ir(ib, out_ptr, data_ptr, data.shape)
indices_ptr = ib.buffer_ptr(indices)
updates_ptr = ib.buffer_ptr(updates)
ni = indices.shape[0]
ci = indices.shape[1]
if axis == 0:
with ib.new_scope():
j = te.thread_axis("blockIdx.x")
ib.scope_attr(j, "thread_extent", ci)
with ib.for_range(0, ni, name="i") as i:
idx = i * ci + j
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, (index + n) * c + j, updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, index * c + j, updates_ptr[idx])
else:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
with ib.for_range(0, ci, name="j") as j:
idx = i * ci + j
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, i * c + (index + c), updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, i * c + index, updates_ptr[idx])
return ib.get()
def gen_ir_3d(data, indices, updates, axis, out, update_func):
"""Generate scatter ir for 3d inputs
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices : tir.Tensor
The index locations to update.
updates : tir.Tensor
The values to update.
axis : int
The axis to scatter on
out : tir.Tensor
The output tensor.
update_func: function
The function to be applied to a destination and the corresponding update
Returns
-------
ret : tir
The computational ir.
"""
warp_size = tvm.target.Target.current(False).thread_warp_size
n = data.shape[0]
c = data.shape[1]
h = data.shape[2]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
_memcpy_ir(ib, out_ptr, data_ptr, data.shape)
indices_ptr = ib.buffer_ptr(indices)
updates_ptr = ib.buffer_ptr(updates)
ni = indices.shape[0]
ci = indices.shape[1]
hi = indices.shape[2]
if axis == 0:
with ib.new_scope():
j = te.thread_axis("blockIdx.x")
ib.scope_attr(j, "thread_extent", ci)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
with ib.for_range(0, ni, name="i") as i:
with ib.for_range(0, ceil_div(hi, warp_size), name="k") as k_:
k = k_ * warp_size + tx
with ib.if_scope(k < hi):
idx = (i * ci + j) * hi + k
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, ((index + n) * c + j) * h + k, updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, (index * c + j) * h + k, updates_ptr[idx])
elif axis == 1:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
with ib.for_range(0, ci, name="j") as j:
with ib.for_range(0, ceil_div(hi, warp_size), name="k") as k_:
k = k_ * warp_size + tx
with ib.if_scope(k < hi):
idx = (i * ci + j) * hi + k
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, (i * c + (index + c)) * h + k, updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, (i * c + index) * h + k, updates_ptr[idx])
else:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
j = te.thread_axis("blockIdx.y")
ib.scope_attr(j, "thread_extent", ci)
with ib.for_range(0, hi, name="k") as k:
idx = (i * ci + j) * hi + k
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, (i * c + j) * h + (index + h), updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, (i * c + j) * h + index, updates_ptr[idx])
return ib.get()
def gen_ir_4d(data, indices, updates, axis, out, update_func):
"""Generate scatter ir for 4d inputs
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices : tir.Tensor
The index locations to update.
updates : tir.Tensor
The values to update.
axis : int
The axis to scatter on
out : tir.Tensor
The output tensor.
update_func: function
The function to be applied to a destination and the corresponding update
Returns
-------
ret : tir
The computational ir.
"""
warp_size = tvm.target.Target.current(False).thread_warp_size
n = data.shape[0]
c = data.shape[1]
h = data.shape[2]
w = data.shape[3]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
_memcpy_ir(ib, out_ptr, data_ptr, data.shape)
indices_ptr = ib.buffer_ptr(indices)
updates_ptr = ib.buffer_ptr(updates)
ni = indices.shape[0]
ci = indices.shape[1]
hi = indices.shape[2]
wi = indices.shape[3]
if axis == 0:
with ib.new_scope():
j = te.thread_axis("blockIdx.y")
ib.scope_attr(j, "thread_extent", ci)
k = te.thread_axis("blockIdx.z")
ib.scope_attr(k, "thread_extent", hi)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
with ib.for_range(0, ni, name="i") as i:
with ib.for_range(0, ceil_div(wi, warp_size), name="l") as l_:
l = l_ * warp_size + tx
with ib.if_scope(l < wi):
idx = ((i * ci + j) * hi + k) * wi + l
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(
out_ptr, (((index + n) * c + j) * h + k) * w + l, updates_ptr[idx]
)
with ib.else_scope():
update_func(
out_ptr, ((index * c + j) * h + k) * w + l, updates_ptr[idx]
)
elif axis == 1:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
k = te.thread_axis("blockIdx.z")
ib.scope_attr(k, "thread_extent", hi)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
with ib.for_range(0, ci, name="j") as j:
with ib.for_range(0, ceil_div(wi, warp_size), name="l") as l_:
l = l_ * warp_size + tx
with ib.if_scope(l < wi):
idx = ((i * ci + j) * hi + k) * wi + l
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(
out_ptr, ((i * c + (index + c)) * h + k) * w + l, updates_ptr[idx]
)
with ib.else_scope():
update_func(
out_ptr, ((i * c + index) * h + k) * w + l, updates_ptr[idx]
)
elif axis == 2:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
j = te.thread_axis("blockIdx.y")
ib.scope_attr(j, "thread_extent", ci)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
with ib.for_range(0, hi, name="k") as k:
with ib.for_range(0, ceil_div(wi, warp_size), name="l") as l_:
l = l_ * warp_size + tx
with ib.if_scope(l < wi):
idx = ((i * ci + j) * hi + k) * wi + l
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(
out_ptr, ((i * c + j) * h + (index + h)) * w + l, updates_ptr[idx]
)
with ib.else_scope():
update_func(
out_ptr, ((i * c + j) * h + index) * w + l, updates_ptr[idx]
)
else:
with ib.new_scope():
i = te.thread_axis("blockIdx.x")
ib.scope_attr(i, "thread_extent", ni)
j = te.thread_axis("blockIdx.y")
ib.scope_attr(j, "thread_extent", ci)
k = te.thread_axis("blockIdx.z")
ib.scope_attr(k, "thread_extent", hi)
with ib.for_range(0, wi, name="l") as l:
idx = ((i * ci + j) * hi + k) * wi + l
index = indices_ptr[idx]
with ib.if_scope(index < 0):
update_func(out_ptr, ((i * c + j) * h + k) * w + (index + w), updates_ptr[idx])
with ib.else_scope():
update_func(out_ptr, ((i * c + j) * h + k) * w + index, updates_ptr[idx])
return ib.get()
@autotvm.register_topi_compute("scatter.cuda")
def scatter(cfg, data, indices, updates, axis=0):
"""Update data at positions defined by indices with values in updates
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to update.
axis : int
The axis to scatter on
Returns
-------
ret : relay.Expr
The computed result.
"""
if axis < 0:
axis += len(data.shape)
assert axis >= 0
assert axis < len(data.shape)
rank = len(data.shape)
assert 1 <= rank <= 4, "scatter only supports 1-4 dimensions"
ir_funcs = {
1: gen_ir_1d,
2: gen_ir_2d,
3: gen_ir_3d,
4: gen_ir_4d,
}
def update_func(dst_ptr, dst_index, update):
dst_ptr[dst_index] = update
out_shape = data.shape
out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
cfg.add_flop(1) # A dummy value to satisfy AutoTVM
out = te.extern(
[out_shape],
[data, indices, updates],
lambda ins, outs: ir_funcs[rank](ins[0], ins[1], ins[2], axis, outs[0], update_func),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_gpu",
tag="scatter_gpu",
)
return out
@autotvm.register_topi_schedule("scatter.cuda")
def schedule_scatter(_, outs):
return schedule_extern(outs)
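# The per-rank IR builders above implement the semantics of numpy's put_along_axis. The
# helper below is a NumPy reference of those semantics (illustration only, not used by the
# kernels); numpy is imported locally since this module does not otherwise depend on it.
def _scatter_reference_numpy(data, indices, updates, axis=0):
    """Reference-only scatter: out[..., indices[..., i, ...], ...] = updates[...], where
    `indices` selects positions along `axis` and all other coordinates are kept."""
    import numpy as np
    out = np.array(data, copy=True)
    np.put_along_axis(out, np.asarray(indices), np.asarray(updates), axis=axis)
    return out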
def gen_scatter_1d_thrust(data, indices_sorted, updates_sorted, out):
"""Generate scatter ir for 1d inputs, using a sorting based approach.
    By sorting indices and comparing two neighboring indices, we can tell which
    elements in the indices tensor may scatter their update values into the output.
Sorting of indices, and sorting of updates with respect to indices, can be done
at the same time by thrust's sort_by_key function. It is important that sorting
be done in a "stable" way via stable_sort, to guarantee deterministic output.
Negative indices are assumed to have been converted to corresponding positive
indices.
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices_sorted : tir.Tensor
The sorted index locations to update.
updates : tir.Tensor
The values to update, sorted by indices.
out : tir.Tensor
The output tensor.
Returns
-------
ret : tir
The computational ir.
"""
n = data.shape[0]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
with ib.new_scope():
nthread_bx = ceil_div(n, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * nthread_tx + tx
with ib.if_scope(tid < n):
out_ptr[tid] = data_ptr[tid]
indices_ptr = ib.buffer_ptr(indices_sorted)
updates_ptr = ib.buffer_ptr(updates_sorted)
ni = indices_sorted.shape[0]
with ib.new_scope():
nthread_bx = ceil_div(ni, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * nthread_tx + tx
with ib.if_scope(tid == ni - 1):
# The last element can always update.
index = indices_ptr[tid]
update = updates_ptr[tid]
out_ptr[index] = update
with ib.else_scope():
with ib.if_scope(tid < ni - 1):
index = indices_ptr[tid]
index_next = indices_ptr[tid + 1]
# If the next neighbor in the sorted list of indices has a different index,
# that means thread tid is the last one to have this index.
# This thread can update the output.
with ib.if_scope(index != index_next):
update = updates_ptr[tid]
out_ptr[index] = update
return ib.get()
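# A pure-Python sketch of the "last duplicate wins" rule implemented by the kernel above
# (illustration only): once indices are stably sorted, an element may write its update
# iff its right-hand neighbour carries a different index.
def _scatter_sorted_reference(data, indices_sorted, updates_sorted):
    """Reference-only 1-D scatter over pre-sorted indices; negative indices are assumed
    to have already been converted to their positive counterparts."""
    out = list(data)
    n = len(indices_sorted)
    for i in range(n):
        if i == n - 1 or indices_sorted[i] != indices_sorted[i + 1]:
            out[indices_sorted[i]] = updates_sorted[i]
    return out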
@autotvm.register_topi_compute("scatter_via_sort.cuda")
def scatter_via_sort(cfg, data, indices, updates, axis=0):
"""Update data at positions defined by indices with values in updates
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to update.
axis : int
The axis to scatter on
Returns
-------
ret : relay.Expr
The computed result.
"""
if axis < 0:
axis += len(data.shape)
assert axis == 0 and len(data.shape) == 1, "sorting based scatter only supported for 1d input"
cfg.add_flop(1) # A dummy value to satisfy AutoTVM
out_shape = data.shape
out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
indices_sorted, updates_sorted = stable_sort_by_key_thrust(indices, updates, for_scatter=True)
out = te.extern(
[out_shape],
[data, indices_sorted, updates_sorted],
lambda ins, outs: gen_scatter_1d_thrust(ins[0], ins[1], ins[2], outs[0]),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_via_sort_gpu",
tag="scatter_via_sort_gpu",
)
return out
@autotvm.register_topi_schedule("scatter_via_sort.cuda")
def schedule_scatter_via_sort(_, outs):
return schedule_extern(outs)
def gen_scatter_add_1d_atomic(data, indices, updates, axis, out, _):
"""Generate scatter add ir for 1d inputs, using atomic_add instruction
Parameters
----------
data : tir.Tensor
The input data to the operator.
indices : tir.Tensor
The index locations to update.
updates : tir.Tensor
The values to update.
axis : int
The axis to scatter on
out : tir.Tensor
The output tensor.
Returns
-------
ret : tir
The computational ir.
"""
assert axis == 0
n = data.shape[0]
ib = tvm.tir.ir_builder.create()
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
with ib.new_scope():
nthread_bx = ceil_div(n, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * nthread_tx + tx
with ib.if_scope(tid < n):
out_ptr[tid] = data_ptr[tid]
indices_ptr = ib.buffer_ptr(indices)
updates_ptr = ib.buffer_ptr(updates)
ni = indices.shape[0]
atomic_add_return = ib.allocate(updates.dtype, (1,), name="atomic_add_return", scope="local")
with ib.new_scope():
nthread_bx = ceil_div(ni, nthread_tx)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * nthread_tx + tx
with ib.if_scope(tid < ni):
index = indices_ptr[tid]
with ib.if_scope(index < 0):
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", out_ptr[index + n]),
updates_ptr[tid],
)
with ib.else_scope():
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", out_ptr[index]),
updates_ptr[tid],
)
return ib.get()
def scatter_add(data, indices, updates, axis=0):
"""Update data by adding values in updates at positions defined by indices
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to be added.
axis : int
The axis to scatter on
Returns
-------
ret : relay.Expr
The computed result.
"""
if axis < 0:
axis += len(data.shape)
assert axis >= 0
assert axis < len(data.shape)
rank = len(data.shape)
assert 1 <= rank <= 4, "scatter_add only supports 1-4 dimensions"
ir_funcs = {
1: gen_scatter_add_1d_atomic,
2: gen_ir_2d,
3: gen_ir_3d,
4: gen_ir_4d,
}
def update_func(dst_ptr, dst_index, update):
dst_ptr[dst_index] += update
out_shape = data.shape
out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
out = te.extern(
[out_shape],
[data, indices, updates],
lambda ins, outs: ir_funcs[rank](ins[0], ins[1], ins[2], axis, outs[0], update_func),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_add_gpu",
tag="scatter_add_gpu",
)
return out
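# NumPy reference of the scatter_add semantics above (illustration only, not used by the
# kernels): duplicate indices accumulate instead of overwriting, which is why the 1-D CUDA
# path relies on atomic_add. numpy is imported locally; this module does not otherwise use it.
def _scatter_add_reference_numpy(data, indices, updates, axis=0):
    import numpy as np
    out = np.array(data, copy=True)
    indices = np.asarray(indices)
    updates = np.asarray(updates)
    for idx in np.ndindex(*indices.shape):
        coord = list(idx)
        coord[axis] = indices[idx]
        out[tuple(coord)] += updates[idx]
    return out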
def scatter_nd(data, indices, updates, mode):
"""Scatter elements from a n-dimension array.
Given updates with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape
(M, Y_0, ..., Y_{K-1}), and output copied from data with shape (X_0, X_1, ..., X_{N-1}),
scatter_nd computes
.. code-block::
output[indices[0, y_0, ..., y_{K-1}],
...,
indices[M-1, y_0, ..., y_{K-1}],
x_M,
...,
x_{N-1}
] = f(output[...], updates[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}])
    where the update function f is determined by the mode.
Parameters
----------
data : tvm.te.Tensor
The source array.
indices : tvm.te.Tensor
The indices of the values to extract.
updates : tvm.te.Tensor
The updates to apply at the Indices
mode : string
The update mode for the algorithm, either "update" or "add"
If update, the update values will replace the input data
If add, the update values will be added to the input data
Returns
-------
ret : tvm.te.Tensor
"""
_verify_scatter_nd_inputs(data, indices, updates)
def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr):
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
indices = ib.buffer_ptr(indices_ptr)
updates = ib.buffer_ptr(updates_ptr)
out = ib.buffer_ptr(out_ptr)
atomic_add_return = ib.allocate(
updates.dtype, (1,), name="atomic_add_return", scope="local"
)
fused_indices_dimension = 1
for i in indices_ptr.shape[1:]:
fused_indices_dimension *= i
fused_updates_dimension = 1
for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]:
fused_updates_dimension *= i
fused_shape = 1
for i in data_ptr.shape:
fused_shape *= i
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
tdim = min(max_threads, fused_updates_dimension)
with ib.new_scope():
bdim = ceil_div(fused_shape, tdim)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", bdim)
ib.scope_attr(tx, "thread_extent", tdim)
index = bx * tdim + tx
with ib.if_scope(index < fused_shape):
out[index] = data[index]
# For better performance, we introduce blockIdx.y to implement for-loops
# within one thread.
# The code is parallel over the scattered indices, so we use atomic_add
# to guarantee correctness when mode=="add"
        # For now, atomic add is not supported on the "vulkan" and "metal" targets, or
        # with "int64" updates on CUDA, so we fall back to the normal algorithm and
        # use "+=" rather than atomic_add.
# TODO (CaptainDuke):
        # Multiple threads may compete for the same write index, which leads to
        # non-deterministic output in "update" mode. We could add a new attribute,
        # "allow_non_deterministic", which can be conditionally set to True by
        # each frontend when non-determinism is allowed.
cur_target_kind = str(tvm.target.Target.current(allow_none=False).kind)
with ib.new_scope():
if (
mode == "add"
and cur_target_kind not in ["vulkan", "metal"]
and updates.dtype in ["int32", "float32"]
):
bdim_x = fused_indices_dimension
bdim_y = ceil_div(fused_updates_dimension, tdim)
# In case of large input sizes, fused_indices_dimension might be too large.
                # So we use blockIdx.x for it, since blockIdx.x can hold a larger extent.
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", bdim_x)
ib.scope_attr(by, "thread_extent", bdim_y)
ib.scope_attr(tx, "thread_extent", tdim)
j = by * tdim + tx
with ib.if_scope(j < fused_updates_dimension):
offset = fused_updates_dimension
index = j # This is x_M, .. x_{N-1} part of the index into out.
# Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}]
# part of the index into out.
up_index = bx * fused_updates_dimension + j
for l in reversed(range(indices_ptr.shape[0].value)):
                        # indices[bx + l * fused_indices_dimension] = indices[l, y_0, ... y_{k-1}]
index += offset * indices[bx + l * fused_indices_dimension]
offset *= data_ptr.shape[l]
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", out[index]),
updates[up_index],
)
else:
bdim_x = ceil_div(fused_updates_dimension, tdim)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", bdim_x)
ib.scope_attr(tx, "thread_extent", tdim)
with ib.for_range(0, fused_indices_dimension) as i:
j = bx * tdim + tx
with ib.if_scope(j < fused_updates_dimension):
offset = fused_updates_dimension
index = j # This is x_M, .. x_{N-1} part of the index into out.
# Build up the
# indices[0, y_0, .. y_{K-1}], ... indices[M-1, y_0, .. y_{K-1}]
# part of the index into out.
for l in reversed(range(indices_ptr.shape[0].value)):
                            # indices[i + l * fused_indices_dimension] = indices[l, y_0,
# ... y_{k-1}]
index += offset * indices[i + l * fused_indices_dimension]
offset *= data_ptr.shape[l]
if mode == "update":
out[index] = updates[i * fused_updates_dimension + j]
elif mode == "add":
out[index] += updates[i * fused_updates_dimension + j]
else:
raise NotImplementedError("scatter_nd mode not in [update, add]:", mode)
return ib.get()
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf")
return te.extern(
[data.shape],
[data, indices, updates],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_nd_cuda",
tag="scatter_nd_cuda",
)
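# NumPy reference of the scatter_nd indexing rule quoted in the docstring above
# (illustration only, not used by the kernel): `indices` has shape (M, Y_0, ..., Y_{K-1})
# and picks the leading-M coordinates of the output slice that receives each update slice.
def _scatter_nd_reference_numpy(data, indices, updates, mode="update"):
    import numpy as np
    out = np.array(data, copy=True)
    indices = np.asarray(indices)
    updates = np.asarray(updates)
    num_index_dims = indices.shape[0]
    for y in np.ndindex(*indices.shape[1:]):
        coord = tuple(indices[(l,) + y] for l in range(num_index_dims))
        if mode == "update":
            out[coord] = updates[y]
        elif mode == "add":
            out[coord] += updates[y]
        else:
            raise NotImplementedError("scatter_nd mode not in [update, add]:", mode)
    return out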
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""searchsorted operator for GPU"""
import tvm
from tvm import te
from .. import utils
from ..searchsorted import binary_search
def searchsorted(sorted_sequence, values, right, out_dtype="int64"):
"""Find indices where elements should be inserted to maintain order.
If `sorted_sequence` is N-dimensional, the innermost dimension of
`values` are searched in the corresponding dimension of `sorted_sequence`.
Parameters
----------
sorted_sequence : te.Tensor
N-D or 1-D Tensor, containing monotonically increasing sequence
on the innermost dimension.
values : te.Tensor
N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
and `values` must be the same, and outer N-1 axes must have the same size.
right : bool, optional
Controls which index is returned if a value lands exactly on one of sorted values. If
False, the index of the first suitable location found is given. If true, return the
last such index. If there is no suitable index, return either 0 or N (where N is the
size of the innermost dimension).
    out_dtype : string, optional
        The data type of the output indices.
Returns
-------
indices : te.Tensor
Tensor with same shape as values, representing the indices of
elements of `values` if they are inserted in `sorted_sequence`.
"""
def ir(sorted_sequence, values, indices):
ib = tvm.tir.ir_builder.create()
sorted_sequence_shape = sorted_sequence.shape
values_shape = values.shape
num_search = utils.prod(values_shape)
search_range = sorted_sequence_shape[-1]
sorted_sequence = ib.buffer_ptr(sorted_sequence)
values = ib.buffer_ptr(values)
indices = ib.buffer_ptr(indices)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(
bx, "thread_extent", tvm.tir.indexdiv(num_search + max_threads - 1, max_threads)
)
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_search):
if len(sorted_sequence_shape) == 1:
sequence_offset = 0
else:
sequence_id = tid // values_shape[-1]
sequence_offset = sequence_id * search_range
indices[tid] = binary_search(
ib,
sequence_offset,
search_range,
sorted_sequence,
values[tid],
right,
out_dtype,
)
return ib.get()
return te.extern(
values.shape,
[sorted_sequence, values],
lambda ins, outs: ir(ins[0], ins[1], outs[0]),
name="searchsorted",
dtype=out_dtype,
)
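# For the 1-D case the kernel above matches numpy.searchsorted; the helper below records
# that correspondence as a reference (illustration only, not used by the kernel). numpy is
# imported locally since this module does not otherwise depend on it.
def _searchsorted_reference_numpy(sorted_sequence, values, right=False):
    import numpy as np
    side = "right" if right else "left"
    return np.searchsorted(np.asarray(sorted_sequence), np.asarray(values), side=side)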
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, trailing-whitespace
"""Schedule for softmax operator"""
from tvm.target import Target
from tvm import te
from tvm.contrib import cudnn
from .. import generic
from .injective import schedule_injective_from_existing
from ..utils import get_const_int, traverse_inline
def _schedule_softmax(softmax_op, s, outs, tgt):
op_tag = softmax_op.tag
axis = get_const_int(softmax_op.attrs["axis"]) # reduce axis
if op_tag == "softmax_output":
expsum = softmax_op.input_tensors[1]
exp = softmax_op.input_tensors[0]
max_elem = s[exp].op.input_tensors[1]
delta = None
elif op_tag == "fast_softmax_output":
expsum = softmax_op.input_tensors[1]
exp = softmax_op.input_tensors[0]
delta = s[exp].op.input_tensors[0]
max_elem = s[delta].op.input_tensors[1]
elif op_tag == "log_softmax_output":
exp = None
delta = None
max_elem = softmax_op.input_tensors[1]
expsum = softmax_op.input_tensors[2]
else:
raise ValueError(
"Tag is expected to be softmax_output or log_softmax_output. \
Got {0}".format(
op_tag
)
)
    # The nvptx and rocm backends only support 32-bit warp shuffle
# instructions.
#
# TODO(tvm-team) Fix nvptx codegen or deprecate nvptx backend.
def sched_warp_softmax():
if tgt.kind.name in ["nvptx", "rocm"]:
dtype = softmax_op.output(0).dtype
return dtype in ["float32", "int32"]
if tgt.kind.name != "cuda":
# this is used as the gpu schedule for other arches which
# may not have warp reductions
return False
return True
if len(outs[0].shape) != 2:
ops = [max_elem.op, expsum.op, softmax_op]
if delta is not None:
ops.append(delta.op)
if exp is not None:
ops.append(exp.op)
if softmax_op != outs[0].op:
ops.append(outs[0].op)
for op in ops:
s = schedule_injective_from_existing(s, op.output(0))
elif sched_warp_softmax():
# A warp of 32 threads performs a row reduction.
num_thread = tgt.thread_warp_size
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
# (4) softmax
output = outs[0]
xo, xi = s[output].split(output.op.axis[axis], nparts=num_thread)
xio, xii = s[output].split(xi, factor=4)
s[output].vectorize(xii)
s[output].bind(xo, thread_x)
s[output].bind(output.op.axis[axis ^ 1], block_x)
s[output].reorder(output.op.axis[axis ^ 1], xo, xio, xii)
if softmax_op != outs[0].op:
s[softmax_op].compute_at(s[output], xio)
s[softmax_op].vectorize(softmax_op.axis[axis]) # vec_len == 4
# (3) expsum
k = expsum.op.reduce_axis[0]
ko, _ = s[expsum].split(k, nparts=num_thread)
s[expsum].bind(ko, thread_x)
s[expsum].compute_at(s[output], xo)
# (2) exp
if delta is not None:
s[exp].compute_inline()
s[delta].compute_inline()
elif exp is not None:
xo, xi = s[exp].split(exp.op.axis[axis], nparts=num_thread)
_, xii = s[exp].split(xi, factor=4)
s[exp].vectorize(xii)
s[exp].bind(xo, thread_x)
s[exp].compute_at(s[expsum], expsum.op.axis[0])
s[exp].compute_at(s[output], output.op.axis[axis ^ 1])
s[exp].set_scope("warp")
# (1) max_elem
k = max_elem.op.reduce_axis[0]
ko, _ = s[max_elem].split(k, nparts=num_thread)
s[max_elem].bind(ko, thread_x)
if exp is not None and delta is None:
s[max_elem].compute_at(s[exp], xo)
else:
s[max_elem].bind(ko, thread_x)
s[max_elem].bind(max_elem.op.axis[0], block_x)
else:
num_thread = 64
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
if delta is not None:
s[exp].compute_inline()
s[delta].compute_inline()
elif exp is not None:
s[exp].bind(exp.op.axis[axis ^ 1], block_x)
s[max_elem].bind(max_elem.op.axis[0], block_x)
k = expsum.op.reduce_axis[0]
ko, ki = s[expsum].split(k, factor=num_thread)
EF = s.rfactor(expsum, ki)
s[expsum].bind(s[expsum].op.axis[0], block_x)
s[expsum].bind(s[expsum].op.reduce_axis[0], thread_x)
s[EF].compute_at(s[expsum], s[expsum].op.reduce_axis[0])
s[expsum].set_store_predicate(thread_x.var.equal(0))
output = outs[0]
tx, xi = s[output].split(output.op.axis[axis], nparts=num_thread)
s[output].bind(output.op.axis[axis ^ 1], block_x)
s[output].bind(tx, thread_x)
s[output].reorder(output.op.axis[axis ^ 1], tx, xi)
if softmax_op != outs[0].op:
s[softmax_op].compute_at(s[output], tx)
def schedule_softmax(outs):
"""Schedule for softmax op.
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
tgt = Target.current(allow_none=False)
def _callback(op):
if "softmax" in op.tag:
_schedule_softmax(op, s, outs, tgt)
traverse_inline(s, outs[0].op, _callback)
return s
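# Usage sketch (illustration only, not part of the public API): scheduling a plain 2-D
# softmax with the generic schedule above. The shape, dtype, and the "cuda" target are
# assumptions; under "cuda" the warp-shuffle path selected by sched_warp_softmax is taken.
def _example_schedule_softmax():
    from tvm import topi  # local import to keep this sketch self-contained
    x = te.placeholder((4, 256), name="x", dtype="float32")
    y = topi.nn.softmax(x, axis=1)
    with Target("cuda"):
        s = schedule_softmax([y])
    return s, [x, y]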
def softmax_cudnn(x, axis=-1):
"""Perform softmax on the data using cudnn"""
return cudnn.softmax(x, axis)
def schedule_softmax_cudnn(outs):
"""Schedule for softmax cudnn op"""
return generic.schedule_extern(outs)
def log_softmax_cudnn(x, axis=-1):
"""Perform log_softmax on the data using cudnn"""
return cudnn.log_softmax(x, axis)
def schedule_log_softmax_cudnn(outs):
"""Schedule for log_softmax cudnn op"""
return generic.schedule_extern(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison, unused-argument, no-else-return
"""Sort related operators """
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from ..transform import strided_slice, transpose
from .. import tag
from ..utils import ceil_div, swap
from ..math import cast, ceil_log2
def _schedule_sort(outs):
"""Schedule for argsort operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of argsort
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_injective(op.tag):
schedule_injective_from_existing(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
for out in outs:
traverse(out.op)
return s
def _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz):
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
by = te.thread_axis("blockIdx.y")
bz = te.thread_axis("blockIdx.z")
ib.scope_attr(by, "thread_extent", nthread_by)
ib.scope_attr(bz, "thread_extent", nthread_bz)
return tx, bx, by, bz
def _sort_init(ib, shape, axis, keys_in, keys_out, values_out=None, value_init_func=None):
"""Initialize the output buffers by copying from inputs"""
axis_mul_before = 1
axis_mul_after = 1
if axis < 0:
axis = len(shape) + axis
for i, value in enumerate(shape, 0):
if i < axis:
axis_mul_before *= value
elif i > axis:
axis_mul_after *= value
# Set up threading
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = ceil_div(shape[axis], max_threads)
nthread_by = axis_mul_before
nthread_bz = axis_mul_after
# Copy the keys_in to initial output
with ib.new_scope():
tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
tid = bx * nthread_tx + tx
idx = (by * shape[axis] + tid) * axis_mul_after + bz
with ib.if_scope(tid < shape[axis]):
keys_out[idx] = keys_in[idx]
if values_out is not None:
values_out[idx] = value_init_func(idx, tid)
return axis_mul_before, axis_mul_after
## TODO(mbrookhart): These are effectively optimization hyperparameters
## Perhaps we can autotune?
block_size = 128
thread_work = 4
def _odd_even_sort(
ib,
size,
axis_mul_before,
axis_mul_after,
is_ascend,
keys,
keys_swap,
values=None,
values_swap=None,
):
nthread_tx = block_size // 2
nthread_bx = ceil_div(size, block_size)
nthread_by = axis_mul_before
nthread_bz = axis_mul_after
with ib.new_scope():
ib.scope_attr(tvm.tir.const(0), "hand_threaded", 0)
tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
tid = 2 * tx
start = bx * block_size
## Create shared memory as syncable thread scratch space
tmp_keys_swap = ib.allocate(
keys_swap.dtype,
(block_size,),
name="temp_keys_swap",
scope="shared",
)
if values_swap is not None:
tmp_values_swap = ib.allocate(
values_swap.dtype,
(block_size,),
name="temp_values_swap",
scope="shared",
)
## Create thread local data for swapping
temp_keys = ib.allocate(keys_swap.dtype, (1,), name="temp_keys", scope="local")
if values_swap is not None:
temp_values = ib.allocate(values_swap.dtype, (1,), name="temp_values", scope="local")
temp_cond1 = ib.allocate(keys_swap.dtype, (1,), name="temp_cond1", scope="local")
temp_cond2 = ib.allocate(keys_swap.dtype, (1,), name="temp_cond2", scope="local")
# Copy data to scratch space
base_idx = by * size * axis_mul_after + bz
with ib.for_range(0, 2) as n:
with ib.if_scope((tid + n + start) < size):
tmp_keys_swap[tid + n] = keys[base_idx + (tid + n + start) * axis_mul_after]
if values_swap is not None:
tmp_values_swap[tid + n] = values[base_idx + (tid + n + start) * axis_mul_after]
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
idxm = tvm.tir.indexmod
# OddEvenTransposeSort
current_sort_num = tvm.tir.min(block_size, size - start)
with ib.for_range(0, current_sort_num) as k:
n = idxm(tid + k, 2)
with ib.if_scope(tid + n < current_sort_num - 1):
temp_cond1[0] = tmp_keys_swap[tid + n]
temp_cond2[0] = tmp_keys_swap[tid + n + 1]
if is_ascend:
cond = temp_cond1[0] > temp_cond2[0]
else:
cond = temp_cond1[0] < temp_cond2[0]
with ib.if_scope(cond):
temp_keys[0] = tmp_keys_swap[tid + n]
tmp_keys_swap[tid + n] = tmp_keys_swap[tid + n + 1]
tmp_keys_swap[tid + n + 1] = temp_keys[0]
if values_swap is not None:
temp_values[0] = tmp_values_swap[tid + n]
tmp_values_swap[tid + n] = tmp_values_swap[tid + n + 1]
tmp_values_swap[tid + n + 1] = temp_values[0]
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
## Copy sorted data to output
with ib.for_range(0, 2) as n:
with ib.if_scope(tid + n + start < size):
keys[base_idx + (tid + n + start) * axis_mul_after] = tmp_keys_swap[tid + n]
keys_swap[base_idx + (tid + n + start) * axis_mul_after] = tmp_keys_swap[tid + n]
if values_swap is not None:
values[base_idx + (tid + n + start) * axis_mul_after] = tmp_values_swap[tid + n]
values_swap[base_idx + (tid + n + start) * axis_mul_after] = tmp_values_swap[
tid + n
]
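# The shared-memory kernel above is an odd-even transpose sorting network; the helper
# below is a pure-Python reference of that network (illustration only): after n
# alternating even/odd phases a block of n elements is sorted.
def _odd_even_transpose_sort_reference(seq):
    vals = list(seq)
    n = len(vals)
    for phase in range(n):
        start = phase % 2
        for i in range(start, n - 1, 2):
            if vals[i] > vals[i + 1]:
                vals[i], vals[i + 1] = vals[i + 1], vals[i]
    return vals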
def _sort_common(
ib,
size,
axis_mul_before,
axis_mul_after,
is_ascend,
keys,
keys_swap,
values=None,
values_swap=None,
):
"""Either sort only values or sort values by keys."""
## This function performs a multi-level mergesort
## For blocks of length <= block_size, it does odd-even transpose sort
## in GPU shared memory
    ## For intermediate block sizes (> block_size, < max_threads * thread_work)
    ## it uses the mergepath algorithm https://arxiv.org/abs/1406.2628
## to merge blocks in parallel
## At some point, the size of the blocks to be merged is too big for max_threads
## and we switch to using a dual-level mergepath where the outer mergepath
## finds the start/end locations of the inner mergepath so that we can split
## the merge into more blocks
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_by = axis_mul_before * axis_mul_after
nthread_bz = 1
nthread_tx = max_threads
nthread_bx = ceil_div(size, nthread_tx)
def compare(a, b):
"""
Compare a and b in proper ascending or descending order
"""
if is_ascend:
out = a <= b
else:
out = b <= a
return out
# Sort the lower levels of the merge using odd-even sort, it's fast for small inputs
lower_lim = ceil_log2(block_size)
_odd_even_sort(
ib,
size,
axis_mul_before * axis_mul_after,
1,
is_ascend,
keys,
keys_swap,
values,
values_swap,
)
upper_lim = ceil_log2(size)
def get_merge_begin(source, base_idx, aCount, bCount, aStart, bStart, diag, step_count):
first = ib.allocate("int64", (1,), name="first", scope="local")
mid = ib.allocate("int64", (1,), name="mid", scope="local")
last = ib.allocate("int64", (1,), name="last", scope="local")
first[0] = tvm.te.max(0, diag - bCount)
last[0] = tvm.te.min(diag, aCount)
with ib.while_loop(first[0] < last[0]):
mid = (first[0] + last[0]) >> 1
a = source[base_idx + (aStart + mid)]
b = source[base_idx + (bStart + diag - 1 - mid)]
with ib.if_scope(compare(a, b)):
first[0] = mid + 1
with ib.else_scope():
last[0] = mid
return first[0], last[0]
def serial_merge(
source,
dest,
source_idx,
dest_idx,
base_idx,
aCount,
bCount,
aStart,
bStart,
kStart,
diag,
step_count,
first,
last,
):
i = ib.allocate("int64", (1,), name="i", scope="local")
j = ib.allocate("int64", (1,), name="j", scope="local")
i[0] = aStart + first
j[0] = bStart + diag - last
with ib.for_range(0, tvm.te.min(aCount + bCount - diag, step_count)) as count:
i_idx = base_idx + i[0]
j_idx = base_idx + j[0]
k_idx = base_idx + (kStart + diag + count)
def assign_i():
"""assign i value to current output"""
dest[k_idx] = source[i_idx]
if values is not None:
dest_idx[k_idx] = source_idx[i_idx]
i[0] += 1
def assign_j():
"""assign j value to current output"""
dest[k_idx] = source[j_idx]
if values is not None:
dest_idx[k_idx] = source_idx[j_idx]
j[0] += 1
## if both of the iterators are in range
with ib.if_scope(tvm.tir.all(i[0] < aStart + aCount, j[0] < bStart + bCount)):
# compare them and insert whichever is next into the output
with ib.if_scope(compare(source[i_idx], source[j_idx])):
assign_i()
with ib.else_scope():
assign_j()
# otherwise, simply copy the remainder of the valid iterator to the output
with ib.else_scope():
with ib.if_scope(i[0] < aStart + aCount):
assign_i()
with ib.else_scope():
assign_j()
with ib.for_range(0, cast(upper_lim - lower_lim, "int64"), dtype="int64") as l2_width:
width = 2 << (l2_width + lower_lim)
# Define and launch the cuda kernel
with ib.new_scope():
target = tvm.target.Target.current()
if "vulkan" in str(target):
# Vulkan can't handle dynamic nthread, so we thread slightly differently
# for vulkan. We don't do this generally because it causes a 15% perf
# regression on other platforms
ntx = max_threads
nbx = tvm.tir.generic.cast(ceil_div(width, max_threads * thread_work), "int32")
nbz = tvm.tir.generic.cast(ceil_div(size, width), "int32")
tx, bx, by, bz = _get_threads(ib, ntx, nbx, nthread_by, nbz)
else:
ntx = tvm.tir.generic.cast(tvm.te.min(max_threads, width), "int32")
nbx = tvm.tir.generic.cast(ceil_div(width, max_threads * thread_work), "int32")
nbz = tvm.tir.generic.cast(ceil_div(size, width), "int32")
tx, bx, by, bz = _get_threads(ib, ntx, nbx, nthread_by, nbz)
def mergepath(
source,
dest,
source_idx,
dest_idx,
aCount,
bCount,
aStart,
bStart,
kStart,
step_count,
even,
):
# pylint: disable=arguments-out-of-order
def merge(source, dest, source_idx, dest_idx):
diag = tx * step_count
first, last = get_merge_begin(
source,
by * size,
aCount,
bCount,
aStart,
bStart,
diag,
step_count,
)
# iterate over the output loop
serial_merge(
source,
dest,
source_idx,
dest_idx,
by * size,
aCount,
bCount,
aStart,
bStart,
kStart,
diag,
step_count,
first,
last,
)
with ib.if_scope(even):
merge(source, dest, source_idx, dest_idx)
with ib.else_scope():
merge(dest, source, dest_idx, source_idx)
def mergesort(source, dest, source_idx, dest_idx, size, width, even):
# calculate the start, mid, and end points of this section
start = width * bz
middle = cast(tvm.te.min(start + tvm.tir.indexdiv(width, 2), size), "int64")
end = cast(tvm.te.min(start + width, size), "int64")
with ib.if_scope(start < size):
with ib.if_scope(nbx == 1):
## merge the start->middle and middle->end arrays
aCount = middle - start
bCount = end - middle
mergepath(
source,
dest,
source_idx,
dest_idx,
aCount,
bCount,
start,
middle,
start,
ceil_div(width, ntx),
even,
)
with ib.else_scope():
step_count = max_threads * thread_work
diag = bx * step_count
def do_merge(first, last):
aStart = start + first
bStart = middle + diag - last
aCount = tvm.te.min(middle - aStart, step_count)
bCount = tvm.te.min(end - bStart, step_count)
mergepath(
source,
dest,
source_idx,
dest_idx,
aCount,
bCount,
aStart,
bStart,
start + diag,
thread_work,
even,
)
with ib.if_scope(even):
first, last = get_merge_begin(
source,
by * size,
middle - start,
end - middle,
start,
middle,
diag,
step_count,
)
do_merge(first, last)
with ib.else_scope():
first, last = get_merge_begin(
dest,
by * size,
middle - start,
end - middle,
start,
middle,
diag,
step_count,
)
do_merge(first, last)
# Call the kernel
mergesort(
keys,
keys_swap,
values,
values_swap,
size,
width,
tvm.tir.indexmod(l2_width, 2) == 0,
)
nthread_by = axis_mul_before
nthread_bz = axis_mul_after
nthread_tx = max_threads
nthread_bx = ceil_div(size, nthread_tx)
## if the final sorted data ended up in the swap, copy it to the real output
with ib.if_scope(
tvm.tir.all(upper_lim > lower_lim, tvm.tir.indexmod(upper_lim - lower_lim, 2) == 1)
):
with ib.new_scope():
tx, bx, by, bz = _get_threads(ib, nthread_tx, nthread_bx, nthread_by, nthread_bz)
tid = bx * nthread_tx + tx
idx = (by * axis_mul_after + bz) * size + tid
with ib.if_scope(tid < size):
keys[idx] = keys_swap[idx]
if values is not None:
values[idx] = values_swap[idx]
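# Pure-Python reference of the merge-path partition performed by get_merge_begin above
# (illustration only, ascending order): it binary-searches the cross diagonal `diag` so
# that a[:i] and b[:diag - i] together form the first `diag` elements of the merged output.
def _merge_path_partition_reference(a, b, diag):
    first, last = max(0, diag - len(b)), min(diag, len(a))
    while first < last:
        mid = (first + last) >> 1
        if a[mid] <= b[diag - 1 - mid]:
            first = mid + 1
        else:
            last = mid
    return first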
def sort_ir(
data, values_out, values_out_swap, axis, is_ascend, indices_out=None, indices_out_swap=None
):
"""Low level IR to do sorting on the GPU, same usage as tvm.contrib.sort.argsort on the CPU.
Parameters
----------
data: Buffer
Buffer of input data. Data will be sorted in place.
values_out : Buffer
Output buffer of values of sorted tensor with same shape as data.
values_out_swap : Buffer
Output buffer of values with same shape as data to use as swap.
axis : Int
        Axis along which to sort the input tensor.
is_ascend : Boolean
Whether to sort in ascending or descending order.
    indices_out : Buffer
Output buffer of indices of sorted tensor with same shape as data.
indices_out_swap : Buffer
Output buffer of indices with same shape as data to use as swap.
Returns
-------
stmt : Stmt
The result IR statement.
"""
ib = tvm.tir.ir_builder.create()
shape = data.shape
data = ib.buffer_ptr(data)
values_out = ib.buffer_ptr(values_out)
values_out_swap = ib.buffer_ptr(values_out_swap)
if indices_out is not None:
indices_out = ib.buffer_ptr(indices_out)
assert indices_out_swap is not None
indices_out_swap = ib.buffer_ptr(indices_out_swap)
with ib.if_scope(shape[axis] > 0):
axis_mul_before, axis_mul_after = _sort_init(
ib,
shape,
axis,
data,
values_out,
indices_out,
value_init_func=lambda _, tid: tvm.tir.generic.cast(tid, indices_out.dtype),
)
_sort_common(
ib,
shape[axis],
axis_mul_before,
axis_mul_after,
is_ascend,
values_out,
values_out_swap,
values=indices_out,
values_swap=indices_out_swap,
)
return ib.get()
def sort_by_key_ir(
keys_in, values_in, keys_out, values_out, keys_out_swap, values_out_swap, axis, is_ascend
):
"""Low level IR to do sort by key on the GPU.
Parameters
----------
keys_in: Buffer
Buffer of input keys.
values_in: Buffer
        Buffer of input values.
keys_out : Buffer
Buffer of output sorted keys.
values_out : Buffer
Buffer of output sorted values.
keys_out_swap : Buffer
        Output buffer of keys with same shape as keys_in to use as swap.
values_out_swap : Buffer
Output buffer of values with same shape as values_in to use as swap.
axis : Int
        Axis along which to sort the input tensor.
is_ascend : Boolean
Whether to sort in ascending or descending order.
Returns
-------
stmt : Stmt
The result IR statement.
"""
ib = tvm.tir.ir_builder.create()
shape = keys_in.shape
keys_in = ib.buffer_ptr(keys_in)
values_in = ib.buffer_ptr(values_in)
keys_out = ib.buffer_ptr(keys_out)
keys_out_swap = ib.buffer_ptr(keys_out_swap)
values_out = ib.buffer_ptr(values_out)
values_out_swap = ib.buffer_ptr(values_out_swap)
with ib.if_scope(shape[axis] > 0):
axis_mul_before, axis_mul_after = _sort_init(
ib,
shape,
axis,
keys_in,
keys_out,
values_out,
value_init_func=lambda idx, _: values_in[idx],
)
_sort_common(
ib,
shape[axis],
axis_mul_before,
axis_mul_after,
is_ascend,
keys_out,
keys_out_swap,
values=values_out,
values_swap=values_out_swap,
)
return ib.get()
def sort(data, axis=-1, is_ascend=1):
"""Performs sorting along the given axis and returns an array of
sorted values with the same shape as the input data.
Parameters
----------
data: tvm.te.Tensor
The input array.
axis : int, optional
        Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
Returns
-------
out : tvm.te.Tensor
The output of this function.
"""
ndim = len(data.shape)
axis = ndim + axis if axis < 0 else axis
if axis != ndim - 1:
# Prepare for sorting along axis -1.
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
value_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8)
value_buf_swap = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf_swap", data_alignment=8)
out = te.extern(
[data.shape, data.shape],
[data],
lambda ins, outs: sort_ir(ins[0], outs[0], outs[1], -1, is_ascend),
out_buffers=[value_buf, value_buf_swap],
name="sort_gpu",
tag="sort_gpu",
)[0]
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
out = transpose(out, axes)
return out
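# Usage sketch (illustration only, not part of the public API): building and scheduling
# the mergesort-based `sort` above. The shape, dtype, and the "cuda" target are
# assumptions; a current target is required because sort_ir reads max_num_threads.
def _example_sort_graph():
    x = te.placeholder((2, 1024), name="x", dtype="float32")
    with tvm.target.Target("cuda"):
        values = sort(x, axis=-1, is_ascend=True)
        s = schedule_sort([values])
    return s, [x, values]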
def sort_thrust(data, axis=-1, is_ascend=1):
"""Performs sorting along the given axis and returns an array of
sorted values with the same shape as the input data.
Parameters
----------
data: tvm.te.Tensor
The input array.
axis : int, optional
        Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
Returns
-------
out : tvm.te.Tensor
The output of this function.
"""
dtype = "float32"
ndim = len(data.shape)
axis = ndim + axis if axis < 0 else axis
if axis != ndim - 1:
# Prepare for sorting along axis -1.
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
value_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8)
indices_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_buf", data_alignment=8)
out = te.extern(
[data.shape, data.shape],
[data],
## TODO(mbrookhart): This thrust function is actually doing argsort, not sort
## For performance, we should probably rename the contrib function and add
## a pure sort
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.thrust.sort", ins[0], outs[0], outs[1], is_ascend
),
out_buffers=[value_buf, indices_buf],
name="sort_gpu",
tag="sort_gpu",
)[0]
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
out = transpose(out, axes)
return out
def argsort(data, axis=-1, is_ascend=1, dtype="float32", ret_type="indices"):
"""Performs sorting along the given axis and returns an array of indices
having same shape as an input array that index data in sorted order.
Parameters
----------
data: tvm.te.Tensor
The input array.
axis : int, optional
        Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
DType of the output indices.
ret_type : string, optional
The return type [both, indices].
"both": return both sorted data and indices.
"indices": return sorted indices only.
Returns
-------
out : tvm.te.Tensor
The output of this function.
"""
ndim = len(data.shape)
axis = ndim + axis if axis < 0 else axis
if axis != ndim - 1:
# Prepare for sorting along axis -1.
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
value_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8)
value_swap_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "value_swap_buf", data_alignment=8)
indices_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_buf", data_alignment=8)
indices_swap_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_swap_buf", data_alignment=8)
outs = te.extern(
[data.shape, data.shape, data.shape, data.shape],
[data],
lambda ins, outs: sort_ir(
ins[0],
outs[0],
outs[2],
-1,
is_ascend,
indices_out=outs[1],
indices_out_swap=outs[3],
),
out_buffers=[value_buf, indices_buf, value_swap_buf, indices_swap_buf],
name="argsort_gpu",
tag="argsort_gpu",
)
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
outs = [transpose(out, axes) for out in outs]
if ret_type == "indices":
return outs[1]
return outs[0], outs[1]
def argsort_thrust(data, axis=-1, is_ascend=1, dtype="float32", ret_type="indices"):
"""Performs sorting along the given axis and returns an array of indices
having same shape as an input array that index data in sorted order.
Parameters
----------
data: tvm.te.Tensor
The input array.
axis : int, optional
        Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
DType of the output indices.
ret_type : string, optional
The return type [both, indices].
"both": return both sorted data and indices.
"indices": return sorted indices only.
Returns
-------
out : tvm.te.Tensor
The output of this function.
"""
return topk_thrust(data, 0, axis, ret_type, is_ascend, dtype)
def schedule_sort(outs):
"""Schedule for sort operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of argsort
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _schedule_sort(outs)
def schedule_argsort(outs):
"""Schedule for argsort operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of argsort
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _schedule_sort(outs)
def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
"""Get the top k elements in an input tensor along the given axis.
Parameters
----------
data : tvm.te.Tensor
The input tensor.
k : int, optional
Number of top elements to select. Return all elements if k < 1.
axis : int, optional
        Axis along which to sort the input tensor.
ret_type: str, optional
The return type [both, values, indices].
"both": return both top k data and indices.
"values": return top k data only.
"indices": return top k indices only.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
The data type of the indices output.
Returns
-------
out : tvm.te.Tensor or List[tvm.te.Tensor]
The computed result.
"""
assert ret_type in ["both", "values", "indices"]
ndim = len(data.shape)
axis = axis + ndim if axis < 0 else axis
assert 0 <= axis < ndim
dshape = data.shape
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
values_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "values_buf", data_alignment=8)
values_swap_buf = tvm.tir.decl_buffer(
data.shape, data.dtype, "values_swap_buf", data_alignment=8
)
indices_buf = tvm.tir.decl_buffer(data.shape, dtype, "indices_buf", data_alignment=8)
    indices_swap_buf = tvm.tir.decl_buffer(data.shape, dtype, "indices_swap_buf", data_alignment=8)
if ret_type == "values":
output = te.extern(
[data.shape, data.shape],
[data],
lambda ins, outs: sort_ir(ins[0], outs[0], outs[1], -1, is_ascend),
out_buffers=[values_buf, values_swap_buf],
name="topk_gpu",
tag="topk_gpu",
)[0]
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
output = transpose(output, axes)
else:
output = te.extern(
[data.shape, data.shape, data.shape, data.shape],
[data],
lambda ins, outs: sort_ir(
ins[0],
outs[0],
outs[2],
-1,
is_ascend,
indices_out=outs[1],
indices_out_swap=outs[3],
),
out_buffers=[values_buf, indices_buf, values_swap_buf, indices_swap_buf],
name="topk_gpu",
tag="topk_gpu",
)[0:2]
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
output[0] = transpose(output[0], axes)
output[1] = transpose(output[1], axes)
if isinstance(k, int) and k < 1:
if ret_type == "indices":
return output[1]
return output
beg = [0] * ndim
end = []
strides = [1] * ndim
for i in range(ndim):
if i == axis:
end.append(k if isinstance(k, int) else tvm.te.size_var("dim"))
else:
end.append(dshape[i])
if ret_type == "both":
values_out, indices_out = output
values_out = strided_slice(values_out, beg, end, strides)
indices_out = strided_slice(indices_out, beg, end, strides)
output = [values_out, indices_out]
elif ret_type == "values":
output = [strided_slice(output, beg, end, strides)]
else: # ret_type == "indices"
indices_out = output[1]
output = [strided_slice(indices_out, beg, end, strides)]
return output
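# Editor's note: an illustrative usage sketch, not part of the original module.
# It requests the top-3 values and indices along the last axis; the input shape
# and names are assumptions. As with `sort`, a CUDA target context is needed
# because the underlying IR generator reads the current target's thread limit.
def _example_topk_usage():
    with tvm.target.Target("cuda"):
        data = te.placeholder((8, 128), name="data", dtype="float32")
        values, indices = topk(data, k=3, axis=-1, ret_type="both", is_ascend=False, dtype="int64")
        s = schedule_topk([values, indices])
    return s, [data, values, indices]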
def topk_thrust(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
"""Get the top k elements in an input tensor along the given axis.
Parameters
----------
data : tvm.te.Tensor
The input tensor.
k : int, optional
Number of top elements to select. Return all elements if k < 1.
axis : int, optional
        Axis along which to sort the input tensor.
ret_type: str, optional
The return type [both, values, indices].
"both": return both top k data and indices.
"values": return top k data only.
"indices": return top k indices only.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
The data type of the indices output.
Returns
-------
out : tvm.te.Tensor or List[tvm.te.Tensor]
The computed result.
"""
assert ret_type in ["both", "values", "indices"]
ndim = len(data.shape)
axis = ndim + axis if axis < 0 else axis
if axis != ndim - 1:
# Prepare for sorting along axis -1.
axes = swap(list(range(ndim)), axis)
data = transpose(data, axes)
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
out_bufs = [
tvm.tir.decl_buffer(data.shape, data.dtype, "value_buf", data_alignment=8),
tvm.tir.decl_buffer(data.shape, dtype, "indices_buf", data_alignment=8),
]
is_ascend = 1 if is_ascend else 0
out = te.extern(
[data.shape, data.shape],
[data],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.thrust.sort", ins[0], outs[0], outs[1], is_ascend
),
in_buffers=[data_buf],
out_buffers=out_bufs,
name="topk_gpu",
tag="topk_gpu",
)
if isinstance(k, tvm.tir.IntImm):
k = k.value
if not isinstance(k, int) or k > 0:
beg = [0] * ndim
end = data.shape[:-1] + [k if isinstance(k, int) else tvm.te.size_var("dim")]
strides = [1] * ndim
out = [strided_slice(o, beg, end, strides) for o in out]
if axis != ndim - 1:
axes = swap(list(range(ndim)), axis)
out = [transpose(o, axes) for o in out]
if ret_type == "values":
out = out[0]
elif ret_type == "indices":
out = out[1]
return out
def schedule_topk(outs):
"""Schedule for argsort operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of argsort
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _schedule_sort(outs)
def sort_by_key(keys, values, axis=-1, is_ascend=1):
"""Sort values with respect to keys. Both keys and values will
be sorted and returned.
Parameters
----------
keys: tvm.te.Tensor
The input keys.
values : tvm.te.Tensor,
The input values.
axis : int, optional
        Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
Returns
-------
keys_sorted : tvm.te.Tensor
The sorted keys
values_sorted : tvm.te.Tensor
The values sorted with respect to the keys
"""
keys_buf = tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8)
values_buf = tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8)
out_bufs = [
tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8),
tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_swap_buf", data_alignment=8),
tvm.tir.decl_buffer(values.shape, values.dtype, "values_swap_buf", data_alignment=8),
]
out = te.extern(
[keys.shape, values.shape, keys.shape, values.shape],
[keys, values],
lambda ins, outs: sort_by_key_ir(
ins[0], ins[1], outs[0], outs[1], outs[2], outs[3], axis, is_ascend
),
in_buffers=[keys_buf, values_buf],
out_buffers=out_bufs,
dtype=[keys.dtype, values.dtype],
name="sort_by_key",
tag="sort_by_key",
)
return out[0], out[1]
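# Editor's note: an illustrative usage sketch, not part of the original module.
# It sorts a 1-D key tensor and reorders a value tensor of the same length along
# with it. The length, dtypes and names are assumptions; scheduling is left to
# the caller (e.g. via `schedule_sort`).
def _example_sort_by_key_usage():
    with tvm.target.Target("cuda"):
        keys = te.placeholder((1024,), name="keys", dtype="float32")
        values = te.placeholder((1024,), name="values", dtype="int32")
        keys_sorted, values_sorted = sort_by_key(keys, values, axis=0, is_ascend=1)
    return keys, values, keys_sorted, values_sorted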
def stable_sort_by_key_thrust(keys, values, for_scatter=False):
"""Sort values with respect to keys using thrust.
Both keys and values will be sorted and returned.
Sorting is done via stable sort, so relative ordering among
ties are preserved.
Parameters
----------
keys: tvm.te.Tensor
The 1D input keys.
values : tvm.te.Tensor,
The 1D input values.
for_scatter: bool, optional
If True, negative keys are interpreted as negative indices.
Before sorting, negative indices are converted to corresponding positive indices.
The output keys (indices) are all positive.
This option is introduced to optimize the scatter implementation.
Returns
-------
keys_sorted : tvm.te.Tensor
The sorted keys
values_sorted : tvm.te.Tensor
The values sorted with respect to the keys
"""
keys_buf = tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8)
values_buf = tvm.tir.decl_buffer(values.shape, values.dtype, "values_buf", data_alignment=8)
out_bufs = [
tvm.tir.decl_buffer(keys.shape, keys.dtype, "keys_buf", data_alignment=8),
tvm.tir.decl_buffer(keys.shape, values.dtype, "values_buf", data_alignment=8),
]
out = te.extern(
[keys.shape, values.shape],
[keys, values],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.thrust.stable_sort_by_key", ins[0], ins[1], outs[0], outs[1], for_scatter
),
in_buffers=[keys_buf, values_buf],
out_buffers=out_bufs,
dtype=[keys.dtype, values.dtype],
name="stable_sort_by_key",
tag="stable_sort_by_key",
)
return out[0], out[1]
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sparse operators"""
import numpy as np
import scipy.sparse as sp
import tvm
from tvm import relay, te
from .. import nn
from ..utils import traverse_inline, get_const_tuple, prod, get_const_int, ceil_div
from .transform import schedule_transpose_from_existing
def sparse_dense(data, weight_data, weight_indices, weight_indptr, sparse_lhs=False):
"""
Computes sparse-dense matrix multiplication of `data` and
`(weight_data, weight_indices, weight_indptr).T`
Parameters
----------
data : tvm.te.Tensor
2-D with shape [M, K], float32
weight_data : tvm.te.Tensor
1-D with shape [nnz] (CSR) or
3-D with shape [num_blocks, bs_r, bs_c] (BSR)
weight_indices : tvm.te.Tensor
1-D with shape [nnz] (CSR) or
1-D with shape [num_blocks] (BSR)
weight_indptr : tvm.te.Tensor
1-D with shape [N + 1] (CSR) or
1-D with shape [(N + 1) // bs_r] (BSR)
Returns
-------
output : tvm.te.Tensor
2-D with shape [M, N]
"""
# pylint:disable=unused-argument
return nn.sparse_dense(data, weight_data, weight_indices, weight_indptr, sparse_lhs)
def schedule_sparse_dense(outs):
"""Create schedule for sparse dense"""
# pylint:disable=invalid-name
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "sparse_dense_sp_rhs_bsrmm" or op.tag == "sparse_dense_sp_lhs_bsrmm":
y_bsrmm = op.input_tensors[0]
assert (
y_bsrmm.op.tag == "sparse_dense_sp_rhs_bsrmm_block"
or y_bsrmm.op.tag == "sparse_dense_sp_lhs_bsrmm_block"
)
out = s.outputs[0].output(0)
if op not in s.outputs:
y_reshape = op.output(0)
s[y_reshape].compute_at(s[out], s[out].op.axis[1])
(_, c) = s[y_bsrmm].op.reduce_axis
(m_o, n_o) = s[out].op.axis
s[out].bind(m_o, te.thread_axis("blockIdx.x"))
s[out].bind(n_o, te.thread_axis("blockIdx.y"))
s[y_bsrmm].compute_at(s[out], n_o)
thread_x = te.thread_axis("threadIdx.x")
y_bsrmm_factored = s.rfactor(y_bsrmm, c)
tx = s[y_bsrmm].op.reduce_axis[0]
s[y_bsrmm].bind(tx, thread_x)
s[y_bsrmm_factored].compute_at(s[y_bsrmm], tx)
s[y_bsrmm].set_store_predicate(thread_x.var.equal(0))
s[out].set_store_predicate(thread_x.var.equal(0))
elif op.tag == "sparse_dense_sp_lhs_csrmm" or op.tag == "sparse_dense_sp_rhs_csrmm":
out = op.output(0)
const_size = get_const_int(prod(out.shape))
fused = s[out].fuse(*s[out].op.axis)
bx, tx = s[out].split(fused, factor=const_size)
s[out].bind(tx, te.thread_axis("threadIdx.x"))
s[out].bind(bx, te.thread_axis("blockIdx.x"))
traverse_inline(s, outs[0].op, _callback)
return s
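# Editor's note: an illustrative usage sketch, not part of the original module.
# It wires the generic `sparse_dense` compute to the CUDA schedule above for a
# BSR weight with block size (bs_r, bs_c) = (16, 1). All shapes, the number of
# non-zero blocks and the tensor names are assumptions chosen only to make the
# example well formed.
def _example_sparse_dense_usage():
    m, k, n, bs_r, bs_c, num_blocks = 128, 256, 64, 16, 1, 32
    data = te.placeholder((m, k), name="data", dtype="float32")
    w_data = te.placeholder((num_blocks, bs_r, bs_c), name="w_data", dtype="float32")
    w_indices = te.placeholder((num_blocks,), name="w_indices", dtype="int32")
    w_indptr = te.placeholder((n // bs_r + 1,), name="w_indptr", dtype="int32")
    out = sparse_dense(data, w_data, w_indices, w_indptr)
    s = schedule_sparse_dense([out])
    return s, [data, w_data, w_indices, w_indptr, out]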
def sparse_dense_tir(data, w_data, w_indices, w_indptr):
"""Compute data * w^T.
Actually computes (w * data^T) ^ T as data needs to be in column-major
format for performance reasons.
Good resources:
Yang, Carl, Aydın Buluç, and John D. Owens. "Design principles for sparse
matrix multiplication on the GPU." European Conference on Parallel
Processing. Springer, Cham, 2018. <- This code is basically row-split from here.
Gale, Trevor, et al. "Sparse GPU Kernels for Deep Learning." arXiv preprint
arXiv:2006.10901 (2020).
Profile with
`/opt/nvidia/nsight-compute/2020.1.2/ncu -k default_function_kernel1
--section '.*' -s 1 -c 1 venv/bin/python3 test_topi_sparse.py manual`
with either default_function_kernel0 for the transpose or
default_function_kernel1 for the multiply.
"""
def gen_ir(data, w_data, w_indices, w_indptr, out):
# pylint: disable=invalid-name, simplifiable-if-statement
# TODO(tkonolige): use tensorcores for block multiply
# TODO(tkonolige): use vectorize on loads
# TODO(tkonolige): separate implementation if M is small
# TODO(tkonolige): separate implementation for large block sizes
ib = tvm.tir.ir_builder.create()
if tvm.target.Target.current(allow_none=False).kind.name == "cuda":
use_warp_storage = True
else:
            # TVM's warp shuffle intrinsics are slow on ROCm because they use
            # LDS (shared memory) to do the shuffling. Instead, we could use
            # ROCm's support for accessing a neighboring thread's memory, but
            # those intrinsics aren't accessible from TVM. For now, we just use
            # shared memory. We also default to shared memory on platforms
            # where we do not know how warp storage performs.
use_warp_storage = False
warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
m = data.shape[1]
nb = w_indptr.shape[0] - 1
# treat csr like block size 1 bsr
if len(w_data.shape) == 1:
bs_n = 1
bs_k = 1
else:
bs_n = w_data.shape[1]
bs_k = w_data.shape[2]
bs_m = bs_n
mb = m // bs_m
mi = warp_size
assert (
mb >= mi
), "Number of block rows in dense matrix must be larger than warp size: {} vs {}.".format(
warp_size, mb
)
mo = ceil_div(mb, mi)
ni = 1 # TODO(tkonolige): how do I compute the number of warps per block?
no = ceil_div(nb, ni)
rowlength_bi = warp_size
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(bx, "thread_extent", mo)
by = te.thread_axis("blockIdx.y")
ib.scope_attr(by, "thread_extent", no)
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", warp_size)
warp = te.thread_axis("threadIdx.y")
ib.scope_attr(warp, "thread_extent", ni)
out_ptr = ib.buffer_ptr(out)
data_ptr = ib.buffer_ptr(data)
w_data_ptr = ib.buffer_ptr(w_data)
w_indices_ptr = ib.buffer_ptr(w_indices)
w_indptr_ptr = ib.buffer_ptr(w_indptr)
n_index = by * ni + warp
m_index = bx * mi + tx
row_start = w_indptr_ptr[n_index]
# Guaranteed to be evenly divisible
rowlength_bo = ceil_div(w_indptr_ptr[n_index + 1] - row_start, rowlength_bi)
# thread local storage for bs_m x bs_n block
block = ib.allocate(data.dtype, (bs_m, bs_n), name="block", scope="local")
data_cache = ib.allocate(data.dtype, (mi, bs_m, bs_k), name="data_cache", scope="local")
if use_warp_storage:
indices = ib.allocate(w_indices.dtype, (rowlength_bi,), name="indices", scope="warp")
w_data_cache = ib.allocate(
w_data.dtype, (rowlength_bi, bs_n, bs_k), name="w_data_cache", scope="warp"
)
else:
indices = ib.allocate(
w_indices.dtype, (ni, rowlength_bi), name="indices", scope="shared"
)
w_data_cache = ib.allocate(
w_data.dtype, (ni, rowlength_bi, bs_n, bs_k), name="w_data_cache", scope="shared"
)
# zero block
with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
block[x, y] = 0.0
# compute into thread local storage using warp_size chunks
with ib.for_range(0, rowlength_bo, name="bb") as bb:
elem_idx = bb * rowlength_bi + tx
# Cache indices. Guaranteed to be multiple of warp_size.
if use_warp_storage:
indices[tx] = w_indices_ptr[row_start + elem_idx]
else:
indices[warp, tx] = w_indices_ptr[row_start + elem_idx]
# cache dense matrix
# each thread has a row
# TODO: ideally we could vectorize this
with ib.for_range(0, rowlength_bi, name="bi") as bi:
with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
                        # This memory access should be out of bounds when
# m_index >= mb (which occurs when the dense matrix
# rows % 32 != 0), but it seems to work just fine...
if use_warp_storage:
ind = indices[bi]
else:
ind = indices[warp, bi]
data_cache[bi, x, z] = data_ptr[ind * bs_k + z, m_index * bs_m + x]
# cache w_data
elem_idx = bb * rowlength_bi + tx
with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
data_indices = [row_start + elem_idx] + (
[y, z] if len(w_data.shape) > 1 else []
)
cache_indices = [tx, y, z] if use_warp_storage else [warp, tx, y, z]
w_data_cache[cache_indices] = w_data_ptr[data_indices]
with ib.for_range(0, mi, name="i") as i:
# thread local block matmul
with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
with ib.for_range(0, bs_k, name="z", kind="unroll") as z:
if use_warp_storage:
w = w_data_cache[i, y, z]
else:
w = w_data_cache[warp, i, y, z]
block[x, y] += data_cache[i, x, z] * w
# store results
with ib.for_range(0, bs_m, name="x", kind="unroll") as x:
with ib.for_range(0, bs_n, name="y", kind="unroll") as y:
with ib.if_scope(m_index < mb):
with ib.if_scope(n_index < nb):
                        # It doesn't seem like we would be getting coalesced
# writes here, but it doesn't seem to matter
out_ptr[m_index * bs_m + x, n_index * bs_n + y] = block[x, y]
return ib.get()
data_t = tvm.topi.transpose(data)
# handle csr
if len(w_data.shape) == 1:
blocksize = 1
else:
blocksize = w_data.shape[1]
out_shape = (data_t.shape[1], (w_indptr.shape[0] - 1) * blocksize)
out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
out = te.extern(
[out_shape],
[data_t, w_data, w_indices, w_indptr, data],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
dtype=data.dtype,
out_buffers=[out_buf],
name="sparse_dense_gpu",
tag="sparse_dense_gpu",
)
return out
def is_valid_for_sparse_dense_padded(data, weight_data):
"""
Check whether input is applicable for sparse_dense_padded op.
If not we should fall back to default scheduling.
"""
# pylint:disable=invalid-name
warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
# If there are multiple alter_ops in a model, the first alteration does not
# run type inference for the subsequent ones. In this case, we don't have
# the shape information, so we run the inferencer manually.
try:
m = get_const_tuple(data.checked_type.shape)[1]
except ValueError:
        data_inferred = relay.transform.InferType()(tvm.IRModule.from_expr(data))["main"]
        m = get_const_tuple(data_inferred.ret_type.shape)[1]
if len(weight_data.shape) == 1:
bs_m = 1
else:
bs_m = weight_data.shape[1]
mb = m // bs_m
if mb >= warp_size:
return True
return False
def sparse_dense_padded(data, weight_data, weight_indices, weight_indptr, sparse_lhs=False):
"""
Computes sparse-dense matrix multiplication of `data` and
`(weight_data, weight_indices, weight_indptr).T`
This variation uses a padded matrix where all row lengths are a multiple of the warp size.
Parameters
----------
data : tvm.te.Tensor
2-D with shape [M, K], float32
weight_data : tvm.te.Tensor
1-D with shape [nnz] (CSR) or
3-D with shape [num_blocks, bs_r, bs_c] (BSR)
weight_indices : tvm.te.Tensor
1-D with shape [nnz] (CSR) or
1-D with shape [num_blocks] (BSR)
weight_indptr : tvm.te.Tensor
1-D with shape [N + 1] (CSR) or
1-D with shape [(N + 1) // bs_r] (BSR)
Returns
-------
output : tvm.te.Tensor
2-D with shape [M, N]
"""
# TODO(ANSHUMAN87): Handle for sparse_lhs case too
assert not sparse_lhs, "Currently only sparse weight is supported."
return sparse_dense_tir(data, weight_data, weight_indices, weight_indptr)
def schedule_sparse_dense_padded(outs):
"""Create schedule for sparse dense"""
# XXX: this will fail if we don't include the data_t Tensor in the schedule
# ops. Maybe create_schedule should do some analysis so this isn't
# necessary
data_t = outs[0].op.input_tensors[0]
s = te.create_schedule([outs[0].op, data_t.op])
schedule_transpose_from_existing(s, outs[0].op.input_tensors[0])
return s
def pad_sparse_matrix(matrix, blocksize):
"""Pad rows of sparse matrix matrix so that they are a multiple of blocksize."""
assert isinstance(matrix, sp.bsr_matrix)
new_entries = np.zeros(matrix.shape[0], dtype=matrix.indptr.dtype)
bsr = matrix.blocksize[0]
for i in range(matrix.shape[0] // bsr):
row_length = matrix.indptr[i + 1] - matrix.indptr[i]
if row_length % blocksize != 0:
new_entries[i] = blocksize - (row_length % blocksize)
additional = np.sum(new_entries)
indices = np.zeros(matrix.indices.shape[0] + additional, dtype=matrix.indices.dtype)
data = np.zeros(
(matrix.data.shape[0] + additional, matrix.data.shape[1], matrix.data.shape[2]),
dtype=matrix.data.dtype,
)
n = matrix.shape[0] // bsr
indptr = np.zeros(n + 1, dtype=matrix.indptr.dtype)
indptr[: matrix.indptr.shape[0]] = matrix.indptr
for i in range(matrix.shape[0] // bsr):
indptr[i + 1] = indptr[i] + new_entries[i] + (matrix.indptr[i + 1] - matrix.indptr[i])
indices[indptr[i] : indptr[i + 1] - new_entries[i]] = matrix.indices[
matrix.indptr[i] : matrix.indptr[i + 1]
]
data[indptr[i] : indptr[i + 1] - new_entries[i], :, :] = matrix.data[
matrix.indptr[i] : matrix.indptr[i + 1], :, :
]
return sp.bsr_matrix((data, indices, indptr), matrix.shape)
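# Editor's note: an illustrative usage sketch, not part of the original module.
# It pads a small random BSR matrix so that every block-row holds a multiple of
# a hypothetical warp size of 32 blocks; the density, shapes and block size are
# arbitrary assumptions.
def _example_pad_sparse_matrix():
    rng = np.random.default_rng(0)
    dense = rng.random((64, 64)) * (rng.random((64, 64)) < 0.1)
    bsr = sp.csr_matrix(dense).tobsr(blocksize=(16, 1))
    padded = pad_sparse_matrix(bsr, 32)
    # Every block-row length is now a multiple of 32; the padded blocks are zero.
    assert np.all((padded.indptr[1:] - padded.indptr[:-1]) % 32 == 0)
    return padded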
@nn.sparse_dense_alter_layout.register(["cuda", "gpu", "rocm"])
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
"""With cuda, we modify use alter_op_layout to swap the default
sparse_dense implementation for one that operates on a padded matrix. We
also pad the matrix.
"""
# TODO(ANSHUMAN87): Handle for sparse_lhs case too
if (
isinstance(inputs[1], relay.Constant)
and isinstance(inputs[2], relay.Constant)
and isinstance(inputs[3], relay.Constant)
and is_valid_for_sparse_dense_padded(inputs[0], inputs[1].data.numpy())
):
if len(inputs[1].data.numpy().shape) == 1:
sparse_matrix = sp.csr_matrix(
(inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy())
).tobsr()
else:
sparse_matrix = sp.bsr_matrix(
(inputs[1].data.numpy(), inputs[2].data.numpy(), inputs[3].data.numpy())
)
warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
return relay.nn._make.sparse_dense_padded(
inputs[0],
relay.Constant(tvm.nd.array(sparse_matrix.data)),
relay.Constant(tvm.nd.array(sparse_matrix.indices)),
relay.Constant(tvm.nd.array(sparse_matrix.indptr)),
)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/sparse_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Sparse_Reshape operator"""
import tvm
from tvm import te
from ...tir import decl_buffer, ir_builder, Cast
from ...te import extern, div, floordiv, floormod
from ..utils import ceil_div
def sparse_reshape(
sparse_indices,
prev_shape,
new_shape,
new_sparse_indices_shape,
new_shape_shape,
):
"""
Reshape a Sparse Tensor
Parameters
----------
sparse_indices : relay.Expr
A 2-D tensor[N, n_dim] of integers containing location of sparse values, where N is the
number of sparse values and n_dim is the number of dimensions of the dense_shape
prev_shape : relay.Expr
A 1-D tensor containing the previous shape of the dense tensor
new_shape : relay.Expr
A 1-D tensor containing the new shape of the dense tensor
Returns
-------
    new_sparse_indices : relay.Expr
        A 2-D tensor containing the location of sparse values in the reshaped dense tensor
    new_shape : relay.Expr
        A 1-D tensor containing the fully resolved new shape of the dense tensor
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
new_sparse_indices, new_shape = relay.sparse_reshape(sparse_indices,
prev_shape,
new_shape)
new_sparse_indices = [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
new_shape = [9, 4]
"""
def gen_ir(
sparse_indices_ptr,
prev_shape_ptr,
new_shape_ptr,
new_sparse_indices_ptr,
out_new_shape_ptr,
):
ib = ir_builder.create()
sparse_indices = ib.buffer_ptr(sparse_indices_ptr)
prev_shape = ib.buffer_ptr(prev_shape_ptr)
new_shape = ib.buffer_ptr(new_shape_ptr)
out_new_shape = ib.buffer_ptr(out_new_shape_ptr)
new_sparse_indices = ib.buffer_ptr(new_sparse_indices_ptr)
out_new_shape = ib.buffer_ptr(out_new_shape_ptr)
prev_shape_size = prev_shape_ptr.shape[0]
new_shape_size = new_shape_ptr.shape[0]
multipliers = ib.allocate(
new_shape_ptr.dtype, (prev_shape_size,), name="multipliers", scope="global"
)
dividers = ib.allocate(
new_shape_ptr.dtype, (new_shape_size,), name="dividers", scope="global"
)
flattened_indices = ib.allocate(
new_shape_ptr.dtype,
(sparse_indices_ptr.shape[0],),
name="flattened_indices",
scope="global",
)
total_ele = ib.allocate(new_shape_ptr.dtype, (1,), name="total_ele", scope="global")
division_total_ele = ib.allocate(
new_shape_ptr.dtype, (1,), name="division_total_ele", scope="global"
)
equal_shape = ib.allocate("bool", (1,), name="equal_shape", scope="global")
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
with ib.new_scope():
            # The computation in this block is minuscule since we are just iterating over
            # shape tensors, which are very small (< 10 elements), so there is no need for parallelization
nthread_tx = 1
nthread_bx = 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
total_ele[0] = prev_shape[0]
# Cumulative Reverse Exclusive Multiply
multipliers[prev_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, prev_shape_size - 1) as i_:
i = i_ + 1
multipliers[prev_shape_size - 1 - i] = (
prev_shape[prev_shape_size - i] * multipliers[prev_shape_size - i]
)
total_ele[0] *= prev_shape[prev_shape_size - i]
division_total_ele[0] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, new_shape_size) as i:
with ib.if_scope(new_shape[i] != -1):
division_total_ele[0] *= new_shape[i]
# Compute true output shape (replace negative ones)
with ib.for_range(0, new_shape_size) as i:
with ib.if_scope(new_shape[i] == -1):
out_new_shape[i] = Cast(
new_shape_ptr.dtype, div(total_ele[0], division_total_ele[0])
)
with ib.else_scope():
out_new_shape[i] = new_shape[i]
# Check if prev_shape and new_shape are equal
equal_shape[0] = True
with ib.if_scope(prev_shape_size == new_shape_size):
with ib.for_range(0, prev_shape_size) as i:
with ib.if_scope(prev_shape[i] != out_new_shape[i]):
equal_shape[0] = False
with ib.else_scope():
equal_shape[0] = False
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(sparse_indices_ptr.shape[0], max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
row_number = bx * max_threads + tx
# Return same inputs if shapes are equal
with ib.if_scope(equal_shape[0]):
with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
new_sparse_indices[row_number, j] = sparse_indices[row_number, j]
# Else compute new_sparse_indices
with ib.else_scope():
dividers[new_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, new_shape_size - 1) as i_:
i = i_ + 1
dividers[new_shape_size - 1 - i] = (
dividers[new_shape_size - i] * out_new_shape[new_shape_size - i]
)
with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
flattened_indices[row_number] = Cast(new_shape_ptr.dtype, 0)
with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
flattened_indices[row_number] += (
sparse_indices[row_number, j] * multipliers[j]
)
with ib.if_scope(row_number < sparse_indices_ptr.shape[0]):
current_element = ib.allocate(
new_shape_ptr.dtype, (1,), name="current_element", scope="local"
)
current_element[0] = flattened_indices[row_number]
with ib.for_range(0, new_sparse_indices_ptr.shape[1]) as j:
new_sparse_indices[row_number, j] = Cast(
sparse_indices_ptr.dtype, floordiv(current_element[0], dividers[j])
)
current_element[0] = floormod(current_element[0], dividers[j])
return ib.get()
new_sparse_indices_buf = decl_buffer(
new_sparse_indices_shape, sparse_indices.dtype, "new_sparse_indices_buf"
)
new_shape_buf = decl_buffer(new_shape_shape, prev_shape.dtype, "new_shape_buf")
return extern(
[new_sparse_indices_shape, new_shape_shape],
[sparse_indices, prev_shape, new_shape],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
out_buffers=[new_sparse_indices_buf, new_shape_buf],
name="sparse_reshape_cuda",
tag="sparse_reshape_cuda",
)
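# Editor's note: an illustrative usage sketch, not part of the original module.
# It mirrors the docstring example at the TE level: five sparse values in a
# [2, 3, 4] tensor reshaped to [9, -1]. The output shapes are precomputed by the
# caller, as the Relay frontend does; dtypes and names are assumptions, and a
# CUDA target context is entered because the IR generator reads the current
# target's thread limit.
def _example_sparse_reshape():
    with tvm.target.Target("cuda"):
        sparse_indices = te.placeholder((5, 3), name="sparse_indices", dtype="int32")
        prev_shape = te.placeholder((3,), name="prev_shape", dtype="int32")
        new_shape = te.placeholder((2,), name="new_shape", dtype="int32")
        new_sparse_indices, out_shape = sparse_reshape(
            sparse_indices, prev_shape, new_shape, (5, 2), (2,)
        )
    return new_sparse_indices, out_shape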
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/ssd/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from .multibox import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/ssd/multibox.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, too-many-function-args
"""SSD multibox operators"""
import math
import tvm
from tvm import te
from tvm.tir import if_then_else, exp
from tvm import topi
from ..nms import non_max_suppression
def multibox_prior_ir(data, out, sizes, ratios, steps, offsets):
"""Low level IR routing for multibox_prior operator.
Parameters
----------
data : Buffer
Input data buffer.
out : Buffer
Output buffer.
sizes : tuple of float
Tuple of sizes for anchor boxes.
ratios : tuple of float
Tuple of ratios for anchor boxes.
    steps : tuple of float
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of float
        Priorbox center offsets, y and x respectively.
Returns
-------
stmt : Stmt
The result IR statement.
"""
max_threads = int(math.sqrt(tvm.target.Target.current(allow_none=False).max_num_threads))
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib = tvm.tir.ir_builder.create()
p_out = ib.buffer_ptr(out)
in_height = data.shape[2]
in_width = data.shape[3]
nthread_tx = max_threads
nthread_bx = in_height // max_threads + 1
nthread_ty = max_threads
nthread_by = in_width // max_threads + 1
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(ty, "thread_extent", nthread_ty)
ib.scope_attr(bx, "thread_extent", nthread_bx)
ib.scope_attr(by, "thread_extent", nthread_by)
num_sizes = len(sizes)
num_ratios = len(ratios)
size_ratio_concat = sizes + ratios
steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
offset_h = offsets[0]
offset_w = offsets[1]
i = bx * max_threads + tx
j = by * max_threads + ty
with ib.if_scope((i < in_height)):
with ib.if_scope((j < in_width)):
center_h = (i + offset_h) * steps_h
center_w = (j + offset_w) * steps_w
for k in range(num_sizes + num_ratios - 1):
w = if_then_else(
k < num_sizes,
float(size_ratio_concat[k]) * in_height / in_width / 2.0,
float(size_ratio_concat[0])
* in_height
/ in_width
* math.sqrt(size_ratio_concat[k + 1])
/ 2.0,
)
h = if_then_else(
k < num_sizes,
size_ratio_concat[k] / 2.0,
size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0,
)
count = (
i * in_width * (num_sizes + num_ratios - 1)
+ j * (num_sizes + num_ratios - 1)
+ k
) * 4
p_out[count] = center_w - w
p_out[count + 1] = center_h - h
p_out[count + 2] = center_w + w
p_out[count + 3] = center_h + h
body = ib.get()
return body
def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):
"""Generate prior(anchor) boxes from data, sizes and ratios.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, c_in, h_in, w_in]]
sizes : tuple of float
Tuple of sizes for anchor boxes.
ratios : tuple of float
Tuple of ratios for anchor boxes.
    steps : tuple of float
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of float
        Priorbox center offsets, y and x respectively.
clip : boolean
Whether to clip out-of-boundary boxes.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
"""
num_sizes = len(sizes)
num_ratios = len(ratios)
oshape = (1, data.shape[2] * data.shape[3] * (num_sizes + num_ratios - 1), 4)
out = te.extern(
oshape,
[data],
lambda ins, outs: multibox_prior_ir(ins[0], outs[0], sizes, ratios, steps, offsets),
tag="multibox_prior",
)
if clip:
out = topi.clip(out, 0, 1)
return out
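# Editor's note: an illustrative usage sketch, not part of the original module.
# It generates clipped anchors for a hypothetical 1 x 3 x 38 x 38 feature map
# with two sizes and three ratios; every number below is an assumption, and the
# CUDA target context is needed because the IR generator reads the current
# target's thread limit.
def _example_multibox_prior():
    with tvm.target.Target("cuda"):
        data = te.placeholder((1, 3, 38, 38), name="feature_map", dtype="float32")
        anchors = multibox_prior(
            data,
            sizes=(0.2, 0.272),
            ratios=(1.0, 2.0, 0.5),
            steps=(-1, -1),
            offsets=(0.5, 0.5),
            clip=True,
        )
    return anchors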
def transform_loc_pre(cls_prob, valid_count, temp_valid_count, temp_cls_id, temp_score, threshold):
"""Low level IR routing for transform location data preparation.
Parameters
----------
cls_prob : Buffer
Buffer of class probabilities.
valid_count : Buffer
Buffer of number of valid output boxes.
temp_valid_count : Buffer
Output intermediate result buffer
temp_cls_id : Buffer
Output intermediate result buffer
temp_score : Buffer
Output buffer
threshold : float
Threshold to be a positive prediction.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch_size = cls_prob.shape[0]
num_classes = cls_prob.shape[1]
num_anchors = cls_prob.shape[2]
ib = tvm.tir.ir_builder.create()
cls_prob = ib.buffer_ptr(cls_prob)
cls_id = ib.buffer_ptr(temp_cls_id)
valid_count = ib.buffer_ptr(valid_count)
temp_valid_count = ib.buffer_ptr(temp_valid_count)
score = ib.buffer_ptr(temp_score)
threshold = tvm.tir.FloatImm("float32", threshold)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = (batch_size * num_anchors) // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
with ib.if_scope(tid < batch_size * num_anchors):
i = idxd(tid, num_anchors)
j = idxm(tid, num_anchors)
valid_count[i] = 0
score[tid] = -1.0
cls_id[tid] = 0
with ib.for_range(0, num_classes - 1) as k:
temp = cls_prob[i * num_classes * num_anchors + (k + 1) * num_anchors + j]
cls_id[tid] = if_then_else(temp > score[tid], k + 1, cls_id[tid])
score[tid] = tvm.te.max(temp, score[tid])
with ib.if_scope(tvm.tir.all(cls_id[tid] > 0, score[tid] < threshold)):
cls_id[tid] = 0
with ib.if_scope(cls_id[tid] > 0):
temp_valid_count[tid] = 1
with ib.else_scope():
temp_valid_count[tid] = 0
with ib.if_scope(tid < batch_size):
with ib.for_range(0, num_anchors) as k:
with ib.if_scope(k > 0):
temp_valid_count[tid * num_anchors + k] += temp_valid_count[
tid * num_anchors + k - 1
]
valid_count[i] = temp_valid_count[tid * num_anchors + num_anchors - 1]
return ib.get()
def transform_loc_ir(
loc_pred,
anchor,
temp_valid_count,
temp_cls_id,
temp_score,
out,
clip,
variances,
batch_size,
num_anchors,
):
"""Low level IR routing for transform location in multibox_detection operator.
Parameters
----------
loc_pred : Buffer
Buffer of location regression predictions.
anchor : Buffer
Buffer of prior anchor boxes.
temp_valid_count : Buffer
Intermediate result buffer.
temp_cls_id : Buffer
Intermediate result buffer.
temp_score : Buffer
Input buffer which stores intermediate results.
out : Buffer
Output buffer.
clip : boolean
Whether to clip out-of-boundary boxes.
variances : tuple of float
Variances to be decoded from box regression output.
batch_size : int
Batch size
num_anchors : int
Number of anchors
Returns
-------
stmt : Stmt
The result IR statement.
"""
def transform_loc(loc, loc_base_idx, anchor, anchor_base_idx, clip, vx, vy, vw, vh):
"""Transform prior anchor box to output box through location predictions."""
al = anchor[anchor_base_idx]
at = anchor[anchor_base_idx + 1]
ar = anchor[anchor_base_idx + 2]
ab = anchor[anchor_base_idx + 3]
aw = ar - al
ah = ab - at
ax = (al + ar) / 2.0
ay = (at + ab) / 2.0
px = loc[loc_base_idx]
py = loc[loc_base_idx + 1]
pw = loc[loc_base_idx + 2]
ph = loc[loc_base_idx + 3]
ox = px * vx * aw + ax
oy = py * vy * ah + ay
ow = exp(pw * vw) * aw / 2.0
oh = exp(ph * vh) * ah / 2.0
return (
tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, ox - ow)), ox - ow),
tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, oy - oh)), oy - oh),
tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, ox + ow)), ox + ow),
tvm.tir.if_then_else(clip, tvm.te.max(0.0, tvm.te.min(1.0, oy + oh)), oy + oh),
)
ib = tvm.tir.ir_builder.create()
loc_pred = ib.buffer_ptr(loc_pred)
anchor = ib.buffer_ptr(anchor)
temp_valid_count = ib.buffer_ptr(temp_valid_count)
cls_id = ib.buffer_ptr(temp_cls_id)
score = ib.buffer_ptr(temp_score)
out_loc = ib.buffer_ptr(out)
max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = (batch_size * num_anchors) // max_threads + 1
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
with ib.if_scope(tid < batch_size * num_anchors):
i = idxd(tid, num_anchors)
j = idxm(tid, num_anchors)
with ib.if_scope(cls_id[tid] > 0):
with ib.if_scope(tid == 0):
out_base_idx = i * num_anchors * 6
out_loc[out_base_idx] = cls_id[tid] - 1.0
out_loc[out_base_idx + 1] = score[tid]
(
out_loc[out_base_idx + 2],
out_loc[out_base_idx + 3],
out_loc[out_base_idx + 4],
out_loc[out_base_idx + 5],
) = transform_loc(
loc_pred,
tid * 4,
anchor,
j * 4,
clip,
variances[0],
variances[1],
variances[2],
variances[3],
)
with ib.else_scope():
out_base_idx = i * num_anchors * 6 + temp_valid_count[tid - 1] * 6
out_loc[out_base_idx] = cls_id[tid] - 1.0
out_loc[out_base_idx + 1] = score[tid]
(
out_loc[out_base_idx + 2],
out_loc[out_base_idx + 3],
out_loc[out_base_idx + 4],
out_loc[out_base_idx + 5],
) = transform_loc(
loc_pred,
tid * 4,
anchor,
j * 4,
clip,
variances[0],
variances[1],
variances[2],
variances[3],
)
return ib.get()
def multibox_transform_loc(
cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
"""Location transformation for multibox detection
Parameters
----------
cls_prob : tvm.te.Tensor
Class probabilities.
loc_pred : tvm.te.Tensor
Location regression predictions.
anchor : tvm.te.Tensor
Prior anchor boxes.
clip : boolean
Whether to clip out-of-boundary boxes.
threshold : float
Threshold to be a positive prediction.
variances : tuple of float
Variances to be decoded from box regression output.
Returns
-------
ret : tuple of tvm.te.Tensor composed of
out : tvm.te.Tensor
3-D tensor with shape (batch_size, num_anchors, 6)
valid_count : tvm.te.Tensor
1-D tensor with shape (batch_size,), number of valid anchor boxes.
"""
batch_size = cls_prob.shape[0]
num_anchors = cls_prob.shape[2]
oshape = (batch_size, num_anchors, 6)
# Define data alignment for intermediate buffer
valid_count_dtype = "int32"
out_loc_dtype = loc_pred.dtype
valid_count_buf = tvm.tir.decl_buffer(
(batch_size,), valid_count_dtype, "valid_count_buf", data_alignment=4
)
loc_pred_buf = tvm.tir.decl_buffer(
loc_pred.shape, loc_pred.dtype, "loc_pred_buf", data_alignment=8
)
anchor_buf = tvm.tir.decl_buffer(anchor.shape, anchor.dtype, "anchor_buf", data_alignment=8)
temp_valid_count_buf = tvm.tir.decl_buffer(
(
batch_size,
num_anchors,
),
valid_count_dtype,
"temp_valid_count",
data_alignment=8,
)
temp_cls_id_buf = tvm.tir.decl_buffer(
(
batch_size,
num_anchors,
),
valid_count_dtype,
"temp_cls_id",
data_alignment=8,
)
temp_score_buf = tvm.tir.decl_buffer(
(
batch_size,
num_anchors,
),
cls_prob.dtype,
"temp_score",
data_alignment=8,
)
valid_count, temp_valid_count, temp_cls_id, temp_score = te.extern(
[
(batch_size,),
(
batch_size,
num_anchors,
),
(
batch_size,
num_anchors,
),
(
batch_size,
num_anchors,
),
],
[cls_prob],
lambda ins, outs: transform_loc_pre(ins[0], outs[0], outs[1], outs[2], outs[3], threshold),
dtype=[valid_count_dtype, valid_count_dtype, valid_count_dtype, cls_prob.dtype],
out_buffers=[valid_count_buf, temp_valid_count_buf, temp_cls_id_buf, temp_score_buf],
tag="multibox_transform_loc_phase_one",
)
out_loc = te.extern(
[oshape],
[loc_pred, anchor, temp_valid_count, temp_cls_id, temp_score],
lambda ins, outs: transform_loc_ir(
ins[0],
ins[1],
ins[2],
ins[3],
ins[4],
outs[0],
clip,
variances,
batch_size,
num_anchors,
),
in_buffers=[
loc_pred_buf,
anchor_buf,
temp_valid_count_buf,
temp_cls_id_buf,
temp_score_buf,
],
dtype=[out_loc_dtype],
tag="multibox_transform_loc",
)
return [out_loc, valid_count]
def multibox_detection(
cls_prob,
loc_pred,
anchor,
clip=True,
threshold=0.01,
nms_threshold=0.5,
force_suppress=False,
variances=(0.1, 0.1, 0.2, 0.2),
nms_topk=-1,
):
"""Convert multibox detection predictions.
Parameters
----------
cls_prob : tvm.te.Tensor
Class probabilities.
loc_pred : tvm.te.Tensor
Location regression predictions.
anchor : tvm.te.Tensor
Prior anchor boxes.
clip : boolean
Whether to clip out-of-boundary boxes.
nms_threshold : float
Non-maximum suppression threshold.
force_suppress : boolean
Whether to suppress all detections regardless of class_id.
threshold : float
Threshold to be a positive prediction.
variances : tuple of float
Variances to be decoded from box regression output.
nms_topk : int
Keep maximum top k detections before nms, -1 for no limit.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape (batch_size, num_anchors, 6)
"""
inter_out = multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances)
out = non_max_suppression(
inter_out[0],
inter_out[1],
inter_out[1],
max_output_size=-1,
iou_threshold=nms_threshold,
force_suppress=force_suppress,
top_k=nms_topk,
return_indices=False,
)
return out
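# Editor's note: an illustrative usage sketch, not part of the original module.
# It decodes a hypothetical SSD head with 21 classes (including background) and
# 5776 anchors for a batch of one; the shapes and thresholds are assumptions,
# and the CUDA target context is required by the underlying IR generators.
def _example_multibox_detection():
    with tvm.target.Target("cuda"):
        batch, num_classes, num_anchors = 1, 21, 5776
        cls_prob = te.placeholder((batch, num_classes, num_anchors), name="cls_prob")
        loc_pred = te.placeholder((batch, num_anchors * 4), name="loc_pred")
        anchors = te.placeholder((1, num_anchors, 4), name="anchors")
        out = multibox_detection(cls_prob, loc_pred, anchors, nms_threshold=0.5, nms_topk=400)
    return out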
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/stft.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
import tvm
from tvm import te, tir
from ..utils import ceil_div
def _get_max_threads(batch_row):
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
return tir.min(batch_row, max_threads)
def stft(
data,
n_fft,
hop_length,
win_length,
window,
normalized,
onesided,
output_shape,
):
"""
The STFT computes the Fourier transform of short overlapping windows of the input.
This gives frequency components of the signal as they change over time.
Parameters
----------
data : relay.Expr
Either a 1-D tensor or a 2-D batch tensor.
n_fft : int
The size of Fourier transform
hop_length : int
The distance between neighboring sliding window frames
win_length : int
The size of window frame and STFT filter
window : relay.Expr
A 1-D tensor window frame
normalized : bool
Whether to return the normalized STFT results
    onesided : bool
        Whether to return onesided result or fill with conjugate symmetry
    output_shape : list of int
        The shape of the output tensor
Returns
-------
output : relay.Expr
Tensor containing the STFT result
Examples
--------
.. code-block:: python
data = [1, 2, 3, 4, 5, 6]
window = [4, 3, 2]
[n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
-> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
"""
def gen_ir(
data_ptr,
n_fft,
hop_length,
win_length,
window_ptr,
normalized,
onesided,
output_ptr,
):
ib = tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
window = ib.buffer_ptr(window_ptr)
output = ib.buffer_ptr(output_ptr)
max_threads = _get_max_threads(output_ptr.shape[0] * output_ptr.shape[1])
output_size = output_ptr.shape[0] * output_ptr.shape[1] * output_ptr.shape[2]
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(output_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < output_size):
matrix_size = output_ptr.shape[1] * output_ptr.shape[2]
batch = tir.floordiv(tid, matrix_size)
row = tir.floordiv(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
col = tir.indexmod(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
with ib.for_range(0, win_length) as wlen:
output[batch, row, col, 0] += (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.cos(2 * pi * row * wlen / win_length)
)
output[batch, row, col, 1] -= (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.sin(2 * pi * row * wlen / win_length)
)
with ib.if_scope(normalized):
output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))
return ib.get()
output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")
return te.extern(
output_shape,
[data, window],
lambda ins, outs: gen_ir(
ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0]
),
dtype=[data.dtype],
out_buffers=[output_buf],
name="stft_cuda",
tag="stft_cuda",
)
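# Editor's note: an illustrative usage sketch, not part of the original module.
# It reproduces the docstring example at the TE level. The caller supplies the
# output shape; the (batch, n_freq, n_frames, 2) layout and the formulas used
# below follow the usual one-sided STFT convention and are assumptions of this
# sketch, as is the CUDA target context (the IR generator reads the current
# target's thread limit).
def _example_stft():
    with tvm.target.Target("cuda"):
        n_fft, hop_length, win_length = 3, 3, 3
        signal_len = 6
        data = te.placeholder((1, signal_len), name="data", dtype="float32")
        window = te.placeholder((win_length,), name="window", dtype="float32")
        n_freq = n_fft // 2 + 1  # one-sided spectrum
        n_frames = (signal_len - n_fft) // hop_length + 1
        out = stft(
            data,
            n_fft,
            hop_length,
            win_length,
            window,
            normalized=False,
            onesided=True,
            output_shape=(1, n_freq, n_frames, 2),
        )
    return out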
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unnecessary-lambda, too-many-arguments
"""Tensor intrinsics on CUDA."""
import tvm
from tvm import te
from ..utils import is_target
def dp4a(x_scope="local", y_scope="local", z_scope="local", dtypes=("int8", "int8")):
"""
Int8 dot product reduced by every 4 elements using __dp4a
Parameters
----------
x_scope : str, optional
The storage scope of buffer for lhs
y_scope : str, optional
The storage scope of buffer for rhs
z_scope : str, optional
The storage scope of buffer for result
dtypes: tuple of strs, optional
The dtype of x and y
Returns
-------
intrin : TensorIntrin
The dp4a TensorIntrin that can be used in tensorizing schedule.
"""
n = 4 # dp4a requires operands packed by 4
result_dtype = "int32" if dtypes[1] == "int8" else "uint32"
x = te.placeholder((n,), name="x", dtype=dtypes[0])
y = te.placeholder((n,), name="y", dtype=dtypes[1])
k = te.reduce_axis((0, n), name="rc")
z = te.compute(
(1,), lambda i: te.sum(x[k].astype(result_dtype) * y[k].astype(result_dtype), axis=[k])
)
def _intrin_func(ins, outs):
def _instr(index):
xx, yy = ins
zz = outs[0]
zz_dtype = zz.dtype
if index == 1:
return zz.vstore(0, tvm.tir.const(0, zz_dtype))
ib = tvm.tir.ir_builder.create()
vec_x_dtype = "int8x4" if xx.dtype == "int8" else "uint8x4"
vec_y_dtype = "int8x4" if yy.dtype == "int8" else "uint8x4"
vec_x = xx.vload(0, dtype=vec_x_dtype)
vec_y = yy.vload(0, dtype=vec_y_dtype)
prev_z = 0 if index == 0 else zz.vload(0)
if is_target("rocm"):
# TODO(masahi): Here we are assuming that we are compiling for gfx10 or later
# We can refine the specification for dot product on rocm if needed later.
# We can just use "llvm.amdgcn.udot4" for u8u8u32, but it is not tested.
assert (
dtypes[0] == "int8" and dtypes[0] == "int8"
), "u8u8u32 dot product for rocm not supported yet"
new_z = tvm.tir.call_llvm_pure_intrin(
zz_dtype,
"llvm.amdgcn.sdot4",
tvm.tir.const(4, "uint32"),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_x),
tvm.tir.call_intrin("int32", "tir.reinterpret", vec_y),
prev_z,
True,
)
else:
new_z = tvm.tir.call_pure_extern(zz_dtype, "__dp4a", vec_x, vec_y, prev_z)
ib.emit(zz.vstore(0, new_z))
return ib.get()
return _instr(0), _instr(1), _instr(2) # body, reset, update
default_buffer_params = {"data_alignment": 4, "offset_factor": 1}
scopes = {x: x_scope, y: y_scope, z: z_scope}
binds = {
t: tvm.tir.decl_buffer(
t.shape, t.dtype, t.op.name, scope=scopes[t], **default_buffer_params
)
for t in [x, y, z]
}
return te.decl_tensor_intrin(
z.op, _intrin_func, binds=binds, default_buffer_params=default_buffer_params
)
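# Editor's note: an illustrative usage sketch, not part of the original module.
# It only constructs the dp4a TensorIntrin for an int8 x int8 -> int32 reduction
# with every operand held in "local" scope. In a real schedule the returned
# intrinsic is applied with s[C].tensorize(inner_axis, intrin) to an inner
# reduction axis that has been split to length 4; that surrounding schedule is
# omitted here because it depends on the concrete operator being tensorized.
def _example_dp4a_intrin():
    intrin = dp4a(x_scope="local", y_scope="local", z_scope="local", dtypes=("int8", "int8"))
    return intrin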
def intrin_wmma_load_matrix_A(strides_dst, strides_from, shape, layout, A_shape, C_shape, in_dtype):
"""Intrin function for loading data from shared memory to wmma.matrix_a"""
wmma_m, wmma_n, wmma_k = shape
A = te.placeholder(A_shape, name="A", dtype=in_dtype)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="shared", strides=strides_from, data_alignment=32, offset_factor=8
)
C = te.compute(C_shape, lambda *i: A(*i), name="C")
BC = tvm.tir.decl_buffer(
C.shape,
C.dtype,
scope="wmma.matrix_a",
strides=strides_dst,
data_alignment=32,
offset_factor=8,
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
row = wmma_m * wmma_k
warp_index = BC.elem_offset // row + BC.elem_offset % row // wmma_k
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
wmma_m,
wmma_n,
wmma_k,
warp_index,
BA.access_ptr("r"),
strides_from[0],
layout,
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_load_matrix_W(strides_dst, strides_from, shape, layout, A_shape, C_shape, in_dtype):
"""Intrin function for loading data from shared memory to wmma.matrix_b"""
wmma_m, wmma_n, wmma_k = shape
A = te.placeholder(A_shape, name="A", dtype=in_dtype)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="shared", strides=strides_from, data_alignment=32, offset_factor=8
)
C = te.compute(C_shape, lambda *i: A(*i), name="C")
BC = tvm.tir.decl_buffer(
C.shape,
C.dtype,
scope="wmma.matrix_b",
strides=strides_dst,
data_alignment=32,
offset_factor=8,
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
row = wmma_n * wmma_k
warp_index = BC.elem_offset // row + BC.elem_offset % row // wmma_n
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
wmma_m,
wmma_n,
wmma_k,
warp_index,
BA.access_ptr("r"),
strides_from[0],
layout,
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_store_matrix(strides_dst, strides_from, shape, out_dtype, A_shape, C_shape):
"""Intrin function for storing the results from wmma.accumulator to shared"""
wmma_m, wmma_n, wmma_k = shape
A = te.placeholder(A_shape, name="A", dtype=out_dtype)
BA = tvm.tir.decl_buffer(
A.shape,
A.dtype,
scope="wmma.accumulator",
strides=strides_from,
data_alignment=32,
offset_factor=8,
)
C = te.compute(C_shape, lambda *i: A(*i), name="C")
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, scope="shared", strides=strides_dst, data_alignment=32, offset_factor=8
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
row = wmma_m * wmma_n
warp_index = BA.elem_offset // row + BA.elem_offset % row // wmma_n
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
BA.data,
wmma_m,
wmma_n,
wmma_k,
warp_index,
BC.access_ptr("w"),
strides_dst[0],
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, strides_A, strides_W, strides_Conv, shape):
"""Intrin for wmma fill_fragment and mma_sync
Parameters
----------
AL_gemm : tvm.te.placeholder
wmma matrix A
WL_gemm : tvm.te.placeholder
wmma matrix B
CL_compute : tvm.te.compute
The definition of wmma gemm
"""
wmma_m, wmma_n, wmma_k = shape
A = AL_gemm
B = WL_gemm
C = CL_compute
BA = tvm.tir.decl_buffer(
A.shape,
A.dtype,
name="BA",
scope="wmma.matrix_a",
data_alignment=32,
offset_factor=8,
strides=strides_A,
)
BB = tvm.tir.decl_buffer(
B.shape,
B.dtype,
name="BB",
scope="wmma.matrix_b",
data_alignment=32,
offset_factor=8,
strides=strides_W,
)
BC = tvm.tir.decl_buffer(
C.shape,
C.dtype,
name="BC",
scope="wmma.accumulator",
data_alignment=32,
offset_factor=8,
strides=strides_Conv,
)
def intrin_func(ins, outs):
BA, BB = ins
(BC,) = outs
def warp_index(offset, row, col):
row = row * col
return offset // row + offset % row // col
warp_index_A = warp_index(BA.elem_offset, wmma_m, wmma_k)
warp_index_B = warp_index(BB.elem_offset, wmma_k, wmma_n)
warp_index_C = warp_index(BC.elem_offset, wmma_m, wmma_n)
def init():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_fill_fragment",
BC.data,
wmma_m,
wmma_n,
wmma_k,
warp_index_C,
0.0,
)
)
return ib.get()
def update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_mma_sync",
BC.data,
warp_index_C,
BA.data,
warp_index_A,
BB.data,
warp_index_B,
BC.data,
warp_index_C,
)
)
return ib.get()
return update(), init(), update()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
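# Illustrative sketch (not part of TVM): the wmma intrinsics above turn a buffer's
# elem_offset into a fragment index via offset // (row * col) + offset % (row * col) // col.
# The helper below is hypothetical and only demonstrates that arithmetic with plain integers.
def _example_wmma_warp_index():
    """Hypothetical helper checking the fragment-index arithmetic used by the intrinsics."""

    def warp_index(offset, row, col):
        fragment = row * col
        return offset // fragment + offset % fragment // col

    # A 16x16 fragment holds 256 elements, so element offsets 0, 256 and 512
    # belong to fragment indices 0, 1 and 2 respectively.
    assert warp_index(0, 16, 16) == 0
    assert warp_index(256, 16, 16) == 1
    assert warp_index(512, 16, 16) == 2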
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/tensorcore_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Tensorcore alter op and legalize functions for cuda backend"""
import logging
import math
from tvm import relay, tir
from .. import nn
logger = logging.getLogger("topi")
@nn.batch_matmul_legalize.register("cuda")
def _batch_matmul_legalize(attrs, inputs, arg_types):
"""Legalizes batch_matmul op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of the current batch_matmul
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
arg_types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the input tensors.
x_tensor, y_tensor = arg_types[0], arg_types[1]
dtype = x_tensor.dtype
# Collect the output tensor.
output_tensor = arg_types[2]
# Collect the input exprs.
x, y = inputs
B, M, K = x_tensor.shape
B, N, K = y_tensor.shape
if (
isinstance(B, tir.expr.Any)
or isinstance(M, tir.expr.Any)
or isinstance(K, tir.expr.Any)
or isinstance(N, tir.expr.Any)
):
# Dynamic shape do not support alter op layout now
return None
M = M.value
K = K.value
N = N.value
# Pad input and output channels to use tensorcore schedule.
if dtype in ["float16", "int8", "uint8"]:
# The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)
if (
(M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
):
# no need to pad
return None
candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
elif dtype in ["int4", "uint4"]:
if M % 8 == 0 and K % 32 == 0 and N % 8 == 0:
# no need to pad
return None
candidates = [(8, 32, 8)]
else:
return None
(dm, dk, dn), extra_flops = pad_to_tensorcore(M, K, N, candidates)
if extra_flops > 2:
logger.info("batch_matmul pad_to_tensorcore skipped, extra_flops %s", extra_flops)
return None
logger.info("batch_matmul pad_to_tensorcore, extra_flops %s", extra_flops)
x_ = relay.nn.pad(x, pad_width=((0, 0), (0, dm), (0, dk))) if dm or dk else x
y_ = relay.nn.pad(y, pad_width=((0, 0), (0, dn), (0, dk))) if dn or dk else y
out_ = relay.nn.batch_matmul(x_, y_, attrs.out_dtype)
out = (
relay.strided_slice(out_, begin=[0, 0, 0], end=[x.value for x in output_tensor.shape])
if dm or dn
else out_
)
return out
@nn.dense_legalize.register("cuda")
def _dense_legalize(attrs, inputs, arg_types):
"""Legalizes dense op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of the current dense
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
arg_types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
new_attrs = {k: attrs[k] for k in attrs.keys()}
# Collect the input tensors.
x_tensor, y_tensor = arg_types[0], arg_types[1]
dtype = x_tensor.dtype
# Collect the output tensor.
output_tensor = arg_types[2]
# Collect the input exprs.
x, y = inputs
M, K = x_tensor.shape
N, K = y_tensor.shape
try:
M = M.value
K = K.value
N = N.value
except AttributeError:
# todo: deal with unfixed shape when compiling wdl model
return None
# Pad input and output channels to use tensorcore schedule.
if dtype in ["float16", "int8", "uint8"]:
# The shape of (M, K, N) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32)
if (
(M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
):
# no need to pad
return None
candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
elif dtype in ["int4", "uint4"]:
if M % 8 == 0 and K % 32 == 0 and N % 8 == 0:
# no need to pad
return None
candidates = [(8, 32, 8)]
else:
return None
(dm, dk, dn), extra_flops_ratio = pad_to_tensorcore(M, K, N, candidates)
skip_pad = extra_flops_ratio > 2
if skip_pad and dtype in ["int8", "uint8"]:
skip_pad = False
# If tensorcore schedule padding fails, pad to nearest upward 4x4x4 as long as
# the additional flops ratio isn't double or more.
# Note that 4x4x4 is invalid for tensorcore scheduling, but padding upwards to 4x4x4
# doesn't hurt if tensorcore padding has already failed.
if M % 4 == 0 and K % 4 == 0 and N % 4 == 0:
# No need to pad
return None
(dm, dk, dn) = _pad_to(M, K, N, (4, 4, 4))
extra_flops_ratio = _extra_flops(M, K, N, dm, dk, dn) / (M * K * N)
skip_pad = extra_flops_ratio > 2
if skip_pad:
logger.info("dense pad_to_tensorcore skipped, extra_flops_ratio %s", extra_flops_ratio)
return None
logger.info("dense pad_to_tensorcore, extra_flops_ratio %s", extra_flops_ratio)
x_ = relay.nn.pad(x, pad_width=((0, dm), (0, dk))) if dm or dk else x
y_ = relay.nn.pad(y, pad_width=((0, dn), (0, dk))) if dn or dk else y
# If units is explicitly specified, it is used to compute the output shape.
# We need to update units after padding to prevent a type error.
if attrs["units"] is not None:
new_attrs["units"] = N + dn
out_ = relay.nn.dense(x_, y_, **new_attrs)
out = (
relay.strided_slice(out_, begin=[0, 0], end=[x.value for x in output_tensor.shape])
if dm or dn
else out_
)
return out
def pad_to_tensorcore(M, K, N, candidates):
"""pad shape to enable tensorcore"""
flops = M * K * N
extra_flops = math.inf
best_pad = (0, 0, 0)
for padding in candidates:
dm, dk, dn = _pad_to(M, K, N, padding)
e = _extra_flops(M, K, N, dm, dk, dn)
# print(dm, dk, dn, e, flops)
if e < extra_flops:
extra_flops = e
best_pad = (dm, dk, dn)
return best_pad, extra_flops / flops
def _extra_flops(M, K, N, dm, dk, dn):
return (M + dm) * (N + dn) * (K + dk) - M * N * K
def _pad_to(M, K, N, PADDING):
dm, dk, dn = 0, 0, 0
if M % PADDING[0] != 0:
M_ = ((M + PADDING[0]) // PADDING[0]) * PADDING[0]
dm = M_ - M
if K % PADDING[1] != 0:
K_ = ((K + PADDING[1]) // PADDING[1]) * PADDING[1]
dk = K_ - K
if N % PADDING[2] != 0:
N_ = ((N + PADDING[2]) // PADDING[2]) * PADDING[2]
dn = N_ - N
return dm, dk, dn
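# Illustrative sketch (not part of TVM): how pad_to_tensorcore picks the padding for a
# concrete (M, K, N). The workload size below is made up; the candidate list matches the
# one used above for float16/int8.
def _example_pad_to_tensorcore():
    """Hypothetical helper showing the chosen padding and its relative flops overhead."""
    M, K, N = 60, 64, 70
    candidates = [(16, 16, 16), (32, 16, 8), (8, 16, 32)]
    (dm, dk, dn), extra_flops_ratio = pad_to_tensorcore(M, K, N, candidates)
    # (32, 16, 8) wins here: pad M 60->64 and N 70->72 at roughly 10% extra flops.
    assert (dm, dk, dn) == (4, 0, 2)
    assert extra_flops_ratio < 0.1
    return (dm, dk, dn), extra_flops_ratio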
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CUDA implementations of transforms"""
import tvm
from ... import te
from ...target import Target
from ..utils import traverse_inline
def schedule_transpose(outs):
"""Schedule a unfused transpose"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
schedule_transpose_from_existing(s, outs[0])
return s
def schedule_transpose_from_existing(s, out):
"""Schedule for transpose on the gpu.
Roughly follows this:
https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/, but
without the padding for shared memory. For better performance, we could
rewrite it in tir to add the padding. Also, rewriting in tir would allow
us to use warp shuffles instead of shared memory (see
https://github.com/bryancatanzaro/trove).
"""
def _callback(op):
# pylint: disable=invalid-name
m, n = s[op].op.axis
warp_size = int(Target.current(allow_none=False).thread_warp_size)
no, ni = s[op].split(n, factor=warp_size)
mo, mi = s[op].split(m, factor=warp_size)
s[op].reorder(mo, no, mi, ni)
s[op].bind(mo, te.thread_axis("blockIdx.x"))
s[op].bind(no, te.thread_axis("blockIdx.y"))
c = s.cache_read(op.input_tensors[0], "shared", op)
s[c].compute_at(s[op], no)
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
s[op].bind(ni, thread_x)
# This is a hack to make the scheduling language realize that this axis
# can be scheduled.
a, _ = s[c].split(s[c].op.axis[1], factor=1)
s[c].bind(a, thread_x)
# Use 4 warps per block. Slightly faster than 1 warp per block
ao, _ = s[op].split(mi, nparts=4)
s[op].bind(ao, thread_y)
ao, _ = s[c].split(s[c].op.axis[0], nparts=4)
s[c].bind(ao, thread_y)
traverse_inline(s, out.op, _callback)
def _invert_permutation_ir(data, out):
"""Low level IR to get invert_permutation.
Parameters
----------
data : Buffer
Input data. 1-D Buffer with shape [elem_num].
out : Buffer
1D buffer for invert permutation result with the same shape with data.
Returns
-------
stmt : Stmt
The result IR statement.
"""
elem_num = data.shape[0]
irb = tvm.tir.ir_builder.create()
data = irb.buffer_ptr(data)
out = irb.buffer_ptr(out)
max_threads = int(Target.current(allow_none=False).max_num_threads)
nthread_tx = max_threads
nthread_bx = elem_num // max_threads + 1
thread_x = te.thread_axis("threadIdx.x")
block_x = te.thread_axis("blockIdx.x")
irb.scope_attr(thread_x, "thread_extent", nthread_tx)
irb.scope_attr(block_x, "thread_extent", nthread_bx)
tid = block_x * max_threads + thread_x
with irb.if_scope(tid < elem_num):
r_ind = data[tid]
out[r_ind] = tid
return irb.get()
def invert_permutation(data):
"""Compute definition of invert_permutation.
For an output tensor y and an input tensor x, this operation computes the following:
y[x[i]] = i for i in [0, 1, ..., len(x) - 1]
Parameters
----------
data : tvm.te.Tensor
1-D tensor
Returns
-------
out : tvm.te.Tensor
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
out = te.extern(
[data.shape],
[data],
lambda ins, outs: _invert_permutation_ir(ins[0], outs[0]),
in_buffers=[
data_buf,
],
out_buffers=[
out_buf,
],
name="invert_permutation",
tag="invert_permutation_gpu",
)
return out
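# Illustrative sketch (not part of TVM): a NumPy reference for the definition above,
# y[x[i]] = i, which is what _invert_permutation_ir computes thread-per-element on the GPU.
# The helper is hypothetical and only documents the expected semantics.
def _example_invert_permutation_reference():
    """Hypothetical CPU check of the invert_permutation semantics."""
    import numpy as np

    x = np.array([3, 4, 0, 2, 1], dtype="int32")
    y = np.empty_like(x)
    y[x] = np.arange(len(x), dtype="int32")  # scatter: y[x[i]] = i
    assert list(y) == [2, 4, 3, 0, 1]
    return y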
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
import tvm
from tvm import te, tir
from ...te import hybrid
from .scan import cumsum
from .sort import sort, argsort
from ..utils import ceil_div
def _get_max_threads(batch_size):
target = tvm.target.Target.current()
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
if "vulkan" in str(target) and not isinstance(batch_size, tvm.tir.IntImm):
# SPIR-V does not support dynamic thread group size
return max_threads
return tir.min(batch_size, max_threads)
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
"""Low level IR to calculate adjacent difference in an 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
output: Buffer
A buffer to store adjacent difference, of the same shape as data. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
binop: function, optional
A binary associative op to use for calculating adjacent difference. The function takes two
TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
output_ptr = ib.buffer_ptr(output)
batch_size = data.shape[0]
max_threads = _get_max_threads(batch_size)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
output_ptr[tid] = 0
with ib.else_scope():
output_ptr[tid] = tir.Cast(output.dtype, binop(data_ptr[tid], data_ptr[tid - 1]))
return ib.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
"""Function calculate adjacent difference in an 1-D array.
Parameters
----------
data : tvm.te.Tensor
Input 1-D tensor.
out_dtype : str
The output tensor data type.
binop: function, optional
A binary associative op to use for calculating difference. The function takes two
TIR expressions and produces a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
Returns
-------
output : tvm.te.Tensor
1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
"""
data_buf = tir.decl_buffer(data.shape, data.dtype, "sorted_data_buf", data_alignment=8)
output_buf = tir.decl_buffer(data.shape, out_dtype, "output_buf", data_alignment=8)
return te.extern(
[data.shape],
[data],
lambda ins, outs: _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop),
dtype=[out_dtype],
in_buffers=[data_buf],
out_buffers=[output_buf],
name="_calc_adjacent_diff",
tag="_calc_adjacent_diff_gpu",
)
@hybrid.script
def _calc_num_unique(inc_scan):
"""Helper function to get the number of unique elements fron inc_scan tensor"""
output = output_tensor((1,), "int32")
for i in bind("threadIdx.x", 1):
output[i] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
return output
def _calc_unique_ir(
data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
"""Low level IR to calculate unique elements, inverse indices, and counts (optional) of
unique elements of 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
index_converter (optional) : Buffer
An optional index converter that transforms the unique element index
such that new_idx = index_converter[old_idx].
unique_elements : Buffer
A buffer that stores the unique elements.
inverse_indices : Buffer
A buffer that stores the index of each input data element in the unique element array.
counts (optional) : Buffer
A buffer that stores the count of each unique element.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
unique_elements_ptr = ib.buffer_ptr(unique_elements)
inverse_indices_ptr = ib.buffer_ptr(inverse_indices)
index_converter_ptr = None
if isinstance(index_converter, tir.Buffer):
index_converter_ptr = ib.buffer_ptr(index_converter)
if isinstance(counts, tir.Buffer):
counts_ptr = ib.buffer_ptr(counts)
# use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)
batch_size = data.shape[0]
max_threads = _get_max_threads(batch_size)
# if need to return counts
if isinstance(counts, tir.Buffer):
num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
num_elements = data.shape[0]
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
unique_seq_indices_ptr[num_unique - 1] = num_elements
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
unique_seq_indices_ptr[inc_scan_ptr[tid] - 1] = tid
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_unique):
unique_idx = tid if not index_converter_ptr else index_converter_ptr[tid]
with ib.if_scope(tid == 0):
counts_ptr[unique_idx] = unique_seq_indices_ptr[tid]
with ib.else_scope():
counts_ptr[unique_idx] = (
unique_seq_indices_ptr[tid] - unique_seq_indices_ptr[tid - 1]
)
# calculate unique elements and inverse indices
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
data_idx = argsorted_indices_ptr[tid]
unique_idx = (
inc_scan_ptr[tid]
if not index_converter_ptr
else index_converter_ptr[inc_scan_ptr[tid]]
)
inverse_indices_ptr[data_idx] = unique_idx
with ib.if_scope(tid == 0):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
return ib.get()
def _calc_first_occurence_ir(argsorted_indices, inc_scan, first_occurence):
"""Low level IR to calculate the first occurence of each unique element in the input data.
Parameters
----------
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
first_occurence : Buffer
A buffer that stores the first occurence of each unique element in the input data.
"""
ib = tir.ir_builder.create()
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
first_occurence_ptr = ib.buffer_ptr(first_occurence)
batch_size = argsorted_indices.shape[0]
max_threads = _get_max_threads(batch_size)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
first_occurence_ptr[tid] = batch_size
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
return ib.get()
def unique(data, is_sorted=True, return_counts=False):
"""
Find the unique elements of a 1-D tensor. Note that `output` and `counts` are both padded to
the same length as `data`, and elements with index >= num_unique[0] have undefined values.
Parameters
----------
data : tvm.te.Tensor
A 1-D tensor of integers.
is_sorted : bool
Whether to sort the unique elements in ascending order before returning as output.
return_counts : bool
Whether to return the count of each unique element.
Returns
-------
unique : tvm.te.Tensor
A 1-D tensor containing the unique elements of the input data tensor. The same size as
the input data. If there are fewer unique elements than input data elements, the end of the tensor
is padded with zeros.
indices : tvm.te.Tensor
A 1-D tensor. The same size as output. For each entry in output, it contains
the index of its first occurence in the input data. The end of the tensor is padded
with the length of the input data.
inverse_indices : tvm.te.Tensor
A 1-D tensor. For each entry in data, it contains the index of that data element in the
unique array. (Note that inverse_indices is very similar to indices if output is not
sorted)
num_unique : tvm.te.Tensor
A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
counts (optional) : tvm.te.Tensor
A 1-D tensor containing the count of each unique element in the output.
Examples
--------
.. code-block:: python
[output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
output = [4, 5, 1, 2, 3, ?, ?, ?]
indices = [0, 1, 2, 3, 4, ?, ?, ?]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
[output, indices, inverse_indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
output = [4, 5, 1, 2, 3, ?, ?, ?]
indices = [0, 1, 2, 3, 4, ?, ?, ?]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
counts = [2, 2, 1, 1, 2, ?, ?, ?]
[output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
output = [1, 2, 3, 4, 5, ?, ?, ?]
indices = [2, 3, 4, 0, 1, ?, ?, ?]
inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
num_unique = [5]
"""
sorted_data = sort(data)
argsorted_indices = argsort(data, dtype="int32")
# adjacent difference
adjacent_diff = _calc_adjacent_diff(sorted_data, out_dtype="int32", binop=tir.NE)
# inclusive scan
inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
# total number of unique elements
num_unique_elements = _calc_num_unique(inc_scan)
# buffers
data_buf = tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
argsorted_indices_buf = tir.decl_buffer(
data.shape, "int32", "argsorted_indices_buf", data_alignment=8
)
inc_scan_buf = tvm.tir.decl_buffer(data.shape, "int32", "inc_scan_buf", data_alignment=8)
unique_elements_buf = tir.decl_buffer(
data.shape, data.dtype, "unique_elements_buf", data_alignment=8
)
inverse_indices_buf = tvm.tir.decl_buffer(
data.shape, "int32", "inverse_indices_buf", data_alignment=8
)
# prepare outputs
if return_counts:
counts_buf = tir.decl_buffer(data.shape, "int32", "counts_buf", data_alignment=8)
out_data_shape = [data.shape] * 3
out_buffers = [unique_elements_buf, inverse_indices_buf, counts_buf]
out_dtypes = [data.dtype, "int32", "int32"]
else:
out_data_shape = [data.shape] * 2
out_buffers = [unique_elements_buf, inverse_indices_buf]
out_dtypes = [data.dtype, "int32"]
# prepare inputs and fcompute
# calculate first occurence
first_occurence_buf = tir.decl_buffer(
data.shape, "int32", "first_occurence_buf", data_alignment=8
)
first_occurence = te.extern(
[data.shape],
[argsorted_indices, inc_scan],
lambda ins, outs: _calc_first_occurence_ir(ins[0], ins[1], outs[0]),
dtype=["int32"],
in_buffers=[argsorted_indices_buf, inc_scan_buf],
out_buffers=[first_occurence_buf],
name="_calc_first_occurence",
tag="_calc_first_occurence_gpu",
)
if is_sorted:
in_data = [data, argsorted_indices, inc_scan]
in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
indices = first_occurence
else:
# calculate index converter by sorting unique elements by their first occurence
argsorted_first_occurence = argsort(first_occurence, dtype="int32")
index_converter = argsort(argsorted_first_occurence, dtype="int32")
index_converter_buf = tir.decl_buffer(
data.shape, "int32", "index_converter_buf", data_alignment=8
)
in_data = [data, argsorted_indices, inc_scan, index_converter]
in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf, index_converter_buf]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
indices = sort(first_occurence)
outs = te.extern(
out_data_shape,
in_data,
fcompute,
dtype=out_dtypes,
in_buffers=in_buffers,
out_buffers=out_buffers,
name="_calc_unique",
tag="_calc_unique_gpu",
)
if return_counts:
return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
return [outs[0], indices, outs[1], num_unique_elements]
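# Illustrative sketch (not part of TVM): a NumPy reference for the pipeline used by
# unique() above (argsort, adjacent not-equal difference, inclusive scan, scatter back).
# The helper and its input are hypothetical and mirror the sorted example in the docstring.
def _example_unique_reference():
    """Hypothetical CPU check of the sort/diff/scan steps behind unique()."""
    import numpy as np

    data = np.array([4, 5, 1, 2, 3, 3, 4, 5], dtype="int32")
    argsorted = np.argsort(data, kind="stable")
    sorted_data = data[argsorted]
    # adjacent_diff[0] = 0, adjacent_diff[i] = (sorted_data[i] != sorted_data[i - 1])
    adjacent_diff = np.concatenate(([0], (sorted_data[1:] != sorted_data[:-1]).astype("int32")))
    inc_scan = np.cumsum(adjacent_diff)
    num_unique = int(inc_scan[-1]) + 1
    inverse_indices = np.empty_like(data)
    inverse_indices[argsorted] = inc_scan  # scatter back to the original order
    assert num_unique == 5
    assert list(inverse_indices) == [3, 4, 0, 1, 2, 2, 3, 4]
    return num_unique, inverse_indices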
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/cuda/vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-member, import-outside-toplevel
"""Schedule for vision operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from .. import cpp
from .. import tag
from .pooling import schedule_pool
from .injective import schedule_injective_from_existing
def _default_schedule(outs):
"""Default schedule for gpu."""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_injective(op.tag) or op.tag in ["bbox_score", "sorted_bbox"]:
schedule_injective_from_existing(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
for o in outs:
traverse(o.op)
return s
def schedule_reorg(outs):
"""Schedule for reorg operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of reorg
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for reorg.
"""
target = tvm.target.Target.current(allow_none=False)
cpp_target = cpp.TEST_create_target(target.kind.name)
return cpp.cuda.schedule_injective(cpp_target, outs)
def schedule_nms(outs):
"""Schedule for non-maximum suppression
Parameters
----------
outs: Array of Tensor
The computation graph description of nms
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs)
def schedule_multibox_prior(outs):
"""Schedule for multibox_prior operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of multibox_prior
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for multibox_prior.
"""
return _default_schedule(outs)
def schedule_multibox_transform_loc(outs):
"""Schedule for multibox_transform_loc
Parameters
----------
outs: Array of Tensor
The computation graph description of
multibox_transform_loc in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs)
def schedule_multibox_detection(outs):
"""Schedule for multibox_detection operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of multibox_detection
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for multibox_detection.
"""
return _default_schedule(outs)
def schedule_roi_align(outs):
return schedule_pool(outs, "NCHW")
def schedule_roi_pool(outs):
return schedule_pool(outs, "NCHW")
def schedule_proposal(outs):
"""Schedule for proposal operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of proposal
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs)
def schedule_get_valid_counts(outs):
"""Schedule for get_valid_counts operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of get_valid_counts
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,redefined-outer-name
"""Einsum operator"""
from . import cpp
def einsum(subscripts, *operand):
"""Evaluates the Einstein summation convention on the operands.
Parameters
----------
subscripts : string
Specifies the subscripts for summation as comma separated list of subscript labels.
An implicit (classical Einstein summation) calculation is performed unless the
explicit indicator ‘->’ is included as well as subscript labels of the precise
output form.
operand : sequence of tvm.te.Tensor
These are the tensors for the operation, passed as separate arguments,
e.g. topi.einsum("ij, jk -> ik", A, B).
Returns
-------
out : tvm.te.Tensor
The calculation based on the Einstein summation convention.
"""
return cpp.einsum(subscripts, operand)
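# Illustrative usage sketch (not part of TVM's API surface): expressing a matmul through
# einsum. The placeholder names and shapes below are made up for the example.
def _example_einsum_matmul():
    """Hypothetical helper building C[i, k] = sum_j A[i, j] * B[j, k] with einsum."""
    from tvm import te

    A = te.placeholder((16, 32), name="A", dtype="float32")
    B = te.placeholder((32, 8), name="B", dtype="float32")
    C = einsum("ij, jk -> ik", A, B)
    assert [int(d) for d in C.shape] == [16, 8]
    return C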
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Generic declaration and schedules.
This is a recommended way of using TOPI API.
To use the generic schedule function, the user must set
the current target scope using a with block. See also :any:`tvm.target`
Example
-------
.. code-block:: python
# create schedule that dispatches to topi.cuda.schedule_injective
with tvm.target.Target("cuda"):
s = tvm.topi.generic.schedule_injective(outs)
"""
from __future__ import absolute_import as _abs
from .nn import *
from .injective import *
from .extern import *
from .vision import *
from .sort import *
from .search import *
from .image import *
from .math import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""Generic convolution schedules"""
from tvm import te
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..utils import get_const_tuple, traverse_inline
from ..nn.utils import get_pad_tuple
def fallback_schedule_cpu_common_int8(cfg, wkl, int32_lanes, num_int8_elements):
"""Fallback schedule for conv2d int8 on cpu.
Normally the innermost pattern takes two int8/uint8 tensors,
data[num_int8_elements] and kernel[int32_lanes, num_int8_elements],
and produces a dot-product int32/uint32 output[int32_lanes].
Parameters
----------
int32_lanes : int
How many int32/uint32 values the intrinsic produces at once.
This is related to the output channel.
num_int8_elements : int
How many int8/uint8 input elements are multiplied and reduced into each output value.
This is related to the input channel.
"""
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
out_width = (wkl.width + pl + pr - dilated_kernel_w) // WSTR + 1
assert wkl.out_filter % int32_lanes == 0, "wkl.out_filter=%d, int32_lanes=%d" % (
wkl.out_filter,
int32_lanes,
)
assert wkl.in_filter % num_int8_elements == 0, "wkl.in_filter=%d, num_int8_elements=%d" % (
wkl.in_filter,
num_int8_elements,
)
oc_bn = int32_lanes if int32_lanes >= num_int8_elements else num_int8_elements
ic_bn = 1
for bn in range(oc_bn, 0, -4):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
def fallback_schedule_cpu_1x1_int8(cfg, wkl, int32_lanes, num_int8_elements):
"""Fallback schedule for 1x1 conv2d int8 on cpu.
Normally the innermost pattern takes two int8/uint8 tensors,
data[num_int8_elements] and kernel[int32_lanes, num_int8_elements],
and produces a dot-product int32/uint32 output[int32_lanes].
Parameters
----------
int32_lanes : int
How many int32/uint32 values the intrinsic produces at once.
This is related to the output channel.
num_int8_elements : int
How many int8/uint8 input elements are multiplied and reduced into each output value.
This is related to the input channel.
"""
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
out_height = (wkl.height + pt + pb - wkl.kernel_h) // HSTR + 1
out_width = (wkl.width + pl + pr - wkl.kernel_w) // WSTR + 1
assert wkl.out_filter % int32_lanes == 0, "wkl.out_filter=%d, int32_lanes=%d" % (
wkl.out_filter,
int32_lanes,
)
assert wkl.in_filter % num_int8_elements == 0, "wkl.in_filter=%d, num_int8_elements=%d" % (
wkl.in_filter,
num_int8_elements,
)
oc_bn = int32_lanes if int32_lanes >= num_int8_elements else num_int8_elements
ic_bn = 1
for bn in range(oc_bn, 0, -4):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
for ow_factor in range(out_width, 0, -1):
if out_width % ow_factor == 0:
for oh_factor in range(out_height, 0, -1):
if out_height % oh_factor == 0 and ow_factor * oh_factor < 32:
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_oh"] = OtherOptionEntity(oh_factor)
cfg["tile_ow"] = SplitEntity([out_width // ow_factor, ow_factor])
return
raise ValueError("cannot decide default schedule for workload: {}".format(wkl))
def schedule_conv_NCHWc_cpu_common_int8(
s,
cfg,
data_vec,
kernel_vec,
conv_out,
last,
int32_lanes=16,
int8_elems=4,
intrin=None,
inline_fused=True,
):
"""
Defines the schedule for INT8 for Intel and ARM machines
Uses the Intel/ARM intrinsics to use INT8 operations
More details - https://software.intel.com/en-us/articles/
lower-numerical-precision-deep-learning-inference-and-training
"""
if isinstance(cfg["tile_ow"], int):
reg_n = cfg["tile_ow"]
else:
reg_n = cfg["tile_ow"].size[-1]
if isinstance(cfg["unroll_kw"], (int, bool)):
unroll_kw = cfg["unroll_kw"]
else:
unroll_kw = cfg["unroll_kw"].val
_, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
_, _, _, _, oc_bn = get_const_tuple(conv_out.shape)
# schedule pad
if isinstance(s[data_vec].op, te.tensor.ComputeOp) and "pad" in data_vec.op.tag:
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
data_vec = data_vec.op.input_tensors[0]
if autotvm.GLOBAL_SCOPE.in_tuning:
# only in autotuning, input data of conv2d_NCHWc will be 4-D.
# skip this part during tuning to make records accurate.
# this part will be folded during Relay fold_constant pass.
if isinstance(data_vec.op, te.tensor.ComputeOp):
s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region")
if isinstance(kernel_vec.op, te.tensor.ComputeOp):
s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region")
elif isinstance(kernel_vec.op, te.tensor.ComputeOp) and kernel_vec.name == "kernel_vec":
# data and kernel are not pre-computed, schedule layout transform here.
# this should only be used by x86 conv2d_nchw, which is for
# testing purpose.
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
# conv2d_nchwc_int8 has 7D kernel
oc_chunk, ic_chunk, oh, ow, ic_block, oc_block, _ = s[kernel_vec].op.axis
s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
oc_bn = cfg["tile_oc"].size[-1]
if oc_bn > 1:
s[kernel_vec].vectorize(oc_block)
parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
s[kernel_vec].parallel(parallel_axis)
# schedule 5-D NCHW[x]c conv
C, O = conv_out, last
CC = s.cache_write(C, "global")
batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[C].fuse(batch, oc_chunk, oh)
s[C].vectorize(oc_block)
if C == O:
s[C].parallel(parallel_axis)
s[CC].compute_at(s[C], parallel_axis)
_, oc_chunk, oh, ow, oc_block = s[CC].op.axis
kh, kw, ic_outer, ic_f_inner, ic_s_inner = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
assert oc_bn % int32_lanes == 0, f"oc_bn={oc_bn} % int32_lanes={int32_lanes} != 0"
assert (
ic_bn % int8_elems == 0
), f"ic_bn={ic_bn} % int8_elems={int8_elems} != 0" # (u)int8 elements in (u)int32
oc_f_inner, oc_s_inner = s[CC].split(oc_block, factor=int32_lanes)
if unroll_kw:
s[CC].reorder(
oc_chunk,
oh,
ow_chunk,
ic_outer,
kh,
ic_f_inner,
kw,
ow_block,
oc_f_inner,
oc_s_inner,
ic_s_inner,
)
s[CC].unroll(kw)
else:
s[CC].reorder(
oc_chunk,
oh,
ow_chunk,
ic_outer,
kh,
kw,
ic_f_inner,
ow_block,
oc_f_inner,
oc_s_inner,
ic_s_inner,
)
if intrin is not None:
s[CC].tensorize(oc_s_inner, intrin)
s[CC].unroll(ow_block)
s[CC].unroll(oc_f_inner)
if C != O:
out_ndim = len(s[O].op.axis)
if out_ndim == 5:
batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
elif out_ndim == 4:
batch, oc, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
else:
raise ValueError("Unsupported output ndim: %s" % out_ndim)
parallel_axis = s[O].fuse(batch, oc_chunk, oh)
if inline_fused:
s[C].compute_at(s[O], ow_block)
else:
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
def schedule_conv_NCHWc_cpu_1x1_int8(
s,
cfg,
data_vec,
kernel_vec,
conv_out,
last,
int32_lanes=16,
int8_elems=4,
intrin=None,
inline_fused=False,
):
"""
Defines the 1x1 conv schedule for INT8 for Intel and ARM machines
Uses the Intel/ARM intrinsics to use INT8 operations
More details - https://software.intel.com/en-us/articles/
lower-numerical-precision-deep-learning-inference-and-training
"""
oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1]
_, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
_, _, _, _, oc_bn = get_const_tuple(conv_out.shape)
# schedule pad
if isinstance(s[data_vec].op, te.tensor.ComputeOp) and "pad" in data_vec.op.tag:
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
data_vec = data_vec.op.input_tensors[0]
if autotvm.GLOBAL_SCOPE.in_tuning:
# only in autotuning, input data of conv2d_NCHWc will be 4-D.
# skip this part during tuning to make records accurate.
# this part will be folded during Relay fold_constant pass.
if isinstance(data_vec.op, te.tensor.ComputeOp):
s[data_vec].pragma(s[data_vec].op.axis[0], "debug_skip_region")
if isinstance(kernel_vec.op, te.tensor.ComputeOp):
s[kernel_vec].pragma(s[kernel_vec].op.axis[0], "debug_skip_region")
elif isinstance(kernel_vec.op, te.tensor.ComputeOp) and kernel_vec.name == "kernel_vec":
# data and kernel are not pre-computed, schedule layout transform here.
# this should only be used by x86 conv2d_nchw, which is for
# testing purpose.
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
# Conv2d int8 schedule has 7D kernel
oc_chunk, ic_chunk, oh, ow, ic_block, oc_block, _ = s[kernel_vec].op.axis
s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
oc_bn = cfg["tile_oc"].size[-1]
if oc_bn > 1:
s[kernel_vec].vectorize(oc_block)
parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
s[kernel_vec].parallel(parallel_axis)
C, O = conv_out, last
CC = s.cache_write(C, "global")
batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
oh_outer, oh_inner = s[C].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[C].split(ow, factor=ow_factor)
s[C].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
s[C].vectorize(oc_block)
parallel_axis = s[C].fuse(batch, oc_chunk, oh_outer)
if C == O:
s[C].parallel(parallel_axis)
s[CC].compute_at(s[C], parallel_axis) # good perf on mobilenet, but not on individuals?
_, oc_chunk, oh, ow, oc_block = s[CC].op.axis
kh, kw, ic_outer, ic_f_inner, ic_s_inner = s[CC].op.reduce_axis
assert oc_bn % int32_lanes == 0
assert ic_bn % int8_elems == 0 # (u)int8 elements in (u)int32
oc_f_inner, oc_s_inner = s[CC].split(oc_block, factor=int32_lanes)
oh_outer, oh_inner = s[CC].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[CC].split(ow, factor=ow_factor)
s[CC].reorder(
oc_chunk,
oh_outer,
ow_outer,
kh,
kw,
ic_outer,
ic_f_inner,
oh_inner,
ow_inner,
oc_f_inner,
oc_s_inner,
ic_s_inner,
)
s[CC].fuse(oc_chunk, oh_outer)
if intrin is not None:
s[CC].tensorize(oc_s_inner, intrin)
s[CC].unroll(ow_inner)
s[CC].unroll(oh_inner)
if C != O:
out_ndim = len(s[O].op.axis)
if out_ndim == 5:
batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
elif out_ndim == 4:
batch, oc, oh, ow = s[O].op.axis
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
else:
raise ValueError("Unsupported output ndim: %s" % out_ndim)
s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
if inline_fused:
s[C].compute_at(s[O], ow_inner)
else:
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
def schedule_depthwise_conv2d_nhwc(outs):
"""Create schedule for depthwise conv2d in NHWC layout.
Parameters
----------
outs : list[te.tensor.Tensor]
The output tensors.
Returns
-------
s : tvm.te.schedule.Schedule
The computation schedule for depthwise conv2d.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
"""Traverse operators from computation graph"""
if "depthwise_conv2d_nhwc" in op.tag:
out = outs[0]
depthwise_conv2d_out = op.output(0)
data_pad = depthwise_conv2d_out.op.input_tensors[0]
s[data_pad].compute_inline()
if depthwise_conv2d_out != out:
s[depthwise_conv2d_out].compute_at(s[out], s[out].op.axis[3])
s[out].fuse(*s[out].op.axis)
traverse_inline(s, outs[0].op, _callback)
return s
def conv2d_alter_int8_common(
data,
data_tensor,
kernel,
kernel_tensor,
output_tensor,
attrs,
data_dtype: str,
in_channel_vector_length: int,
out_channel_vector_length: int,
):
"""
Convert TE inputs/outputs so that they are suitable for fast Int8 instructions.
Int8 instructions require input channels and output channels to be a
multiple of the vector length. For input channels, we pad both the inputs
and weights channels. For output channels, we pad the weight and
strided_slice the output.
Arguments
---------
data: Expr
Data Expr
data_tensor: Tensor
Data tensor
kernel: Expr
Kernel Expr
kernel_tensor: Tensor
Kernel tensor
output_tensor: Tensor
Output tensor
attrs: Conv2dAttrs
Attributes of the computation
data_dtype: "int8" or "uint8"
Desired dtype of data. Data will be converted to this dtype before the main computation.
in_channel_vector_length: int
Length of vector units on target hardware. Input channels are padded to this length.
out_channel_vector_length: int
Output size of vector instruction. Output channels are padded to this length.
Returns
-------
out : Tensor
Conv2d computation with inputs in the correct order for tensorization.
"""
# Dilation not supported yet. Return None if dilation is not (1, 1)
dilation = attrs.get_int_tuple("dilation")
if not (dilation[0] == 1 and dilation[1] == 1):
return None
# No legalization for depthwise convolutions yet.
groups = attrs.get_int("groups")
if groups != 1:
return None
# Get the conv attrs
new_attrs = {k: attrs[k] for k in attrs.keys()}
padding = attrs.get_int_tuple("padding")
kh, kw = attrs.get_int_tuple("kernel_size")
pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))
if data_tensor.dtype != data_dtype:
# How to convert data to uint8
# Original --> C = A (conv) B
# A and B are int8
# C = (A + 128 - 128) (conv) B
# C = (A' conv B) - 128 (conv) B
# where A' = A + 128
# and 128 (conv) B is basically a reduce on CRS axis for weights.
#
# How to convert data to int8
# C = (A - 128 + 128) (conv) B
# C = (A' conv B) + 128 (conv) B
# where A' = A - 128
if data_dtype == "uint8":
# shift data to uint8
before_shift = relay.add
after_shift = relay.subtract
pad_value = 128
else:
# shift data to int8
before_shift = relay.subtract
after_shift = relay.add
pad_value = -128
if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(0, 1, 2))
pad_width = ((0, 0), (pt, pb), (pl, pr), (0, 0))
elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
pad_width = ((0, 0), (0, 0), (pt, pb), (pl, pr))
adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"), axis=(1, 2, 3))
adjust_shift = relay.expand_dims(adjust_shift, axis=1, num_newaxis=2)
else:
return None
data = relay.cast(data, "int32")
data = before_shift(data, relay.const(128, "int32"))
data = relay.cast(data, data_dtype)
# Do external padding as pad value has to be 128.
if any(padding):
data = relay.nn.pad(data, pad_width=pad_width, pad_value=pad_value)
new_attrs["padding"] = (0, 0)
# Multiply 128 to adjust shift.
adjust_shift = relay.multiply(adjust_shift, relay.const(128, "int32"))
# Flags to remember if the expr is modified
ic_modified = False
oc_modified = False
# Find the value of input and output channel.
in_channel = -1
out_channel = -1
if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
in_channel = data_tensor.shape[3].value
out_channel = kernel_tensor.shape[3].value
elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
in_channel = data_tensor.shape[1].value
out_channel = kernel_tensor.shape[0].value
else:
return None
if in_channel % in_channel_vector_length != 0:
new_in_channel = (
(in_channel + in_channel_vector_length) // in_channel_vector_length
) * in_channel_vector_length
diff = new_in_channel - in_channel
if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
data = relay.nn.pad(data, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, diff), (0, 0)))
ic_modified = True
elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
data = relay.nn.pad(data, pad_width=pad_width)
kernel = relay.nn.pad(kernel, pad_width=pad_width)
ic_modified = True
else:
return None
new_out_channel = out_channel
if out_channel % out_channel_vector_length != 0:
new_out_channel = (
(out_channel + out_channel_vector_length) // out_channel_vector_length
) * out_channel_vector_length
diff = new_out_channel - out_channel
if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
kernel = relay.nn.pad(kernel, pad_width=((0, 0), (0, 0), (0, 0), (0, diff)))
oc_modified = True
elif attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
kernel = relay.nn.pad(kernel, pad_width=((0, diff), (0, 0), (0, 0), (0, 0)))
oc_modified = True
else:
return None
if oc_modified:
new_attrs["channels"] = new_out_channel
out = relay.nn.conv2d(data, kernel, **new_attrs)
original_out_shape = [x.value for x in output_tensor.shape]
out = relay.strided_slice(out, begin=[0, 0, 0, 0], end=original_out_shape)
else:
out = relay.nn.conv2d(data, kernel, **new_attrs)
if data_tensor.dtype != data_dtype:
out = after_shift(out, adjust_shift)
return out
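# Illustrative sketch (not part of TVM): the +/-128 data-shift identity used above, with a
# plain dot product standing in for the convolution's reduction. The helper is hypothetical.
#   A . B == (A + 128) . B - 128 * sum(B)
def _example_int8_shift_identity():
    """Hypothetical NumPy check of the adjust_shift correction."""
    import numpy as np

    A = np.random.randint(-128, 128, size=(64,)).astype("int32")
    B = np.random.randint(-128, 128, size=(64,)).astype("int32")
    lhs = int(A @ B)
    rhs = int((A + 128) @ B) - 128 * int(B.sum())
    assert lhs == rhs
    return lhs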
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/default.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""The default schedule used by various operators"""
import tvm
from tvm import te
def default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.Target.current(allow_none=False)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
if target.kind.name not in ("llvm", "c"):
raise RuntimeError("schedule not registered for '%s'" % target)
s = te.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
te.schedule.AutoInlineInjective(s)
s[x].fuse(*s[x].op.axis)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/extern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""generic declaration and schedules."""
import tvm
from .. import cpp
def schedule_extern(outs):
"""Schedule for an extern op followed by injective operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of extern plus injective ops in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.Target.current()
return cpp.generic.schedule_extern(target, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic image operators"""
from .default import default_schedule as _default_schedule
def schedule_dilation2d_nchw(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_dilation2d_nhwc(outs):
"""Schedule for dilation2d
Parameters
----------
outs : Array of Tensor
The computation graph description of dilation2d
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""generic declaration and schedules."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def schedule_injective_from_existing(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
sch[out].fuse(*sch[out].op.axis)
return sch
def schedule_injective(outs):
"""Schedule for injective op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.Target.current(allow_none=False)
if target.kind.name != "llvm":
raise RuntimeError("schedule_injective not registered for '%s'" % target)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
x = outs[0]
s = te.create_schedule([x.op for x in outs])
te.schedule.AutoInlineInjective(s)
schedule_injective_from_existing(s, x)
return s
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
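# A minimal usage sketch, not part of the original module: it builds a small
# elementwise compute and runs it through schedule_injective under an LLVM
# target, which is the only target this generic schedule accepts. The helper
# name and the shapes are illustrative only.
def _example_schedule_injective_usage():
    A = te.placeholder((16, 32), name="A")
    B = te.compute(A.shape, lambda i, j: A[i, j] * 2.0, name="B")
    with tvm.target.Target("llvm"):
        s = schedule_injective([B])
    # Lowering confirms the fused, auto-inlined schedule is well formed.
    return tvm.lower(s, [A, B], name="example_injective")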
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic math operators"""
from .default import default_schedule as _default_schedule
def schedule_einsum(outs):
"""Schedule for einsum operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of einsum.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
from tvm import te
from .default import default_schedule as _default_schedule
def schedule_conv1d_ncw(outs):
"""Schedule for conv1d_ncw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv1d_ncw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv1d_nwc(outs):
"""Schedule for conv1d_nwc
Parameters
----------
outs: Array of Tensor
The computation graph description of conv1d_nwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv1d_ncw(outs):
"""Schedule for group_conv1d_ncw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv1d_ncw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv1d_nwc(outs):
"""Schedule for group_conv1d_nwc
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv1d_nwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_hwcn(outs):
"""Schedule for conv2d_hwcn
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_nchw(outs):
"""Schedule for conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_nhwc_pack(outs):
"""Schedule for conv2d_nhwc_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nhwc_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_nhwc(outs):
"""Schedule for conv2d_nhwc
Parameters
----------
outs: Array of Tensor
        The computation graph description of conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_NCHWc(outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_NCHWc_int8(outs):
"""Schedule for conv2d_NCHW[x]c_int8
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc_int8
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_winograd_weight_transform(outs):
"""Schedule for weight transformation of winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in PreCompute pass
# so we make a schedule here for cpu llvm
s = te.create_schedule([x.op for x in outs])
output = outs[0]
_, G = s[output].op.input_tensors
s[G].compute_inline()
eps, nu, co, ci = s[output].op.axis
r_kh, r_kw = s[output].op.reduce_axis
s[output].reorder(co, ci, r_kh, r_kw, eps, nu)
for axis in [r_kh, r_kw, eps, nu]:
s[output].unroll(axis)
s[output].parallel(co)
return s
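# A minimal usage sketch, not part of the original module: it feeds the output
# of the winograd weight-transform compute into the schedule above. The kernel
# shape (OIHW) and tile size are arbitrary example values, and the helper name
# is a hypothetical placeholder. The import is local to avoid a circular
# import at module load time.
def _example_winograd_weight_transform_schedule():
    from .. import nn as _nn

    kernel = te.placeholder((64, 64, 3, 3), name="kernel")  # OIHW layout
    transformed = _nn.conv2d_winograd_weight_transform(kernel, tile_size=4)
    s = schedule_conv2d_winograd_weight_transform([transformed])
    return s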
def schedule_conv2d_gemm_weight_transform(outs):
"""Schedule for weight transformation of gemm
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in PreCompute pass
s = te.create_schedule([x.op for x in outs])
return s
def schedule_conv3d_winograd_weight_transform(outs):
"""Schedule for weight transformation of 3D winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in PreCompute pass
# so we make a schedule here for cpu llvm
s = te.create_schedule([x.op for x in outs])
output = outs[0]
_, G = s[output].op.input_tensors
s[G].compute_inline()
transform_depth = len(s[output].op.reduce_axis) == 3
if transform_depth:
omg, eps, nu, ci, co = s[output].op.axis
r_kd, r_kh, r_kw = s[output].op.reduce_axis
s[output].reorder(co, ci, omg, eps, nu, r_kd, r_kh, r_kw)
for axis in [r_kd, r_kh, r_kw]:
s[output].unroll(axis)
else:
eps, nu, d, ci, co = s[output].op.axis
r_kh, r_kw = s[output].op.reduce_axis
s[output].reorder(co, ci, d, eps, nu, r_kh, r_kw)
for axis in [r_kh, r_kw]:
s[output].unroll(axis)
s[output].parallel(co)
return s
def schedule_conv2d_winograd_without_weight_transform(outs):
"""Schedule for winograd without weight transformation
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_winograd_nnpack_weight_transform(outs):
"""Schedule for weight transformation of winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in PreCompute pass
s = te.create_schedule([x.op for x in outs])
return s
def schedule_conv3d_ncdhw(outs):
"""Schedule for conv3d_ncdhw
Parameters
----------
outs: Array of Tensor
        The computation graph description of conv3d_ncdhw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv3d_ndhwc(outs):
"""Schedule for conv3d_ndhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of conv3d_ndhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv3d_transpose_ncdhw(outs):
"""Schedule for conv3d_transpose_ncdhw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv3d_transpose_ncdhw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv2d_transpose_nchw(outs):
"""Schedule for conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_transpose_nchw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_conv1d_transpose_ncw(outs):
"""Schedule for conv1d_transpose_ncw
Parameters
----------
outs: Array of Tensor
        The computation graph description of conv1d_transpose_ncw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_depthwise_conv2d_NCHWc(outs):
"""Schedule for depthwise_conv2d_NCHWc
Parameters
----------
outs: Array of Tensor
        The computation graph description of depthwise_conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_nchw(outs):
"""Schedule for group_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_transpose_nchw(outs):
"""Schedule for group_conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
        The computation graph description of group_conv2d_transpose_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_group_conv2d_nhwc(outs):
"""Schedule for group_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_deformable_conv2d_nchw(outs):
"""Schedule for deformable_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_deformable_conv2d_nhwc(outs):
"""Schedule for deformable_conv2d_nhwc.
We only use the default schedule here and rely on auto_scheduler.
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_conv2d_nchw(outs):
"""Schedule for bitserial_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_conv2d_nhwc(outs):
"""Schedule for bitserial_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
        The computation graph description of bitserial_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitserial_dense(outs):
"""Schedule for bitserial_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_reduce(outs):
"""Schedule for reduction
Parameters
----------
outs: Array of Tensor
The computation graph description of reduce
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, True)
def schedule_softmax(outs):
"""Schedule for softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_fast_softmax(outs):
"""Schedule for fast_softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of fast_softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_matmul(outs):
"""Schedule for matmul
Parameters
----------
outs: Array of Tensor
The computation graph description of matmul
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_dense(outs):
"""Schedule for dense
Parameters
----------
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_pool(outs, layout):
"""Schedule for pool
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_pool_grad(outs):
"""Schedule for pool_grad
Parameters
----------
outs: Array of Tensor
        The computation graph description of pool_grad
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
return _default_schedule(outs, False)
def schedule_adaptive_pool(outs):
"""Schedule for adaptive pool
Parameters
----------
outs: Array of Tensor
The computation graph description of adaptive pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_binarize_pack(outs):
"""Schedule for binarize_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of binarize_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_bitpack(outs):
"""Schedule for bitpack
Parameters
----------
outs: Array of Tensor
The computation graph description of bitpack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_binary_dense(outs):
"""Schedule for binary_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of binary_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_lrn(outs):
"""Schedule for lrn
Parameters
----------
outs: Array of Tensor
The computation graph description of lrn
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_sparse_dense(outs):
"""Schedule for sparse_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of sparse_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_sparse_transpose(outs):
"""Schedule for sparse_transpose
Parameters
----------
outs: Array of Tensor
The computation graph description of sparse_transpose
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_sparse_conv2d(outs):
"""Schedule for sparse_conv2d
Parameters
----------
outs: Array of Tensor
The computation graph description of sparse_conv2d
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_batch_matmul(outs):
"""Schedule for batch_matmul
Parameters
----------
outs: Array of Tensor
        The computation graph description of batch_matmul
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_batch_norm(outs):
"""Schedule for batch_norm
Parameters
----------
outs: Array of Tensor
        The computation graph description of batch_norm
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_correlation_nchw(outs):
"""Schedule for correlation_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of correlation_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_lstm(outs):
"""Schedule for LSTM
Parameters
----------
outs : Array of Tensor
The outputs of LSTM (hidden states and cell states).
Returns
-------
sch: Schedule
The default schedule for LSTM.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/search.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic search operators"""
from __future__ import absolute_import as _abs
from .default import default_schedule as _default_schedule
def schedule_argwhere(outs):
"""Schedule for argwhere operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of argwhere.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_scatter(outs):
"""Schedule for scatter operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of scatter.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_scatter_add(outs):
"""Schedule for scatter_add operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of scatter_add.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_sparse_fill_empty_rows(outs):
    """Schedule for sparse_fill_empty_rows operator.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of sparse_fill_empty_rows.
    Returns
    -------
    s: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
def schedule_unique(outs):
"""Schedule for unique operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of unique.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic sort operators"""
from __future__ import absolute_import as _abs
from .default import default_schedule as _default_schedule
def schedule_sort(outs):
"""Schedule for sort operator.
Parameters
----------
outs: Array of Tensor
        The computation graph description of sort
        in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_argsort(outs):
"""Schedule for argsort operator.
Parameters
----------
outs: Array of Tensor
The indices that would sort an input array along
the given axis.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_topk(outs):
"""Schedule for topk operator.
Parameters
----------
outs: Array of Tensor
        The computation graph description of topk
        in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic/vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member
"""Generic vision operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import cpp
from .default import default_schedule as _default_schedule
def schedule_reorg(outs):
"""Schedule for reorg
Parameters
----------
outs: Array of Tensor
The computation graph description of reorg
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
target = tvm.target.Target.current(allow_none=False)
cpp_target = cpp.TEST_create_target(target.kind.name)
return cpp.generic.default_schedule(cpp_target, outs, False)
def schedule_get_valid_counts(outs):
"""Schedule for get_valid_counts
Parameters
----------
outs: Array of Tensor
The computation graph description of nms
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_nms(outs):
"""Schedule for non-maximum suppression
Parameters
----------
outs: Array of Tensor
The computation graph description of nms
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_multibox_prior(outs):
"""Schedule for multibox_prior
Parameters
----------
outs: Array of Tensor
The computation graph description of multibox_prior
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_multibox_transform_loc(outs):
"""Schedule for multibox_transform_loc
Parameters
----------
outs: Array of Tensor
The computation graph description of
multibox_transform_loc in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_multibox_detection(outs):
"""Schedule for multibox_detection
Parameters
----------
outs: Array of Tensor
The computation graph description of multibox_detection
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_roi_align(outs):
"""Schedule for roi_align
Parameters
----------
outs: Array of Tensor
The computation graph description of roi_align
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_roi_pool(outs):
"""Schedule for roi_align
Parameters
----------
outs: Array of Tensor
The computation graph description of roi_pool
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
def schedule_proposal(outs):
"""Schedule for proposal operator.
Parameters
----------
outs: Array of Tensor
The computation graph description of proposal
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/generic_op_impl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implementation of generic operators in the presence of Tensor"""
# pylint: disable=invalid-name, too-many-arguments
import tvm
from tvm import te
from . import broadcast as _broadcast
from . import math as _math
def _make_bop(broadcast_bop, orig_bop):
"""Make a specific overloaded binary operator of Tensor when applicable;
apply the original operator if it is not supposed to be overloaded.
Consider the following scenario:
OP : + | - | * | /
R0 : int | float | Expr | TensorSlice | Tensor (rank zero)
R1 : Tensor (positive rank)
In terms of (LHS OP RHS), we apply the following overloading rules:
(1) We use broadcast_OP(LHS, RHS), when both LHS and RHS are R1.
(2) We perform element-wise operation of Tensor and scalar,
when one of LHS and RHS is R1 and another is R0.
(3) We do not overload OP (i.e. stick to orig_bop) otherwise.
Parameters
----------
broadcast_bop : operator function
Operator for broadcast tensor-tensor operation, for rule (1).
orig_bop: operator function
Operator before overloading, for rule (3).
Returns
-------
ret : operator function
The overloaded operator function if applicable or orig_bop otherwise.
"""
name = orig_bop.__name__
def _tensor_bop_impl(lhs, rhs):
"""Overloaded {op} operator.
If both operands are non-zero-rank Tensors, it performs
tensor-tensor {op} operation, and broadcasts inputs when necessary.
If one operand is non-zero-rank Tensor, while the other operand is
scalar like type (e.g., numeric types, Expr, or TensorSlice),
it performs tensor-scalar {op} operation on an element-wise basis.
Otherwise, it performs default generic.{op} operation, as defined
in tvm.generic module.
Parameters
----------
lhs : object
Left operand.
rhs : object
Right operand.
Returns
-------
ret : tvm.te.Tensor (if at least one operand is non-zero-rank Tensor)
tvm.Expr (otherwise)
The result of {op} operation.
"""
if not isinstance(lhs, te.tensor.Tensor) and not isinstance(rhs, te.tensor.Tensor):
return orig_bop(lhs, rhs)
return broadcast_bop(lhs, rhs)
_tensor_bop_impl.__doc__ = _tensor_bop_impl.__doc__.format(op=name)
return _tensor_bop_impl
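# A minimal sketch, not part of the original module, of the three overloading
# rules documented above once _bind_generic_ops() below has run. The tensor
# names and shapes are illustrative, and the helper itself is hypothetical.
def _example_overloaded_operators():
    A = te.placeholder((2, 3), name="A")
    B = te.placeholder((1, 3), name="B")
    C = A + B  # rule (1): both operands are tensors, so broadcast add is used
    D = A * 2.0  # rule (2): tensor-scalar, element-wise multiply
    e = tvm.tir.const(1, "int32") + 2  # rule (3): plain Expr, original operator
    return C, D, e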
def _bind_generic_ops():
"""Bind generic operators for Tensor."""
# Check __op_priority__ to make sure the binding happens only once.
__op_priority__ = 1
if __op_priority__ > tvm.tir.generic.__op_priority__:
tvm.tir.generic.__op_priority__ = __op_priority__
tvm.tir.generic.add = _make_bop(_broadcast.add, tvm.tir.generic.add)
tvm.tir.generic.subtract = _make_bop(_broadcast.subtract, tvm.tir.generic.subtract)
tvm.tir.generic.multiply = _make_bop(_broadcast.multiply, tvm.tir.generic.multiply)
tvm.tir.generic.divide = _make_bop(_broadcast.divide, tvm.tir.generic.divide)
tvm.tir.generic.cast = _math.cast
_bind_generic_ops()
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/gpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""GPU specific declaration and schedules."""
from .dense import *
from .conv2d import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/gpu/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for conv2d operator"""
from tvm import te, autotvm
from .. import nn
from ..utils import traverse_inline
from .conv2d_nhwc import schedule_conv2d_nhwc_direct
@autotvm.register_topi_compute("conv2d_nhwc.gpu")
def conv2d_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype="float32"):
"""Compute conv2d with NHWC layout"""
return nn.conv2d_nhwc(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc.gpu")
def schedule_conv2d_nhwc(cfg, outs):
"""Create the schedule for conv2d_nhwc"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv2d_nhwc":
schedule_conv2d_nhwc_direct(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
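# A minimal usage sketch, not part of the original module: it calls the
# registered compute and schedule directly under a CUDA target, relying on
# autotvm's fallback configuration when no tuned records are available
# (a fallback warning is expected). The shapes and the helper name are
# illustrative assumptions.
def _example_conv2d_nhwc_gpu_usage():
    import tvm

    data = te.placeholder((1, 56, 56, 64), name="data")
    kernel = te.placeholder((3, 3, 64, 64), name="kernel")
    with tvm.target.Target("cuda"):
        # strides, padding, dilation, out_dtype are passed positionally: the
        # autotvm template wrapper does not accept keyword arguments.
        out = conv2d_nhwc(data, kernel, (1, 1), (1, 1), (1, 1), "float32")
        s = schedule_conv2d_nhwc([out])
    return s, out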
| https://github.com/zk-ml/tachikoma |