Repository: https://github.com/zk-ml/tachikoma

python/tvm/topi/scatter_add.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Scatter Add operator"""
from tvm.te import hybrid
@hybrid.script
def _scatter_add_1d(data, indices, updates):
out = output_tensor(data.shape, data.dtype)
for i in range(data.shape[0]):
out[i] = data[i]
for i in range(indices.shape[0]):
out[indices[i] if indices[i] >= 0 else indices[i] + data.shape[0]] += updates[i]
return out
@hybrid.script
def _scatter_add_2d(data, indices, updates, axis):
out = output_tensor(data.shape, data.dtype)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
out[i, j] = data[i, j]
if axis == 0:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
out[
indices[i, j] if indices[i, j] >= 0 else indices[i, j] + data.shape[axis], j
] += updates[i, j]
else:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
out[
i, indices[i, j] if indices[i, j] >= 0 else indices[i, j] + data.shape[axis]
] += updates[i, j]
return out
@hybrid.script
def _scatter_add_3d(data, indices, updates, axis):
out = output_tensor(data.shape, data.dtype)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
for k in range(data.shape[2]):
out[i, j, k] = data[i, j, k]
if axis == 0:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
out[
indices[i, j, k]
if indices[i, j, k] >= 0
else indices[i, j, k] + data.shape[axis],
j,
k,
] += updates[i, j, k]
elif axis == 1:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
out[
i,
indices[i, j, k]
if indices[i, j, k] >= 0
else indices[i, j, k] + data.shape[axis],
k,
] += updates[i, j, k]
else:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
out[
i,
j,
indices[i, j, k]
if indices[i, j, k] >= 0
else indices[i, j, k] + data.shape[axis],
] += updates[i, j, k]
return out
@hybrid.script
def _scatter_add_4d(data, indices, updates, axis):
out = output_tensor(data.shape, data.dtype)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
for k in range(data.shape[2]):
for l in range(data.shape[3]):
out[i, j, k, l] = data[i, j, k, l]
if axis == 0:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
for l in range(indices.shape[3]):
out[
indices[i, j, k, l]
if indices[i, j, k, l] >= 0
else indices[i, j, k, l] + data.shape[axis],
j,
k,
l,
] += updates[i, j, k, l]
elif axis == 1:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
for l in range(indices.shape[3]):
out[
i,
indices[i, j, k, l]
if indices[i, j, k, l] >= 0
else indices[i, j, k, l] + data.shape[axis],
k,
l,
] += updates[i, j, k, l]
elif axis == 2:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
for l in range(indices.shape[3]):
out[
i,
j,
indices[i, j, k, l]
if indices[i, j, k, l] >= 0
else indices[i, j, k, l] + data.shape[axis],
l,
] += updates[i, j, k, l]
else:
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
for k in range(indices.shape[2]):
for l in range(indices.shape[3]):
out[
i,
j,
k,
indices[i, j, k, l]
if indices[i, j, k, l] >= 0
else indices[i, j, k, l] + data.shape[axis],
] += updates[i, j, k, l]
return out
def scatter_add(data, indices, updates, axis=0):
"""Update data by adding values in updates at positions defined by indices
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to update.
axis : int
        The axis to perform scatter_add on.
Returns
-------
ret : relay.Expr
The computed result.
"""
if axis < 0:
axis += len(data.shape)
assert axis >= 0
assert axis < len(data.shape)
if len(data.shape) == 1:
return _scatter_add_1d(data, indices, updates)
if len(data.shape) == 2:
return _scatter_add_2d(data, indices, updates, axis)
if len(data.shape) == 3:
return _scatter_add_3d(data, indices, updates, axis)
if len(data.shape) == 4:
return _scatter_add_4d(data, indices, updates, axis)
raise ValueError("scatter_add only support for 1-4 dimensions")

python/tvm/topi/searchsorted.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""searchsorted operator"""
from . import utils
from . import te
from ..tir import ir_builder
from .math import cast
def binary_search(ib, sequence_offset, search_range, sorted_sequence, value, right, out_dtype):
"""Common IR generator for binary search used by CPU and GPU backends.
`sorted_sequence` is a N-D Buffer whose innermost dimension we want to search for `value`,
and `search_range` is the size of the innermost dimension. `sequence_offset` is
a 1-D linearlized offset specifying which of innermost sequences to search.
So the search for `value` is performed over
`sorted_sequence[sequence_offset:(sequence_offset + search_range)]`.
Note that we index N-D Buffer by 1-D linearlized indices.
"""
lo = ib.allocate(out_dtype, (1,), name="lo", scope="local")
hi = ib.allocate(out_dtype, (1,), name="hi", scope="local")
lo[0] = cast(0, out_dtype)
hi[0] = cast(search_range, out_dtype)
# Reference: pytorch/aten/src/ATen/native/cuda/Bucketization.cu
def condition(current_val, target_val):
if right:
return current_val <= target_val
return current_val < target_val
with ib.while_loop(lo[0] < hi[0]):
        mid = lo[0] + ((hi[0] - lo[0]) >> 1)
with ib.if_scope(condition(sorted_sequence[sequence_offset + mid], value)):
lo[0] = mid + 1
with ib.else_scope():
hi[0] = mid
return lo[0]
def searchsorted(sorted_sequence, values, right=False, out_dtype="int64"):
"""Find indices where elements should be inserted to maintain order.
    If `sorted_sequence` is N-dimensional, the innermost dimension of
    `values` is searched in the corresponding dimension of `sorted_sequence`.
Parameters
----------
sorted_sequence : te.Tensor
N-D or 1-D Tensor, containing monotonically increasing sequence
on the innermost dimension.
values : te.Tensor
N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
and `values` must be the same, and outer N-1 axes must have the same size.
    right : bool, optional
        Controls which index is returned if a value lands exactly on one of the sorted values.
        If False, the index of the first suitable location found is given. If True, return the
        last such index. If there is no suitable index, return either 0 or N (where N is the
        size of the innermost dimension).
    out_dtype : string, optional
        The data type of the output indices.
Returns
-------
indices : te.Tensor
Tensor with same shape as values, representing the indices of
elements of `values` if they are inserted in `sorted_sequence`.
"""
def ir(sorted_sequence, values, indices):
ib = ir_builder.create()
sorted_sequence_shape = sorted_sequence.shape
values_shape = values.shape
num_search = utils.prod(values_shape)
search_range = sorted_sequence_shape[-1]
sorted_sequence = ib.buffer_ptr(sorted_sequence)
values = ib.buffer_ptr(values)
indices = ib.buffer_ptr(indices)
with ib.for_range(0, num_search, name="i", kind="parallel") as i:
if len(sorted_sequence_shape) == 1:
sequence_offset = 0
else:
sequence_id = i // values_shape[-1]
sequence_offset = sequence_id * search_range
indices[i] = binary_search(
ib,
sequence_offset,
search_range,
sorted_sequence,
values[i],
right,
out_dtype,
)
return ib.get()
return te.extern(
values.shape,
[sorted_sequence, values],
lambda ins, outs: ir(ins[0], ins[1], outs[0]),
name="searchsorted",
dtype=out_dtype,
)
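# A minimal usage sketch (illustrative, not part of the original file; assumes
# a standard TVM CPU build):
#
#   import tvm
#   from tvm import te, topi
#
#   seq = te.placeholder((5,), "float32", name="seq")
#   vals = te.placeholder((3,), "float32", name="vals")
#   out = topi.searchsorted(seq, vals, right=False)
#   s = te.create_schedule(out.op)
#   f = tvm.build(s, [seq, vals, out], "llvm")
#   # For seq = [1, 3, 5, 7, 9] and vals = [4, 5, 10], the indices are [2, 2, 5].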

python/tvm/topi/sort.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments
"""Argsort operator"""
import tvm
from tvm import te
from .utils import get_const_tuple
def sort(data, axis=-1, is_ascend=1):
"""Performs sorting along the given axis and returns an array
in sorted order.
Parameters
----------
data : tvm.te.Tensor
The input tensor.
axis : int, optional
Axis along which to sort the input tensor.
        By default the last axis is used.
    is_ascend : boolean, optional
        Whether to sort in ascending or descending order.

    Returns
    -------
    out : tvm.te.Tensor
        Sorted output tensor.
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf", data_alignment=8)
out = te.extern(
data.shape,
[data],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.sort.sort", ins[0], outs[0], axis, is_ascend
),
dtype=data.dtype,
in_buffers=[data_buf],
out_buffers=out_buf,
name="sort_cpu",
tag="sort_cpu",
)
return out
def argsort(data, valid_count=None, axis=-1, is_ascend=1, dtype="float32"):
"""Performs sorting along the given axis and returns an array
of indices having the same shape as an input array that index
data in sorted order.
Parameters
----------
data : tvm.te.Tensor
The input tensor.
valid_count : tvm.te.Tensor, optional
1-D tensor for valid number of boxes.
axis : int, optional
Axis along which to sort the input tensor.
        By default the last axis is used.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
DType of the output indices.
Returns
-------
out : tvm.te.Tensor
Sorted index tensor.
Example
--------
.. code-block:: python
        # An example to use argsort
        import numpy as np
        import tvm
        from tvm import te, topi

        dshape = (1, 5, 6)
        data = te.placeholder(dshape, name="data")
        axis = 0
        is_ascend = False
        out = argsort(data, axis=axis, is_ascend=is_ascend)
        np_data = np.random.uniform(size=dshape).astype(data.dtype)
s = topi.generic.schedule_argsort(out)
f = tvm.build(s, [data, out], "llvm")
dev = tvm.cpu()
tvm_data = tvm.nd.array(np_data, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
f(tvm_data, tvm_out)
"""
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
if valid_count is not None:
valid_count_buf = tvm.tir.decl_buffer(
valid_count.shape, valid_count.dtype, "valid_count_buf", data_alignment=4
)
out_buf = tvm.tir.decl_buffer(data.shape, "int32", "out_buf", data_alignment=8)
out = te.extern(
data.shape,
[data, valid_count],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.sort.argsort_nms", ins[0], ins[1], outs[0], axis, is_ascend
),
dtype="int32",
in_buffers=[data_buf, valid_count_buf],
out_buffers=out_buf,
name="argsort_nms_cpu",
tag="argsort_nms_cpu",
)
else:
out_buf = tvm.tir.decl_buffer(data.shape, dtype, "out_buf", data_alignment=8)
out = te.extern(
data.shape,
[data],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.sort.argsort", ins[0], outs[0], axis, is_ascend
),
dtype=dtype,
in_buffers=[data_buf],
out_buffers=out_buf,
name="argsort_cpu",
tag="argsort_cpu",
)
return out
def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"):
"""Get the top k elements in an input tensor along the given axis.
Parameters
----------
data : tvm.te.Tensor
The input tensor.
k : int or tvm.te.Tensor, optional
Number of top elements to select. Return all elements if k < 1.
axis : int, optional
        Axis along which to sort the input tensor.
ret_type: str, optional
The return type [both, values, indices].
"both": return both top k data and indices.
"values": return top k data only.
"indices": return top k indices only.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
The data type of the indices output.
Returns
-------
out : tvm.te.Tensor or List[tvm.te.Tensor]
The computed result.
"""
assert ret_type in ["both", "values", "indices"]
data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
out_shape = list(get_const_tuple(data.shape))
kvar = tvm.te.size_var("k")
if not isinstance(k, int):
out_shape[axis] = kvar
elif k >= 1:
out_shape[axis] = k
out_bufs = []
if ret_type in ["both", "values"]:
out_bufs.append(tvm.tir.decl_buffer(out_shape, data.dtype, "value_buf", data_alignment=8))
if ret_type in ["both", "indices"]:
out_bufs.append(tvm.tir.decl_buffer(out_shape, dtype, "indices_buf", data_alignment=8))
out_shapes = [out_shape] * len(out_bufs)
kv = kvar if not isinstance(k, int) else k
out = te.extern(
out_shapes,
[data],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.sort.topk", ins[0], *outs, kv, axis, ret_type, is_ascend
),
in_buffers=[data_buf],
out_buffers=out_bufs,
name="topk_cpu",
tag="topk_cpu",
)
return out
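# A minimal usage sketch (illustrative, not part of the original file; assumes
# the tvm.contrib.sort packed functions are registered, as in a standard CPU
# build):
#
#   import tvm
#   from tvm import te, topi
#
#   data = te.placeholder((1, 8), "float32", name="data")
#   values, indices = topi.topk(data, k=3, axis=-1, ret_type="both")
#   s = te.create_schedule(values.op)
#   f = tvm.build(s, [data, values, indices], "llvm")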

python/tvm/topi/sparse/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Sparse operators"""
from __future__ import absolute_import as _abs
from .csrmv import csrmv
from .csrmm import csrmm
from .dense import dense

python/tvm/topi/sparse/csrmm.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute SpMM in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ..utils import simplify
from ...tir.generic import cast
def csrmm_default(data, indices, indptr, weight, bias=None):
# pylint: disable=invalid-name
"""The default implementation of csrmm in topi.
Parameters
----------
data : tvm.te.Tensor
1-D with shape [nonzeros]
indices : tvm.te.Tensor
1-D with shape [nonzeros]
indptr : tvm.te.Tensor
1-D with shape [m+1]
weight : tvm.te.Tensor
2-D with shape [k, n]
bias : tvm.te.Tensor, optional
1-D with shape [m]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
assert (
len(data.shape) == 1
and len(indices.shape) == 1
and len(indptr.shape) == 1
and len(weight.shape) == 2
), "only support 2-dim csrmm"
assert isinstance(
weight, te.tensor.Tensor
), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight))
assert (
data.dtype == weight.dtype
), "Data and weight must have the same dtype, but they have %s and %s" % (
data.dtype,
weight.dtype,
)
if bias is not None:
assert len(bias.shape) == 1
M = simplify(indptr.shape[0] - 1)
_, N = weight.shape
def csrmm_default_ir(data, indices, indptr, weight, out):
"""define ir for csrmm"""
irb = tvm.tir.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
weight_ptr = irb.buffer_ptr(weight)
out_ptr = irb.buffer_ptr(out)
M = simplify(indptr.shape[0] - 1)
_, N = weight.shape
with irb.for_range(0, N, kind="vectorize", name="n") as n:
with irb.for_range(0, M, kind="parallel", name="row") as row:
dot = irb.allocate(data.dtype, (1,), name="dot", scope="local")
out_ptr[row * N + n] = cast(0, data.dtype)
dot[0] = cast(0, data.dtype)
row_start = indptr_ptr[row]
row_end = indptr_ptr[row + 1]
row_elems = row_end - row_start
with irb.for_range(0, row_elems, name="idx") as idx:
elem = row_start + idx
dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] * N + n]
out_ptr[row * N + n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = te.extern(
oshape,
[data, indices, indptr, weight],
lambda ins, outs: csrmm_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="csrmm",
dtype=data.dtype,
name="out",
)
if bias is not None:
matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[i], tag=tag.BROADCAST)
return matmul
def csrmm(a, b, c=None):
"""The `csrmm` routine performs a matrix-matrix operation defined as :math:`C := A*B + C`,
    where `B` and `C` are dense matrices and `A` is an m-by-k sparse matrix in the CSR format.
Parameters
----------
a : tvm.contrib.sparse.CSRNDArray
2-D sparse matrix with shape [m, k]
b : tvm.te.Tensor
2-D dense matrix with shape [k, n]
    c : tvm.te.Tensor, optional
        1-D dense vector with shape [m]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
return csrmm_default(a.data, a.indices, a.indptr, b, c)
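# A minimal usage sketch (illustrative, not part of the original file; m, k, n
# and nnz are example sizes):
#
#   from tvm import te
#   from tvm.contrib import sparse
#
#   m, k, n, nnz = 16, 32, 8, 64
#   A = sparse.placeholder((m, k), nonzeros=nnz, dtype="float32", name="A")
#   B = te.placeholder((k, n), "float32", name="B")
#   C = csrmm(A, B)  # C[i, j] = dot(row i of A, column j of B)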

python/tvm/topi/sparse/csrmv.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute SpMV in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ...tir.generic import cast
def csrmv_default(data, indices, indptr, weight, bias=None):
"""The default implementation of csrmv in topi.
Parameters
----------
data : tvm.te.Tensor
1-D with shape [nonzeros]
indices : tvm.te.Tensor
1-D with shape [nonzeros]
indptr : tvm.te.Tensor
1-D with shape [m+1]
weight : tvm.te.Tensor
2-D with shape [k, 1]
    bias : tvm.te.Tensor, optional
        1-D with shape [m]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, 1]
"""
assert len(data.shape) == 1 and len(weight.shape) == 2, "only support 2-dim csrmv"
assert isinstance(
weight, te.tensor.Tensor
), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight))
assert (
data.dtype == weight.dtype
), "Data and weight must have the same dtype, but they have %s and %s" % (
data.dtype,
weight.dtype,
)
if bias is not None:
assert len(bias.shape) == 1
batch = indptr.shape[0] - 1
def csrmv_default_ir(data, indices, indptr, weight, out):
"""define ir for csrmv"""
irb = tvm.tir.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
weight_ptr = irb.buffer_ptr(weight)
out_ptr = irb.buffer_ptr(out)
num_rows = indptr.shape[0] - 1
with irb.for_range(0, num_rows, kind="parallel", name="row") as row:
dot = irb.allocate(data.dtype, (1,), name="dot", scope="local")
out_ptr[row] = cast(0, data.dtype)
dot[0] = cast(0, data.dtype)
row_start = indptr_ptr[row]
row_end = indptr_ptr[row + 1]
row_elems = row_end - row_start
with irb.for_range(0, row_elems, name="elemidx") as elemidx:
elem = row_start + elemidx
dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem]]
out_ptr[row] += dot[0]
return irb.get()
oshape = (batch, 1)
matmul = te.extern(
oshape,
[data, indices, indptr, weight],
lambda ins, outs: csrmv_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="csrmv",
dtype=data.dtype,
name="csrmv",
)
if bias is not None:
matmul = te.compute((batch, 1), lambda i, j: matmul[i, 0] + bias[i], tag=tag.BROADCAST)
return matmul
def csrmv(a, x, y=None):
"""The `csrmv` routine performs a matrix-vector operation defined as :math:`y := A*x + y`,
    where `x` and `y` are dense vectors and `A` is an m-by-k sparse matrix in the CSR format.
Parameters
----------
a : tvm.contrib.sparse.CSRNDArray
2-D sparse matrix with shape [m, k]
x : tvm.te.Tensor
2-D dense matrix with shape [k, 1]
    y : tvm.te.Tensor, optional
        1-D dense vector with shape [m]
Returns
-------
output : tvm.te.Tensor
2-D dense matrix with shape [m, 1]
"""
return csrmv_default(a.data, a.indices, a.indptr, x, y)
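# A minimal usage sketch (illustrative, not part of the original file; m, k and
# nnz are example sizes):
#
#   from tvm import te
#   from tvm.contrib import sparse
#
#   m, k, nnz = 16, 32, 64
#   A = sparse.placeholder((m, k), nonzeros=nnz, dtype="float32", name="A")
#   x = te.placeholder((k, 1), "float32", name="x")
#   y = csrmv(A, x)  # y[i, 0] = dot(row i of A, x)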

python/tvm/topi/sparse/dense.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM operator compute Dense in CSR format."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
from ..utils import simplify
def dense_si(data, indices, indptr, weight, bias=None):
# pylint: disable=invalid-name
"""The implementation of dense in topi, assuming sparse input.
Parameters
----------
data : tvm.te.Tensor
1-D with shape [num_nonzeros]
indices : tvm.te.Tensor
1-D with shape [num_nonzeros]
indptr : tvm.te.Tensor
1-D with shape [m+1]
    weight : tvm.te.Tensor
        2-D with shape [n, k]
    bias : tvm.te.Tensor, optional
        1-D with shape [n]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
assert (
len(data.shape) == 1
and len(indices.shape) == 1
and len(indptr.shape) == 1
and len(weight.shape) == 2
), "only support 2-dim dense"
assert isinstance(
weight, te.tensor.Tensor
), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight))
if bias is not None:
assert len(bias.shape) == 1
dtype = data.dtype
M = simplify(indptr.shape[0] - 1)
N, _ = weight.shape
def dense_default_ir(data, indices, indptr, weight, out):
"""Define IR for Dense"""
dtype = data.dtype
irb = tvm.tir.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
weight_ptr = irb.buffer_ptr(weight)
out_ptr = irb.buffer_ptr(out)
M = simplify(indptr.shape[0] - 1)
N, K = weight.shape
with irb.for_range(0, N, kind="vectorize", name="n") as n:
with irb.for_range(0, M, kind="parallel", name="m") as m:
dot = irb.allocate(dtype, (1,), name="dot", scope="local")
out_ptr[m * N + n] = tvm.tir.const(0, dtype)
dot[0] = tvm.tir.const(0, dtype)
row_start = indptr_ptr[m]
row_elems = indptr_ptr[m + 1] - row_start
with irb.for_range(0, row_elems, name="k") as k:
elem = row_start + k
dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] + n * K]
out_ptr[m * N + n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = te.extern(
oshape,
[data, indices, indptr, weight],
lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="dense",
dtype=dtype,
name="out",
)
if bias is not None:
matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST)
return matmul
def dense_sw(data, w_data, w_indices, w_indptr, bias=None):
# pylint: disable=invalid-name
"""The implementation of dense in topi, assuming sparse weight.
Parameters
----------
data : tvm.te.Tensor
2-D with shape [m, k]
w_data : tvm.te.Tensor
1-D with shape [nonzeros]
w_indices : tvm.te.Tensor
1-D with shape [nonzeros]
w_indptr : tvm.te.Tensor
1-D with shape [n+1]
bias : tvm.te.Tensor, optional
1-D with shape [n]
Returns
-------
output : tvm.te.Tensor
2-D with shape [m, n]
"""
assert (
len(w_data.shape) == 1
and len(w_indices.shape) == 1
and len(w_indptr.shape) == 1
and len(data.shape) == 2
), "only support 2-dim dense"
assert isinstance(
data, te.tensor.Tensor
), "data matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(data))
if bias is not None:
assert len(bias.shape) == 1
dtype = data.dtype
M, _ = data.shape
N = simplify(w_indptr.shape[0] - 1)
def dense_default_ir(data, w_data, w_indices, w_indptr, out):
"""Define IR for Dense"""
dtype = data.dtype
irb = tvm.tir.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
w_data_ptr = irb.buffer_ptr(w_data)
w_indices_ptr = irb.buffer_ptr(w_indices)
w_indptr_ptr = irb.buffer_ptr(w_indptr)
out_ptr = irb.buffer_ptr(out)
M, K = data.shape
N = simplify(w_indptr.shape[0] - 1)
with irb.for_range(0, M, kind="vectorize", name="m") as m:
with irb.for_range(0, N, kind="parallel", name="n") as n:
dot = irb.allocate(dtype, (1,), name="dot", scope="local")
out_ptr[m * N + n] = tvm.tir.const(0, dtype)
dot[0] = tvm.tir.const(0, dtype)
row_start = w_indptr_ptr[n]
row_elems = w_indptr_ptr[n + 1] - row_start
with irb.for_range(0, row_elems, name="k") as k:
elem = row_start + k
dot[0] += w_data_ptr[elem] * data_ptr[w_indices_ptr[elem] + m * K]
out_ptr[m * N + n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = te.extern(
oshape,
[data, w_data, w_indices, w_indptr],
lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="dense",
dtype=dtype,
name="out",
)
if bias is not None:
matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST)
return matmul
def dense(data, weight, bias=None):
"""Applies a linear transformation: :math:`Y = XW^T + b`.
Either data or weight should be tvm.contrib.sparse.CSRNDArray.
Parameters
----------
data : tvm.contrib.sparse.CSRNDArray or te.tensor.Tensor
2-D with shape [batch, in_dim]
weight : te.tensor.Tensor or tvm.contrib.sparse.CSRNDArray
2-D with shape [out_dim, in_dim]
bias : te.tensor.Tensor, optional
1-D with shape [out_dim]
Returns
-------
output : tvm.te.Tensor
2-D with shape [batch, out_dim]
"""
ret = None
if isinstance(data, tvm.contrib.sparse.CSRPlaceholderOp) and isinstance(
weight, te.tensor.Tensor
):
ret = dense_si(data.data, data.indices, data.indptr, weight, bias)
elif isinstance(data, te.tensor.Tensor) and isinstance(
weight, tvm.contrib.sparse.CSRPlaceholderOp
):
ret = dense_sw(data, weight.data, weight.indices, weight.indptr, bias)
else:
        raise NotImplementedError(
            "dense with %s as data and %s as weight is not supported yet."
            % (type(data), type(weight))
        )
return ret
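# A minimal usage sketch (illustrative, not part of the original file; sizes
# are examples). Either operand may be the sparse one:
#
#   from tvm import te
#   from tvm.contrib import sparse
#
#   batch, in_dim, out_dim, nnz = 8, 32, 16, 64
#   X = te.placeholder((batch, in_dim), "float32", name="X")
#   W = sparse.placeholder((out_dim, in_dim), nonzeros=nnz, dtype="float32", name="W")
#   Y = dense(X, W)  # Y = X * W^T, shape (batch, out_dim)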

python/tvm/topi/sparse/utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Some utils for Sparse operation."""
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
from tvm.auto_scheduler import _ffi_api
def random_bsr_matrix(m, n, bs_r, bs_c, density, dtype):
"""Generate a random sparse matrix in bsr format.
Returns
-------
scipy.sparse.bsr_matrix
"""
# pylint: disable=import-outside-toplevel
import numpy as np
import itertools
import scipy.sparse as sp
y = np.zeros((m, n), dtype=dtype)
assert m % bs_r == 0
assert n % bs_c == 0
nnz = int(density * m * n)
num_blocks = int(nnz / (bs_r * bs_c)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, m, bs_r), range(0, n, bs_c))))
assert candidate_blocks.shape[0] == m // bs_r * n // bs_c
chosen_blocks = candidate_blocks[
np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
]
# pylint: disable=invalid-name
for (r, c) in chosen_blocks:
y[r : r + bs_r, c : c + bs_c] = np.random.randn(bs_r, bs_c)
s = sp.bsr_matrix(y, blocksize=(bs_r, bs_c))
assert s.data.shape == (num_blocks, bs_r, bs_c)
assert s.indices.shape == (num_blocks,)
assert s.indptr.shape == (m // bs_r + 1,)
return s
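# A minimal usage sketch (illustrative): a 64x64 scipy BSR matrix with 4x1
# blocks and roughly 10% density.
#
#   s = random_bsr_matrix(64, 64, bs_r=4, bs_c=1, density=0.1, dtype="float32")
#   print(s.data.shape)    # (num_blocks, 4, 1)
#   print(s.indptr.shape)  # (64 // 4 + 1,)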
def random_sparse_dense_params(func, params, bs_r, bs_c, density):
"""Replace the dense parameters with random sparse parameters. Mainly used for testing.
Parameters
----------
func : tvm.relay.Expr
Expr will be optimized to sparse operation.
    params : Dict[String, tvm.nd.array]
Parameters of the Expr.
bs_r : int
The row of BSR matrix block.
bs_c : int
The column of BSR matrix block.
density : float
The density of the random sparse parameters.
Returns
-------
    Dict[String, tvm.nd.array]
The generated random parameters.
"""
def deepcopy(param_dic):
ret = {}
for k, v in param_dic.items():
ret[k] = tvm.nd.array(v.numpy())
return ret
new_params = deepcopy(params)
dense_weight_names = relay.analysis.sparse_dense._search_dense_op_weight(func)
for item in dense_weight_names:
name = str(item)
shape = new_params[name].shape
if shape[0] % bs_r == 0 and shape[1] % bs_c == 0:
new_w = random_bsr_matrix(shape[0], shape[1], bs_r, bs_c, density, "float32").todense()
new_params[name] = tvm.nd.array(new_w)
return new_params
def random_sparse_conv2d_params(func, params, bs_r, bs_c, density, layout):
"""Replace the dense parameters with random sparse parameters. Mainly used for testing.
Parameters
----------
func : tvm.relay.Expr
Expr will be optimized to sparse operation.
    params : Dict[String, tvm.nd.array]
Parameters of the Expr.
bs_r : int
The row of BSR matrix block.
bs_c : int
The column of BSR matrix block.
density : float
The density of the random sparse parameters.
layout : str
layout of network
Returns
-------
    Dict[String, tvm.nd.array]
The generated random parameters.
"""
# pylint: disable=import-outside-toplevel
import numpy as np
def deepcopy(param_dic):
ret = {}
for k, v in param_dic.items():
ret[k] = tvm.nd.array(v.numpy())
return ret
new_params = deepcopy(params)
conv2d_weight_names = relay.analysis.sparse_conv2d._search_conv2d_op_weight(func)
for item in conv2d_weight_names:
name = str(item)
shape = new_params[name].shape
if not ((shape[0] == 1 and shape[1] == 1) or (shape[2] == 1 and shape[3] == 1)):
continue
if layout == "NCHW" and shape[0] % bs_r == 0 and shape[1] % bs_c == 0:
new_w = random_bsr_matrix(shape[0], shape[1], bs_r, bs_c, density, "float32").todense()
new_params[name] = tvm.nd.array(np.array(new_w).reshape(shape))
elif layout == "NHWC" and shape[3] % bs_r == 0 and shape[2] % bs_c == 0:
new_w = random_bsr_matrix(shape[3], shape[2], bs_r, bs_c, density, "float32").todense()
new_params[name] = tvm.nd.array(np.array(new_w).reshape(shape))
return new_params
def convert_model_dense_to_sparse(
mod, params, random_params=False, bs_r=1, bs_c=1, sparsity=0.85, layout="NHWC"
):
"""Convert a dense model to sparse model.
Parameters
----------
    mod : tvm.IRModule
The dense model.
    params : Dict[String, tvm.nd.array]
Parameters of the dense model.
random_params : Bool = False
True to replace the parameters of the dense model with some random sparse tensors.
This is mainly used for testing.
bs_r : int
The row of BSR matrix block.
bs_c : int
The column of BSR matrix block.
sparsity : float
The sparsity of the random sparse parameters.
layout : str
layout of network
Returns
-------
    tvm.IRModule
The updated sparse model.
    Dict[String, tvm.nd.array]
The updated parameters.
"""
mod, params = ddo.simplify_fc_transpose.convert(mod["main"], params)
if random_params:
# Manually replace the parameters of dense to sparse tensors
params = random_sparse_dense_params(mod, params, bs_r=bs_r, bs_c=bs_c, density=1 - sparsity)
# Manually replace the parameters of conv2d to sparse tensors
params = random_sparse_conv2d_params(
mod, params, bs_r=bs_r, bs_c=bs_c, density=1 - sparsity, layout=layout
)
# convert dense matmul to sparse matmul
mod, params = ddo.bsr_dense.convert(mod, params, (bs_r, bs_c), sparsity_threshold=0.8)
# convert dense conv2d to sparse conv2d
mod, params = ddo.bsr_conv2d.convert(
mod, params, (bs_r, bs_c), sparsity_threshold=0.8, layout=layout
)
return tvm.IRModule.from_expr(mod), params
def sparse_sketch_rules():
"""Return the sketch rules for sparse op"""
sparse_sketch_rule_list = [
auto_scheduler.PreloadCustomSketchRule(
sparse_conv2d_meet_condition_func, sparse_conv2d_apply_func, "SparseConv2D"
),
auto_scheduler.PreloadCustomSketchRule(
sparse_dense_meet_condition_func, sparse_dense_apply_func, "SparseDense"
),
# Add more sketch rules for sparse
]
return sparse_sketch_rule_list
def sparse_conv2d_meet_condition_func(search_policy, state, stage_id):
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
if state.stages[stage_id].op.tag in [
"sparse_conv2d_sp_bsrmm",
"sparse_conv2d_sp_bsrmm_block",
]:
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
return auto_scheduler.PreloadCustomSketchRule.PASS
def sparse_conv2d_apply_func(search_policy, state, stage_id):
"""Describe how to generate the initial sketch for sparse conv2d"""
ret = []
s_0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
if s_0.stages[stage_id].op.tag == "sparse_conv2d_sp_bsrmm_block":
return [s_0.state_object, stage_id - 1]
sparse_conv2d = s_0.stages[stage_id].op
sparse_conv2d_block = s_0.stages[stage_id - 1].op
assert sparse_conv2d.tag == "sparse_conv2d_sp_bsrmm"
assert sparse_conv2d_block.tag == "sparse_conv2d_sp_bsrmm_block"
layout = sparse_conv2d.attrs["layout"]
# Set the default consumer of compute block
consumer = sparse_conv2d
# If sparse conv2d has a single elementwise consumer
# We can compute inline the sparse_conv2d output stage
consumers = _ffi_api.SearchPolicyUtilsGetConsumers(
search_policy.search_task, s_0.state_object, stage_id
)
if len(consumers) == 1:
consumer_id = int(consumers.items()[0][0])
if _ffi_api.SearchPolicyUtilsIsElementwiseMatch(
search_policy.search_task, s_0.state_object, stage_id, consumer_id
):
consumer = s_0.stages[consumer_id].op
s_0.compute_inline(sparse_conv2d)
c = None
if layout == "NHWC":
if len(s_0[sparse_conv2d_block].iters) == 6:
# bs_c = 1
i, h, w, nb_j, j, row_offset = s_0[ # pylint: disable=invalid-name
sparse_conv2d_block
].iters
else:
i, h, w, nb_j, j, row_offset, c = s_0[ # pylint: disable=invalid-name
sparse_conv2d_block
].iters
m, x, y, n = s_0[consumer].iters
elif layout == "NCHW":
if len(s_0[sparse_conv2d_block].iters) == 6:
# bs_c = 1
i, nb_j, j, h, w, row_offset = s_0[ # pylint: disable=invalid-name
sparse_conv2d_block
].iters
else:
i, nb_j, j, h, w, row_offset, c = s_0[ # pylint: disable=invalid-name
sparse_conv2d_block
].iters
m, n, x, y = s_0[consumer].iters
i_0, i_1, i_2 = s_0.split(sparse_conv2d_block, i, [None, None])
m_0, m_1 = s_0.follow_split(consumer, m, len(s_0.transform_steps) - 1, 1)
h_0, h_1, h_2 = s_0.split(sparse_conv2d_block, h, [None, None])
x_0, x_1 = s_0.follow_split(consumer, x, len(s_0.transform_steps) - 1, 1)
w_0, w_1, w_2 = s_0.split(sparse_conv2d_block, w, [None, None]) # pylint: disable=invalid-name
y_0, y_1 = s_0.follow_split(consumer, y, len(s_0.transform_steps) - 1, 1)
j_0, j_1 = s_0.split(sparse_conv2d_block, nb_j, [None])
n_0, n_1 = s_0.follow_split(consumer, n, len(s_0.transform_steps) - 1, 1)
if layout == "NHWC":
if c is None:
s_0.reorder(
sparse_conv2d_block,
[i_0, h_0, w_0, j_0, i_1, h_1, w_1, j_1, row_offset, i_2, h_2, w_2, j],
)
else:
s_0.reorder(
sparse_conv2d_block,
[i_0, h_0, w_0, j_0, i_1, h_1, w_1, j_1, row_offset, i_2, h_2, w_2, j, c],
)
s_0.reorder(consumer, [m_0, x_0, y_0, n_0, m_1, x_1, y_1, n_1])
elif layout == "NCHW":
if c is None:
s_0.reorder(
sparse_conv2d_block,
[i_0, j_0, h_0, w_0, i_1, j_1, h_1, w_1, row_offset, i_2, j, h_2, w_2],
)
else:
s_0.reorder(
sparse_conv2d_block,
[i_0, j_0, h_0, w_0, i_1, j_1, h_1, w_1, row_offset, i_2, j, c, h_2, w_2],
)
s_0.reorder(consumer, [m_0, n_0, x_0, y_0, m_1, n_1, x_1, y_1])
s_0.compute_at(sparse_conv2d_block, consumer, n_0)
ret.append([s_0.state_object, stage_id - 2])
return ret
def sparse_dense_meet_condition_func(search_policy, state, stage_id):
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
if state.stages[stage_id].op.tag in [
"sparse_dense_sp_rhs_bsrmm",
"sparse_dense_sp_rhs_bsrmm_block",
]:
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
return auto_scheduler.PreloadCustomSketchRule.PASS
def sparse_dense_apply_func(search_policy, state, stage_id):
"""Describe how to generate the initial sketch for sparse dense"""
ret = []
s_0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
if s_0.stages[stage_id].op.tag == "sparse_dense_sp_rhs_bsrmm_block":
return [s_0.state_object, stage_id - 1]
sparse_dense = s_0.stages[stage_id].op
sparse_dense_block = s_0.stages[stage_id - 1].op
assert sparse_dense.tag == "sparse_dense_sp_rhs_bsrmm"
assert sparse_dense_block.tag == "sparse_dense_sp_rhs_bsrmm_block"
# Set the default consumer of compute block
consumer = sparse_dense
# If sparse dense has a single elementwise consumer
# We can compute inline the sparse_dense output stage
consumers = _ffi_api.SearchPolicyUtilsGetConsumers(
search_policy.search_task, s_0.state_object, stage_id
)
if len(consumers) == 1:
consumer_id = int(consumers.items()[0][0])
if _ffi_api.SearchPolicyUtilsIsElementwiseMatch(
search_policy.search_task, s_0.state_object, stage_id, consumer_id
):
consumer = s_0.stages[consumer_id].op
s_0.compute_inline(sparse_dense)
i, nb_j, j, row_offset, c = s_0[sparse_dense_block].iters
m, n = s_0[consumer].iters
i_0, i_1, i_2 = s_0.split(sparse_dense_block, i, [None, None])
m_0, m_1 = s_0.follow_split(consumer, m, len(s_0.transform_steps) - 1, 1)
j_0, j_1 = s_0.split(sparse_dense_block, nb_j, [None])
n_0, n_1 = s_0.follow_split(consumer, n, len(s_0.transform_steps) - 1, 1)
s_0.reorder(sparse_dense_block, [i_0, j_0, i_1, j_1, row_offset, i_2, j, c])
s_0.reorder(consumer, [m_0, n_0, m_1, n_1])
s_0.compute_at(sparse_dense_block, consumer, n_0)
ret.append([s_0.state_object, stage_id - 2])
return ret

python/tvm/topi/sparse_fill_empty_rows.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHnew_sparse_indices WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, too-many-locals, too-many-arguments, too-many-branches
# pylint: disable=undefined-variable, invalid-name
"""SparseFillEmptyRows operator"""
from ..te import hybrid
@hybrid.script
def _sparse_fill_empty_rows(
sparse_indices,
sparse_values,
dense_shape,
default_value,
new_sparse_indices_shape,
new_sparse_values_shape,
empty_row_indicator_shape,
):
default_value_ = int64(default_value[0])
new_sparse_indices = output_tensor(new_sparse_indices_shape, "int64")
new_sparse_values = output_tensor(new_sparse_values_shape, "int64")
empty_row_indicator = output_tensor(empty_row_indicator_shape, "int64")
new_sparse_indices_row_id = 0
if int64(sparse_indices.shape[0]) == int64(0): # Handle Empty Case
# Fill all rows with default values
for i in range(0, new_sparse_indices_shape[0]):
new_sparse_indices[i, 0] = int64(i)
new_sparse_values[i] = default_value_
empty_row_indicator[i] = int64(1)
for k in range(1, int64(new_sparse_indices_shape[1])):
new_sparse_indices[i, k] = int64(0)
return (new_sparse_indices, new_sparse_values, empty_row_indicator)
else:
# Iterate through sparse_indices and add rows if/when required
for i in range(0, int64(sparse_indices.shape[0])):
if i == 0:
prev_row_id = int64(0)
else:
prev_row_id = int64(sparse_indices[i - 1, 0] + 1)
row_id = int64(sparse_indices[i, 0])
# Since input is in row-major order, add rows between prev_row_id and row_id
for j in range(prev_row_id, row_id):
new_sparse_indices[new_sparse_indices_row_id, 0] = int64(j)
for k in range(1, int64(new_sparse_indices_shape[1])):
new_sparse_indices[new_sparse_indices_row_id, k] = int64(0)
                empty_row_indicator[j] = int64(1)  # mark every inserted row, not just the first
new_sparse_values[new_sparse_indices_row_id] = default_value_
new_sparse_indices_row_id += 1
# Add current element to output
new_sparse_indices[new_sparse_indices_row_id, 0] = row_id
for k in range(1, int64(new_sparse_indices_shape[1])):
new_sparse_indices[new_sparse_indices_row_id, k] = int64(sparse_indices[i, k])
new_sparse_values[new_sparse_indices_row_id] = int64(sparse_values[i])
empty_row_indicator[row_id] = int64(0)
new_sparse_indices_row_id += 1
# Add rows with default value if last row id of sparse_indices is not dense_shape[0] - 1
for i in range(
int64(sparse_indices[sparse_indices.shape[0] - 1, 0] + 1), int64(dense_shape[0])
):
new_sparse_indices[new_sparse_indices_row_id, 0] = int64(i)
for k in range(1, int64(new_sparse_indices_shape[1])):
new_sparse_indices[new_sparse_indices_row_id, k] = int64(0)
empty_row_indicator[i] = int64(1)
new_sparse_values[new_sparse_indices_row_id] = default_value_
new_sparse_indices_row_id += 1
return (new_sparse_indices, new_sparse_values, empty_row_indicator)
def sparse_fill_empty_rows(
sparse_indices,
sparse_values,
dense_shape,
default_value,
new_sparse_indices_shape,
new_sparse_values_shape,
empty_row_indicator_shape,
):
return _sparse_fill_empty_rows(
sparse_indices,
sparse_values,
dense_shape,
default_value,
new_sparse_indices_shape,
new_sparse_values_shape,
empty_row_indicator_shape,
)
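# A worked example (illustrative, not part of the original file). With
#   sparse_indices  = [[0, 0], [2, 1]]
#   sparse_values   = [7, 8]
#   dense_shape     = [5, 2]
#   default_value   = [9]
# rows 1, 3 and 4 of the dense tensor are empty, so the outputs are
#   new_sparse_indices  = [[0, 0], [1, 0], [2, 1], [3, 0], [4, 0]]
#   new_sparse_values   = [7, 9, 8, 9, 9]
#   empty_row_indicator = [0, 1, 0, 1, 1]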

python/tvm/topi/sparse_reshape.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Sparse_Reshape operator"""
from ..tir import decl_buffer, ir_builder, Cast
from ..te import extern, div, floordiv, floormod
def sparse_reshape(
sparse_indices,
prev_shape,
new_shape,
new_sparse_indices_shape,
new_shape_shape,
):
"""
Reshape a Sparse Tensor
Parameters
----------
sparse_indices : relay.Expr
A 2-D tensor[N, n_dim] of integers containing location of sparse values, where N is the
number of sparse values and n_dim is the number of dimensions of the dense_shape
prev_shape : relay.Expr
A 1-D tensor containing the previous shape of the dense tensor
new_shape : relay.Expr
A 1-D tensor containing the new shape of the dense tensor
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
new_sparse_indices, new_shape = relay.sparse_reshape(
sparse_indices, prev_shape, new_shape)
new_sparse_indices = [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
new_shape = [9, 4]
"""
def gen_ir(
sparse_indices_ptr,
prev_shape_ptr,
new_shape_ptr,
new_sparse_indices_ptr,
out_new_shape_ptr,
):
ib = ir_builder.create()
sparse_indices = ib.buffer_ptr(sparse_indices_ptr)
prev_shape = ib.buffer_ptr(prev_shape_ptr)
new_shape = ib.buffer_ptr(new_shape_ptr)
out_new_shape = ib.buffer_ptr(out_new_shape_ptr)
new_sparse_indices = ib.buffer_ptr(new_sparse_indices_ptr)
prev_shape_size = prev_shape_ptr.shape[0]
new_shape_size = new_shape_ptr.shape[0]
multipliers = ib.allocate(
new_shape_ptr.dtype, (prev_shape_size,), name="multipliers", scope="local"
)
dividers = ib.allocate(
new_shape_ptr.dtype, (new_shape_size,), name="dividers", scope="local"
)
flattened_indices = ib.allocate(
new_shape_ptr.dtype,
(sparse_indices_ptr.shape[0],),
name="flattened_indices",
scope="local",
)
total_ele = ib.allocate(new_shape_ptr.dtype, (1,), name="total_ele", scope="local")
total_ele[0] = prev_shape[0]
# Cumulative Reverse Exclusive Multiply
multipliers[prev_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, prev_shape_size - 1) as i_:
i = i_ + 1
multipliers[prev_shape_size - 1 - i] = (
prev_shape[prev_shape_size - i] * multipliers[prev_shape_size - i]
)
total_ele[0] *= prev_shape[prev_shape_size - i]
division_total_ele = ib.allocate(
new_shape_ptr.dtype, (1,), name="division_total_ele", scope="local"
)
division_total_ele[0] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, new_shape_size) as i:
with ib.if_scope(new_shape[i] != -1):
division_total_ele[0] *= new_shape[i]
# Compute true output shape (replace negative ones)
with ib.for_range(0, new_shape_size) as i:
with ib.if_scope(new_shape[i] == -1):
out_new_shape[i] = Cast(
new_shape_ptr.dtype, div(total_ele[0], division_total_ele[0])
)
with ib.else_scope():
out_new_shape[i] = new_shape[i]
equal_shape = ib.allocate("bool", (1,), name="equal_shape", scope="local")
# Check if prev_shape and new_shape are equal
equal_shape[0] = True
with ib.if_scope(prev_shape_size == new_shape_size):
with ib.for_range(0, prev_shape_size) as i:
with ib.if_scope(prev_shape[i] != out_new_shape[i]):
equal_shape[0] = False
with ib.else_scope():
equal_shape[0] = False
# Return same inputs if shapes are equal
with ib.if_scope(equal_shape[0]):
with ib.for_range(0, sparse_indices_ptr.shape[0], kind="parallel") as i:
with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
new_sparse_indices[i, j] = sparse_indices[i, j]
# Else compute new_sparse_indices
with ib.else_scope():
dividers[new_shape_size - 1] = Cast(new_shape_ptr.dtype, 1)
with ib.for_range(0, new_shape_size - 1) as i_:
i = i_ + 1
dividers[new_shape_size - 1 - i] = (
dividers[new_shape_size - i] * out_new_shape[new_shape_size - i]
)
with ib.for_range(0, sparse_indices_ptr.shape[0], kind="parallel") as i:
flattened_indices[i] = Cast(new_shape_ptr.dtype, 0)
with ib.for_range(0, sparse_indices_ptr.shape[1]) as j:
flattened_indices[i] += sparse_indices[i, j] * multipliers[j]
with ib.for_range(0, new_sparse_indices_ptr.shape[0], kind="parallel") as i:
current_element = ib.allocate(
new_shape_ptr.dtype, (1,), name="current_element", scope="local"
)
current_element[0] = flattened_indices[i]
with ib.for_range(0, new_sparse_indices_ptr.shape[1]) as j:
new_sparse_indices[i, j] = Cast(
sparse_indices_ptr.dtype, floordiv(current_element[0], dividers[j])
)
current_element[0] = floormod(current_element[0], dividers[j])
return ib.get()
new_sparse_indices_buf = decl_buffer(
new_sparse_indices_shape, sparse_indices.dtype, "new_sparse_indices_buf"
)
new_shape_buf = decl_buffer(new_shape_shape, prev_shape.dtype, "new_shape_buf")
return extern(
[new_sparse_indices_shape, new_shape_shape],
[sparse_indices, prev_shape, new_shape],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0], outs[1]),
out_buffers=[new_sparse_indices_buf, new_shape_buf],
name="sparse_reshape_cpu",
tag="sparse_reshape_cpu",
)
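# The kernel above is mixed-radix index arithmetic; a NumPy sketch of the same
# idea (illustrative, assuming any -1 in new_shape has already been resolved):
#
#   import numpy as np
#
#   flat = np.ravel_multi_index(sparse_indices.T, prev_shape)  # flatten
#   new_sparse_indices = np.stack(np.unravel_index(flat, new_shape), axis=1)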

python/tvm/topi/stft.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
from tvm import te, tir
def stft(
data,
n_fft,
hop_length,
win_length,
window,
normalized,
onesided,
output_shape,
):
"""
The STFT computes the Fourier transform of short overlapping windows of the input.
This gives frequency components of the signal as they change over time.
Parameters
----------
data : relay.Expr
Either a 1-D tensor or a 2-D batch tensor.
n_fft : int
The size of Fourier transform
hop_length : int
The distance between neighboring sliding window frames
win_length : int
The size of window frame and STFT filter
window : relay.Expr
A 1-D tensor window frame
normalized : bool
Whether to return the normalized STFT results
onesided : bool
        Whether to return onesided result or fill with conjugate symmetry
    output_shape : list
        The shape of the output tensor
Returns
-------
output : relay.Expr
Tensor containing the STFT result
Examples
--------
.. code-block:: python
data = [1, 2, 3, 4, 5, 6]
window = [4, 3, 2]
[n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
-> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
"""
def gen_ir(
data_ptr,
n_fft,
hop_length,
win_length,
window_ptr,
normalized,
onesided,
output_ptr,
loop_kind,
):
ib = tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
window = ib.buffer_ptr(window_ptr)
output = ib.buffer_ptr(output_ptr)
# https://librosa.org/doc/0.7.2/_modules/librosa/core/spectrum.html#stft
with ib.for_range(
0, output_ptr.shape[0] * output_ptr.shape[1], kind="parallel"
) as batch_row:
with ib.for_range(0, output_ptr.shape[2], kind=loop_kind) as col:
batch = ib.allocate("int32", (1), name="batch", scope="local")
row = ib.allocate("int32", (1), name="row", scope="local")
batch = tir.floordiv(batch_row, output_ptr.shape[1])
row = tir.floormod(batch_row, output_ptr.shape[1])
output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
with ib.for_range(0, win_length) as wlen:
output[batch, row, col, 0] += (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.cos(2 * pi * row * wlen / win_length)
)
output[batch, row, col, 1] -= (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.sin(2 * pi * row * wlen / win_length)
)
with ib.if_scope(normalized):
output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))
return ib.get()
output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")
loop_kind = "vectorize"
if isinstance(output_shape[2], tir.expr.SizeVar): # any_dim
loop_kind = "serial"
return te.extern(
output_shape,
[data, window],
lambda ins, outs: gen_ir(
ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0], loop_kind
),
dtype=[data.dtype],
out_buffers=[output_buf],
name="stft_cpu",
tag="stft_cpu",
)
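# For reference, each output element produced by the IR above is one
# (real, imaginary) pair of the short-time Fourier transform, for frequency
# bin `row` and frame `col`:
#
#   out[b, row, col, 0] =  sum_w window[w] * data[b, col*hop_length + w] * cos(2*pi*row*w / win_length)
#   out[b, row, col, 1] = -sum_w window[w] * data[b, col*hop_length + w] * sin(2*pi*row*w / win_length)
#
# with an extra 1/sqrt(n_fft) scaling of both parts when `normalized` is True.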

python/tvm/topi/tag.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all tag system in tvm
Each operator can be tagged by a tag, which indicate its type.
Generic categories
- tag.ELEMWISE="elemwise":
Elementwise operator, for example :code:`out[i, j] = input[i, j]`
- tag.BROADCAST="broadcast":
Broadcasting operator, can always map output axis to the input in order.
for example :code:`out[i, ax1, j, ax2] = input[i, j]`.
Note that the axis need to be in order so transpose is not a bcast operator.
If an input of broadcast operator has same shape as output,
we can ensure that it is elementwise relation.
- tag.INJECTIVE="injective":
Injective operator, can always injectively map output axis to a single input axis.
All injective operator can still be safely fused similar to ewise to reduction.
- tag.COMM_REDUCE="comm_reduce":
Communicative reduction operator
- If an op does not belong to these generic categories, it should have a special tag.
Note
----
When we add a new topi operator, the op need to be tagged as generic as possible.
We can also compose tags like "injective,pad" to give generic and specific information.
When we use composed tags, we must always put generic tag in the first location.
"""
ELEMWISE = "elemwise"
BROADCAST = "broadcast"
INJECTIVE = "injective"
COMM_REDUCE = "comm_reduce"
COMM_REDUCE_IDX = "comm_reduce_idx"
def is_broadcast(tag):
"""Check if a tag is bcast
Parameters
----------
tag : str
The input tag
Returns
-------
ret : bool
Whether a tag is broadcast
"""
if tag in (ELEMWISE, BROADCAST):
return True
return tag.startswith(ELEMWISE) or tag.startswith(BROADCAST)
def is_injective(tag):
"""Check if a tag is injective
Parameters
----------
tag : str
The input tag
Returns
-------
ret : bool
Whether a tag is injective
"""
if tag in (ELEMWISE, BROADCAST, INJECTIVE):
return True
return tag.startswith(ELEMWISE) or tag.startswith(BROADCAST) or tag.startswith(INJECTIVE)
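if __name__ == "__main__":
    # Illustrative self-check of the helpers above: composed tags keep the
    # generic part first, so the startswith tests still classify them.
    assert is_broadcast(BROADCAST)
    assert is_broadcast("elemwise,pad")  # composed tag, generic part first
    assert is_injective("injective,concatenate")
    assert not is_injective(COMM_REDUCE)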
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,unused-argument,len-as-condition
"""Elementwise operators"""
from __future__ import absolute_import as _abs
from . import cpp
def elemwise_sum(xs):
"""Perform element-wise sum on inputs
Parameters
----------
xs : list of tvm.te.Tensor
Input arguments.
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.elemwise_sum(xs)
def full(shape, dtype, fill_value):
"""Fill tensor with fill_value
Parameters
----------
shape : tuple
Input tensor shape.
dtype : str
Data type
fill_value : float
Value to be filled
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.full(shape, dtype, fill_value)
def full_like(x, fill_value):
"""Construct a tensor with same shape as input tensor,
then fill tensor with fill_value.
Parameters
----------
x : tvm.te.Tensor
Input argument.
fill_value : float
Value to be filled
Returns
-------
y : tvm.te.Tensor
The result.
"""
return cpp.full_like(x, fill_value)
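if __name__ == "__main__":
    # Hedged usage sketch (assumes a working tvm build, since these wrappers
    # call into the C++ backend): the results are te.Tensor compute nodes,
    # so shape and dtype can be inspected without building a schedule.
    a = full((2, 3), "float32", 1.0)
    b = full_like(a, 0.0)
    c = elemwise_sum([a, b])
    print(c.shape, c.dtype)  # [2, 3] float32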
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TOPI Testing Util functions.
Used to verify the correctness of operators in TOPI.
"""
from __future__ import absolute_import as _abs
from .conv1d_ncw_python import conv1d_ncw_python, group_conv1d_ncw_python
from .conv2d_hwcn_python import conv2d_hwcn_python
from .conv2d_nchw_python import conv2d_nchw_python
from .conv2d_nhwc_python import conv2d_nhwc_python
from .conv3d_ncdhw_python import conv3d_ncdhw_python
from .conv3d_ndhwc_python import conv3d_ndhwc_python
from .conv3d_transpose_ncdhw_python import conv3d_transpose_ncdhw_python
from .conv2d_transpose_python import conv2d_transpose_nchw_python, conv2d_transpose_nhwc_python
from .conv1d_transpose_ncw_python import conv1d_transpose_ncw_python
from .correlation_nchw_python import correlation_nchw_python
from .deformable_conv2d_python import deformable_conv2d_nchw_python, deformable_conv2d_nhwc_python
from .depthwise_conv2d_python import (
depthwise_conv2d_python_nchw,
depthwise_conv2d_python_nhwc,
depthwise_conv2d_python_nchwc,
)
from .dilate_python import dilate_python
from .softmax_python import softmax_python, log_softmax_python
from .resize_python import resize1d_python, resize2d_python, resize3d_python
from .reorg_python import reorg_python
from .roi_align_python import roi_align_nchw_python, roi_align_nhwc_python
from .roi_pool_python import roi_pool_nchw_python
from .layer_norm_python import layer_norm_python
from .lrn_python import lrn_python
from .l2_normalize_python import l2_normalize_python
from .gather_python import gather_python
from .gather_nd_python import gather_nd_python
from .strided_slice_python import strided_slice_python, strided_set_python
from .batch_matmul import batch_matmul
from .batch_norm import batch_norm
from .slice_axis_python import slice_axis_python
from .sequence_mask_python import sequence_mask
from .poolnd_python import poolnd_python
from .pool_grad_python import pool_grad_nchw
from .one_hot import one_hot
from .depth_to_space import depth_to_space_python
from .space_to_depth import space_to_depth_python
from .crop_and_resize_python import crop_and_resize_python
from .common import (
compare_numpy_tvm,
get_injective_schedule,
get_reduce_schedule,
get_broadcast_schedule,
get_elemwise_schedule,
get_conv2d_nchw_implement,
dispatch,
)
from .adaptive_pool_python import adaptive_pool
from .grid_sample_python import affine_grid_python, grid_sample_python
from .matrix_set_diag import matrix_set_diag
from .space_to_batch_nd import space_to_batch_nd_python
from .batch_to_space_nd import batch_to_space_nd_python
from .nll_loss import nll_loss
from .dense import dense
from .searchsorted import searchsorted_ref
from .conv2d_backcward_weight_python import conv2d_backward_weight_python
from .lstm_python import lstm_python
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/adaptive_pool_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""adaptive pool in python"""
import numpy as np
def _start_index(index, odim, idim):
return int(np.floor(index * idim / odim))
def _end_index(index, odim, idim):
return int(np.ceil((index + 1) * idim / odim))
def _pool1d(in_size, out_size, np_data, np_op):
out = np.zeros(out_size).astype(np_data.dtype)
ow = out_size[0]
for l in range(ow):
l_start = _start_index(l, ow, in_size[0])
l_end = _end_index(l, ow, in_size[0])
l_sl = slice(l_start, l_end)
out[l] = np_op(np_data[l_sl])
return out
def _pool2d(in_size, out_size, np_data, np_op):
out = np.zeros(out_size).astype(np_data.dtype)
oh, ow = out_size
for k in range(oh):
k_start = _start_index(k, oh, in_size[0])
k_end = _end_index(k, oh, in_size[0])
k_sl = slice(k_start, k_end)
for l in range(ow):
l_start = _start_index(l, ow, in_size[1])
l_end = _end_index(l, ow, in_size[1])
l_sl = slice(l_start, l_end)
out[k, l] = np_op(np_data[k_sl, l_sl])
return out
def _pool3d(in_size, out_size, np_data, np_op):
out = np.zeros(out_size).astype(np_data.dtype)
od, oh, ow = out_size
for m in range(od):
m_start = _start_index(m, od, in_size[0])
m_end = _end_index(m, od, in_size[0])
m_sl = slice(m_start, m_end)
for k in range(oh):
k_start = _start_index(k, oh, in_size[1])
k_end = _end_index(k, oh, in_size[1])
k_sl = slice(k_start, k_end)
for l in range(ow):
l_start = _start_index(l, ow, in_size[2])
l_end = _end_index(l, ow, in_size[2])
l_sl = slice(l_start, l_end)
out[m, k, l] = np_op(np_data[m_sl, k_sl, l_sl])
return out
def adaptive_pool_channel_first(np_data, out_size, pool_op, np_op):
"""The reference function for adaptive pool, channel first layout"""
ishape = np_data.shape
n, c = ishape[:2]
oshape = (n, c) + out_size
np_out = np.zeros(oshape).astype(np_data.dtype)
for i in range(n):
for j in range(c):
np_out[i, j] = pool_op(ishape[2:], out_size, np_data[i, j], np_op)
return np_out
def adaptive_pool_channel_last(np_data, out_size, pool_op, np_op):
"""The reference function for adaptive pool, channel last layout"""
ishape = np_data.shape
n, c = ishape[0], ishape[-1]
oshape = (n,) + out_size + (c,)
np_out = np.zeros(oshape).astype(np_data.dtype)
for i in range(n):
for j in range(c):
if len(out_size) == 1:
np_out[i, :, j] = pool_op(ishape[1:-1], out_size, np_data[i, :, j], np_op)
elif len(out_size) == 2:
np_out[i, :, :, j] = pool_op(ishape[1:-1], out_size, np_data[i, :, :, j], np_op)
else:
np_out[i, :, :, :, j] = pool_op(
ishape[1:-1], out_size, np_data[i, :, :, :, j], np_op
)
return np_out
def adaptive_pool(np_data, out_size, pool_type, layout):
"""The reference function for adaptive pool, for 2d and 3d"""
if isinstance(out_size, int):
out_size = (out_size,)
if len(out_size) == 1:
pool_op = _pool1d
elif len(out_size) == 2:
pool_op = _pool2d
else:
assert len(out_size) == 3
pool_op = _pool3d
np_op = np.mean if pool_type == "avg" else np.max
if layout in ["NCW", "NCHW", "NCDHW"]:
return adaptive_pool_channel_first(np_data, out_size, pool_op, np_op)
assert layout in ["NWC", "NHWC", "NDHWC"]
return adaptive_pool_channel_last(np_data, out_size, pool_op, np_op)
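if __name__ == "__main__":
    # Illustrative self-check: 4x4 -> 2x2 average pooling reduces each
    # non-overlapping 2x2 window to its mean, per batch and channel.
    data = np.arange(16, dtype="float32").reshape(1, 1, 4, 4)
    out = adaptive_pool(data, (2, 2), "avg", "NCHW")
    expected = np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype="float32")
    np.testing.assert_allclose(out, expected)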
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Batch matmul in python"""
import numpy as np
def batch_matmul(x, y, out_dtype=None, trans_x=False, trans_y=True):
"""batch_matmul operator implemented in numpy.
Parameters
----------
x : numpy.ndarray
3-D with shape [batch, M, K] ([batch, K, M] if trans_x)
y : numpy.ndarray
3-D with shape [batch, N, K] ([batch, K, N] if not trans_y)
out_dtype : string, optional
Specify the dtype of output
trans_x : bool
Whether x is transposed, i.e. stored as [batch, K, M]
trans_y : bool
Whether y is transposed, i.e. stored as [batch, N, K]
Returns
-------
out : numpy.ndarray
3-D with shape [batch, M, N]
"""
if trans_x:
XB, _, M = x.shape
else:
XB, M, _ = x.shape
if trans_y:
YB, N, _ = y.shape
else:
YB, _, N = y.shape
batch = max(XB, YB)
dtype = x.dtype if out_dtype is None else out_dtype
out = np.zeros((batch, M, N)).astype(dtype)
for i in range(batch):
xx = x[i if XB != 1 else 0].astype(dtype)
yy = y[i if YB != 1 else 0].astype(dtype)
out[i] = np.dot(
xx.T if trans_x else xx,
yy.T if trans_y else yy,
)
return out
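if __name__ == "__main__":
    # Illustrative check: with the default trans_y=True, y is [batch, N, K]
    # and the result matches numpy matmul against the transposed y.
    x = np.random.randn(2, 3, 4).astype("float32")
    y = np.random.randn(2, 5, 4).astype("float32")
    ref = np.matmul(x, y.transpose(0, 2, 1))
    np.testing.assert_allclose(batch_matmul(x, y), ref, rtol=1e-5)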
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/batch_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Batch Normalization implemented in Numpy."""
import numpy as np
def batch_norm(
x: np.ndarray,
gamma: np.ndarray,
beta: np.ndarray,
moving_mean: np.ndarray,
moving_var: np.ndarray,
axis: int,
epsilon: float,
center: bool,
scale: bool,
):
"""Batch Normalization operator implemented in Numpy.
Parameters
----------
x : np.ndarray
Input to be batch-normalized.
gamma : np.ndarray
Scale factor to be applied to the normalized tensor.
beta : np.ndarray
Offset to be applied to the normalized tensor.
moving_mean : np.ndarray
Running mean of input.
moving_var : np.ndarray
Running variance of input.
axis : int
Specify along which shape axis the normalization should occur.
epsilon : float
Small float added to variance to avoid dividing by zero.
center : bool
If True, add offset of beta to normalized tensor, If False,
beta is ignored.
scale : bool
If True, scale normalized tensor by gamma. If False, gamma
is ignored.
Returns
-------
output : np.ndarray
Normalized data with same shape as input
moving_mean : np.ndarray
Running mean of input.
moving_var : np.ndarray
Running variance of input.
"""
shape = [1] * len(x.shape)
shape[axis] = x.shape[axis]
moving_mean_rs = moving_mean.reshape(shape)
moving_var_rs = moving_var.reshape(shape)
out = (x - moving_mean_rs) / np.sqrt(moving_var_rs + epsilon)
if scale:
out = out * gamma.reshape(shape)
if center:
out = out + beta.reshape(shape)
return [out, moving_mean, moving_var]
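if __name__ == "__main__":
    # Illustrative check of the inference formula
    # out = gamma * (x - mean) / sqrt(var + eps) + beta along `axis`:
    # normalizing with the true per-channel statistics centers the output.
    x = np.random.randn(2, 3, 4).astype("float32")
    out, _, _ = batch_norm(
        x,
        gamma=np.ones(3, "float32"),
        beta=np.zeros(3, "float32"),
        moving_mean=x.mean(axis=(0, 2)),
        moving_var=x.var(axis=(0, 2)),
        axis=1,
        epsilon=1e-5,
        center=True,
        scale=True,
    )
    np.testing.assert_allclose(out.mean(axis=(0, 2)), 0.0, atol=1e-4)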
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/batch_to_space_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Batch to space ND in python"""
import numpy as np
from . import strided_slice_python
def batch_to_space_nd_python(data, block_shape, crop_begin_list, crop_end_list):
"""Batch to Space operator in python for NHWC layout.
Parameters
----------
data : np.ndarray
N-D with shape [batch, spatial_shape, remaining_shapes],
where spatial_shape has M dimensions.
block_shape : list of ints
1-D array of size [M] where M is number of spatial dims, specifies block
size for each spatial dimension.
crop_begin_list : list of ints
list of shape [M] where M is number of spatial dims, specifies
begin crop size for each spatial dimension.
crop_end_list : list of ints
list of shape [M] where M is number of spatial dims, specifies
end crop size for each spatial dimension.
Returns
-------
b2s_out : np.ndarray
N-D with shape
[batch / prod(block_shape),
in_shape[1] * block_shape[0] - crop_begin_list[0] - crop_end_list[0], ...,
in_shape[M] * block_shape[M-1] - crop_begin_list[M-1] - crop_end_list[M-1],
remaining_shape]
"""
in_shape = data.shape
N = len(in_shape)
M = len(block_shape)
block_shape_prod = np.prod(block_shape)
in_batch = data.shape[0]
axis = []
r_p_shape = []
r_shape = [block_shape[i] for i in range(0, M)]
axis.append(len(r_shape))
r_shape.append(in_batch // block_shape_prod)
for i in range(1, N):
axis.append(len(r_shape))
if len(axis) < (M + N):
axis.append(len(r_shape) - (M + 1))
r_shape.append(in_shape[i])
r_p_shape.append(int((in_batch / block_shape_prod)))
for i in range(1, M + 1):
r_p_shape.append(in_shape[i] * block_shape[i - 1])
for i in range(M + 1, N):
r_p_shape.append(in_shape[i])
b2s_out = np.reshape(data, newshape=r_shape)
b2s_out = np.transpose(b2s_out, axes=axis)
b2s_out = np.reshape(b2s_out, newshape=r_p_shape)
# Crop the start and end of dimensions of b2s_out
begin_idx = []
end_idx = []
strides = []
for i, _ in enumerate(r_p_shape):
strides.append(1)
if 0 < i <= M:
# begin and end index for spatial dimensions
begin_idx.append(crop_begin_list[i - 1])
end_idx.append(r_p_shape[i] - crop_end_list[i - 1])
else:
begin_idx.append(0)
end_idx.append(r_p_shape[i])
b2s_out = strided_slice_python(b2s_out, begin_idx, end_idx, strides)
return b2s_out
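if __name__ == "__main__":
    # Illustrative sketch mirroring tf.batch_to_space: four 1x1 images with
    # block_shape [2, 2] and no cropping interleave into one 2x2 image.
    data = np.arange(4).reshape(4, 1, 1, 1)
    out = batch_to_space_nd_python(data, [2, 2], [0, 0], [0, 0])
    assert out.shape == (1, 2, 2, 1)
    np.testing.assert_array_equal(out.reshape(2, 2), [[0, 1], [2, 3]])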
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common utility for topi test"""
import numpy as np
import scipy.signal
import tvm
from tvm import topi
from tvm.testing import assert_allclose
_injective_schedule = {
"generic": topi.generic.schedule_injective,
"cpu": topi.x86.schedule_injective,
"arm_cpu": topi.arm_cpu.schedule_injective,
"gpu": topi.cuda.schedule_injective,
"hls": topi.hls.schedule_injective,
}
_reduce_schedule = {
"generic": topi.generic.schedule_reduce,
"cpu": topi.x86.schedule_reduce,
"gpu": topi.cuda.schedule_reduce,
"hls": topi.cuda.schedule_reduce,
}
def dispatch(target, dispatch_map):
if isinstance(target, str):
target = tvm.target.Target(target)
assert isinstance(target, tvm.target.Target)
for key in target.keys:
if key in dispatch_map:
return dispatch_map[key]
return dispatch_map["generic"]
def get_injective_schedule(target):
return dispatch(target, _injective_schedule)
def get_reduce_schedule(target):
return dispatch(target, _reduce_schedule)
get_broadcast_schedule = get_injective_schedule
get_elemwise_schedule = get_injective_schedule
_conv2d_nchw_implement = {
"generic": (topi.nn.conv2d_nchw, topi.generic.schedule_conv2d_nchw),
"cpu": (topi.x86.conv2d_nchw, topi.x86.schedule_conv2d_nchw),
"arm_cpu": (
topi.arm_cpu.conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_conv2d_nchw_spatial_pack,
),
"gpu": (topi.cuda.conv2d_nchw, topi.cuda.schedule_conv2d_nchw),
"mali": (topi.mali.conv2d_nchw_spatial_pack, topi.mali.schedule_conv2d_nchw_spatial_pack),
"bifrost": (
topi.bifrost.conv2d_nchw_spatial_pack,
topi.bifrost.schedule_conv2d_nchw_spatial_pack,
),
"intel_graphics": (topi.intel_graphics.conv2d_nchw, topi.intel_graphics.schedule_conv2d_nchw),
"hls": (topi.nn.conv2d_nchw, topi.hls.schedule_conv2d_nchw),
}
def get_conv2d_nchw_implement(target):
return dispatch(target, _conv2d_nchw_implement)
def compare_numpy_tvm(inputs, output, target, device, compute, schedule):
"""Compare a numpy inputs and output of a function to the results of the TVM version.
Parameters
----------
inputs : Sequence[numpy.nd.array]
List of input numpy arrays to pass to the function.
output : numpy.nd.array
Verified correct function output.
target : tvm.target.Target
Target to run on.
device : tvm.runtime.Device
Context to run on.
compute : callable
Topi compute function to test against.
schedule : callable
Topi scheduling function to test against.
"""
te_inputs = [tvm.te.placeholder(shape=i.shape, dtype=str(i.dtype)) for i in inputs]
te_out = tvm.nd.array(np.zeros(output.shape).astype(output.dtype), device=device)
with tvm.target.Target(target):
out = compute(*te_inputs)
s = schedule([out])
func = tvm.build(s, te_inputs + [out])
arys = [tvm.nd.array(x, device=device) for x in inputs]
func(*(arys + [te_out]))
assert_allclose(te_out.numpy(), output, atol=1e-4, rtol=1e-4)
def _convolve2d(data, weights):
"""2d convolution operator in HW layout.
This is intended to be used as a replacement for
scipy.signal.convolve2d, with wider support for different dtypes.
scipy.signal.convolve2d does not support all TVM-supported
dtypes (e.g. float16). Where possible, this function uses
scipy.signal.convolve2d to take advantage of compiled scipy
routines, falling back to an explicit loop only where needed.
Parameters
----------
data : numpy.ndarray
2-D with shape [in_height, in_width]
weights : numpy.ndarray
2-D with shape [filter_height, filter_width].
Returns
-------
b_np : np.ndarray
2-D with shape [out_height, out_width]
Return value and layout conventions are matched to
``scipy.signal.convolve2d(data, weights, mode="valid")``
"""
try:
return scipy.signal.convolve2d(data, weights, mode="valid")
except ValueError:
pass
weights = np.rot90(weights, k=2)
assert len(data.shape) == len(weights.shape) == 2
dtype = data.dtype
kernel_h, kernel_w = weights.shape
output_shape = [a_dim - w_dim + 1 for a_dim, w_dim in zip(data.shape, weights.shape)]
output = np.zeros(output_shape, dtype=dtype)
for y in range(output_shape[0]):
for x in range(output_shape[1]):
output[y][x] = np.sum(data[y : y + kernel_h, x : x + kernel_w] * weights)
return output
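if __name__ == "__main__":
    # Illustrative dispatch check (assumes an llvm-enabled tvm build): the
    # "llvm" target carries the "cpu" key, so the x86 schedule is selected;
    # targets with no matching key fall back to the "generic" entry.
    assert dispatch("llvm", _injective_schedule) is topi.x86.schedule_injective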
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv1d_ncw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable, invalid-name
"""1D convolution in python"""
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple1d
def dilate_np(x, dilation):
"""1D dilation using numpy
Parameters
----------
x : numpy.ndarray
Array to dilate with shape [batch, in_channel, in_width]
dilation : int
dilation rate of output
Returns
-------
out : numpy.ndarray
Dilated output with shape [batch, in_channel, (in_width - 1) * dilation + 1]
"""
irange = range(len(x) - 1)
for d in range(dilation - 1):
indices = [(d + 1) * (i + 1) for i in irange]
x = np.insert(x, indices, 0)
return x
def group_conv1d_ncw_python(a_np, w_np, stride, padding, dilation, groups):
"Grouped version of `conv1d_ncw_python`, see that for documentation"
a_slices = np.array_split(a_np, groups, axis=1)
w_slices = np.array_split(w_np, groups, axis=0)
b_slices = [
conv1d_ncw_python(a_slice, w_slice, stride, padding, dilation)
for a_slice, w_slice in zip(a_slices, w_slices)
]
return np.concatenate(b_slices, axis=1)
def conv1d_ncw_python(a_np, w_np, stride, padding, dilation):
"""1D convolution operator in NCW layout
Parameters
----------
a_np : numpy.ndarray
3-D with shape [batch, in_channel, in_width]
w_np : numpy.ndarray
3-D with shape [num_filter, in_channel, filter_width]
stride : int
Stride size
padding : int, tuple, or str
Single int for padding size or tuple of (left, right) padding
or a string in ['VALID', 'SAME']
dilation : int
Dilation rate of the kernel
Returns
-------
b_np : numpy.ndarray
3-D with shape [batch, out_channel, out_width]
"""
batch, in_c, in_w = a_np.shape
out_c, _, filter_w = w_np.shape
if isinstance(stride, (tuple, list)):
stride = stride[0]
if isinstance(dilation, (tuple, list)):
dilation = dilation[0]
dilated_filter_w = (filter_w - 1) * dilation + 1
pad_left, pad_right = get_pad_tuple1d(padding, (dilated_filter_w,))
out_w = ((in_w - dilated_filter_w + pad_left + pad_right) // stride) + 1
padded_a_np = np.zeros((batch, in_c, in_w + pad_left + pad_right))
padded_a_np[:, :, pad_left : (in_w + pad_left)] = a_np
b_np = np.zeros((batch, out_c, out_w))
for n in range(batch):
for f in range(out_c):
for c in range(in_c):
out = np.convolve(
padded_a_np[n, c], np.flip(dilate_np(w_np[f, c], dilation)), mode="valid"
)
b_np[n, f] += out[::stride]
return b_np
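if __name__ == "__main__":
    # Illustrative cross-check: a 'VALID' conv1d is a correlation, i.e.
    # np.convolve against the flipped kernel.
    a = np.random.randn(1, 1, 8).astype("float32")
    w = np.random.randn(1, 1, 3).astype("float32")
    out = conv1d_ncw_python(a, w, stride=1, padding="VALID", dilation=1)
    ref = np.convolve(a[0, 0], w[0, 0][::-1], mode="valid")
    np.testing.assert_allclose(out[0, 0], ref, rtol=1e-5)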
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv1d_transpose_ncw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""Transposed 1D convolution in python"""
import numpy as np
import scipy
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple1d
def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed 1D convolution operator in NCW layout.
Parameters
----------
a_np : numpy.ndarray
3-D with shape [batch, in_channel, in_width]
w_np : numpy.ndarray
3-D with shape [in_channel, num_filter, filter_width]
stride : int or a list/tuple of one int
Stride size, or [stride_width]
padding : int, tuple, or str
Single int for padding size, or
tuple of 2 ints for left and right padding, or
['VALID', 'SAME']
output_padding : tuple
Used to recover the actual output shape in case more than one
is possible
Returns
-------
b_np : np.ndarray
3-D with shape [batch, out_channel, out_width]
"""
batch, in_c, in_w = a_np.shape
_, out_c, filter_w = w_np.shape
opad = output_padding[0]
if isinstance(stride, int):
stride_w = stride
else:
stride_w = stride[0]
assert opad < stride_w
fpad_left, fpad_right = get_pad_tuple1d(padding, filter_w)
# dilate stage
dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_w])
# padding stage
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad
padded_a_np = np.zeros((batch, in_c, dilated_a_np.shape[2] + bpad_left + bpad_right))
padded_a_np[:, :, bpad_left : dilated_a_np.shape[2] + bpad_left] = dilated_a_np
# convolution stage
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad
b_np = np.zeros((batch, out_c, out_w))
for n in range(batch):
for f in range(out_c):
for c in range(in_c):
out = scipy.signal.convolve(padded_a_np[n, c], w_np[c, f], mode="valid")
b_np[n, f] += out
return b_np
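if __name__ == "__main__":
    # Illustrative shape check:
    # out_w = (in_w - 1) * stride - pad_left - pad_right + filter_w + opad.
    a = np.random.randn(1, 1, 4).astype("float32")
    w = np.random.randn(1, 1, 3).astype("float32")
    out = conv1d_transpose_ncw_python(a, w, stride=2, padding="VALID", output_padding=(0,))
    assert out.shape == (1, 1, 9)  # (4 - 1) * 2 + 3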
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv2d_backcward_weight_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"""Gradient of conv2d with respect to weight in python"""
import numpy as np
# Reference: cutlass/tools/util/include/cutlass/util/reference/host/convolution.h
def conv2d_backward_weight_nchw_python(
dy_np, x_np, kernel_size, stride, padding, groups=1, channels=None
):
"""Gradient of the conv2d op with respect to weight, in NCHW layout.
Parameters
----------
dy_np : numpy.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
x_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
kernel_size : tuple of two ints
Height and width of the weight
stride : tuple of two ints
Stride size, or [stride_height, stride_width]
padding : tuple of two ints
Spatial padding, or [pad_h, pad_w]
Returns
-------
dw_np : np.ndarray
4-D with shape [num_filter, in_channel, filter_height, filter_width]
"""
N, C, H, W = x_np.shape
_, K, P, Q = dy_np.shape
R, S = kernel_size
pad_h, pad_w = padding
stride_h, stride_w = stride
is_depth_wise = C == K and C == groups
if is_depth_wise:
assert channels == groups, "Only channel_mult == 1 supported for now."
dw = np.zeros((K, 1, R, S)).astype(dy_np.dtype)
else:
assert groups == 1, "General grouped conv2d not supported for now."
dw = np.zeros((K, C, R, S)).astype(dy_np.dtype)
for k in range(K):
for r in range(R):
for s in range(S):
for c in range(dw.shape[1]):
acc = 0
for n in range(N):
for p in range(P):
for q in range(Q):
if not is_depth_wise:
in_c = c
else:
in_c = k
coord = (
n,
in_c,
p * stride_h - pad_h + r,
q * stride_w - pad_w + s,
)
if (
coord[2] < H
and coord[2] >= 0
and coord[3] < W
and coord[3] >= 0
):
acc += dy_np[n, k, p, q] * x_np[coord]
dw[k, c, r, s] = acc
return dw
def conv2d_backward_weight_python(
dy_np, x_np, kernel_size, stride, padding, layout="NCHW", groups=1, channels=None
):
"""Gradient of the conv2d op with respect to weight, in NCHW or NHWC layout.
Parameters
----------
dy_np : numpy.ndarray
4-D with shape [batch, out_channel, out_height, out_width] for NCHW layout
x_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width] for NCHW layout
kernel_size : tuple of two ints
Height and width of the weight
stride : tuple of two ints
Stride size, or [stride_height, stride_width]
padding : tuple of two ints
Spatial padding, or [pad_h, pad_w]
layout: string
Layout of dy_np and x_np
groups: int
Number of groups for grouped convolution.
channels : int
Number of output channels of this convolution.
Returns
-------
dw_np : np.ndarray
Tensor of shape [num_filter, in_channel, filter_height, filter_width] for NCHW layout,
[num_filter, filter_height, filter_width, in_channel] for NHWC layout.
"""
if layout == "NCHW":
return conv2d_backward_weight_nchw_python(
dy_np, x_np, kernel_size, stride, padding, groups, channels
)
dw_np_oihw = conv2d_backward_weight_nchw_python(
np.transpose(dy_np, [0, 3, 1, 2]),
np.transpose(x_np, [0, 3, 1, 2]),
kernel_size,
stride,
padding,
groups,
channels,
)
return np.transpose(dw_np_oihw, [0, 2, 3, 1])
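if __name__ == "__main__":
    # Illustrative shape check for the NCHW weight gradient: dy [N, K, P, Q]
    # and x [N, C, H, W] yield dw [K, C, R, S].
    x = np.random.randn(1, 3, 8, 8).astype("float32")
    dy = np.random.randn(1, 4, 6, 6).astype("float32")
    dw = conv2d_backward_weight_python(dy, x, (3, 3), (1, 1), (0, 0))
    assert dw.shape == (4, 3, 3, 3)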
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv2d_hwcn_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple
def conv2d_hwcn_python(a_np, w_np, stride, padding):
"""Convolution operator in HWCN layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [in_height, in_width, in_channel, batch]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
Returns
-------
b_np : np.ndarray
4-D with shape [out_height, out_width, out_channel, batch]
"""
in_height, in_width, in_channel, batch = a_np.shape
kernel_h, kernel_w, _, num_filter = w_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
# compute the output shape
out_channel = num_filter
out_height = (in_height - kernel_h + pad_h) // stride_h + 1
out_width = (in_width - kernel_w + pad_w) // stride_w + 1
# change the layout from HWCN to NCHW
at = a_np.transpose((3, 2, 0, 1))
wt = w_np.transpose((3, 2, 0, 1))
bt = np.zeros((batch, out_channel, out_height, out_width))
# computation
for n in range(batch):
for f in range(out_channel):
for c in range(in_channel):
if pad_h > 0 or pad_w > 0:
apad = np.zeros((in_height + pad_h, in_width + pad_w))
apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = at[n, c]
else:
apad = at[n, c]
out = scipy.signal.convolve2d(apad, np.rot90(np.rot90(wt[f, c])), mode="valid")
bt[n, f] += out[::stride_h, ::stride_w]
return bt.transpose((2, 3, 1, 0))
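if __name__ == "__main__":
    # Illustrative shape check: HWCN keeps batch last, so a single 6x6x3
    # image with four 3x3 filters and 'VALID' padding gives [4, 4, 4, 1].
    a = np.random.randn(6, 6, 3, 1).astype("float32")
    w = np.random.randn(3, 3, 3, 4).astype("float32")
    out = conv2d_hwcn_python(a, w, stride=1, padding="VALID")
    assert out.shape == (4, 4, 4, 1)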
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv2d_nchw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution in python"""
import numpy as np
import scipy
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_nchw_python(a_np, w_np, stride, padding):
"""Convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
w_np : numpy.ndarray
4-D with shape [num_filter, in_channel, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
num_filter, _, kernel_h, kernel_w = w_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
# compute the output shape
out_channel = num_filter
out_height = (in_height - kernel_h + pad_h) // stride_h + 1
out_width = (in_width - kernel_w + pad_w) // stride_w + 1
b_np = np.zeros((batch, out_channel, out_height, out_width), dtype=a_np.dtype)
# computation
for n in range(batch):
for f in range(out_channel):
for c in range(in_channel):
if pad_h > 0 or pad_w > 0:
apad = np.zeros((in_height + pad_h, in_width + pad_w), dtype=a_np.dtype)
apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = a_np[n, c]
else:
apad = a_np[n, c]
out = _conv2d_hw(apad, w_np[f, c])
b_np[n, f] += out[::stride_h, ::stride_w]
return b_np
def _conv2d_hw(apad, w_np_fc):
"""2d convolution operator in HW layout.
This is intended to be used as a subroutine from
_conv2d_nchw_python. Using scipy.signal.convolve2d directly does
not work for all dtypes (e.g. float16). Where possible, this
function uses scipy.signal.convolve2d to take advantage of
compiled scipy routines, falling back to an explicit loop only
where needed.
Parameters
----------
apad : numpy.ndarray
2-D with shape [in_height, in_width]
w_np_fc : numpy.ndarray
2-D with shape [filter_height, filter_width].
Returns
-------
b_np : np.ndarray
2-D with shape [out_height, out_width]
"""
try:
return scipy.signal.convolve2d(apad, np.rot90(np.rot90(w_np_fc)), mode="valid")
except ValueError:
pass
assert len(apad.shape) == len(w_np_fc.shape) == 2
dtype = apad.dtype
in_height, in_width = apad.shape
kernel_h, kernel_w = w_np_fc.shape
output_shape = [a_dim - w_dim + 1 for a_dim, w_dim in zip(apad.shape, w_np_fc.shape)]
output = np.zeros(output_shape, dtype=apad.dtype)
for y in range(output_shape[0]):
for x in range(output_shape[1]):
output[y][x] = np.sum(apad[y : y + kernel_h, x : x + kernel_w] * w_np_fc)
return output
def conv2d_nchw_python(a_np, w_np, stride, padding, groups=1):
"""Convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
w_np : numpy.ndarray
4-D with shape [num_filter, in_channel // groups, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
a_slices = np.array_split(a_np, groups, axis=1)
w_slices = np.array_split(w_np, groups, axis=0)
b_slices = [
_conv2d_nchw_python(a_slice, w_slice, stride, padding)
for a_slice, w_slice in zip(a_slices, w_slices)
]
b_np = np.concatenate(b_slices, axis=1)
return b_np
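if __name__ == "__main__":
    # Illustrative grouped-conv check: with groups=2 each half of the input
    # channels is convolved with its own half of the filters, then the
    # per-group outputs are concatenated along the channel axis.
    a = np.random.randn(1, 4, 8, 8).astype("float32")
    w = np.random.randn(6, 2, 3, 3).astype("float32")  # in_channel // groups == 2
    out = conv2d_nchw_python(a, w, stride=1, padding="VALID", groups=2)
    assert out.shape == (1, 6, 6, 6)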
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv2d_nhwc_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_nhwc_python(a_np, w_np, stride, padding):
"""Convolution operator in NHWC layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of two ints
Padding size, or ['VALID', 'SAME'], or [pad_height, pad_width]
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_height, out_width, out_channel]
"""
batch, in_height, in_width, in_channel = a_np.shape
kernel_h, kernel_w, _, num_filter = w_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
# compute the output shape
out_channel = num_filter
out_height = (in_height - kernel_h + pad_h) // stride_h + 1
out_width = (in_width - kernel_w + pad_w) // stride_w + 1
# change the layout from NHWC to NCHW
at = a_np.transpose((0, 3, 1, 2))
wt = w_np.transpose((3, 2, 0, 1))
bt = np.zeros((batch, out_channel, out_height, out_width))
# computation
for n in range(batch):
for f in range(out_channel):
for c in range(in_channel):
if pad_h > 0 or pad_w > 0:
apad = np.zeros((in_height + pad_h, in_width + pad_w))
apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = at[n, c]
else:
apad = at[n, c]
out = scipy.signal.convolve2d(apad, np.rot90(np.rot90(wt[f, c])), mode="valid")
bt[n, f] += out[::stride_h, ::stride_w]
return bt.transpose((0, 2, 3, 1))
def conv2d_nhwc_python(a_np, w_np, stride, padding, groups=1):
"""Convolution operator in NHWC layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel // groups, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_height, out_width, out_channel]
"""
a_slices = np.array_split(a_np, groups, axis=3)
w_slices = np.array_split(w_np, groups, axis=3)
b_slices = [
_conv2d_nhwc_python(a_slice, w_slice, stride, padding)
for a_slice, w_slice in zip(a_slices, w_slices)
]
b_np = np.concatenate(b_slices, axis=3)
return b_np
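if __name__ == "__main__":
    # Illustrative shape check: 'SAME' padding with stride 1 preserves the
    # spatial dims, so an 8x8 NHWC input with four 3x3 filters gives
    # [1, 8, 8, 4].
    a = np.random.randn(1, 8, 8, 3).astype("float32")
    w = np.random.randn(3, 3, 3, 4).astype("float32")
    out = conv2d_nhwc_python(a, w, stride=1, padding="SAME")
    assert out.shape == (1, 8, 8, 4)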
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv2d_transpose_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""Transposed convolution in python"""
import numpy as np
import scipy
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple
def _conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
w_np : numpy.ndarray
4-D with shape [in_channel, num_filter, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
output_padding : int or a list/tuple of two ints
Used to disambiguate the output shape.
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_c, in_h, in_w = a_np.shape
_, out_c, filter_h, filter_w = w_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(output_padding, int):
opad_h = opad_w = output_padding
else:
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate stage
dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w])
# padding stage
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
padded_a_np = np.zeros(
(
batch,
in_c,
dilated_a_np.shape[2] + bpad_top + bpad_bottom,
dilated_a_np.shape[3] + bpad_left + bpad_right,
)
).astype(a_np.dtype)
padded_a_np[
:,
:,
bpad_top : dilated_a_np.shape[2] + bpad_top,
bpad_left : dilated_a_np.shape[3] + bpad_left,
] = dilated_a_np
# convolution stage
out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h + opad_h
out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad_w
b_np = np.zeros((batch, out_c, out_h, out_w)).astype(a_np.dtype)
for n in range(batch):
for f in range(out_c):
for c in range(in_c):
out = scipy.signal.convolve2d(padded_a_np[n, c], w_np[c, f], mode="valid")
b_np[n, f] += out
return b_np
def conv2d_transpose_nhwc_python(
a_nhwc, weight, weight_format, stride, padding, output_padding=(0, 0)
):
"""Transposed convolution operator in NHWC layout.
Parameters
----------
a_nhwc : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
weight : numpy.ndarray
4-D in formats HWIO, HWOI, OIHW or IOHW
weight_format : str
['HWIO', 'HWOI', 'OIHW', 'IOHW']
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
output_padding : int or a list/tuple of two ints
Used to disambiguate the output shape.
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_height, out_width, out_channel]
"""
assert a_nhwc.ndim == 4, "a_nhwc number of dimensions should be 4"
assert weight.ndim == 4, "weight number of dimensions should be 4"
a_nchw = np.transpose(a_nhwc, (0, 3, 1, 2))
# conv2d_transpose_nchw_python needs kernel layout to be IOHW
if weight_format == "HWIO":
w_iohw = np.transpose(weight, (2, 3, 0, 1))
elif weight_format == "HWOI":
w_iohw = np.transpose(weight, (3, 2, 0, 1))
elif weight_format == "OIHW":
w_iohw = np.transpose(weight, (1, 0, 2, 3))
elif weight_format == "IOHW":
w_iohw = weight
else:
raise ValueError("Valid weight_formats are HWIO, HWOI, OIHW or IOHW")
res_nchw = conv2d_transpose_nchw_python(
a_nchw, w_iohw, stride, padding, output_padding=output_padding
)
res_nhwc = np.transpose(res_nchw, (0, 2, 3, 1))
return res_nhwc
def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding, groups=1):
"""Convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
w_np : numpy.ndarray
4-D with shape [in_channel, num_filter // groups, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str
Padding size, or ['VALID', 'SAME']
output_padding : int or a list/tuple of two ints
Used to disambiguate the output shape.
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
a_slices = np.array_split(a_np, groups, axis=1)
w_slices = np.array_split(w_np, groups, axis=0)
b_slices = [
_conv2d_transpose_nchw_python(a_slice, w_slice, stride, padding, output_padding)
for a_slice, w_slice in zip(a_slices, w_slices)
]
b_np = np.concatenate(b_slices, axis=1)
return b_np
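if __name__ == "__main__":
    # Illustrative shape check: per spatial dim,
    # out = (in - 1) * stride - pad_begin - pad_end + filter + output_padding,
    # here (4 - 1) * 2 + 3 = 9.
    a = np.random.randn(1, 3, 4, 4).astype("float32")
    w = np.random.randn(3, 8, 3, 3).astype("float32")  # IOHW
    out = conv2d_transpose_nchw_python(a, w, (2, 2), (0, 0), (0, 0))
    assert out.shape == (1, 8, 9, 9)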
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv3d_ncdhw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple3d
def _conv3d_ncdhw_python(a_np, w_np, stride, padding):
batch, in_channel, in_depth, in_height, in_width = a_np.shape
num_filter, _, kernel_d, kernel_h, kernel_w = w_np.shape
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel_d, kernel_h, kernel_w)
)
pad_d = pad_front + pad_back
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
# compute the output shape
out_channel = num_filter
out_depth = (in_depth - kernel_d + pad_d) // stride_d + 1
out_height = (in_height - kernel_h + pad_h) // stride_h + 1
out_width = (in_width - kernel_w + pad_w) // stride_w + 1
b_np = np.zeros((batch, out_channel, out_depth, out_height, out_width))
# computation
for n in range(batch):
for f in range(out_channel):
for c in range(in_channel):
if pad_d > 0 or pad_h > 0 or pad_w > 0:
apad = np.zeros((in_depth + pad_d, in_height + pad_h, in_width + pad_w))
apad[
pad_front : pad_front + in_depth,
pad_top : pad_top + in_height,
pad_left : pad_left + in_width,
] = a_np[n, c]
else:
apad = a_np[n, c]
out = scipy.signal.convolve(apad, np.flip(w_np[f, c]), mode="valid")
b_np[n, f] += out[::stride_d, ::stride_h, ::stride_w]
return b_np
def conv3d_ncdhw_python(a_np, w_np, stride, padding, groups=1):
"""Convolution operator in NCDHW layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
w_np : numpy.ndarray
5-D with shape [num_filter, in_channel, filter_depth, filter_height, filter_width]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str or a list/tuple of three ints
Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
a_slices = np.array_split(a_np, groups, axis=1)
w_slices = np.array_split(w_np, groups, axis=0)
b_slices = [
_conv3d_ncdhw_python(a_slice, w_slice, stride, padding)
for a_slice, w_slice in zip(a_slices, w_slices)
]
b_np = np.concatenate(b_slices, axis=1)
return b_np
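if __name__ == "__main__":
    # Illustrative shape check: a 'VALID' 3-D conv shrinks each spatial dim
    # by kernel - 1, so a 4x4x4 input with 3x3x3 filters gives 2x2x2.
    a = np.random.randn(1, 3, 4, 4, 4).astype("float32")
    w = np.random.randn(2, 3, 3, 3, 3).astype("float32")
    out = conv3d_ncdhw_python(a, w, stride=1, padding="VALID")
    assert out.shape == (1, 2, 2, 2, 2)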
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv3d_ndhwc_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution 3D in python"""
import numpy as np
import scipy.signal
from tvm.topi.nn.utils import get_pad_tuple3d
def _conv3d_ndhwc_python(a_np, w_np, stride, padding):
"""Convolution 3D operator in NDHWC layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_depth, in_height, in_width, in_channel]
w_np : numpy.ndarray
5-D with shape [filter_depth, filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str or a list/tuple of three ints
Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_depth, out_height, out_width, out_channel]
"""
batch, in_depth, in_height, in_width, in_channel = a_np.shape
kernel_d, kernel_h, kernel_w, _, num_filter = w_np.shape
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel_d, kernel_h, kernel_w)
)
pad_d = pad_front + pad_back
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
# compute the output shape
out_channel = num_filter
out_depth = (in_depth - kernel_d + pad_d) // stride_d + 1
out_height = (in_height - kernel_h + pad_h) // stride_h + 1
out_width = (in_width - kernel_w + pad_w) // stride_w + 1
# change the layout from NDHWC to NCDHW
at = a_np.transpose((0, 4, 1, 2, 3))
wt = w_np.transpose((4, 3, 0, 1, 2))
bt = np.zeros((batch, out_channel, out_depth, out_height, out_width), dtype=a_np.dtype)
# computation
for n in range(batch):
for f in range(out_channel):
for c in range(in_channel):
if pad_d > 0 or pad_h > 0 or pad_w > 0:
apad = np.zeros(
(in_depth + pad_d, in_height + pad_h, in_width + pad_w), dtype=a_np.dtype
)
apad[
pad_front : pad_front + in_depth,
pad_top : pad_top + in_height,
pad_left : pad_left + in_width,
] = at[n, c]
else:
apad = at[n, c]
out = scipy.signal.convolve(apad, np.flip(wt[f, c]), mode="valid")
bt[n, f] += out[::stride_d, ::stride_h, ::stride_w]
return bt.transpose((0, 2, 3, 4, 1))
def conv3d_ndhwc_python(a_np, w_np, stride, padding, groups=1):
"""Convolution 3D operator in NDHWC layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_depth, in_height, in_width, in_channel]
w_np : numpy.ndarray
5-D with shape [filter_depth, filter_height, filter_width, in_channel // groups, num_filter]
stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str or a list/tuple of three ints
Padding size, or ['VALID', 'SAME'], or [pad_depth, pad_height, pad_width]
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_depth, out_height, out_width, out_channel]
"""
a_slices = np.array_split(a_np, groups, axis=4)
w_slices = np.array_split(w_np, groups, axis=4)
b_slices = [
_conv3d_ndhwc_python(a_slice, w_slice, stride, padding)
for a_slice, w_slice in zip(a_slices, w_slices)
]
b_np = np.concatenate(b_slices, axis=4)
return b_np
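if __name__ == "__main__":
    # Illustrative cross-layout check (assumes the tvm package is importable):
    # NDHWC is NCDHW with the channel axis moved last, so the two references
    # agree after transposing inputs, weights, and output.
    from tvm.topi.testing import conv3d_ncdhw_python

    a = np.random.randn(1, 4, 4, 4, 3).astype("float32")
    w = np.random.randn(3, 3, 3, 3, 2).astype("float32")  # DHWIO
    out = conv3d_ndhwc_python(a, w, stride=1, padding="VALID")
    ref = conv3d_ncdhw_python(
        a.transpose(0, 4, 1, 2, 3), w.transpose(4, 3, 0, 1, 2), 1, "VALID"
    )
    np.testing.assert_allclose(out, ref.transpose(0, 2, 3, 4, 1), rtol=1e-4)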
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches
"""Convolution 3D transpose in python"""
import numpy as np
import tvm.topi.testing
from tvm.topi.nn.utils import get_pad_tuple3d
def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding, output_padding):
"""Transposed 3d convolution operator in NCDHW layout.
Parameters
----------
a_np : numpy.ndarray
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
w_np : numpy.ndarray
5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
    stride : int or a list/tuple of three ints
Stride size, or [stride_depth, stride_height, stride_width]
padding : int or str
Padding size
output_padding : int or list/tuple of three ints
Used to disambiguate output shape.
Returns
-------
b_np : np.ndarray
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
batch, in_c, in_d, in_h, in_w = a_np.shape
_, out_c, filter_d, filter_h, filter_w = w_np.shape
if isinstance(stride, int):
stride_d = stride_h = stride_w = stride
else:
stride_d, stride_h, stride_w = stride
if isinstance(output_padding, int):
opad_d = opad_h = opad_w = output_padding
else:
opad_d, opad_h, opad_w = output_padding
assert opad_d < stride_d and opad_h < stride_h and opad_w < stride_w
# dilate stage
dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w])
# padding stage
fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
padding, (filter_d, filter_h, filter_w)
)
bpad_front = filter_d - 1 - fpad_front
bpad_back = filter_d - 1 - fpad_back + opad_d
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
padded_a_np = np.zeros(
(
batch,
in_c,
dilated_a_np.shape[2] + bpad_front + bpad_back,
dilated_a_np.shape[3] + bpad_top + bpad_bottom,
dilated_a_np.shape[4] + bpad_left + bpad_right,
)
)
padded_a_np[
:,
:,
bpad_front : dilated_a_np.shape[2] + bpad_front,
bpad_top : dilated_a_np.shape[3] + bpad_top,
bpad_left : dilated_a_np.shape[4] + bpad_left,
] = dilated_a_np
# convolution stage
    out_d = (in_d - 1) * stride_d - fpad_front - fpad_back + filter_d + opad_d
    out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h + opad_h
    out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w + opad_w
w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4))
b_np = tvm.topi.testing.conv3d_ncdhw_python(
padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0)
)
return b_np
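# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file; it
# assumes a working TVM install, since the module calls tvm.topi.testing).
# With stride 2, padding 1 and output_padding 1, each spatial dim follows
# (in - 1) * stride - 2 * pad + kernel + output_padding = 3*2 - 2 + 3 + 1 = 8.
if __name__ == "__main__":
    a = np.random.rand(1, 2, 4, 4, 4).astype("float32")
    w = np.random.rand(2, 3, 3, 3, 3).astype("float32")  # [in_c, out_c, kd, kh, kw]
    b = conv3d_transpose_ncdhw_python(a, w, (2, 2, 2), (1, 1, 1), (1, 1, 1))
    assert b.shape == (1, 3, 8, 8, 8)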
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/correlation_nchw_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Convolution 3D in python"""
import numpy as np
def correlation_nchw_python(
data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply
):
"""Correlationn operator in NCHW layout.
Parameters
----------
    data1 : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
    data2 : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
kernel_size: int
Kernel size for correlation, must be an odd number
max_displacement: int
Max displacement of Correlation
stride1: int
Stride for data1
stride2: int
        Stride for data2 within the neighborhood centered around data1
padding: int
Padding for correlation
    is_multiply: bool
        whether the operation is multiplication (True) or absolute subtraction (False)
Returns
-------
c_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
# compute output's dimension
pad_data_height = data1.shape[2] + 2 * padding
pad_data_width = data1.shape[3] + 2 * padding
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
out_width = (pad_data_width - border_size * 2) // stride1
out_height = (pad_data_height - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
out_channel = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], out_channel, out_height, out_width))
pad_data1 = np.zeros((data1.shape[0], data1.shape[1], pad_data_height, pad_data_width))
pad_data2 = np.zeros((data1.shape[0], data1.shape[1], pad_data_height, pad_data_width))
pad_data1[:, :, padding : padding + data1.shape[2], padding : padding + data1.shape[3]] = data1[
:, :, :, :
]
pad_data2[:, :, padding : padding + data2.shape[2], padding : padding + data2.shape[3]] = data2[
:, :, :, :
]
if is_multiply:
corr_func = lambda x, y: x * y
else:
corr_func = lambda x, y: abs(x - y)
# pylint: disable=too-many-nested-blocks
for i in range(out_height):
for j in range(out_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for q in range(out_channel):
# location in data2
x2 = x1 + (q % neighborhood_grid_width - neighborhood_grid_radius) * stride2
y2 = y1 + (q // neighborhood_grid_width - neighborhood_grid_radius) * stride2
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
out[nbatch, q, i, j] += corr_func(
pad_data1[nbatch, channel, y1 + h, x1 + w],
pad_data2[nbatch, channel, y2 + h, x2 + w],
)
out /= float(kernel_size**2 * data1.shape[1])
return out
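# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): with
# max_displacement=1 and stride2=1 the neighborhood grid is 3x3, so the
# output has 9 channels, one per displacement.
if __name__ == "__main__":
    d1 = np.random.rand(1, 2, 4, 4)
    d2 = np.random.rand(1, 2, 4, 4)
    out = correlation_nchw_python(
        d1, d2, kernel_size=1, max_displacement=1, stride1=1, stride2=1, padding=1, is_multiply=True
    )
    assert out.shape == (1, 9, 4, 4)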
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/crop_and_resize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-nested-blocks
"""crop and resize in python"""
import math
import numpy as np
def crop_and_resize_python(
image, boxes, box_indices, crop_size, layout, method="bilinear", extrapolation_value=0
):
"""Crop and resize using python"""
(target_h, target_w) = crop_size
if layout == "NHWC":
batch = boxes.shape[0]
image_height, image_width, channel = image.shape[1], image.shape[2], image.shape[3]
scaled_image = np.ones((batch, target_h, target_w, channel))
else:
batch = boxes.shape[0]
channel, image_height, image_width = image.shape[1], image.shape[2], image.shape[3]
scaled_image = np.ones((batch, channel, target_h, target_w))
for n, box in enumerate(boxes):
b_in = box_indices[n]
y1, x1 = boxes[n][0], boxes[n][1]
y2, x2 = boxes[n][2], boxes[n][3]
in_h = (image_height - 1) * (y2 - y1)
in_w = (image_width - 1) * (x2 - x1)
h_scale = np.float32(in_h) / np.float32(target_h - 1)
w_scale = np.float32(in_w) / np.float32(target_w - 1)
for y in range(target_h):
in_y = y1 * (image_height - 1) + h_scale * y
if in_y < 0 or in_y > image_height - 1:
for x in range(target_w):
for d in range(channel):
if layout == "NHWC":
scaled_image[n][y][x][d] = extrapolation_value
else:
scaled_image[n][d][y][x] = extrapolation_value
continue
if method == "bilinear":
top_y_index = math.floor(in_y)
bottom_y_index = math.ceil(in_y)
y_lerp = in_y - top_y_index
for x in range(target_w):
in_x = x1 * (image_width - 1) + x * w_scale
if in_x < 0 or in_x > image_width - 1:
for d in range(channel):
if layout == "NHWC":
scaled_image[n][y][x][d] = extrapolation_value
else:
scaled_image[n][d][y][x] = extrapolation_value
continue
left_x_index = math.floor(in_x)
right_x_index = math.ceil(in_x)
x_lerp = in_x - left_x_index
for d in range(channel):
if layout == "NHWC":
top_left = image[b_in][top_y_index][left_x_index][d]
top_right = image[b_in][top_y_index][right_x_index][d]
bottom_left = image[b_in][bottom_y_index][left_x_index][d]
bottom_right = image[b_in][bottom_y_index][right_x_index][d]
top = top_left + (top_right - top_left) * x_lerp
bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
scaled_image[n][y][x][d] = top + (bottom - top) * y_lerp
else:
top_left = image[b_in][d][top_y_index][left_x_index]
top_right = image[b_in][d][top_y_index][right_x_index]
bottom_left = image[b_in][d][bottom_y_index][left_x_index]
bottom_right = image[b_in][d][bottom_y_index][right_x_index]
top = top_left + (top_right - top_left) * x_lerp
bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
scaled_image[n][d][y][x] = top + (bottom - top) * y_lerp
elif method == "nearest_neighbor":
for x in range(target_w):
in_x = x1 * (image_width - 1) + x * w_scale
if in_x < 0 or in_x > image_width - 1:
for d in range(channel):
if layout == "NHWC":
scaled_image[n][y][x][d] = extrapolation_value
else:
scaled_image[n][d][y][x] = extrapolation_value
continue
closest_x_index = np.round(in_x).astype("int32")
closest_y_index = np.round(in_y).astype("int32")
for d in range(channel):
if layout == "NHWC":
scaled_image[n][y][x][d] = image[b_in][closest_y_index][
closest_x_index
][d]
else:
scaled_image[n][d][y][x] = image[b_in][d][closest_y_index][
closest_x_index
]
return scaled_image
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/deformable_conv2d_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Deformable convolution in python"""
import itertools
import math
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple
def deformable_conv2d_nchw_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
"""Deformable convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
offset_np : numpy.ndarray
4-D with shape [batch, deformable_groups * filter_height * filter_width * 2,
out_height, out_width]
w_np : numpy.ndarray
4-D with shape [num_filter, in_channel, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation : int or a list/tuple of two ints
Dilation size, or [dilate_height, dilate_width]
deformable_groups : int
Number of deformable groups
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
out_channel, _, kernel_h, kernel_w = w_np.shape
out_height, out_width = offset_np.shape[-2:]
dtype = a_np.dtype
ic_per_dgroup = in_channel // deformable_groups
assert groups == 1, "deformable_conv2d_nchw_python does not support groups > 1"
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
pad_top, pad_left, _, _ = get_pad_tuple(padding, (kernel_h, kernel_w))
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
def _bilinear(n, c, h, w):
y_low = int(math.floor(h))
x_low = int(math.floor(w))
y_high = y_low + 1
x_high = x_low + 1
wy_h = h - y_low
wx_h = w - x_low
wy_l = 1 - wy_h
wx_l = 1 - wx_h
val = 0
for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
if 0 <= yp < in_height and 0 <= xp < in_width:
val += wx * wy * a_np[n, c, yp, xp]
return val
a_deform = np.zeros((batch, in_channel, out_height, out_width, kernel_h, kernel_w), dtype=dtype)
for n, h, w in itertools.product(range(batch), range(out_height), range(out_width)):
offset = offset_np[n, :, h, w].reshape(deformable_groups, kernel_h, kernel_w, 2)
in_h = h * stride_h - pad_top
in_w = w * stride_w - pad_left
index_h_base, index_w_base = np.meshgrid(
np.arange(in_h, in_h + kernel_h * dilation_h, dilation_h, dtype=offset_np.dtype),
np.arange(in_w, in_w + kernel_w * dilation_w, dilation_w, dtype=offset_np.dtype),
indexing="ij",
)
for c, kh, kw in itertools.product(range(in_channel), range(kernel_h), range(kernel_w)):
dg = c // ic_per_dgroup
index_h = index_h_base + offset[dg, ..., 0]
index_w = index_w_base + offset[dg, ..., 1]
y, x = index_h[kh, kw], index_w[kh, kw]
if y < 0 or y >= in_height or x < 0 or x >= in_width:
continue
a_deform[n, c, h, w, kh, kw] = _bilinear(n, c, y, x)
b_np = np.zeros((batch, out_channel, out_height, out_width), dtype=dtype)
for n, c, f, h, w in itertools.product(
range(batch), range(in_channel), range(out_channel), range(out_height), range(out_width)
):
b_np[n, f, h, w] += np.tensordot(a_deform[n, c, h, w], w_np[f, c])
return b_np
def deformable_conv2d_nhwc_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
"""Deformable convolution operator in NHWC layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
offset_np : numpy.ndarray
4-D with shape [batch, out_height, out_width,
deformable_groups * filter_height * filter_width * 2]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation : int or a list/tuple of two ints
Dilation size, or [dilate_height, dilate_width]
deformable_groups : int
Number of deformable groups
groups : int
Number of groups
Returns
-------
    b_np : np.ndarray
        4-D with shape [batch, out_height, out_width, out_channel]
"""
a_np = np.transpose(a_np, [0, 3, 1, 2]) # NHWC -> NCHW
offset_np = np.transpose(offset_np, [0, 3, 1, 2]) # NHWC -> NCHW
w_np = np.transpose(w_np, [3, 2, 0, 1]) # HWIO -> OIHW
b_np = deformable_conv2d_nchw_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
)
b_np = np.transpose(b_np, [0, 2, 3, 1]) # NCHW -> NHWC
return b_np
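# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): an
# all-zero offset map degenerates the deformable kernel into a regular 3x3
# sampling grid, so this only smoke-tests shapes.
if __name__ == "__main__":
    a = np.random.rand(1, 3, 5, 5).astype("float32")
    w = np.random.rand(4, 3, 3, 3).astype("float32")
    offset = np.zeros((1, 2 * 3 * 3, 5, 5), dtype="float32")  # deformable_groups=1
    b = deformable_conv2d_nchw_python(a, offset, w, 1, 1, 1, 1, 1)
    assert b.shape == (1, 4, 5, 5)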
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dense in python"""
import numpy as np
def dense(x, y, bias, use_bias=False, use_relu=False, out_dtype=None):
"""dense operator implemented in numpy.
Parameters
----------
x : numpy.ndarray
2-D with shape [M, K]
y : numpy.ndarray
2-D with shape [N, K]
    bias: numpy.ndarray
        1-D with shape [N,]
    use_bias: bool, optional
        Whether to add bias to the result
    use_relu: bool, optional
        Whether to apply relu to the result
    out_dtype: string, optional
        Specify the dtype of output
Returns
-------
out : numpy.ndarray
2-D with shape [M, N]
"""
dtype = x.dtype if out_dtype is None else out_dtype
if use_bias:
out = np.dot(x.astype(dtype), y.T.astype(dtype)) + bias
else:
out = np.dot(x.astype(dtype), y.T.astype(dtype))
if use_relu:
out = np.maximum(out, 0)
return out
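# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): a
# tiny [2, 3] x [4, 3]^T product with a zero bias.
if __name__ == "__main__":
    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = np.ones((4, 3))
    out = dense(x, y, np.zeros(4), use_bias=True)
    assert out.shape == (2, 4) and out[0, 0] == 6.0  # 1 + 2 + 3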
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Depth to space in python"""
import numpy as np
def depth_to_space_python(data, block_size, mode="DCR"):
"""Depth to Space operator in python for NCHW layout.
Parameters
----------
data : np.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
block_size : int
        Size of blocks to convert channel pixels into.
    mode : string
        Block rearrangement mode, either "DCR" (default) or "CDR".
    Returns
    -------
    d2s_out : np.ndarray
        4-D with shape [batch, in_channel / (block_size * block_size),
                        in_height * block_size, in_width * block_size]
"""
in_n, in_c, in_h, in_w = data.shape
    new_h = int(in_h * block_size)
    new_w = int(in_w * block_size)
new_c = int(in_c / (block_size * block_size))
if mode == "DCR":
expanded = np.reshape(data, newshape=[in_n, block_size, block_size, new_c, in_h, in_w])
transposed = np.transpose(expanded, axes=[0, 3, 4, 1, 5, 2])
else:
expanded = np.reshape(data, newshape=(in_n, new_c, block_size, block_size, in_h, in_w))
transposed = np.transpose(expanded, axes=(0, 1, 4, 2, 5, 3))
newshape = [in_n, new_c, new_h, new_w]
d2s_out = np.reshape(transposed, newshape=newshape)
return d2s_out
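# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): four
# 2x2 channels are rearranged into one 4x4 channel; in DCR mode the first
# output row interleaves the first pixel of each block channel.
if __name__ == "__main__":
    data = np.arange(16).reshape(1, 4, 2, 2)
    out = depth_to_space_python(data, 2)
    assert out.shape == (1, 1, 4, 4)
    assert out[0, 0, 0].tolist() == [0, 4, 1, 5]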
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/depthwise_conv2d_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, line-too-long
"""Depthwise convolution in python"""
import numpy as np
from tvm.topi.nn.utils import get_pad_tuple
from .common import _convolve2d
def depthwise_conv2d_python_nchw(input_np, filter_np, stride, padding):
"""Depthwise convolution operator in NCHW layout.
Parameters
----------
input_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
filter_np : numpy.ndarray
4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]
stride : list / tuple of 2 ints
[stride_height, stride_width]
padding : str
'VALID' or 'SAME'
Returns
-------
output_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = input_np.shape
_, channel_multiplier, filter_height, filter_width = filter_np.shape
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (filter_height, filter_width))
pad_h = pad_top + pad_bottom
pad_w = pad_left + pad_right
out_channel = in_channel * channel_multiplier
out_height = (in_height - filter_height + pad_h) // stride_h + 1
out_width = (in_width - filter_width + pad_w) // stride_w + 1
output_np = np.zeros((batch, out_channel, out_height, out_width))
for i in range(batch):
for j in range(out_channel):
apad = input_np[i, j // channel_multiplier, :, :]
if pad_h or pad_w:
apad = np.pad(apad, [(pad_top, pad_bottom), (pad_left, pad_right)])
conv = _convolve2d(
apad,
np.rot90(filter_np[j // channel_multiplier, j % channel_multiplier, :, :], k=2),
)
output_np[i, j, :, :] = conv[
::stride_h,
::stride_w,
]
return output_np
def depthwise_conv2d_python_nchwc(input_np, filter_np, stride, padding):
"""Depthwise convolution operator in NCHWc layout.
Parameters
----------
input_np : numpy.ndarray
5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]
filter_np : numpy.ndarray
6-D with shape [out_channel_chunk, channel_multiplier_chunk,
filter_height, filter_width,
channel_multiplier_block, out_channel_block]
stride : list / tuple of 2 ints
[stride_height, stride_width]
padding : str
'VALID' or 'SAME'
Returns
-------
output_np : np.ndarray
5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
"""
# Transform to NCHW
batch_size, in_channel_chunk, in_height, in_width, in_channel_block = input_np.shape
input_nchw = input_np.transpose(0, 1, 4, 2, 3).reshape(
(batch_size, in_channel_chunk * in_channel_block, in_height, in_width)
)
(
out_channel_chunk,
channel_multiplier_chunk,
filter_height,
filter_width,
channel_multiplier_block,
out_channel_block,
) = filter_np.shape
filter_nchw = filter_np.transpose(0, 5, 1, 4, 2, 3).reshape(
(
out_channel_chunk * out_channel_block,
channel_multiplier_chunk * channel_multiplier_block,
filter_height,
filter_width,
)
)
# Perform conv2d
output_np = depthwise_conv2d_python_nchw(input_nchw, filter_nchw, stride, padding)
# Transform back to NCHWc
# pylint: disable=unpacking-non-sequence
batch_size, out_channel, out_height, out_width = output_np.shape
return output_np.reshape(
(batch_size, out_channel_chunk, out_channel_block, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
def depthwise_conv2d_python_nhwc(input_np, filter_np, stride, padding):
"""Depthwise convolution operator in nhwc layout.
Parameters
----------
input_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
filter_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
stride : list / tuple of 2 ints
[stride_height, stride_width]
padding : str
'VALID' or 'SAME'
Returns
-------
output_np : np.ndarray
4-D with shape [batch, out_height, out_width, out_channel]
"""
input_nchw = input_np.transpose(0, 3, 1, 2)
filter_nchw = filter_np.transpose(2, 3, 0, 1)
output_nchw = depthwise_conv2d_python_nchw(input_nchw, filter_nchw, stride, padding)
return output_nchw.transpose(0, 2, 3, 1)
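# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): with
# channel_multiplier == 1 and 'SAME' padding, depthwise convolution preserves
# both the channel count and the spatial size.
if __name__ == "__main__":
    x = np.random.rand(1, 3, 8, 8)
    f = np.random.rand(3, 1, 3, 3)  # [in_channel, channel_multiplier, kh, kw]
    out = depthwise_conv2d_python_nchw(x, f, (1, 1), "SAME")
    assert out.shape == (1, 3, 8, 8)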
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/dilate_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilate operation in python"""
import numpy as np
def dilate_python(input_np, strides, dilation_value=0.0, out_dtype=None):
"""Dilate operation.
Parameters
----------
input_np : numpy.ndarray
n-D, can be any layout.
strides : list / tuple of n ints
Dilation stride on each dimension, 1 means no dilation.
dilation_value : int/float, optional
Value used to dilate the input.
out_dtype : Option[str]
The datatype of the dilated array. If unspecified, will use
the same dtype as the input array.
Returns
-------
output_np : numpy.ndarray
n-D, the same layout as Input.
"""
assert len(input_np.shape) == len(
strides
), "Input dimension and strides size dismatch : %d vs %d" % (
len(input_np.shape),
len(strides),
)
if out_dtype is None:
out_dtype = input_np.dtype
output_size = [
(input_dim - 1) * stride + 1 for input_dim, stride in zip(input_np.shape, strides)
]
non_zero_elements = np.ix_(
*[range(0, output_dim, stride) for output_dim, stride in zip(output_size, strides)]
)
output_np = np.full(shape=output_size, fill_value=dilation_value, dtype=out_dtype)
output_np[non_zero_elements] = input_np
return output_np
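# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file):
# stride 2 on both axes places one dilation_value between every element:
# [[1, 0, 2],
#  [0, 0, 0],
#  [3, 0, 4]]
if __name__ == "__main__":
    x = np.array([[1.0, 2.0], [3.0, 4.0]])
    out = dilate_python(x, [2, 2])
    assert out.shape == (3, 3) and out[0, 2] == 2.0 and out[1, 1] == 0.0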
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/gather_nd_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""gather_nd in python"""
import numpy as np
def gather_nd_python(a_np, indices_np):
"""Python version of GatherND operator
Parameters
----------
a_np : numpy.ndarray
Numpy array
indices_np : numpy.ndarray
Numpy array
Returns
-------
b_np : numpy.ndarray
Numpy array
"""
a_shape = a_np.shape
indices_np = indices_np.astype("int32")
indices_shape = indices_np.shape
assert len(indices_shape) > 1
assert indices_shape[0] <= len(a_shape)
b_shape = list(indices_shape[1:])
for i in range(indices_shape[0], len(a_shape)):
b_shape.append(a_shape[i])
b_np = np.zeros(b_shape)
for idx in np.ndindex(*indices_shape[1:]):
a_idx = []
for i in range(indices_shape[0]):
indices_pos = tuple([i] + list(idx))
a_idx.append(indices_np[indices_pos])
b_np[idx] = a_np[tuple(a_idx)]
return b_np
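# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): the
# leading axis of `indices` holds one coordinate per input dimension, so
# column j selects a_np[indices[0, j], indices[1, j]].
if __name__ == "__main__":
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    indices = np.array([[0, 1], [1, 0]])
    assert gather_nd_python(a, indices).tolist() == [2.0, 3.0]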
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/gather_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""gather in python"""
import numpy as np
def gather_python(data, axis, indices):
"""Python version of Gather operator
Parameters
----------
data : numpy.ndarray
Numpy array
axis: int
integer
indices : numpy.ndarray
Numpy array
Returns
-------
b_np : numpy.ndarray
Numpy array
"""
shape_indices = indices.shape
out = np.zeros(shape_indices, dtype=data.dtype)
for index in np.ndindex(*shape_indices):
new_index = list(index)
new_index[axis] = indices[index]
out[index] = data[tuple(new_index)]
return out
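# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): along
# axis 1, out[i, j] == data[i, indices[i, j]].
if __name__ == "__main__":
    data = np.array([[1.0, 2.0], [3.0, 4.0]])
    indices = np.array([[0, 0], [1, 0]])
    assert gather_python(data, 1, indices).tolist() == [[1.0, 1.0], [4.0, 3.0]]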
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/grid_sample_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""affine_grid and grid_sample operators in python"""
import math
import numpy as np
def affine_grid_python(data, target_shape):
    """Generate a (N, 2, H, W) sampling grid by applying (N, 2, 3) affine
    matrices to a normalized [-1, 1] mesh of target_shape."""
    yv, xv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
yv = yv.T * 2 / (target_shape[0] - 1) - 1
xv = xv.T * 2 / (target_shape[1] - 1) - 1
ones = np.ones_like(xv)
grid = np.stack([xv, yv, ones]).reshape(3, -1)
return data.reshape(-1, 3).dot(grid).reshape(data.shape[0], 2, *target_shape)
def grid_sample_2d(
data: np.ndarray,
grid: np.ndarray,
method="bilinear",
layout="NCHW",
padding_mode="zeros",
align_corners=True,
):
r"""grid_sample_2d for NCHW layout"""
assert method in ("bilinear", "nearest", "bicubic"), f"{method} is not supported"
assert layout == "NCHW"
assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
assert len(data.shape) == len(grid.shape) == 4
batch, channel = data.shape[:2]
in_height, in_width = data.shape[2:]
out_height, out_width = grid.shape[2:]
out_shape = [batch, channel, out_height, out_width]
out = np.zeros(out_shape)
def _get_pixel(b, c, h, w):
if 0 <= h <= in_height - 1 and 0 <= w <= in_width - 1:
return data[b, c, h, w]
return 0
def _unnormalize(h, w):
if align_corners:
new_h = (h + 1) * (in_height - 1) / 2
new_w = (w + 1) * (in_width - 1) / 2
else:
new_h = -0.5 + (h + 1) * in_height / 2
new_w = -0.5 + (w + 1) * in_width / 2
return (new_h, new_w)
def _clip_coordinates(x, size):
return min(max(x, 0), size - 1)
    def _reflect_coordinates(i, size):
        def __reflection(i, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = abs(corner_start - index)
                size_times = index_align_corner // size
                even = size_times % 2 == 0
                extra = index_align_corner - size_times * size
                return extra + corner_start if even else size - extra + corner_start
            if corner_start <= i <= size + corner_start:
                new_i = i
            else:
                new_i = __reflect(i, size, corner_start)
            return new_i
        if align_corners:
            x = __reflection(i, size - 1, 0)
        else:
            x = __reflection(i, size, -0.5)
        return x
def _compute_source_index(b, h, w):
y = grid[b, 1, h, w]
x = grid[b, 0, h, w]
y, x = _unnormalize(y, x)
if padding_mode == "reflection":
y = _reflect_coordinates(y, in_height)
x = _reflect_coordinates(x, in_width)
y = _clip_coordinates(y, in_height)
x = _clip_coordinates(x, in_width)
elif padding_mode == "border":
y = _clip_coordinates(y, in_height)
x = _clip_coordinates(x, in_width)
return (y, x)
def _nearest_sample():
for _b in range(batch):
for _c in range(channel):
for _h in range(out_height):
for _w in range(out_width):
y, x = _compute_source_index(_b, _h, _w)
                        # Python's round() is not used here,
                        # because it rounds half to even
new_y = int(y + 0.5) if y > 0 else int(y - 0.5)
new_x = int(x + 0.5) if x > 0 else int(x - 0.5)
out[_b, _c, _h, _w] = _get_pixel(_b, _c, new_y, new_x)
def _bilinear_sample():
for _b in range(batch):
for _c in range(channel):
for _h in range(out_height):
for _w in range(out_width):
y, x = _compute_source_index(_b, _h, _w)
y0 = int(math.floor(y))
x0 = int(math.floor(x))
y1 = y0 + 1
x1 = x0 + 1
out[_b, _c, _h, _w] = (
_get_pixel(_b, _c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0))
+ _get_pixel(_b, _c, y0, x1) * (1.0 - (y - y0)) * (x - x0)
+ _get_pixel(_b, _c, y1, x0) * (y - y0) * (1.0 - (x - x0))
+ _get_pixel(_b, _c, y1, x1) * (y - y0) * (x - x0)
)
def _bicubic_sample():
A = -0.75
def cubic_weight_1(x_fraction):
return ((A + 2) * x_fraction - (A + 3)) * x_fraction * x_fraction + 1
def cubic_weight_2(x_fraction):
return ((A * x_fraction - 5 * A) * x_fraction + 8 * A) * x_fraction - 4 * A
def cubic_interp_1d(pixel_0, pixel_1, pixel_2, pixel_3, x_fraction):
weights = [0] * 4
weights[0] = cubic_weight_2(x_fraction + 1)
weights[1] = cubic_weight_1(x_fraction)
weights[2] = cubic_weight_1(1 - x_fraction)
weights[3] = cubic_weight_2(2 - x_fraction)
return (
pixel_0 * weights[0]
+ pixel_1 * weights[1]
+ pixel_2 * weights[2]
+ pixel_3 * weights[3]
)
def coefficients_along_x(x_floor, y_floor, x_fraction):
coefficients = [0] * 4
for i in range(4):
y_ = y_floor - 1 + i
x_0 = x_floor - 1
x_1 = x_floor + 0
x_2 = x_floor + 1
x_3 = x_floor + 2
if padding_mode == "border":
y_ = _clip_coordinates(y_, in_height)
x_0 = _clip_coordinates(x_0, in_width)
x_1 = _clip_coordinates(x_1, in_width)
x_2 = _clip_coordinates(x_2, in_width)
x_3 = _clip_coordinates(x_3, in_width)
elif padding_mode == "reflection":
y_ = _reflect_coordinates(y_, in_height)
x_0 = _reflect_coordinates(x_0, in_width)
x_1 = _reflect_coordinates(x_1, in_width)
x_2 = _reflect_coordinates(x_2, in_width)
x_3 = _reflect_coordinates(x_3, in_width)
y_ = int(_clip_coordinates(y_, in_height))
x_0 = int(_clip_coordinates(x_0, in_width))
x_1 = int(_clip_coordinates(x_1, in_width))
x_2 = int(_clip_coordinates(x_2, in_width))
x_3 = int(_clip_coordinates(x_3, in_width))
coefficients[i] = cubic_interp_1d(
_get_pixel(_b, _c, y_, x_0),
_get_pixel(_b, _c, y_, x_1),
_get_pixel(_b, _c, y_, x_2),
_get_pixel(_b, _c, y_, x_3),
x_fraction,
)
return coefficients
for _b in range(batch):
for _c in range(channel):
for _h in range(out_height):
for _w in range(out_width):
y = grid[_b, 1, _h, _w]
x = grid[_b, 0, _h, _w]
y, x = _unnormalize(y, x)
y_floor = int(math.floor(y))
x_floor = int(math.floor(x))
y_fraction = y - y_floor
x_fraction = x - x_floor
coefficients = coefficients_along_x(x_floor, y_floor, x_fraction)
out[_b, _c, _h, _w] = cubic_interp_1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
y_fraction,
)
if method == "bilinear":
_bilinear_sample()
elif method == "nearest":
_nearest_sample()
else: # mode == "bicubic":
_bicubic_sample()
return out
def grid_sample_3d(
data: np.ndarray,
grid: np.ndarray,
method="bilinear",
layout="NCDHW",
padding_mode="zeros",
align_corners=True,
):
r"""grid_sample_3d for NCDHW layout"""
assert method in ("bilinear", "nearest"), f"{method} is not supported"
assert layout == "NCDHW"
assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
assert len(data.shape) == len(grid.shape) == 5
batch, channel = data.shape[:2]
in_depth, in_height, in_width = data.shape[2:]
out_depth, out_height, out_width = grid.shape[2:]
out_shape = [batch, channel, out_depth, out_height, out_width]
out = np.zeros(out_shape)
def _get_pixel(b, c, d, h, w):
if 0 <= d <= in_depth - 1 and 0 <= h <= in_height - 1 and 0 <= w <= in_width - 1:
return data[b, c, d, h, w]
return 0
def _unnormalize(d, h, w):
if align_corners:
new_d = (d + 1) * (in_depth - 1) / 2
new_h = (h + 1) * (in_height - 1) / 2
new_w = (w + 1) * (in_width - 1) / 2
else:
new_d = -0.5 + (d + 1) * in_depth / 2
new_h = -0.5 + (h + 1) * in_height / 2
new_w = -0.5 + (w + 1) * in_width / 2
return (new_d, new_h, new_w)
def _clip_coordinates(x, size):
return min(max(x, 0), size - 1)
    def _reflect_coordinates(i, size):
        def __reflection(i, size, corner_start):
            def __reflect(index, size, corner_start):
                index_align_corner = abs(corner_start - index)
                size_times = index_align_corner // size
                even = size_times % 2 == 0
                extra = index_align_corner - size_times * size
                return extra + corner_start if even else size - extra + corner_start
            if corner_start <= i <= size + corner_start:
                new_i = i
            else:
                new_i = __reflect(i, size, corner_start)
            return new_i
        if align_corners:
            x = __reflection(i, size - 1, 0)
        else:
            x = __reflection(i, size, -0.5)
        return x
def _compute_source_index(b, d, h, w):
z = grid[b, 2, d, h, w]
y = grid[b, 1, d, h, w]
x = grid[b, 0, d, h, w]
z, y, x = _unnormalize(z, y, x)
if padding_mode == "reflection":
z = _reflect_coordinates(z, in_depth)
y = _reflect_coordinates(y, in_height)
x = _reflect_coordinates(x, in_width)
z = _clip_coordinates(z, in_depth)
y = _clip_coordinates(y, in_height)
x = _clip_coordinates(x, in_width)
elif padding_mode == "border":
z = _clip_coordinates(z, in_depth)
y = _clip_coordinates(y, in_height)
x = _clip_coordinates(x, in_width)
return (z, y, x)
def _nearest_sample():
for _b in range(batch):
for _c in range(channel):
for _d in range(out_depth):
for _h in range(out_height):
for _w in range(out_width):
z, y, x = _compute_source_index(_b, _d, _h, _w)
                            # Python's round() is not used here,
                            # because it rounds half to even
new_z = int(z + 0.5) if z > 0 else int(z - 0.5)
new_y = int(y + 0.5) if y > 0 else int(y - 0.5)
new_x = int(x + 0.5) if x > 0 else int(x - 0.5)
out[_b, _c, _d, _h, _w] = _get_pixel(_b, _c, new_z, new_y, new_x)
    def _trilinear_sample():
for _b in range(batch):
for _c in range(channel):
for _d in range(out_depth):
for _h in range(out_height):
for _w in range(out_width):
z, y, x = _compute_source_index(_b, _d, _h, _w)
z0 = int(math.floor(z))
y0 = int(math.floor(y))
x0 = int(math.floor(x))
z1 = z0 + 1
y1 = y0 + 1
x1 = x0 + 1
out[_b, _c, _d, _h, _w] = (
_get_pixel(_b, _c, z0, y0, x0)
* (1 - (x - x0))
* (1 - (y - y0))
* (1 - (z - z0))
+ _get_pixel(_b, _c, z0, y0, x1)
* (x - x0)
* (1 - (y - y0))
* (1 - (z - z0))
+ _get_pixel(_b, _c, z1, y1, x0)
* (1 - (x - x0))
* (y - y0)
* (z - z0)
+ _get_pixel(_b, _c, z1, y1, x1) * (x - x0) * (y - y0) * (z - z0)
+ _get_pixel(_b, _c, z0, y1, x0)
* (1 - (x - x0))
* (y - y0)
* (1 - (z - z0))
+ _get_pixel(_b, _c, z1, y0, x1)
* (x - x0)
* (1 - (y - y0))
* (z - z0)
+ _get_pixel(_b, _c, z1, y0, x0)
* (1 - (x - x0))
* (1 - (y - y0))
* (z - z0)
+ _get_pixel(_b, _c, z0, y1, x1)
* (x - x0)
* (y - y0)
* (1 - (z - z0))
)
if method == "bilinear":
_triilinear_sample()
else: # method == "nearest":
_nearest_sample()
return out
def grid_sample_python(
data: np.ndarray,
grid: np.ndarray,
method="bilinear",
layout="NCHW",
padding_mode="zeros",
align_corners=True,
):
r"""grid_sample_3d for NCDHW layout or grid_sample_2d for NCHW layout"""
if len(data.shape) == 4:
grid_sample = grid_sample_2d
elif len(data.shape) == 5:
grid_sample = grid_sample_3d
else:
raise ValueError("invalid shape")
return grid_sample(data, grid, method, layout, padding_mode, align_corners)
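# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): an
# identity affine matrix yields a grid that samples every pixel at its own
# location, so bilinear grid_sample reproduces the input.
if __name__ == "__main__":
    data = np.arange(16, dtype="float32").reshape(1, 1, 4, 4)
    theta = np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # (N, 2, 3) identity
    grid = affine_grid_python(theta, (4, 4))
    out = grid_sample_python(data, grid, "bilinear", "NCHW", "zeros", align_corners=True)
    assert np.allclose(out, data)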
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/l2_normalize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""L2 normalize in python"""
import numpy as np
def l2_normalize_python(a_np, eps, axis=None):
"""L2 normalize operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
eps : float
epsilon constant value
    axis : list of int
        axis over which the normalization is applied
Returns
-------
l2_normalize_out : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
dot_value = np.power(a_np, 2.0)
sqr_sum = np.sum(dot_value, axis, keepdims=True)
sqrt_sum = np.sqrt(np.maximum(np.broadcast_to(sqr_sum, a_np.shape), eps))
l2_normalize_out = np.divide(a_np, sqrt_sum)
return l2_normalize_out
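# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file):
# normalizing over the channel axis leaves a unit-norm channel vector at
# every spatial position.
if __name__ == "__main__":
    a = np.random.rand(1, 3, 4, 4).astype("float32") + 0.1
    out = l2_normalize_python(a, eps=1e-6, axis=(1,))
    assert np.allclose(np.sqrt(np.sum(out * out, axis=1)), 1.0, atol=1e-5)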
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/layer_norm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Layer normalization in python"""
import numpy as np
def layer_norm_python(data, gamma, beta, axis, epsilon=1e-5):
"""Layer normalization operator in Python.
Parameters
----------
data : numpy.ndarray
N-D with shape (d_0, d_1, ..., d_{N-1})
gamma: numpy.ndarray
K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
beta: numpy.ndarray
Optional, K-D with shape (r_0, r_1, ..., r_{K-1}) where K == len(axis) and d_{axis_k} == r_k
    axis : int or tuple of ints
        Axis over which the normalization is applied
epsilon : float
The epsilon value to avoid division by zero.
Returns
-------
result : np.ndarray
N-D with shape (d_0, d_1, ..., d_{N-1})
"""
mean = np.mean(data, axis, keepdims=True)
var = np.var(data, axis, keepdims=True)
result = (data - mean) / np.sqrt(var + epsilon)
result *= gamma
if beta is not None:
result += beta
return result
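# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): with
# unit gamma and zero beta, every row comes out with (approximately) zero
# mean after normalizing over the last axis.
if __name__ == "__main__":
    data = np.random.rand(2, 5).astype("float32")
    out = layer_norm_python(data, np.ones(5, "float32"), np.zeros(5, "float32"), axis=-1)
    assert np.allclose(out.mean(axis=-1), 0.0, atol=1e-5)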
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/lrn_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""LRN in python"""
from itertools import product
import numpy as np
def lrn_python(a_np, size, axis, bias, alpha, beta):
"""Local response normalization operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
size : int
normalization window size
axis : int
input data layout channel axis
    bias : float
        offset constant to avoid dividing by zero
alpha : float
constant value
beta : float
exponent constant value
Returns
-------
lrn_out : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
radius = size // 2
sqr_sum = np.zeros(shape=a_np.shape).astype(a_np.dtype)
for i, j, k, l in product(*[range(_axis) for _axis in a_np.shape]):
axis_size = a_np.shape[axis]
if axis == 1:
# NCHW layout
sum_start = j - radius if j - radius >= 0 else 0
sum_end = j + radius + 1 if j + radius + 1 < axis_size else axis_size
sqr_sum[i, j, k, l] = sum(
a_np[i, sum_start:sum_end, k, l] * a_np[i, sum_start:sum_end, k, l]
)
elif axis == 3:
# NHWC layout
sum_start = l - radius if l - radius >= 0 else 0
sum_end = l + radius + 1 if l + radius + 1 < axis_size else axis_size
sqr_sum[i, j, k, l] = sum(
a_np[i, j, k, sum_start:sum_end] * a_np[i, j, k, sum_start:sum_end]
)
sqr_sum_up = np.power((bias + (alpha * sqr_sum / size)), beta)
lrn_out = np.divide(a_np, sqr_sum_up)
return lrn_out
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/lstm_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""LSTM reference implementation using numpy."""
import numpy as np
def lstm_python(
Xs: np.array,
Wi: np.array,
Wh: np.array,
Bi: np.array = None,
Bh: np.array = None,
h_init: np.array = None,
c_init: np.array = None,
proj: np.array = None,
p_i: np.array = None,
p_f: np.array = None,
p_o: np.array = None,
f_act: str = "sigmoid",
g_act: str = "tanh",
h_act: str = "tanh",
reverse: bool = False,
weight_layout: str = "IFGO",
):
"""LSTM reference implementation using numpy
Parameters
----------
Xs : np.array
(seq_length, batch_size, in_dim)
Wi : np.array
(4 * hidden_dim, in_dim)
Wh : np.array
(4 * hidden_dim, out_dim) where out_dim = proj_dim if proj_dim > 0, else hidden_dim
Bi : np.array, optional
(4 * hidden_dim,), by default None
Bh : np.array, optional
(4 * hidden_dim,), by default None
h_init : np.array, optional
(batch_size, out_dim), by default None
c_init : np.array, optional
(batch_size, hidden_dim), by default None
proj : np.array, optional
(proj_dim, hidden_dim), by default None
p_i, p_f, p_o: np.array, optional
(batch_size, hidden_dim), by default None
f_act, g_act, h_act: str, optional
activations, by default "sigmoid", "tanh", "tanh"
reverse : bool, optional
process Xs in reverse, by default False
weight_layout : str, optional
Packed layout for weights and biases, by default "IFGO"
"""
i_gate_idx = weight_layout.find("I")
f_gate_idx = weight_layout.find("F")
g_gate_idx = weight_layout.find("G")
o_gate_idx = weight_layout.find("O")
str2act = {"sigmoid": lambda x: 1 / (1 + np.exp(-x)), "tanh": np.tanh}
f_act = str2act[f_act]
g_act = str2act[g_act]
h_act = str2act[h_act]
S, B, F = Xs.shape
H = Wi.shape[0] // 4
O = Wh.shape[1]
# make life a bit easier
Wi = np.reshape(Wi, (4, H, F))
Wh = np.reshape(Wh, (4, H, O))
if Bi is not None:
Bi = np.reshape(Bi, (4, H))
if Bh is not None:
Bh = np.reshape(Bh, (4, H))
h0 = h_init if h_init is not None else np.zeros((B, O), "float32")
c0 = c_init if c_init is not None else np.zeros((B, H), "float32")
hs = [h0]
cs = [c0]
for t in range(S):
x = Xs[S - t - 1 if reverse else t]
xh = [np.matmul(x, Wi[g].T) for g in range(4)]
if Bi is not None:
xh = [xh[g] + Bi[g] for g in range(4)]
hh = [np.matmul(hs[t], Wh[g].T) for g in range(4)]
if Bh is not None:
hh = [hh[g] + Bh[g] for g in range(4)]
sums = [xh[g] + hh[g] for g in range(4)]
if p_i is not None and p_f is not None:
i_gate = f_act(sums[i_gate_idx] + p_i * cs[t])
f_gate = f_act(sums[f_gate_idx] + p_f * cs[t])
else:
i_gate = f_act(sums[i_gate_idx])
f_gate = f_act(sums[f_gate_idx])
g_gate = g_act(sums[g_gate_idx])
next_c = f_gate * cs[t] + i_gate * g_gate
if p_o is not None:
o_gate = f_act(sums[o_gate_idx] + p_o * next_c)
else:
o_gate = f_act(sums[o_gate_idx])
next_h = o_gate * h_act(next_c)
if proj is not None:
next_h = np.matmul(next_h, proj.T)
hs.append(next_h)
cs.append(next_c)
return np.stack(hs[1:], axis=0), np.stack(cs[1:], axis=0)
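# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): no
# biases, peepholes or projection, so out_dim == hidden_dim and the stacked
# states have shape (seq_length, batch_size, hidden_dim).
if __name__ == "__main__":
    S, B, F, H = 3, 2, 4, 5
    Xs = np.random.rand(S, B, F).astype("float32")
    Wi = np.random.rand(4 * H, F).astype("float32")
    Wh = np.random.rand(4 * H, H).astype("float32")
    hs, cs = lstm_python(Xs, Wi, Wh)
    assert hs.shape == (S, B, H) and cs.shape == (S, B, H)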
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/matrix_set_diag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""MatrixSetDiag in Python"""
import numpy as np
def matrix_set_diag(input_np, diagonal, k=0, align="RIGHT_LEFT"):
"""matrix_set_diag operator implemented in numpy.
Returns a numpy array with the diagonals of input array
replaced with the provided diagonal values.
Parameters
----------
input_np : numpy.ndarray
Input Array.
Shape = [D1, D2, D3, ... , Dn-1 , Dn]
diagonal : numpy.ndarray
Values to be filled in the diagonal.
k : int or tuple of int
Diagonal Offsets.
align : string
Some diagonals are shorter than max_diag_len and need to be padded.
        Possible Values:
["RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", "RIGHT_RIGHT"]
Returns
-------
result : numpy.ndarray
New Array with given diagonal values.
Shape = [D1, D2, D3, ... , Dn-1 , Dn]
"""
out = np.array(input_np, copy=True)
cols = input_np.shape[-1]
rows = input_np.shape[-2]
onlyOneDiagonal = True
if isinstance(k, (tuple, list)):
if len(k) < 2 or k[0] == k[1]:
k = k[0]
else:
onlyOneDiagonal = False
if onlyOneDiagonal:
for i in range(diagonal.shape[-1]):
if k >= 0:
out[..., i, i + k] = diagonal[..., i]
else:
out[..., i - k, i] = diagonal[..., i]
else:
for ki in range(k[0], k[1] + 1):
diag_len = min(cols - max(ki, 0), rows + min(ki, 0))
offset = 0
if ki >= 0:
if align[:5] == "RIGHT":
offset = diagonal.shape[-1] - diag_len
else:
if align[-5:] == "RIGHT":
offset = diagonal.shape[-1] - diag_len
for i in range(diag_len):
if ki >= 0:
out[..., i, i + ki] = diagonal[..., k[1] - ki, i + offset]
else:
out[..., i - ki, i] = diagonal[..., k[1] - ki, i + offset]
return out
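# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): the
# default k=0 replaces the main diagonal.
if __name__ == "__main__":
    base = np.zeros((3, 3), dtype="int64")
    out = matrix_set_diag(base, np.array([1, 2, 3]))
    assert out.tolist() == [[1, 0, 0], [0, 2, 0], [0, 0, 3]]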
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/nll_loss.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""NLLLoss in python"""
import numpy as np
def nll_loss(predictions, targets, weights, reduction="mean", ignore_index=-100):
"""nll_loss operator implemented in numpy.
output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
            p = predictions{n, t, i_1, i_2, ..., i_k}
            w = weights[t] if t != ignore_index else 0
result = reduction(output)
Parameters
----------
predictions : numpy.ndarray
(k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
where C is the number of target classes
targets : numpy.ndarray
(k+1)-D with shape (N, d_1, d_2, ..., d_k)
The target value of the input.
weights : numpy.ndarray
1-D with shape (C,)
The weight of each target value.
reduction : string
The reduction method to apply to output.
Can be "mean", "sum" or "none".
ignore_index : int
The target value to ignore.
Returns
-------
output : numpy.ndarray
a scalar if the reduction type is "mean" or "sum",
otherwise the same shape as `target`.
"""
res = np.zeros(targets.shape)
weight_sum = 0.0
for index in np.ndindex(targets.shape):
class_id = targets[index]
if class_id != ignore_index:
index_list = list(index)
pred_index = tuple(index_list[:1] + [class_id] + index_list[1:])
res[index] = -predictions[pred_index] * weights[class_id]
weight_sum += weights[class_id]
if reduction == "mean":
return np.sum(res) / weight_sum
if reduction == "sum":
return np.sum(res)
return res
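# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file): with
# unit class weights, the mean reduction is the average negative log
# probability assigned to the true classes.
if __name__ == "__main__":
    predictions = np.log(np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]))
    targets = np.array([0, 1])
    loss = nll_loss(predictions, targets, np.ones(3), reduction="mean")
    assert np.isclose(loss, -(np.log(0.7) + np.log(0.8)) / 2)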
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/one_hot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""OneHot in python"""
import numpy as np
def one_hot(indices, on_value, off_value, depth, axis, dtype):
"""one_hot operator implemented in numpy.
    Returns a one-hot tensor where the locations represented by indices take value on_value,
other locations take value off_value.
Final dimension is <indices outer dimensions> x depth x <indices inner dimensions>.
Parameters
----------
indices : numpy.ndarray
Locations to set to on_value.
on_value : int/float
Value to fill at indices.
off_value : int/float
Value to fill at all other positions besides indices.
depth : int
Depth of the one-hot dimension.
axis : int
Axis to fill.
dtype : str
Data type of the output tensor.
Returns
-------
    ret : numpy.ndarray
The one-hot tensor.
"""
oshape = []
true_axis = len(indices.shape) if axis == -1 else axis
ndim = len(indices.shape) + 1
indices_index = 0
for i in range(0, ndim):
if i == true_axis:
oshape.append(depth)
else:
oshape.append(indices.shape[indices_index])
indices_index += 1
out = np.empty(oshape)
output_indices = list(np.ndindex(out.shape))
for output_index in output_indices:
indices_indices = []
for i, out_idx in enumerate(output_index):
if i == true_axis:
continue
indices_indices.append(out_idx)
index = output_index[true_axis]
if indices[tuple(indices_indices)] == index:
out[output_index] = on_value
else:
out[output_index] = off_value
return out.astype(dtype)
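# --------------------------------------------------------------------------
# Illustrative usage (an added sketch, not part of the original file):
# axis=-1 appends the depth dimension after the index dimensions.
if __name__ == "__main__":
    out = one_hot(np.array([0, 2]), 1.0, 0.0, depth=3, axis=-1, dtype="float32")
    assert out.tolist() == [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]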
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/pool_grad_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""Gradient of pooling in python"""
import numpy as np
def pool_grad_nchw(
a_np, out_grad_np, pool_size, strides, padding, pool_type, ceil_mode, count_include_pad=True
):
"""pool_grad for NCHW layout in python"""
dtype = a_np.dtype
n, ic, ih, iw = a_np.shape
kh, kw = pool_size
sh, sw = strides
pt, pl, pb, pr = padding
pad_np = np.zeros(shape=(n, ic, ih + pt + pb, iw + pl + pr)).astype(dtype)
no_zero = (range(n), range(ic), (range(pt, ih + pt)), (range(pl, iw + pl)))
pad_np[np.ix_(*no_zero)] = a_np
_, _, oh, ow = out_grad_np.shape
pool_grad_np = np.zeros(shape=a_np.shape)
pad_pool_grad_np = np.zeros(shape=pad_np.shape)
if pool_type == "avg":
for i in range(oh):
for j in range(ow):
if count_include_pad:
shape = pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw].shape
# this can be different from kh*kw if input size cannot divide stride
pad_count = shape[2] * shape[3]
else:
pad_count = np.sum(
pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] > 0, axis=(2, 3)
)
# take the first element, as they are the same across batch and channel
pad_count = pad_count.ravel()[0]
pad_pool_grad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] += out_grad_np[
:, :, i, j
].reshape(n, ic, 1, 1) / np.maximum(pad_count, 1)
elif pool_type == "max":
for i in range(oh):
for j in range(ow):
a_patch = pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw]
a_patch = np.reshape(a_patch, (n, ic, -1))
max_indices = np.argmax(a_patch, axis=2)
c_idx, n_idx = np.meshgrid(range(ic), range(n), sparse=True)
h_idx, w_idx = np.unravel_index(max_indices, (kh, kw))
pad_pool_grad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw][
n_idx, c_idx, h_idx, w_idx
] += out_grad_np[n_idx, c_idx, i, j]
for i in range(pool_grad_np.shape[2]):
for j in range(pool_grad_np.shape[3]):
pool_grad_np[:, :, i, j] = pad_pool_grad_np[:, :, i + pt, j + pl]
return pool_grad_np
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/poolnd_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, unused-variable
"""Ground truth max and average pooling operators in python."""
import itertools
import math
from typing import List, Tuple, Optional
import numpy as np
import tvm
def _get_supported_layout(dims: int):
"""
Returns layout that is supported by poolnd_python based on number of
dimensions of input tensor
"""
assert dims in [3, 4, 5], f"{dims}-dimensional tensor is not supported"
if dims == 3:
return "NCW"
if dims == 4:
return "NCHW"
# dims == 5
return "NCDHW"
def _convert_to_layout(
input_tensor: np.ndarray,
layout: str,
) -> np.ndarray:
"""
Converts back to original layout after the algorithm is finished
"""
supported_layout = _get_supported_layout(input_tensor.ndim)
if layout is not None and supported_layout != layout:
# Generate transpose list
transpose_list = []
for d in layout:
transpose_list.append(supported_layout.index(d))
return input_tensor.transpose(transpose_list)
return input_tensor
def _convert_from_layout(
input_tensor: np.ndarray,
layout: str,
) -> np.ndarray:
"""
    Converts the tensor to one of the supported layouts
"""
supported_layout = _get_supported_layout(input_tensor.ndim)
if layout is not None and supported_layout != layout:
# Generate transpose list
transpose_list = []
for d in supported_layout:
transpose_list.append(layout.index(d))
return input_tensor.transpose(transpose_list)
return input_tensor
def get_slice(
spatial_dimensions: int,
pad_np: np.array,
dim_coord: Tuple[int],
kernel: Tuple[int],
strides: Tuple[int],
dilation: Tuple[int],
) -> Tuple[slice, ...]:
"""
Programmatically create a slice object of the right dimensions for pad_np.
We assume pad_np's first two dimensions are not spatial and are not touched by the pad.
pad_np[slice] should give the elements of the data that a pool operation will use for the
step given in dim_coord.
"""
slices = [slice(None)] * spatial_dimensions
for nd in range(spatial_dimensions):
slices[nd] = slice(
dim_coord[nd] * strides[nd],
dim_coord[nd] * strides[nd] + (kernel[nd] - 1) * dilation[nd] + 1,
dilation[nd],
)
# Add back batch and channel dimensions
    slices = [slice(None), slice(None)] + slices
    # Return a tuple so the result can be used directly as a numpy index
    return tuple(slices)
def pad_tensor(
np_arr: np.array,
pad_value: float,
padding_before: List[int],
padding_after: List[int],
dtype: str,
) -> np.array:
"""Pad the spatial dimensions of the given array."""
orig_shape = list(np_arr.shape)
padded_shape = list(np_arr.shape)
n = len(orig_shape)
for dim in range(2, n):
i = dim - 2
padded_shape[dim] += padding_after[i] + padding_before[i]
pad_np = (np.zeros(shape=padded_shape) + pad_value).astype(dtype)
ranges_it = [range(padded_shape[0]), range(padded_shape[1])]
for dim in range(2, n):
i = dim - 2
ranges_it.append(range(padding_before[i], padding_before[i] + orig_shape[dim]))
pad_np[np.ix_(*ranges_it)] = np_arr
return pad_np
def poolnd_python(
np_data: np.array,
kernel: Tuple[int],
strides: Tuple[int],
dilation: Tuple[int],
padding_before: Tuple[int],
padding_after: Tuple[int],
pool_type: str,
count_include_pad: bool = True,
ceil_mode: bool = False,
dtype: str = "float32",
layout: Optional[str] = None,
) -> np.array:
"""Ground truth pooling operator impelmented in numpy."""
np_data = _convert_from_layout(np_data, layout)
out_shape = [np_data.shape[0], np_data.shape[1]]
for dim in range(2, len(np_data.shape)):
i = dim - 2
val = (
float(
np_data.shape[dim]
- (kernel[i] - 1) * dilation[i]
- 1
+ padding_before[i]
+ padding_after[i]
)
/ strides[i]
)
if ceil_mode:
out_shape.append(int(math.ceil(val) + 1))
else:
out_shape.append(int(math.floor(val) + 1))
out_shape = tuple(out_shape)
# Create a padded array, and a boolean mask showing which values are padded values
pad_value = 0
if pool_type == "max" and not count_include_pad:
pad_value = tvm.te.min_value(dtype).value
pad_data = pad_tensor(np_data, pad_value, padding_before, padding_after, dtype)
pad_map = pad_tensor(np.ones_like(np_data), 0, padding_before, padding_after, "bool")
# Create iterator which gives all indices for output array
dim_iterators = []
for spatial_dimension in range(2, len(np_data.shape)):
dim_iterators.append(range(out_shape[spatial_dimension]))
coord_iterator = itertools.product(*dim_iterators)
ret_np = np.zeros(shape=out_shape).astype(dtype)
for coordinate in coord_iterator:
# Get index into the values that any pool operation will use for given coordinate
np_index = get_slice(
spatial_dimensions=len(out_shape) - 2,
pad_np=pad_data,
dim_coord=coordinate,
kernel=kernel,
strides=strides,
dilation=dilation,
)
        output_slice = tuple([slice(None), slice(None)] + list(coordinate))
reduction_axis = tuple(range(2, len(np_data.shape)))
if pool_type == "avg":
count_non_padded = (
pad_data[np_index].size if count_include_pad else np.sum(pad_map[np_index])
)
# We summed over the non spatial dimensions too so divide by them
count_non_padded /= out_shape[0] * out_shape[1]
if count_non_padded == 0:
ret_np[output_slice] = 0
else:
ret_np[output_slice] = (
np.sum(pad_data[np_index], axis=reduction_axis) / count_non_padded
)
elif pool_type == "max":
count_non_padded = np.sum(pad_map[np_index])
# All padded values, default to 0
ret_np[output_slice] = np.max(pad_data[np_index], axis=reduction_axis)
else:
raise ValueError("Pool type {} is not supported".format(pool_type))
return _convert_to_layout(ret_np, layout)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/reorg_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Reorg in python"""
import numpy as np
def reorg_python(a_np, stride):
"""Reorg operator
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
stride : int
Stride size
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
batch, in_channel, in_height, in_width = a_np.shape
a_np = np.reshape(a_np, batch * in_channel * in_height * in_width)
out_c = int(in_channel / (stride * stride))
out_channel = in_channel * stride * stride
out_height = int(in_height / stride)
out_width = int(in_width / stride)
b_np = np.zeros(batch * out_channel * out_height * out_width)
cnt = 0
for b in range(batch):
for k in range(in_channel):
for j in range(in_height):
for i in range(in_width):
c2 = k % out_c
offset = int(k / out_c)
w2 = int(i * stride + offset % stride)
h2 = int(j * stride + offset / stride)
out_index = int(
w2 + in_width * stride * (h2 + in_height * stride * (c2 + out_c * b))
)
b_np[cnt] = a_np[int(out_index)]
cnt = cnt + 1
b_np = np.reshape(b_np, (batch, out_channel, out_height, out_width))
return b_np
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/resize_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Upsampling in python"""
import math
import numpy as np
from tvm.topi.utils import nchw_pack_layout
def get_inx(x, image_width, target_width, coordinate_transformation_mode):
"""Infer input x from output x with various coordinate transformation methods"""
scale = image_width / target_width
if coordinate_transformation_mode == "half_pixel":
in_x = (x + 0.5) * scale - 0.5
elif coordinate_transformation_mode == "align_corners":
in_x = (image_width - 1) / (target_width - 1) * x if target_width > 1 else 0
elif coordinate_transformation_mode == "asymmetric":
in_x = scale * x
else:
raise ValueError(
"Unsupported coordinate_transformation_mode: {}".format(coordinate_transformation_mode)
)
return in_x
def get_index(x, image_width, target_width, coordinate_transformation_mode):
"""get and round the nearest index for nearest_neighbor"""
in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode)
if coordinate_transformation_mode == "align_corners":
# round prefer ceil
out = int(math.floor(in_x + 0.5))
else:
out = int(math.floor(in_x))
out = max(min(out, image_width - 1), 0)
return out
def resize3d_nearest(arr, scale, coordinate_transformation_mode):
"""Populate the array by scale factor"""
d, h, w = arr.shape
out_d, out_h, out_w = [int(round(i * s)) for i, s in zip(arr.shape, scale)]
out = np.empty((out_d, out_h, out_w))
for z in range(out_d):
for y in range(out_h):
for x in range(out_w):
in_z = get_index(z, d, out_d, coordinate_transformation_mode)
in_y = get_index(y, h, out_h, coordinate_transformation_mode)
in_x = get_index(x, w, out_w, coordinate_transformation_mode)
out[z, y, x] = arr[in_z, in_y, in_x]
return out
def resize3d_linear(data_in, scale, coordinate_transformation_mode):
"""Trilinear 3d scaling using python"""
dtype = data_in.dtype
d, h, w = data_in.shape
new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
data_out = np.ones((new_d, new_h, new_w))
indexes = np.mgrid[0:2, 0:2, 0:2]
def _get_patch(zint, yint, xint):
# Get the surrounding values
indices = indexes.copy()
indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
p = data_in[indices[0], indices[1], indices[2]]
return p
for m in range(new_d):
for j in range(new_h):
for k in range(new_w):
in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
in_x = get_inx(k, w, new_w, coordinate_transformation_mode)
zint = math.floor(in_z)
zfract = in_z - math.floor(in_z)
yint = math.floor(in_y)
yfract = in_y - math.floor(in_y)
xint = math.floor(in_x)
xfract = in_x - math.floor(in_x)
wz = np.array([1.0 - zfract, zfract], dtype=dtype)
wy = np.array([1.0 - yfract, yfract], dtype=dtype)
wx = np.array([1.0 - xfract, xfract], dtype=dtype)
p = _get_patch(zint, yint, xint)
l = np.sum(p * wx, axis=-1)
col = np.sum(l * wy, axis=-1)
data_out[m, j, k] = np.sum(col * wz)
return data_out
def resize3d_cubic(data_in, scale, coordinate_transformation_mode):
"""Tricubic 3d scaling using python"""
dtype = data_in.dtype
d, h, w = data_in.shape
new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
data_out = np.ones((new_d, new_h, new_w))
def _cubic_spline_weights(t, alpha=-0.5):
"""create cubic spline weights in 1D"""
t2 = t * t
t3 = t * t * t
w1 = alpha * (t3 - 2 * t2 + t)
w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
w4 = -alpha * t3 + alpha * t2
return np.array([w1, w2, w3, w4])
indexes = np.mgrid[-1:3, -1:3, -1:3]
def _get_patch(zint, yint, xint):
# Get the surrounding values
indices = indexes.copy()
indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
p = data_in[indices[0], indices[1], indices[2]]
return p
for m in range(new_d):
for j in range(new_h):
for k in range(new_w):
in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
in_x = get_inx(k, w, new_w, coordinate_transformation_mode)
zint = math.floor(in_z)
zfract = in_z - math.floor(in_z)
yint = math.floor(in_y)
yfract = in_y - math.floor(in_y)
xint = math.floor(in_x)
xfract = in_x - math.floor(in_x)
wz = _cubic_spline_weights(zfract)
wy = _cubic_spline_weights(yfract)
wx = _cubic_spline_weights(xfract)
p = _get_patch(zint, yint, xint)
l = np.sum(p * wx, axis=-1)
col = np.sum(l * wy, axis=-1)
data_out[m, j, k] = np.sum(col * wz)
return data_out
def resize3d_ncdhw(
data, scale, method="nearest_neighbor", coordinate_transformation_mode="align_corners"
):
"""reference kernel for 3D image resizing"""
ishape = data.shape
oshape = (
ishape[0],
ishape[1],
int(round(ishape[2] * scale[0])),
int(round(ishape[3] * scale[1])),
int(round(ishape[4] * scale[2])),
)
output_np = np.zeros(oshape, dtype=data.dtype)
for b in range(oshape[0]):
for c in range(oshape[1]):
if method == "nearest_neighbor":
output_np[b, c, :, :, :] = resize3d_nearest(
data[b, c, :, :, :], scale, coordinate_transformation_mode
)
elif method == "linear":
output_np[b, c, :, :, :] = resize3d_linear(
data[b, c, :, :, :], scale, coordinate_transformation_mode
)
elif method == "cubic":
output_np[b, c, :, :, :] = resize3d_cubic(
data[b, c, :, :, :], scale, coordinate_transformation_mode
)
else:
raise ValueError("Unknown resize method", method)
return output_np
def resize1d_python(
data,
scale,
layout="NCW",
method="nearest_neighbor",
coordinate_transformation_mode="align_corners",
):
"""Python version of 3D scaling using nearest neighbour"""
if layout == "NWC":
data = data.transpose([0, 2, 1])
data = np.expand_dims(data, axis=[2, 3])
output_np = resize3d_ncdhw(data, (1, 1) + scale, method, coordinate_transformation_mode)
output_np = np.squeeze(output_np, axis=2)
output_np = np.squeeze(output_np, axis=2)
if layout == "NWC":
output_np = output_np.transpose([0, 2, 1])
return output_np
def resize2d_python(
data,
scale,
layout="NCHW",
method="nearest_neighbor",
coordinate_transformation_mode="align_corners",
):
"""Python version of scaling using nearest neighbour"""
if layout == "NHWC":
data = data.transpose([0, 3, 1, 2])
elif nchw_pack_layout(layout):
ishape = data.shape
transposed = data.transpose([0, 4, 1, 5, 2, 3])
tshape = transposed.shape
data = transposed.reshape(
tshape[0] * tshape[1], tshape[2] * tshape[3], tshape[4], tshape[5]
)
data = np.expand_dims(data, axis=2)
output_np = resize3d_ncdhw(data, (1,) + scale, method, coordinate_transformation_mode)
output_np = np.squeeze(output_np, axis=2)
if layout == "NHWC":
output_np = output_np.transpose([0, 2, 3, 1])
elif nchw_pack_layout(layout):
output_np = output_np.reshape(tshape[0:4] + output_np.shape[2:])
output_np = output_np.transpose([0, 2, 4, 5, 1, 3])
return output_np
def resize3d_python(
data,
scale,
layout="NCDHW",
method="nearest_neighbor",
coordinate_transformation_mode="align_corners",
):
"""Python version of 3D scaling using nearest neighbour"""
if layout == "NDHWC":
data = data.transpose([0, 4, 1, 2, 3])
output_np = resize3d_ncdhw(data, scale, method, coordinate_transformation_mode)
if layout == "NDHWC":
output_np = output_np.transpose([0, 2, 3, 4, 1])
return output_np
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/roi_align_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def _bilinear(a_np, n, c, y, x, height, width, layout):
if y < -1 or y > height or x < -1 or x > width:
return 0
y = min(max(y, 0), height - 1)
x = min(max(x, 0), width - 1)
y_low = int(math.floor(y))
x_low = int(math.floor(x))
y_high = y_low + 1
x_high = x_low + 1
wy_h = y - y_low
wx_h = x - x_low
wy_l = 1 - wy_h
wx_l = 1 - wx_h
val = 0
for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
if 0 <= yp < height and 0 <= xp < width:
if layout == "NCHW":
val += wx * wy * a_np[n, c, yp, xp]
else:
val += wx * wy * a_np[n, yp, xp, c]
return val
def roi_align_common(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
layout,
):
"""Common code used by roi align NCHW and NHWC"""
num_roi = rois_np.shape[0]
for i in range(num_roi):
roi = rois_np[i]
batch_index = int(roi[0])
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1:] * spatial_scale
roi_h = max(roi_end_h - roi_start_h, 1.0)
roi_w = max(roi_end_w - roi_start_w, 1.0)
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
if sample_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = int(sample_ratio)
else:
roi_bin_grid_h = int(math.ceil(roi_h / pooled_size_h))
roi_bin_grid_w = int(math.ceil(roi_w / pooled_size_w))
count = roi_bin_grid_h * roi_bin_grid_w
for c in range(channel):
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
if avg_mode:
total = 0.0
if max_mode:
total = float("-inf")
for iy in range(roi_bin_grid_h):
for ix in range(roi_bin_grid_w):
y = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
x = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
if avg_mode:
total += (
_bilinear(a_np, batch_index, c, y, x, height, width, layout)
/ count
)
if max_mode:
total = max(
total,
_bilinear(a_np, batch_index, c, y, x, height, width, layout),
)
if layout == "NCHW":
b_np[i, c, ph, pw] = total
else:
b_np[i, ph, pw, c] = total
return b_np
def roi_align_nchw_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
"""Roi align NCHW in python"""
avg_mode = mode in (b"avg", "avg", 0)
max_mode = mode in (b"max", "max", 1)
assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
_, channel, height, width = a_np.shape
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
b_np = np.zeros((rois_np.shape[0], channel, pooled_size_h, pooled_size_w), dtype=a_np.dtype)
return roi_align_common(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
"NCHW",
)
def roi_align_nhwc_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
"""Roi align NHWC in python"""
avg_mode = mode in (b"avg", "avg", 0)
max_mode = mode in (b"max", "max", 1)
assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
_, height, width, channel = a_np.shape
num_roi = rois_np.shape[0]
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
b_np = np.zeros((num_roi, pooled_size_h, pooled_size_w, channel), dtype=a_np.dtype)
return roi_align_common(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
"NHWC",
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/roi_pool_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi pool in python"
import math
import numpy as np
def roi_pool_nchw_python(a_np, rois_np, pooled_size, spatial_scale):
"""Roi pool in python"""
_, channel, height, width = a_np.shape
num_roi = rois_np.shape[0]
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size
    b_np = np.zeros((num_roi, channel, pooled_size_h, pooled_size_w), dtype=a_np.dtype)
for i in range(num_roi):
roi = rois_np[i]
batch_index = int(roi[0])
roi_start_w = int(round(roi[1] * spatial_scale))
roi_start_h = int(round(roi[2] * spatial_scale))
roi_end_w = int(round(roi[3] * spatial_scale))
roi_end_h = int(round(roi[4] * spatial_scale))
roi_h = max(roi_end_h - roi_start_h + 1, 1)
roi_w = max(roi_end_w - roi_start_w + 1, 1)
bin_h = float(roi_h) / pooled_size_h
bin_w = float(roi_w) / pooled_size_w
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
hstart = int(math.floor(ph * bin_h))
wstart = int(math.floor(pw * bin_w))
hend = int(math.ceil((ph + 1) * bin_h))
wend = int(math.ceil((pw + 1) * bin_w))
hstart = min(max(hstart + roi_start_h, 0), height)
hend = min(max(hend + roi_start_h, 0), height)
wstart = min(max(wstart + roi_start_w, 0), width)
wend = min(max(wend + roi_start_w, 0), width)
is_empty = (hend <= hstart) or (wend <= wstart)
for c in range(channel):
if is_empty:
b_np[i, c, ph, pw] = 0.0
else:
b_np[i, c, ph, pw] = np.max(a_np[batch_index, c, hstart:hend, wstart:wend])
return b_np
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The reference implementation of searchsorted in Numpy."""
import numpy as np
def searchsorted_ref(sorted_sequence, values, right, out_dtype):
"""Run Numpy searchsorted on 1-D or N-D sorted_sequence."""
side = "right" if right else "left"
if len(sorted_sequence.shape) == 1 and len(values.shape) > 1:
sorted_sequence_2d = np.tile(sorted_sequence, (np.prod(values.shape[:-1]), 1))
else:
sorted_sequence_2d = np.reshape(sorted_sequence, (-1, sorted_sequence.shape[-1]))
values_2d = np.reshape(values, (-1, values.shape[-1]))
indices = np.zeros(values_2d.shape, dtype=out_dtype)
for i in range(indices.shape[0]):
indices[i] = np.searchsorted(sorted_sequence_2d[i], values_2d[i], side=side)
return np.reshape(indices, values.shape)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/sequence_mask_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Sequence mask in python"""
import numpy as np
def sequence_mask(data, valid_length, mask_value, axis):
"""batch_matmul operator implemented in numpy.
Parameters
----------
data : numpy.ndarray
N-D with shape [batch_size, MAX_LENGTH, ...] or [MAX_LENGTH, batch_size, ...]
valid_length : numpy.ndarray
1-D with shape [batch_size,]
mask_value : float
Masking value
axis : int
The axis of the length dimension
Returns
-------
out : numpy.ndarray
N-D with shape same as data
"""
in_shape = data.shape
max_length = data.shape[axis]
val_len_expand_shape = [1 for _ in range(len(in_shape))]
val_len_expand_shape[1 - axis] = in_shape[1 - axis]
seq_len_expand_shape = [1 for _ in range(len(in_shape))]
seq_len_expand_shape[axis] = in_shape[axis]
mask = np.broadcast_to(
np.arange(max_length).reshape(seq_len_expand_shape), in_shape
) >= valid_length.reshape(val_len_expand_shape)
out = data * (1 - mask) + mask_value * mask
return out
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/slice_axis_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Slice axis in python"""
def slice_axis_python(data, axis, begin, end=None):
"""Slice input array along specific axis.
Parameters
----------
data : numpy.ndarray
The source array to be sliced.
axis : int
Axis to be sliced.
begin: int
The index to begin with in the slicing.
end: int, optional
The index indicating end of the slice.
Returns
-------
ret : numpy.ndarray
The computed result.
"""
dshape = data.shape
if axis < 0:
axis += len(dshape)
if begin < 0:
begin += dshape[axis]
    if end is None:
        end = dshape[axis]
    elif end <= 0:
        end += dshape[axis]
slc = [slice(None)] * len(dshape)
slc[axis] = slice(begin, end)
return data[tuple(slc)]
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/softmax_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, trailing-whitespace
"""Softmax and log_softmax operation in python"""
import numpy as np
def softmax_python(a_np, axis=1):
"""Softmax operator.
Parameters
----------
    a_np : numpy.ndarray
        N-D input data
    axis : int, optional
        The axis along which softmax is computed, 1 by default
Returns
-------
output_np : numpy.ndarray
N-D output with same shape
"""
max_elem = np.amax(a_np, axis=axis, keepdims=True)
e = np.exp(a_np - max_elem)
expsum = np.sum(e, axis=axis, keepdims=True)
out_np = e / expsum
return out_np
def log_softmax_python(a_np, axis=1):
"""Log_softmax operator.
Parameters
----------
    a_np : numpy.ndarray
        N-D input data
    axis : int, optional
        The axis along which log_softmax is computed, 1 by default
Returns
-------
output_np : numpy.ndarray
N-D output with same shape
"""
max_elem = np.amax(a_np, axis=axis, keepdims=True)
e = np.exp(a_np - max_elem)
expsum = np.sum(e, axis=axis, keepdims=True)
out_np = a_np - max_elem - np.log(expsum)
return out_np
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/space_to_batch_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Space to batch ND in python"""
import numpy as np
def space_to_batch_nd_python(data, block_shape, pad_before, pad_after, pad_value=0):
"""Space to Batch operator in python for NHWC layout.
Parameters
----------
data : np.ndarray
N-D with shape [batch, spatial_shape, remaining_shapes],
where spatial_shape has M dimensions.
block_shape : list of ints
1-D array of size [M] where M is number of spatial dims, specifies block
size for each spatial dimension.
pad_before : list of ints
list of shape [M] where M is number of spatial dims, specifies
zero-padding size before each spatial dimension.
pad_after : list of ints
list of shape [M] where M is number of spatial dims, specifies
zero-padding size after each spatial dimension.
pad_value : float, optional
the value used for padding. Defaults to 0.
Returns
-------
s2b_out : np.ndarray
N-D with shape [batch * prod(block_shape),
padded_data[1] / block_shape[0], ..., padded_data[M] / block_shape[M-1],
remaining_shape]
"""
M = len(block_shape)
in_batch = data.shape[0]
block_shape_prod = np.prod(block_shape)
# Apply padding to input data
input_shape = data.shape
# Add the paddings for batch and remaining dims
paddings = map(list, zip(pad_before, pad_after))
paddings = [[0, 0]] + list(paddings) + [[0, 0]] * (data.ndim - 1 - M)
padded_data = np.pad(data, paddings, mode="constant", constant_values=pad_value)
padded_shape = padded_data.shape
# Get the reshape shape and transpose axes
r_shape = []
trans_axis = []
r_shape.append(in_batch)
for i in range(1, M + 1):
r_shape.append((int(padded_shape[i] // block_shape[i - 1])))
r_shape.append(block_shape[i - 1])
trans_axis.append(len(r_shape) - 1)
axis_len = len(trans_axis)
trans_axis.append(0)
for i in range(axis_len):
trans_axis.append(trans_axis[i] - 1)
out_shape = []
out_shape.append(int((in_batch * block_shape_prod)))
for i in range(1, M + 1):
out_shape.append(int(padded_shape[i] // block_shape[i - 1]))
for i in range(M + 1, len(input_shape)):
r_shape.append(input_shape[i])
trans_axis.append(len(r_shape) - 1)
out_shape.append(input_shape[i])
s2b_out = np.reshape(padded_data, newshape=r_shape)
s2b_out = np.transpose(s2b_out, axes=trans_axis)
s2b_out = np.reshape(s2b_out, newshape=out_shape)
return s2b_out
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/space_to_depth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals
"""Space to depth in python"""
import numpy as np
def space_to_depth_python(data, block_size):
"""Space to Depth operator in python for NCHW layout.
Parameters
----------
data : np.ndarray
4-D with shape [batch, in_channel, in_height, in_width]
block_size : int
Size of spatial blocks to decompose into channels.
Returns
-------
d2s_out : np.ndarray
4-D with shape [batch, in_channel * (block_size * block_size),
        in_height / block_size, in_width / block_size]
"""
in_n, in_c, in_h, in_w = data.shape
new_h = int(in_h / block_size)
    new_w = int(in_w / block_size)
new_c = int(in_c * (block_size * block_size))
expanded = np.reshape(data, newshape=[in_n, in_c, new_h, block_size, new_w, block_size])
transposed = np.transpose(expanded, axes=[0, 3, 5, 1, 2, 4])
newshape = [in_n, new_c, new_h, new_w]
d2s_out = np.reshape(transposed, newshape=newshape)
return d2s_out
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/testing/strided_slice_python.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""strided_slice/set in python"""
def strided_slice_python(data, begin, end, strides, slice_mode="end", axes=None):
"""Python version of strided slice operator.
Parameters
----------
data : numpy.ndarray
Input data
begin : list
Beginning of the slices.
end : list
End of the slices.
strides : list
The stride of each slice.
slice_mode : str, optional
The slice mode [end, size].
end: The default slice mode, ending indices for the slice.
size: The input strides will be ignored, input end in this mode indicates
            the size of a slice starting at the location specified by begin. If end[i] is -1,
all remaining elements in that dimension are included in the slice.
axes : list, optional
Axes along which slicing is applied
Returns
-------
result : numpy.ndarray
The sliced result.
"""
strides = [] if strides is None else strides
if axes is not None:
rank = len(data.shape)
new_begin = [0] * rank
new_end = [data.shape[i] for i in range(rank)]
new_strides = [1] * rank
for i, axis in enumerate(axes):
new_begin[axis] = begin[i]
new_end[axis] = end[i]
if len(strides) > i:
new_strides[axis] = strides[i]
begin = new_begin
end = new_end
strides = new_strides
slices = []
for i in range(len(data.shape)):
new_stride = None
if slice_mode == "end" and i < len(strides):
new_stride = strides[i]
new_begin = begin[i] if i < len(begin) else None
if i >= len(end):
new_end = None
elif slice_mode == "size":
if end[i] < 0:
new_end = None
else:
new_end = new_begin + end[i]
else:
new_end = end[i]
slices.append(slice(new_begin, new_end, new_stride))
return data[tuple(slices)]
def strided_set_python(data, v, begin, end, strides):
"""Python version of strided slice operator.
Parameters
----------
data : numpy.ndarray
Input data
v : numpy.ndarray
Value data
begin : list
Beginning of the slices.
end : list
End of the slices.
strides : list
The stride of each slice.
Returns
-------
result : numpy.ndarray
The updated result.
"""
strides = [] if strides is None else strides
slices = []
res = data.copy()
for i in range(len(data.shape)):
slices.append(
slice(
begin[i] if i < len(begin) else None,
end[i] if i < len(end) else None,
strides[i] if i < len(strides) else None,
)
)
res[tuple(slices)] = v
return res
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,consider-using-enumerate,redefined-outer-name
"""Injective transformation operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import topi
from tvm.te import hybrid
from . import cpp
from . import tag
from .utils import within_index, make_idx, const_vector
def expand_dims(a, axis, num_newaxis=1):
"""Expand the shape of an array.
Parameters
----------
a : tvm.te.Tensor
        The tensor to be expanded.
    axis : int
        The axis at which the new axes are inserted.
    num_newaxis: int, optional
        Number of new axes to be inserted on the given axis
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.expand_dims(a, axis, num_newaxis)
def expand_like(a, shape_like, axis):
"""Expand an input array with the shape of second array.
This operation can always be composed of unsqueezing and
expanding dims on those unsqueezed axes.
Examples
--------
.. code-block::
input = [ 12. 19. 27.]
input.shape = (3,)
new_shape_array = [[[1,2],[2,3],[1,3]],
[[1,4],[4,3],[5,2]],
[[7,1],[7,2],[7,3]]]
new_shape_array.shape = (3, 3, 2)
        expand_like(input, new_shape_array, [1,2]) =
[[[12,12],[12,12],[12,12]],
[[19,19],[19,19],[19,19]],
[[27,27],[27,27],[27,27]]]
Parameters
----------
a : tvm.te.Tensor
The tensor to be expanded.
shape_like : tvm.te.Tensor
        The tensor with the target shape.
axis: list of int
axis to be expanded on
Returns
-------
ret : tvm.te.Tensor
"""
odim = len(axis) + len(a.shape)
if odim != len(shape_like.shape):
if len(a.shape) == 1 and len(axis) == len(shape_like.shape):
# A special case: `a` is a scalar represented as a 1-dim tensor
return te.compute(shape_like.shape, lambda *idxs: a(0))
raise ValueError(
"shape inconsistent when expand_like ({}, {}, {})".format(
len(axis), len(a.shape), len(shape_like.shape)
)
)
real_axis = topi.reduction._get_real_axis(len(shape_like.shape), axis)
real_axis = sorted(real_axis)
def _compute(*idxs):
indices = []
axis_index = 0
for i in range(0, len(idxs)):
if i not in real_axis:
indices.append(idxs[i])
axis_index += 1
return a(*indices)
return te.compute(shape_like.shape, _compute)
def transpose(a, axes=None):
"""Permute the dimensions of an array.
Parameters
----------
a : tvm.te.Tensor
        The tensor to be transposed.
axes: tuple of ints, optional
By default, reverse the dimensions.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.transpose(a, axes)
def flip(a, axis=0):
"""Flip/reverse elements of an array in a particular axis.
Parameters
----------
a : tvm.te.Tensor
        The tensor to be flipped.
    axis : int, optional
        The axis along which the tensor will be reversed.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.flip(a, axis)
def reverse_sequence(a, seq_lengths, seq_axis=1, batch_axis=0):
"""Reverse the tensor for variable length slices.
Input is first sliced along batch axis and then elements are reversed along seq axis.
Parameters
----------
a : tvm.te.Tensor
The tensor to be reversed.
seq_lengths : tvm.te.Tensor
A 1D Tensor with length a.dims[batch_axis]
Must be one of the following types: int32, int64
if seq_lengths[i] > a.dims[seq_axis], it is rounded to a.dims[seq_axis]
if seq_lengths[i] < 1, it is rounded to 1
seq_axis : int, optional
The axis along which the elements will be reversed. Default is 1.
batch_axis : int, optional
The axis along which the tensor will be sliced. Default is 0.
Returns
-------
ret : tvm.te.Tensor
The computed result of same shape and type as of input.
"""
return cpp.reverse_sequence(a, seq_lengths, seq_axis, batch_axis)
def strided_slice(a, begin, end, strides=None, axes=None, slice_mode="end"):
"""Slice of an array.
Parameters
----------
a : tvm.te.Tensor
The tensor to be sliced.
begin : list of int
The indices to begin with in the slicing.
end : list of int
Indices indicating end of the slice.
strides : list of int, optional
Specifies the stride values, it can be negative
in that case, the input tensor will be reversed
in that particular axis.
axes : list of int, optional
        Axes along which slicing is applied. When it is specified, begin, end,
        strides, and axes need to be lists of integers of the same length.
slice_mode : str, optional
The slice mode [end, size].
end - The ending indices for the slice [default].
size - The input strides will be ignored, input end in this mode indicates
        the size of a slice starting at the location specified by begin. If end[i]
is -1, all remaining elements in that dimension are included in the slice.
Returns
-------
ret : tvm.te.Tensor
"""
if (
isinstance(begin, tvm.te.Tensor)
or isinstance(end, tvm.te.Tensor)
or isinstance(strides, tvm.te.Tensor)
):
assert axes is None, "axes argument is not supported by dynamic strided slice yet."
if not isinstance(begin, tvm.te.Tensor):
begin = const_vector(begin)
if not isinstance(end, tvm.te.Tensor):
end = const_vector(end)
if strides is None:
strides = [1] * begin.shape[0].value
if not isinstance(strides, tvm.te.Tensor):
strides = const_vector(strides)
return cpp.dynamic_strided_slice(a, begin, end, strides)
if strides is None:
strides = []
if axes is None:
axes = []
return cpp.strided_slice(a, begin, end, strides, axes, slice_mode)
@tvm.te.tag_scope(tag=tag.INJECTIVE + ",strided_set")
def strided_set(a, v, begin, end, strides=None):
"""Set slice of an array.
Parameters
----------
a : tvm.te.Tensor
The tensor to be sliced.
v : tvm.te.Tensor
The values to set
begin: tvm.te.Tensor
The indices to begin with in the slicing.
end: tvm.te.Tensor
Indices indicating end of the slice.
strides: tvm.te.Tensor, optional
Specifies the stride values, it can be negative
in that case, the input tensor will be reversed
in that particular axis.
Returns
-------
ret : tvm.te.Tensor
"""
n = len(a.shape)
if len(begin.shape) != 1:
raise ValueError("begin should be a vector")
if not begin.dtype == "int32":
raise TypeError("begin should be int32")
if len(end.shape) != 1:
raise ValueError("end should be a vector")
if not end.dtype == "int32":
raise TypeError("end should be int32")
if strides is not None:
if len(strides.shape) != 1:
raise ValueError("strides should be a vector")
if not strides.dtype == "int32":
raise TypeError("strides should be int32")
def _max(a, b):
return tvm.tir.Select(a > b, a, b)
if strides is None:
strides = [tvm.tir.const(1, "int32")] * n
else:
strides = [
tvm.tir.if_then_else(strides.shape[0] > i, strides[i], tvm.tir.const(1, "int32"))
for i in range(n)
]
begin = [
tvm.tir.if_then_else(
begin.shape[0] > i,
begin[i],
tvm.tir.Select(strides[i] > 0, tvm.tir.const(0, "int32"), a.shape[i]),
)
for i in range(n)
]
end = [
tvm.tir.if_then_else(
end.shape[0] > i,
end[i],
tvm.tir.Select(strides[i] > 0, a.shape[i] + 1, -(a.shape[i] + 1)),
)
for i in range(n)
]
# Convert negative indexes
for i in range(n):
begin[i] = tvm.tir.if_then_else(begin[i] < 0, begin[i] + a.shape[i], begin[i])
end[i] = tvm.tir.if_then_else(end[i] < 0, end[i] + a.shape[i], end[i])
def _select(*indices):
from_val = []
index_tuple = []
for i in range(n):
from_val.append(within_index(begin[i], end[i], strides[i], indices[i]))
index_tuple.append(make_idx(begin[i], end[i], strides[i], a.shape[i], indices[i]))
return tvm.tir.if_then_else(tvm.tir.all(*from_val), v(*index_tuple), a(*indices))
return te.compute(a.shape, _select, name="strided_set")
def reshape(a, newshape):
"""Reshape the array
Parameters
----------
a : tvm.te.Tensor
The tensor to be reshaped
newshape : tuple of ints
The new shape
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.reshape(a, newshape)
def squeeze(a, axis=None):
"""Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : tvm.te.Tensor
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one, an error is raised.
Returns
-------
squeezed : tvm.te.Tensor
"""
return cpp.squeeze(a, axis)
def concatenate(a_tuple, axis=0):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
a_tuple : tuple of tvm.te.Tensor
The arrays to concatenate
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.concatenate(a_tuple, axis)
def stack(a, axis):
"""Repeats the whole array multiple times.
Parameters
----------
a : tvm.te.Tensor
The tensor to be stacked.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.stack(a, axis)
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : tvm.te.Tensor
indices_or_sections : int or 1-D array
axis : int
Returns
-------
ret : tuple of tvm.te.Tensor
"""
return cpp.split(ary, indices_or_sections, axis)
def take(a, indices, axis=None, batch_dims=0, mode="clip"):
"""Take elements from an array along an axis.
Parameters
----------
a : tvm.te.Tensor
The source array.
indices : tvm.te.Tensor
The indices of the values to extract.
axis : int, optional
The axis over which to select values. By default,
the flattened input array is used.
batch_dims : int
        The number of batch dimensions, 0 by default.
mode : str, optional
Specifies how out-of-bound indices will behave.
clip - clip to the range (default)
wrap - wrap around the indices
fast - no clip or wrap around (user must make sure indices are in-bound)
Returns
-------
ret : tvm.te.Tensor
"""
if axis is None:
return cpp.take(a, indices, int(batch_dims), mode)
return cpp.take(a, indices, int(batch_dims), int(axis), mode)
@tvm.target.generic_func
def take_legalize(attrs, inputs, types):
"""Legalizes dyn.topk op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
if tvm.relay.ty.is_dynamic(types[0]):
return tvm.relay.take(tvm.relay.annotation.stop_fusion(inputs[0]), inputs[1], **attrs)
return None
def gather(data, axis, indices):
"""Gather values along given axis from given indices.
E.g. for a 3D tensor, output is computed as:
.. code-block:: python
out[i][j][k] = data[indices[i][j][k]][j][k] # if axis == 0
out[i][j][k] = data[i][indices[i][j][k]][k] # if axis == 1
out[i][j][k] = data[i][j][indices[i][j][k]] # if axis == 2
    ``indices`` must have the same shape as ``data``, except at dimension ``axis``,
    which can be of any nonzero size. The output will have the same shape as ``indices``.
Parameters
----------
data : tvm.te.Tensor
The input data to the operator.
axis: int
The axis along which to index.
indices : tvm.te.Tensor
The indices of the values to extract.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.gather(data, axis, indices)
def gather_nd(a, indices):
"""Gather elements from a n-dimension array..
Parameters
----------
a : tvm.te.Tensor
The source array.
indices : tvm.te.Tensor
The indices of the values to extract.
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.gather_nd(a, indices)
def matmul(a, b, transp_a=False, transp_b=False):
"""
Creates an operation that calculates a matrix multiplication (row-major notation):
A(i, k) * B(k, j)
    if neither input is transposed; the usual transposed combinations apply otherwise.
Parameters
----------
a : The matrix A
b : The matrix B
    transp_a : Is A's layout transposed?
    transp_b : Is B's layout transposed?
Returns
-------
A Tensor whose op member is the matmul operation
"""
return cpp.matmul(a, b, transp_a, transp_b)
def tensordot(a, b, axes):
"""A generalization of matrix multiplication to tensor.
Parameters
----------
a : The tensor A
b : The tensor B
axes : The number of dimensions to reduce over
Returns
-------
A Tensor computing the result
"""
if isinstance(axes, int):
return cpp.tensordot(a, b, axes)
if isinstance(axes[0], int):
return cpp.tensordot(a, b, (axes[0],), (axes[1],))
return cpp.tensordot(a, b, axes[0], axes[1])
def arange(start, stop=None, step=1, dtype="float32"):
"""Creates a tensor with evenly spaced values within a given interval.
Parameters
----------
start : tvm.Expr, optional
Start of interval. The interval includes this value. The default start
value is 0.
stop : tvm.Expr
Stop of interval. The interval does not include this value.
step : tvm.Expr, optional
Spacing between values. The default step size is 1.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.te.Tensor
The resulting tensor.
"""
if stop is None:
stop = start
start = 0
return cpp.arange(start, stop, step, dtype)
def meshgrid(a_tuple, indexing):
"""Create coordinate matrices from coordinate vectors.
Parameters
----------
a_tuple : tuple of tvm.te.Tensor
The coordinate vectors or scalars.
indexing : str
Indexing mode, either "ij" or "xy".
Returns
-------
result : tuple of tvm.te.Tensor
The resulting grids for each axis.
"""
return cpp.meshgrid(a_tuple, indexing)
def repeat(a, repeats, axis):
"""Repeats elements of an array.
Parameters
----------
a : tvm.te.Tensor
The tensor to be repeated.
repeats: int, required
Number of repetitions for each element
axis: int, optional
The axis along which to repeat values
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.repeat(a, repeats, axis)
def tile(a, reps):
"""Repeats the whole array multiple times.
Parameters
----------
a : tvm.te.Tensor
The tensor to be tiled.
reps: tuple of ints, required
The number of times for repeating the tensor
Returns
-------
ret : tvm.te.Tensor
"""
return cpp.tile(a, reps)
def layout_transform(array, src_layout, dst_layout):
"""Transform the layout according to src_layout and dst_layout
Parameters
----------
array : tvm.te.Tensor
The source array.
src_layout : str
the source layout.
dst_layout : str
the destination layout.
"""
return cpp.layout_transform(array, src_layout, dst_layout)
def shape(array, dtype="int32"):
"""Get the shape of input array
Parameters
----------
array : tvm.te.Tensor
The source tensor.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.te.Tensor
The resulting tensor.
"""
return cpp.shape(array, dtype)
def sequence_mask(data, valid_length, mask_value=0, axis=0):
"""Sets all elements outside the expected length of the sequence to a constant value.
This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or
[batch_size, MAX_LENGTH, ...] and returns an array of the same shape.
`axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0,
the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have
shape [batch_size, MAX_LENGTH, ...].
`valid_length` gives the length of each sequence. `valid_length` should be
a 1D int array with positive ints and has dimension [batch_size,].
Parameters
----------
data : tvm.te.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
valid_length : tvm.te.Tensor
1-D with shape [batch_size,]
mask_value : float, optional
The masking value, default 0
axis : int, optional
axis of the length dimension, must be 0 or 1, default 0
Returns
-------
output : tvm.te.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
"""
assert len(data.shape) >= 2, "only support data.ndim >= 2, received data.shape = {}".format(
data.shape
)
assert axis in (0, 1), "only support axis = 0, 1, received axis = {}".format(axis)
return cpp.sequence_mask(data, valid_length, mask_value, axis)
def ndarray_size(array, dtype="int32"):
"""Get the number of elements of input array
Parameters
----------
array : tvm.te.Tensor
The source tensor.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.te.Tensor
The resulting tensor.
"""
return cpp.ndarray_size(array, dtype)
def where(condition, x, y):
"""Get the elements, either from x or y, depending on the condition.
Parameters
----------
condition : tvm.te.Tensor
The condition array.
x : tvm.te.Tensor
First array to be selected.
y : tvm.te.Tensor
Second array to be selected.
Returns
-------
result : tvm.te.Tensor
A Tensor selected from x or y depending on condition.
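    Examples
    --------
    Illustrative sketch:
    .. code-block:: python
        condition = [1, 0, 1]
        x = [1, 2, 3]
        y = [4, 5, 6]
        where(condition, x, y) = [1, 5, 3]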
"""
return cpp.where(condition, x, y)
def one_hot(indices, on_value, off_value, depth, axis, dtype):
"""
    Returns a one-hot tensor where the locations represented by indices take value on_value,
    other locations take value off_value.
    The output shape is <indices outer dimensions> x depth x <indices inner dimensions>.
Parameters
----------
indices : tvm.te.Tensor
Locations to set to on_value.
on_value : tvm.te.Tensor
Value to fill at indices.
off_value : tvm.te.Tensor
Value to fill at all other positions besides indices.
depth : int
Depth of the one-hot dimension.
axis : int
Axis to fill.
dtype : relay.DataType
Data type of the output tensor.
Returns
-------
ret : relay.Expr
The one-hot tensor.
Examples
--------
.. code-block:: python
indices = [0, 1, 2]
        relay.one_hot(indices, 1, 0, 3, -1, "int32") =
[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
"""
return cpp.one_hot(indices, on_value, off_value, depth, axis, dtype)
def unravel_index(indices, shape):
"""Convert a flat index or array of flat indices into a tuple of coordinate arrays.
Example::
- unravel_index([22, 41, 37], [7, 6]) = [[3, 6, 6], [4, 5, 1]]
Parameters
----------
indices : relay.Expr
An integer array containing indices.
shape : relay.Expr
The shape of the array.
Returns
-------
result : relay.Expr
The tuple of coordinate arrays.
"""
return cpp.unravel_index(indices, shape)
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0):
"""Converts a sparse representation into a dense tensor.
Example::
- sparse_to_dense([[0, 0], [1, 1]], [2, 2], [3, 3], 0) = [[3, 0], [0, 3]]
Parameters
----------
sparse_indices : tvm.te.Tensor
A 0-D, 1-D, or 2-D tensor of integers containing location of sparse values.
output_shape : A list of integers
Shape of the dense output tensor.
sparse_values : tvm.te.Tensor
A 0-D or 1-D tensor containing the sparse values for the sparse indices.
default_value : tvm.te.Tensor
A 0-D tensor containing the default value for the remaining locations.
Defaults to 0.
Returns
-------
result : tvm.te.Tensor
Dense tensor of shape output_shape. Has the same type as sparse_values.
"""
return cpp.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
def matrix_set_diag(data, diagonal, k=0, align="RIGHT_LEFT"):
"""
Returns a tensor with the diagonals of input tensor replaced with the provided diagonal values.
Parameters
----------
data : relay.Expr
Input Tensor.
diagonal : relay.Expr
Values to be filled in the diagonal.
k : int or tuple of int, optional
Diagonal Offset(s). The diagonal or range of diagonals to set. (0 by default)
Positive value means superdiagonal, 0 refers to the main diagonal, and
negative value means subdiagonals. k can be a single integer (for a single diagonal)
or a pair of integers specifying the low and high ends of a matrix band.
k[0] must not be larger than k[1].
align : string, optional
Some diagonals are shorter than max_diag_len and need to be padded.
align is a string specifying how superdiagonals and subdiagonals should be aligned,
respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT",
"LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right
(left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing
format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns
-------
result : relay.Expr
New tensor with given diagonal values.
Examples
--------
.. code-block:: python
data = [[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]]
diagonal = [[1, 2, 3],
[4, 5, 6]]
        topi.matrix_set_diag(data, diagonal) =
[[[1, 7, 7, 7],
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
"""
if isinstance(k, (tuple, list)):
k_one = k[0]
if len(k) >= 2:
k_two = k[1]
else:
k_two = k[0]
else:
k_one = k
k_two = k
super_diag_right_align = align[:5] == "RIGHT"
sub_diag_right_align = align[-5:] == "RIGHT"
return cpp.matrix_set_diag(
data, diagonal, k_one, k_two, super_diag_right_align, sub_diag_right_align
)
def adv_index(data, indices):
"""Numpy style indexing with tensors.
Parameters
----------
data : tvm.te.Tensor
Input data.
indices : A list of tvm.te.Tensor
Tensor index.
Returns
-------
result : tvm.te.Tensor
Output tensor
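    Examples
    --------
    Illustrative sketch, assuming numpy-style advanced indexing on axis 0:
    .. code-block:: python
        data = [[1, 2], [3, 4], [5, 6]]
        indices = [[2, 0]]  # one index tensor, applied to the first axis
        adv_index(data, indices) = [[5, 6],
                                    [1, 2]]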
"""
return cpp.adv_index(data, indices)
@hybrid.script
def invert_permutation(data):
"""Computes the inverse permutation of data.
Parameters
----------
data : tvm.te.Tensor
Input data
Returns
-------
result : tvm.te.Tensor
Output tensor
Examples
--------
.. code-block:: python
data = [3, 4, 0, 2, 1]
topi.invert_permutation(data) = [2, 4, 3, 0, 1]
"""
result = output_tensor(data.shape, data.dtype)
nums = data.shape[0]
for ind in range(nums):
r_ind = data[ind]
result[r_ind] = ind
return result
def sliding_window(data, axis, window_shape, strides):
"""Slide a window over the data tensor.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : int
What axis the window begins sliding over. Window will be slid over
this axis and all following axes. The axis value determines the window
shape (and thus, the number of strides): window shape and strides must
both be of length `data.ndim-axis`.
window_shape : List[int]
The window shape to form over the input. Window shape must be of length
`data.ndim-axis`.
strides : List[int]
How to stride the window along each dimension. Strides must be of length
`data.ndim-axis`.
Returns
-------
result : relay.Expr
The resulting tensor.
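    Examples
    --------
    Illustrative sketch for the 1-D case (window counts come before window dims):
    .. code-block:: python
        data = [1, 2, 3, 4]  # shape (4,)
        sliding_window(data, 0, [2], [1]) = [[1, 2],
                                             [2, 3],
                                             [3, 4]]  # shape (3, 2)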
"""
return cpp.sliding_window(data, axis, window_shape, strides)
def trilu(data, k, upper):
"""
Given a 2-D matrix or batches of 2-D matrices, returns the
upper or lower triangular part of the tensor.
Parameters
----------
data: tvm.te.Tensor
The tensor that trilu will be applied to. Must be either
a 2D matrix or a tensor of batches of 2D matrices.
k: tvm.te.Tensor
The number of diagonals above or below the main diagonal
to exclude or include.
upper: bool
If True, only upper triangular values of input are kept,
if False, the lower triangular values are kept.
Returns
-------
ret : relay.Expr
The new tensor with appropriate diagonals set to zero.
Examples
--------
.. code-block:: python
x = [[0, 1, 2],
[3, 4, 5],
[6, 7, 8]]
        relay.trilu(x, 0, True) =
[[0, 1, 2],
[0, 4, 5],
[0, 0, 8]]
"""
# Make sure datatype is consistent.
if k.dtype != "int32":
k = tvm.tir.Cast("int32", k)
# Check either above or below diagonal depending on upper.
check_op = tvm.tir.GE
if upper:
check_op = tvm.tir.LE
def _apply_trilu(*indices):
row_index = indices[-2]
col_index = indices[-1]
# promote row & col indices
if row_index.dtype != col_index.dtype:
target_type = (col_index + row_index).dtype
if row_index.dtype != target_type:
row_index = tvm.tir.Cast(target_type, row_index)
else:
col_index = tvm.tir.Cast(target_type, col_index)
other_indices = indices[:-2]
check_position = check_op(row_index, col_index - k)
value = data(*other_indices, row_index, col_index)
return tvm.tir.Select(check_position, value, tvm.tir.const(0, data.dtype))
return te.compute(data.shape, _apply_trilu, name="trilu", tag=topi.tag.ELEMWISE)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
from tvm import te, tir
from ..te import hybrid
from .scan import cumsum
from .sort import sort, argsort
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
"""Low level IR to calculate adjacent difference in an 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
output: Buffer
A buffer to store adjacent difference, of the same shape as data. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
binop: function, optional
A binary associative op to use for calculating adjacent difference. The function takes two
        TIR expressions and produces a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
output_ptr = ib.buffer_ptr(output)
with ib.for_range(0, data.shape[0], kind="parallel") as i:
with ib.if_scope(i == 0):
output_ptr[0] = 0
with ib.else_scope():
output_ptr[i] = tir.Cast(output.dtype, binop(data_ptr[i], data_ptr[i - 1]))
return ib.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
"""Function calculate adjacent difference in an 1-D array.
Parameters
----------
data : tvm.te.Tensor
Input 1-D tensor.
    out_dtype : str
The output tensor data type.
binop: function, optional
A binary associative op to use for calculating difference. The function takes two
        TIR expressions and produces a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
Returns
-------
output : tvm.te.Tensor
1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
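    Examples
    --------
    Illustrative sketch with binop=tir.NE, as used by unique():
    .. code-block:: python
        data = [1, 1, 2, 3, 3]
        _calc_adjacent_diff(data, "int32", tir.NE) = [0, 0, 1, 1, 0]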
"""
return te.extern(
[data.shape],
[data],
lambda ins, outs: _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop),
dtype=[out_dtype],
name="_calc_adjacent_diff",
tag="_calc_adjacent_diff_cpu",
)
@hybrid.script
def _calc_num_unique(inc_scan):
"""Helper function to get the number of unique elements fron inc_scan tensor"""
output = output_tensor((1,), "int32")
output[0] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
return output
def _calc_unique_ir(
data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
"""Low level IR to calculate unique elements, inverse indices, and counts (optional) of
    unique elements of a 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
index_converter (optional) : Buffer
An optional index converter that transforms the unique element index
such that new_idx = index_converter[old_idx].
unique_elements : Buffer
A buffer that stores the unique elements.
inverse_indices : Buffer
A buffer that stores the index of each input data element in the unique element array.
counts (optional) : Buffer
A buffer that stores the count of each unique element.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
unique_elements_ptr = ib.buffer_ptr(unique_elements)
inverse_indices_ptr = ib.buffer_ptr(inverse_indices)
index_converter_ptr = None
if isinstance(index_converter, tir.Buffer):
index_converter_ptr = ib.buffer_ptr(index_converter)
if isinstance(counts, tir.Buffer):
counts_ptr = ib.buffer_ptr(counts)
# use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)
data_length = data.shape[0]
# if need to return counts
if isinstance(counts, tir.Buffer):
num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
num_elements = data.shape[0]
unique_seq_indices_ptr[num_unique - 1] = num_elements
with ib.new_scope():
with ib.for_range(0, data_length, kind="parallel") as i:
with ib.if_scope(i > 0):
with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
unique_seq_indices_ptr[inc_scan_ptr[i] - 1] = i
with ib.new_scope():
with ib.for_range(0, num_unique, kind="parallel") as i:
unique_idx = i if not index_converter_ptr else index_converter_ptr[i]
with ib.if_scope(i == 0):
counts_ptr[unique_idx] = unique_seq_indices_ptr[i]
with ib.else_scope():
counts_ptr[unique_idx] = (
unique_seq_indices_ptr[i] - unique_seq_indices_ptr[i - 1]
)
# calculate unique elements and inverse indices
with ib.new_scope():
with ib.for_range(0, data_length, kind="parallel") as i:
data_idx = argsorted_indices_ptr[i]
unique_idx = (
inc_scan_ptr[i] if not index_converter_ptr else index_converter_ptr[inc_scan_ptr[i]]
)
inverse_indices_ptr[data_idx] = unique_idx
with ib.if_scope(i == 0):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
return ib.get()
@hybrid.script
def _calc_first_occurence(argsorted_indices, inc_scan):
"""Hybrid script to calculate the first occurence of each unique element in the input data.
Parameters
----------
argsorted_indices : tvm.te.Tensor
A tensor that stores the argsorted indices of the input data.
inc_scan : tvm.te.Tensor
A tensor that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
    Returns
    -------
    first_occurence : tvm.te.Tensor
        A tensor that stores the first occurrence of each unique element in the input data.
"""
first_occurence = output_tensor(argsorted_indices.shape, "int32")
for i in parallel(argsorted_indices.shape[0]):
first_occurence[i] = argsorted_indices.shape[0]
for i in parallel(argsorted_indices.shape[0]):
if i == 0 or inc_scan[i] != inc_scan[i - 1]:
first_occurence[inc_scan[i]] = argsorted_indices[i]
return first_occurence
def unique(data, is_sorted=True, return_counts=False):
"""
    Find the unique elements of a 1-D tensor. Note that `output` and `counts` are both padded to
    the same length as `data`; elements with index >= num_unique[0] have undefined values.
Parameters
----------
data : tvm.te.Tensor
A 1-D tensor of integers.
    is_sorted : bool
Whether to sort the unique elements in ascending order before returning as output.
return_counts : bool
Whether to return the count of each unique element.
Returns
-------
unique : tvm.te.Tensor
A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are fewer unique elements than input data, the end of the tensor
is padded with zeros.
indices : tvm.te.Tensor
A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurrence in the input data. The end of the tensor is padded
with the length of the input data.
inverse_indices : tvm.te.Tensor
A 1-D tensor. For each entry in data, it contains the index of that data element in
the unique array. (Note that inverse_indices is very similar to indices if output is not
sorted.)
num_unique : tvm.te.Tensor
A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
counts (optional) : tvm.te.Tensor
A 1-D tensor containing the count of each unique element in the output.
Examples
--------
.. code-block:: python
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
output = [4, 5, 1, 2, 3, _, _, _]
indices = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
        [output, indices, inverse_indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
output = [4, 5, 1, 2, 3, _, _, _]
indices = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
counts = [2, 2, 1, 1, 2, _, _, _]
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
output = [1, 2, 3, 4, 5, _, _, _]
indices = [2, 3, 4, 0, 1, _, _, _]
inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
num_unique = [5]
"""
sorted_data = sort(data)
argsorted_indices = argsort(data, dtype="int32")
# adjacent difference
adjacent_diff = _calc_adjacent_diff(sorted_data, "int32", tir.NE)
# inclusive scan
inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
# total number of unique elements
num_unique_elements = _calc_num_unique(inc_scan)
# prepare outputs
if return_counts:
out_data_shape = [data.shape] * 3
out_dtypes = [data.dtype, "int32", "int32"]
else:
out_data_shape = [data.shape] * 2
out_dtypes = [data.dtype, "int32"]
# prepare inputs and fcompute
first_occurence = _calc_first_occurence(argsorted_indices, inc_scan)
if is_sorted:
in_data = [data, argsorted_indices, inc_scan]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
indices = first_occurence
else:
        # calculate index converter by sorting unique elements by their first occurrence
argsorted_first_occurence = argsort(first_occurence, dtype="int32")
index_converter = argsort(argsorted_first_occurence, dtype="int32")
in_data = [data, argsorted_indices, inc_scan, index_converter]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
        # First occurrences are in the order of the sorted unique output; sorting the
        # first_occurence array therefore gives the correct indices
indices = sort(first_occurence)
outs = te.extern(
out_data_shape,
in_data,
fcompute,
dtype=out_dtypes,
name="_calc_unique",
tag="_calc_unique_cpu",
)
if return_counts:
return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
return [outs[0], indices, outs[1], num_unique_elements]
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common topi utilities"""
from __future__ import absolute_import as _abs
from numbers import Integral
import numpy as np
import tvm
from tvm import te
from tvm.tir import bijective_layout, layout
from . import cpp, tag
class InvalidShapeError(ValueError):
"""Invalid shape for a topi function. i.e. call winograd template for non-3x3 kernel)"""
def ncw_pack_layout(layout_info):
"""Check whether the layout type is NCWinic"""
return layout_info[:3] == "NCW" and "c" in layout_info and "n" in layout_info
def ncw_xc_layout(layout_info):
"""Check whether the layout type is NCWxc"""
return layout_info[:3] == "NCW" and "c" in layout_info and layout_info[3:-1].isnumeric()
def nchw_pack_layout(layout_info):
"""Check whether the layout type is NCHWinic"""
return layout_info[:4] == "NCHW" and "c" in layout_info and "n" in layout_info
def nchw_xc_layout(layout_info):
"""Check whether the layout type is NCHWxc"""
return layout_info[:4] == "NCHW" and "c" in layout_info and layout_info[4:-1].isnumeric()
def traverse_inline(s, final_op, callback):
"""Traverse computation graph and do auto inline
Parameters
----------
s: schedule
The schedule
final_op: Operation
The final output operator.
callback: callable
The callback function on each op
"""
visited = set()
def _traverse(op):
if op in visited:
return
visited.add(op)
if tag.is_injective(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp):
_traverse(tensor.op)
callback(op)
_traverse(final_op)
def prod(x):
"""Get the product of every items in the tuple.
Parameters
----------
x: tuple
Input tuple
Returns
-------
value : Expr
The result value
"""
if not x:
return tvm.tir.const(1, "int32")
res = x[0]
for i in range(1, len(x)):
res = res * x[i]
return res
def get_const_int(expr):
"""Verifies expr is integer and get the constant value.
Parameters
----------
expr : tvm.Expr or int
The input expression.
Returns
-------
out_value : int
The output.
"""
if isinstance(expr, Integral):
return expr
if not isinstance(expr, tvm.tir.IntImm):
ana = tvm.arith.Analyzer()
expr = ana.simplify(expr)
if not isinstance(expr, tvm.tir.IntImm):
raise ValueError("Expect value to be constant int")
return int(expr.value)
def get_const_float(expr):
"""Verifies expr is a floating point and get the constant value.
Parameters
----------
expr : tvm.Expr or float
The input expression.
Returns
-------
out_value : float
The output.
"""
if isinstance(expr, float):
return float(expr)
if not isinstance(expr, tvm.tir.FloatImm):
ana = tvm.arith.Analyzer()
expr = ana.simplify(expr)
if not isinstance(expr, tvm.tir.FloatImm):
raise ValueError("Expect value to be constant float")
return float(expr.value)
def equal_const_int(expr, value):
"""Returns if expr equals value.
Parameters
----------
expr : tvm.Expr
The input expression.
Returns
-------
equal : bool
Whether they equals.
"""
if isinstance(expr, Integral):
return expr == value
if not isinstance(expr, tvm.tir.IntImm):
ana = tvm.arith.Analyzer()
expr = ana.simplify(expr)
if not isinstance(expr, tvm.tir.IntImm):
return False
return expr.value == value
def get_const_tuple(in_tuple):
"""Verifies input tuple is IntImm or Var, returns tuple of int or Var.
Parameters
----------
in_tuple : tuple of Expr
The input.
Returns
-------
out_tuple : tuple of int
The output.
"""
ret = []
ana = None
for elem in in_tuple:
if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):
ret.append(elem)
elif not isinstance(elem, (tvm.tir.IntImm, int)):
ana = tvm.arith.Analyzer() if ana is None else ana
elem = ana.simplify(elem)
if not isinstance(elem, tvm.tir.IntImm):
ret.append(elem)
else:
ret.append(get_const_int(elem))
else:
ret.append(get_const_int(elem))
return tuple(ret)
def const_vector(vector, name="const_vector"):
"""convert a const numpy 1-dimensional vector to tvm tensor
Parameters
----------
vector: numpy.ndarray
Const input array
name: str, optional
The name of output op
Returns
-------
tensor: Tensor
The created tensor
"""
if not isinstance(vector, np.ndarray):
vector = np.array(vector)
row = vector.shape[0]
dtype = str(vector.dtype)
idxm = tvm.tir.indexmod
def select_array(i):
now = tvm.tir.const(0.0, dtype)
for ii in range(row):
now = tvm.tir.Select(
tvm.tir.all(idxm(i, row) == ii),
tvm.tir.const(vector[ii], dtype),
now,
)
return now
return te.compute(vector.shape, select_array, name=name)
def get_float_tuple(in_tuple):
"""Verifies input tuple is FloatImm, returns tuple of float.
Parameters
----------
in_tuple : tuple of Expr
The input.
Returns
-------
out_tuple : tuple of float
The output.
"""
return tuple(get_const_float(elem) for elem in in_tuple)
def simplify(expr):
"""Simplify the expression if it is Expr, directly return if it is int.
Parameters
----------
expr : Expr or int
The input.
Returns
-------
out : Expr or int
The simplified output
"""
return tvm.arith.Analyzer().simplify(expr) if isinstance(expr, tvm.tir.PrimExpr) else expr
def ravel_index(indices, shape):
"""Flatten the index tuple to 1D
Parameters
----------
indices : tuple of int or tvm.tir.IntImm
The input coordinates
shape : tuple of int
Shape of the tensor.
Returns
-------
idx : int or Expr
The index after flattening
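    Examples
    --------
    Illustrative sketch:
    .. code-block:: python
        ravel_index((1, 2), (3, 4)) = 6  # 1 * 4 + 2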
"""
idx = None
for i, (shape_val, ind) in enumerate(zip(shape, indices)):
if i != 0:
idx = idx * shape_val + ind
else:
idx = ind
return idx
def unravel_index(idx, shape):
"""Convert the flattened ind to the coordinate array
Parameters
----------
idx : int or tvm.tir.IntImm
The 1D index
shape : tuple of int
Shape of the tensor
Returns
-------
indices : tuple of int or tvm.tir.IntImm
Corresponding coordinate of the 1D index
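    Examples
    --------
    Illustrative sketch (the inverse of ravel_index):
    .. code-block:: python
        unravel_index(6, (3, 4)) = [1, 2]  # since 6 == 1 * 4 + 2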
"""
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
indices = []
for i, dim in enumerate(reversed(shape)):
if dim == 0:
indices.append(0)
elif i == len(shape) - 1:
# Assuming the index is in-bounds, the last coordinate is
            # already less than dim, and doesn't need to be taken
            # modulo dim.
indices.append(idx)
else:
indices.append(idxm(idx, dim))
idx = idxd(idx, dim)
indices = indices[::-1]
return indices
def const_matrix(matrix, name="const_matrix", attrs=None):
"""convert a const numpy 2-dimensional matrix to tvm tensor
Parameters
----------
matrix: numpy.ndarray
Const input array
name: str, optional
The name of output op
Returns
-------
tensor: Tensor
The created tensor
"""
row, col = matrix.shape
dtype = str(matrix.dtype)
idxm = tvm.tir.indexmod
def select_array(i, j):
now = tvm.tir.const(0.0, dtype)
for ii in range(row):
for jj in range(col):
now = tvm.tir.Select(
tvm.tir.all(idxm(i, row) == ii, idxm(j, col) == jj),
tvm.tir.const(matrix[ii][jj], dtype),
now,
)
return now
if attrs is None:
attrs = {
"const_matrix": True,
"schedule_rule": "None",
}
return te.compute(
matrix.shape,
select_array,
name=name,
attrs=attrs,
)
def get_max_power2_factor(n, max_value=None):
"""Get max factor of n in power of 2. If max_value is specificed, max factor
value will be no more max_value,
Parameter
---------
n : int
The input value
max_value : int, optional
The max value for the factor
Returns
-------
factor : int
The max factor in power of 2.
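    Examples
    --------
    Illustrative sketch:
    .. code-block:: python
        get_max_power2_factor(48) = 16     # 48 == 16 * 3
        get_max_power2_factor(48, 8) = 8   # capped by max_value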
"""
x = 1
while n % 2 == 0:
if max_value is not None and max_value < x * 2:
break
x *= 2
n /= 2
return x
def get_shape(src_shape, src_layout, dst_layout):
"""Given a source shape, a source layout and a destination layout, infer
the destination shape.
    Parameters
    ----------
src_shape : tuple of int or IntImm
Source shape
src_layout : str or Layout
Source layout
dst_layout : str or Layout
Destination layout
Returns
-------
dst_shape : tuple of int
Destination shape
"""
if src_layout == dst_layout:
return get_const_tuple(src_shape)
if isinstance(src_layout, str):
src_layout = layout(src_layout)
if isinstance(dst_layout, str):
dst_layout = layout(dst_layout)
assert len(src_layout) == len(dst_layout), "Incompatible layout %s vs %s" % (
src_layout,
dst_layout,
)
layout_mapping = bijective_layout(src_layout, dst_layout)
dst_indices = layout_mapping.forward_index(tvm.runtime.convert(list(range(len(src_layout)))))
return get_const_tuple(tuple([src_shape[i.value] for i in dst_indices]))
def within_index(b, e, s, i):
"""Return a boolean value that indicates if i is within the given index.
Parameters
----------
b : Expr
beginning of the index
e : Expr
end of the index
s : Expr
strides of index
i : Expr
array position
Returns
-------
selected: Expr
        bool expression that is True if the array position would be selected
by the index and False otherwise
"""
bc = tvm.tir.Select(s < 0, i <= e, i < b)
ec = tvm.tir.Select(s < 0, i > b, i >= e)
ss = te.if_then_else(s < 0, ((i - e) + (e % te.abs(s)) + 1) % te.abs(s), (i - b) % s)
return tvm.tir.Select(tvm.tir.Or(bc, ec), tvm.tir.const(False), ss.equal(0))
def make_idx(b, e, s, z, i):
"""Return the array position in the selection that corresponds to an
array position in the full array.
The returned value is only meaningful if within_index() returns True
for the same set of parameters.
Parameters
----------
b : Expr
beginning of the index
e : Expr
end of the index
s : Expr
strides of index
z : Expr
size of the indexed dimension
i : Expr
array position
Returns
-------
position: Expr
int expression that corresponds to an array position in the selection.
"""
bc = tvm.tir.Select(s < 0, i <= e, i < b)
ec = tvm.tir.Select(s < 0, i > b, i >= e)
# Clamp to array size
b = tvm.tir.Select(z < b, z - 1, b)
ss = tvm.tir.if_then_else(s < 0, (b - i) // te.abs(s), (i - b) // s)
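    # When bc or ec holds, i lies outside the selection and the result is not
    # meaningful; the constant 88 is an arbitrary sentinel (see within_index()).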
return tvm.tir.if_then_else(tvm.tir.Or(bc, ec), 88, ss)
def is_empty_shape(shape):
"""Check whether an input shape has dimesion with size 0.
Parameter
---------
shape : list of Expr
Input shape
Returns
-------
is_empty: bool
        Whether the input shape is empty or has a dimension with size 0.
"""
return cpp.utils.is_empty_shape(shape)
def ceil_div(a, b):
"""Return ceil division of a by b"""
return tvm.tir.indexdiv(a + (b - 1), b)
def swap(arr, axis):
"""swap arr[axis] and arr[-1]"""
return arr[:axis] + [arr[-1]] + arr[axis + 1 : -1] + [arr[axis]]
def is_target(names):
"""Return True if the name of the current target is one of provided names"""
names = [names] if isinstance(names, str) else names
target = tvm.target.Target.current(allow_none=False)
return any(name in target.keys for name in names)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from . import ssd
from .reorg import *
from .nms import *
from .rcnn import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/nms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-error, invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable, too-many-nested-blocks, too-many-branches, too-many-statements, too-many-function-args
"""Non-maximum suppression operator"""
import tvm
from tvm import te
from tvm.te import hybrid
from tvm.tir import if_then_else
from ..sort import argsort
from ..math import cast
from ..transform import reshape, gather
from .. import reduction
from ..scan import cumsum
from .nms_util import (
binary_search,
collect_selected_indices,
collect_selected_indices_and_scores,
run_all_class_nms,
)
@hybrid.script
def hybrid_rearrange_box_out(data, one, batch_size, num_anchors):
"""Hybrid routine to rearrange nms output to
move all valid entries to top.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
NMS output. 3-D tensor with shape
[batch_size, num_anchors, 6].
one: tvm.tir.const
Constant one with the same dtype as data.
batch_size: tvm.tir.IntImm or tvm.tir.Var
Batch size. We need to pass it in since hybrid script doesn't support
binding variable to symbolic dim.
num_anchors: tvm.tir.IntImm or tvm.tir.Var
Number of anchors.
Returns
-------
output : tvm.te.Tensor or numpy NDArray
Transformed NMS output. 3-D tensor with shape
[batch_size, num_anchors, 6].
"""
elem_length = data.shape[2]
output = output_tensor((batch_size, num_anchors, elem_length), data.dtype)
valid_indices = allocate((batch_size,), "int32")
for i in parallel(batch_size):
valid_indices[i] = 0
for j in range(num_anchors):
if data[i, j, 0] >= 0:
for k in range(elem_length):
output[i, valid_indices[i], k] = data[i, j, k]
valid_indices[i] += 1
if j >= valid_indices[i]:
for k in range(elem_length):
output[i, j, k] = -one
return output
@hybrid.script
def hybrid_rearrange_indices_out(data, one, batch_size, num_anchors):
"""Hybrid routine to rearrange nms output to
move all valid entries to top.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
NMS output. 3-D tensor with shape
[batch_size, num_anchors, 6] or
[batch_size, num_anchors, 5], or 2-D
tensor with shape [batch_size, num_anchors].
one: tvm.tir.const
Constant one with the same dtype as data.
batch_size: tvm.tir.IntImm or tvm.tir.Var
Batch size. We need to pass it in since hybrid script doesn't support
binding variable to symbolic dim.
num_anchors: tvm.tir.IntImm or tvm.tir.Var
Number of anchors.
Returns
-------
output : tvm.te.Tensor or numpy NDArray
2-D tensor with shape [batch_size, num_anchors].
valid_box_count : tvm.te.Tensor or numpy NDArray
Tensor with shape [batch_size, 1], indicates
the valid number of boxes.
"""
valid_box_count = output_tensor((batch_size, 1), "int32")
output = output_tensor((batch_size, num_anchors), data.dtype)
valid_indices = allocate((batch_size,), "int32")
for i in parallel(batch_size):
valid_indices[i] = 0
for j in range(num_anchors):
if data[i, j] >= 0:
output[i, valid_indices[i]] = data[i, j]
valid_indices[i] += 1
if data[i, j] > num_anchors or data[i, j] < -num_anchors:
output[i, valid_indices[i]] = 0
valid_indices[i] += 1
if j >= valid_indices[i]:
output[i, j] = -one
valid_box_count[i, 0] = valid_indices[i]
return output, valid_box_count
@hybrid.script
def hybrid_get_valid_counts(
data, score_threshold, id_index, score_index, one, batch_size, num_anchors
):
"""Hybrid routine to get valid count of bounding boxes
given a score threshold. Also moves valid boxes to the
top of input data.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
Input data. 3-D tensor with shape [batch_size, num_anchors, 6]
or [batch_size, num_anchors, 5].
score_threshold : tvm.te.Tensor
Lower limit of score for valid bounding boxes.
id_index : tvm.tir.const
index of the class categories, -1 to disable.
score_index: tvm.tir.const
Index of the scores/confidence of boxes.
one: tvm.tir.const
Constant one with the same dtype as data.
batch_size: tvm.tir.IntImm or tvm.tir.Var
Batch size. We need to pass it in since hybrid script doesn't support
binding variable to symbolic dim.
num_anchors: tvm.tir.IntImm or tvm.tir.Var
Number of anchors.
Returns
-------
valid_count : tvm.te.Tensor or numpy NDArray
1-D tensor for valid number of boxes.
out_tensor : tvm.te.Tensor or numpy NDArray
Rearranged data tensor.
out_indices: tvm.te.Tensor or numpy NDArray
Related index in input data.
"""
box_data_length = data.shape[2]
valid_count = output_tensor((batch_size,), "int32")
out_tensor = output_tensor((batch_size, num_anchors, box_data_length), data.dtype)
out_indices = output_tensor((batch_size, num_anchors), "int32")
for i in parallel(batch_size):
valid_count[i] = 0
for j in range(num_anchors):
score = data[i, j, score_index]
if score > score_threshold and (id_index < 0 or data[i, j, id_index] >= 0):
for k in range(box_data_length):
out_tensor[i, valid_count[i], k] = data[i, j, k]
out_indices[i, valid_count[i]] = j
valid_count[i] += 1
if j >= valid_count[i]:
for k in range(box_data_length):
out_tensor[i, j, k] = -one
out_indices[i, j] = -1
return valid_count, out_tensor, out_indices
def get_valid_counts(data, score_threshold=0, id_index=0, score_index=1):
"""Get valid count of bounding boxes given a score threshold.
Also moves valid boxes to the top of input data.
Parameters
----------
data : tvm.te.Tensor
Input data. 3-D tensor with shape [batch_size, num_anchors, 6]
or [batch_size, num_anchors, 5].
score_threshold : optional, float
Lower limit of score for valid bounding boxes.
id_index : optional, int
index of the class categories, -1 to disable.
score_index: optional, int
Index of the scores/confidence of boxes.
Returns
-------
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes.
out_tensor : tvm.te.Tensor
Rearranged data tensor.
out_indices: tvm.te.Tensor or numpy NDArray
Related index in input data.
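    Examples
    --------
    Illustrative sketch, with boxes laid out as [class_id, score, x1, y1, x2, y2]:
    .. code-block:: python
        data = [[[0, 0.9, 1, 1, 2, 2], [-1, 0.3, 0, 0, 1, 1]]]
        valid_count, out_tensor, out_indices = get_valid_counts(data, 0.5)
        valid_count = [1]
        out_tensor = [[[0, 0.9, 1, 1, 2, 2], [-1, -1, -1, -1, -1, -1]]]
        out_indices = [[0, -1]]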
"""
if isinstance(score_threshold, (float, int)):
score_threshold = tvm.tir.const(score_threshold, dtype=data.dtype)
id_index_const = tvm.tir.const(id_index, "int32")
score_index_const = tvm.tir.const(score_index, "int32")
return hybrid_get_valid_counts(
data,
score_threshold,
id_index_const,
score_index_const,
tvm.tir.const(1, data.dtype),
data.shape[0],
data.shape[1],
)
@hybrid.script
def hybrid_nms(
data,
sorted_index,
valid_count,
indices,
batch_size,
num_anchors,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
zero,
one,
):
"""Hybrid routing for non-maximum suppression.
Parameters
----------
data: tvm.te.Tensor or numpy NDArray
Bounding boxes with class and score. 3-D tensor with shape
[batch_size, num_anchors, 6]. It could be the second output
out_tensor of get_valid_counts.
sorted_index : tvm.te.Tensor or numpy NDArray
Bounding box indexes sorted by score, with shape
[batch_size, num_anchors].
valid_count : tvm.te.Tensor or numpy NDArray
1-D tensor for valid number of boxes. It could be the output
valid_count of get_valid_counts.
indices : tvm.te.Tensor or numpy.NDArray
indices in original tensor, with shape [batch_size, num_anchors],
represents the index of box in original data. It could be the third
output out_indices of get_valid_counts. The values in the second
dimension are like the output of arange(num_anchors) if get_valid_counts
is not used before non_max_suppression.
batch_size: tvm.tir.IntImm or tvm.tir.Var
Batch size. We need to pass it in since hybrid script doesn't support
binding variable to symbolic dim.
num_anchors: tvm.tir.IntImm or tvm.tir.Var
The number of anchors.
max_output_size : tvm.te.Tensor
Max number of output valid boxes for each instance.
Return all valid boxes if max_output_size < 0.
iou_threshold : tvm.te.Tensor
Overlapping(IoU) threshold to suppress object with smaller score.
force_suppress : tvm.tir.const
Whether to suppress all detections regardless of class_id.
top_k : tvm.tir.const
Keep maximum top k detections before nms, -1 for no limit.
coord_start : tvm.tir.const
Start index of the consecutive 4 coordinates.
score_index: tvm.tir.const
Index of the scores/confidence of boxes.
id_index : tvm.tir.const
index of the class categories, -1 to disable.
return_indices : tvm.tir.const
Whether to return box indices in input data.
zero: tvm.tir.const
Constant zero with the same dtype as data.
one: tvm.tir.const
Constant one with the same dtype as data.
Returns
-------
output : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, 6]
or [batch_size, num_anchors, 5].
box_indices: tvm.te.Tensor
2-D tensor with shape [batch_size, num_anchors].
"""
box_data_length = data.shape[2]
# box_indices is the expected indices of boxes
box_indices = output_tensor((batch_size, num_anchors), sorted_index.dtype)
output = output_tensor(
(
batch_size,
num_anchors,
box_data_length,
),
data.dtype,
)
for i in range(batch_size):
if iou_threshold > 0:
if valid_count[i] > 0:
# Reorder output
nkeep = valid_count[i]
if 0 < top_k < nkeep:
nkeep = top_k
for j in parallel(nkeep):
for k in range(box_data_length):
output[i, j, k] = data[i, sorted_index[i, j], k]
box_indices[i, j] = sorted_index[i, j]
if 0 < top_k < valid_count[i]:
for j in parallel(valid_count[i] - nkeep):
for k in range(box_data_length):
output[i, j + nkeep, k] = -one
box_indices[i, j + nkeep] = -1
# Apply nms
box_start_idx = coord_start
batch_idx = i
num_valid_boxes = 0
for j in range(valid_count[i]):
if num_valid_boxes == max_output_size:
for k in range(box_data_length):
output[i, j, k] = -one
box_indices[i, j] = -1
elif output[i, j, score_index] > 0:
box_a_idx = j
is_valid_box = 1
# a_l: left, a_t: top, a_r: right, a_b: bottom
a_l = min(
output[batch_idx, box_a_idx, box_start_idx],
output[batch_idx, box_a_idx, box_start_idx + 2],
)
a_t = min(
output[batch_idx, box_a_idx, box_start_idx + 1],
output[batch_idx, box_a_idx, box_start_idx + 3],
)
a_r = max(
output[batch_idx, box_a_idx, box_start_idx],
output[batch_idx, box_a_idx, box_start_idx + 2],
)
a_b = max(
output[batch_idx, box_a_idx, box_start_idx + 1],
output[batch_idx, box_a_idx, box_start_idx + 3],
)
# check if current box j is valid by calculating iou with
# all existing valid boxes
for k in range(j):
check_iou = 0
if (
is_valid_box == 1
and k < j
and output[i, k, score_index] > 0
and (id_index < 0 or output[i, k, id_index] >= 0)
):
if force_suppress:
check_iou = 1
elif id_index < 0 or output[i, j, id_index] == output[i, k, id_index]:
check_iou = 1
if check_iou > 0:
box_b_idx = k
# b_l: left, b_t: top, b_r: right, b_b: bottom
b_l = min(
output[batch_idx, box_b_idx, box_start_idx],
output[batch_idx, box_b_idx, box_start_idx + 2],
)
b_t = min(
output[batch_idx, box_b_idx, box_start_idx + 1],
output[batch_idx, box_b_idx, box_start_idx + 3],
)
b_r = max(
output[batch_idx, box_b_idx, box_start_idx],
output[batch_idx, box_b_idx, box_start_idx + 2],
)
b_b = max(
output[batch_idx, box_b_idx, box_start_idx + 1],
output[batch_idx, box_b_idx, box_start_idx + 3],
)
# Overlapping width and height
w = max(zero, min(a_r, b_r) - max(a_l, b_l))
h = max(zero, min(a_b, b_b) - max(a_t, b_t))
# Overlapping area
area = h * w
# total area of the figure formed by box a and box b
# except for overlapping area
u = (a_r - a_l) * (a_b - a_t) + (b_r - b_l) * (b_b - b_t) - area
# get the iou
iou = zero if u <= zero else area / u
if iou >= iou_threshold:
is_valid_box = 0
if is_valid_box == 0:
for k in range(box_data_length):
output[i, j, k] = -one
box_indices[i, j] = -1
else:
num_valid_boxes += 1
else:
for j in parallel(valid_count[i]):
for k in range(box_data_length):
output[i, j, k] = data[i, j, k]
box_indices[i, j] = j
# Set invalid entry to be -1
for j in parallel(num_anchors - valid_count[i]):
for k in range(box_data_length):
output[i, j + valid_count[i], k] = -one
box_indices[i, j + valid_count[i]] = -1
if return_indices:
for j in range(valid_count[i]):
idx = box_indices[i, j]
if box_indices[i, j] >= 0:
box_indices[i, j] = indices[i, idx]
return output, box_indices
@tvm.target.generic_func
def non_max_suppression(
data,
valid_count,
indices,
max_output_size=-1,
iou_threshold=0.5,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
):
"""Non-maximum suppression operator for object detection.
Parameters
----------
data : tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, 6] or [batch_size, num_anchors, 5].
valid_count : tvm.te.Tensor
1-D tensor for valid number of boxes.
indices : tvm.te.Tensor
2-D tensor with shape [batch_size, num_anchors].
max_output_size : optional, int or tvm.te.Tensor
Max number of output valid boxes for each instance.
Return all valid boxes if the value of max_output_size is less than 0.
iou_threshold : optional, float or tvm.te.Tensor
Non-maximum suppression threshold.
force_suppress : optional, boolean
Whether to suppress all detections regardless of class_id.
top_k : optional, int
Keep maximum top k detections before nms, -1 for no limit.
coord_start : required, int
Start index of the consecutive 4 coordinates.
score_index: optional, int
Index of the scores/confidence of boxes.
id_index : optional, int
index of the class categories, -1 to disable.
return_indices : optional, boolean
Whether to return box indices in input data.
invalid_to_bottom : optional, boolean
Whether to move all valid bounding boxes to the top.
Returns
-------
out : tvm.te.Tensor or tuple of tvm.te.Tensor
3-D tensor with shape [batch_size, num_anchors, 6]
or [batch_size, num_anchors, 5]. Out is a tuple of tvm.te.Tensor
if return_indices is True, the Tensor in the tuple is 2-D tensor
with shape [batch_size, num_anchors] and shape
[batch_size, num_valid_anchors] respectively.
Example
--------
.. code-block:: python
# An example to use non_max_suppression
dshape = (1, 5, 6)
data = te.placeholder(dshape, name="data")
        valid_count = te.placeholder((dshape[0],), dtype="int32", name="valid_count")
        indices = te.placeholder((dshape[0], dshape[1]), dtype="int32", name="indices")
iou_threshold = 0.7
force_suppress = True
top_k = -1
out = non_max_suppression(data, valid_count, indices, iou_threshold=iou_threshold,
force_suppress=force_suppress, top_k=top_k)
        np_data = np.random.uniform(size=dshape)
np_valid_count = np.array([4])
s = topi.generic.schedule_nms(out)
f = tvm.build(s, [data, valid_count, out], "llvm")
dev = tvm.cpu()
tvm_data = tvm.nd.array(np_data, dev)
tvm_valid_count = tvm.nd.array(np_valid_count, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
f(tvm_data, tvm_valid_count, tvm_out)
"""
batch_size = data.shape[0]
num_anchors = data.shape[1]
if isinstance(max_output_size, int):
max_output_size = tvm.tir.const(max_output_size, dtype="int32")
if isinstance(iou_threshold, float):
iou_threshold = tvm.tir.const(iou_threshold, dtype=data.dtype)
score_axis = score_index
score_shape = (batch_size, num_anchors)
score_tensor = te.compute(score_shape, lambda i, j: data[i, j, score_axis])
sort_tensor = argsort(score_tensor, valid_count=valid_count, axis=1, is_ascend=False)
out, box_indices = hybrid_nms(
data,
sort_tensor,
valid_count,
indices,
batch_size,
num_anchors,
max_output_size,
iou_threshold,
tvm.tir.const(force_suppress, dtype="bool"),
tvm.tir.const(top_k, dtype="int32"),
tvm.tir.const(coord_start, dtype="int32"),
tvm.tir.const(score_index, dtype="int32"),
tvm.tir.const(id_index, dtype="int32"),
tvm.tir.const(return_indices, dtype="bool"),
zero=tvm.tir.const(0, dtype=data.dtype),
one=tvm.tir.const(1, dtype=data.dtype),
)
if return_indices:
return hybrid_rearrange_indices_out(
box_indices,
one=tvm.tir.const(1, dtype="int32"),
batch_size=batch_size,
num_anchors=num_anchors,
)
if invalid_to_bottom:
out = hybrid_rearrange_box_out(
out,
one=tvm.tir.const(1, dtype=data.dtype),
batch_size=batch_size,
num_anchors=num_anchors,
)
return out
def _nms_loop(
ib,
batch_size,
top_k,
iou_threshold,
max_output_size,
valid_count,
on_new_valid_box_func,
on_new_invalidated_box_func,
needs_bbox_check_func,
calc_overlap_func,
out_scores,
num_valid_boxes,
):
def nms_inner_loop(ib, i, j, nkeep, num_valid_boxes_local):
# The box j is valid, invalidate other boxes that overlap with j above iou_threshold
on_new_valid_box_func(ib, 0, num_valid_boxes_local[0], i, j)
num_valid_boxes_local[0] += 1
num_boxes_to_check = nkeep - (j + 1)
with ib.for_range(0, num_boxes_to_check, name="_k", kind="parallel") as _k:
k = j + 1 + _k
with ib.if_scope(
tvm.tir.all(
k < nkeep,
out_scores[i, k] > 0, # is the box k still valid?
needs_bbox_check_func(i, j, k),
)
):
iou = calc_overlap_func(i, j, k)
with ib.if_scope(iou >= iou_threshold):
# invalidate the box k
out_scores[i, k] = -1.0
on_new_invalidated_box_func(i, k)
with ib.for_range(0, batch_size, name="i") as i:
nkeep = if_then_else(tvm.tir.all(top_k > 0, top_k < valid_count[i]), top_k, valid_count[i])
max_output_size = if_then_else(max_output_size > 0, max_output_size, nkeep)
with ib.if_scope(tvm.tir.all(iou_threshold > 0, valid_count[i] > 0)):
num_valid_boxes_local = ib.allocate(
"int32", (1,), name="num_valid_boxes_local", scope="local"
)
box_idx = ib.allocate("int32", (1,), name="box_idx", scope="local")
num_valid_boxes_local[0] = 0
box_idx[0] = 0
# Apply nms
# No need to do more iteration if we have already reached max_output_size boxes
with ib.while_loop(
tvm.tir.all(box_idx[0] < nkeep, num_valid_boxes_local[0] < max_output_size)
):
# Proceed to the inner loop if the box with id box_idx is still valid
with ib.if_scope(out_scores[i, box_idx[0]] > -1.0):
nms_inner_loop(ib, i, box_idx[0], nkeep, num_valid_boxes_local)
box_idx[0] += 1
num_valid_boxes[i] = num_valid_boxes_local[0]
with ib.else_scope():
num_valid_boxes[i] = 0
return ib.get()
def _get_valid_box_count(scores, score_threshold):
batch_classes, num_boxes = scores.shape
def searchsorted_ir(scores, valid_count):
ib = tvm.tir.ir_builder.create()
scores = ib.buffer_ptr(scores)
valid_count = ib.buffer_ptr(valid_count)
with ib.for_range(0, batch_classes, name="i", kind="parallel") as i:
binary_search(ib, i, num_boxes, scores, score_threshold, valid_count)
return ib.get()
scores_buf = tvm.tir.decl_buffer(scores.shape, scores.dtype, "scores_buf", data_alignment=8)
return te.extern(
[(batch_classes,)],
[scores],
lambda ins, outs: searchsorted_ir(ins[0], outs[0]),
dtype=["int32"],
in_buffers=[scores_buf],
name="searchsorted",
tag="searchsorted",
)
def _collect_selected_indices_ir(num_class, selected_indices, num_detections, row_offsets, out):
batch_classes, _ = selected_indices.shape
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
out = ib.buffer_ptr(out)
with ib.for_range(0, batch_classes, name="i", kind="parallel") as i:
i = cast(i, "int64")
batch_id = i // num_class
class_id = i % num_class
with ib.for_range(0, num_detections[i], name="j") as j:
out[row_offsets[i] + j, 0] = batch_id
out[row_offsets[i] + j, 1] = class_id
out[row_offsets[i] + j, 2] = cast(selected_indices[i, j], "int64")
return ib.get()
def _collect_selected_indices_and_scores_ir(
selected_indices,
selected_scores,
num_detections,
row_offsets,
num_total_detections,
collected_indices,
collected_scores,
):
batch_size, num_class = row_offsets.shape
num_boxes = selected_indices.shape[1]
ib = tvm.tir.ir_builder.create()
selected_indices = ib.buffer_ptr(selected_indices)
selected_scores = ib.buffer_ptr(selected_scores)
num_detections = ib.buffer_ptr(num_detections)
row_offsets = ib.buffer_ptr(row_offsets)
num_total_detections = ib.buffer_ptr(num_total_detections)
collected_indices = ib.buffer_ptr(collected_indices)
collected_scores = ib.buffer_ptr(collected_scores)
zero = cast(0, "int64")
with ib.for_range(0, batch_size * num_class, name="i", kind="parallel") as i:
i = cast(i, "int64")
batch_id = i // num_class
class_id = i % num_class
with ib.for_range(0, num_boxes, name="j") as j:
with ib.if_scope(j < num_detections[batch_id, class_id]):
offset = row_offsets[batch_id, class_id] + j
collected_indices[batch_id, offset, 0] = class_id
collected_indices[batch_id, offset, 1] = cast(selected_indices[i, j], "int64")
collected_scores[batch_id, offset] = selected_scores[i, j]
with ib.else_scope():
offset = (
num_total_detections[batch_id]
+ class_id * num_boxes
- row_offsets[batch_id, class_id]
+ j
- num_detections[batch_id, class_id]
)
collected_indices[batch_id, offset, 0] = zero
collected_indices[batch_id, offset, 1] = zero
collected_scores[batch_id, offset] = 0.0
return ib.get()
def all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
output_format="onnx",
):
"""Non-maximum suppression operator for object detection, corresponding to ONNX
NonMaxSuppression and TensorFlow combined_non_max_suppression.
NMS is performed for each class separately.
Parameters
----------
boxes : tvm.te.Tensor
3-D tensor with shape (batch_size, num_boxes, 4)
scores: tvm.te.Tensor
3-D tensor with shape (batch_size, num_classes, num_boxes)
max_output_boxes_per_class : int or tvm.te.Tensor, optional
        The maximum number of output selected boxes per class
    iou_threshold : float or tvm.te.Tensor, optional
IoU test threshold
score_threshold : float or tvm.te.Tensor, optional
Score threshold to filter out low score boxes early
output_format : str, optional
"onnx" or "tensorflow", see below.
Returns
-------
out : list of tvm.te.Tensor
If `output_format` is "onnx", the output is two tensors. The first is `indices` of size
        `(batch_size * num_class * num_boxes, 3)` and the second is a scalar tensor
`num_total_detection` of shape `(1,)` representing the total number of selected
boxes. The three values in `indices` encode batch, class, and box indices.
Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come
        first, in descending order of scores, followed by boxes from batch 0, class 1 etc. Out of
        `batch_size * num_class * num_boxes` rows of indices, only the first `num_total_detection`
rows are valid.
If `output_format` is "tensorflow", the output is three tensors, the first
        is `indices` of size `(batch_size, num_class * num_boxes, 2)`, the second is `scores` of
size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size
`(batch_size,)` representing the total number of selected boxes per batch. The two values
in `indices` encode class and box indices. Of num_class * num_boxes boxes in `indices` at
batch b, only the first `num_total_detection[b]` entries are valid. The second axis of
`indices` and `scores` are sorted within each class by box scores, but not across classes.
So the box indices and scores for the class 0 come first in a sorted order, followed by
the class 1 etc.
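    Examples
    --------
    A minimal usage sketch (names and shapes here are assumptions, not part of the API):
    .. code-block:: python
        boxes = te.placeholder((1, 100, 4), "float32", name="boxes")
        scores = te.placeholder((1, 5, 100), "float32", name="scores")
        # keep at most 10 boxes per class, IoU threshold 0.5, score threshold 0.05
        out = all_class_non_max_suppression(boxes, scores, 10, 0.5, 0.05)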
"""
batch, num_class, num_boxes = scores.shape
scores = reshape(scores, (batch * num_class, num_boxes))
sorted_indices = argsort(scores, axis=1, is_ascend=False, dtype="int32")
sorted_scores = gather(scores, 1, sorted_indices)
valid_count = _get_valid_box_count(sorted_scores, score_threshold)
selected_indices, selected_scores, num_detections = run_all_class_nms(
boxes,
sorted_scores,
sorted_indices,
valid_count,
max_output_boxes_per_class,
iou_threshold,
_nms_loop,
return_scores=(output_format == "tensorflow"),
)
if output_format == "onnx":
row_offsets = cumsum(num_detections, exclusive=True, dtype="int64")
num_total_detections = reduction.sum(cast(num_detections, "int64"), axis=1)
selected_indices = collect_selected_indices(
num_class, selected_indices, num_detections, row_offsets, _collect_selected_indices_ir
)
return [selected_indices, num_total_detections]
num_detections_per_batch = reshape(num_detections, (batch, num_class))
row_offsets = cumsum(num_detections_per_batch, exclusive=True, dtype="int64", axis=1)
num_total_detections = reduction.sum(cast(num_detections_per_batch, "int64"), axis=1)
selected_indices, selected_scores = collect_selected_indices_and_scores(
selected_indices,
selected_scores,
num_detections_per_batch,
row_offsets,
num_total_detections,
_collect_selected_indices_and_scores_ir,
)
return [selected_indices, selected_scores, num_total_detections]
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/nms_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common utilities used in Non-maximum suppression operators"""
import tvm
from tvm import te
def _get_boundaries(output, box_idx):
l = tvm.te.min(
output[box_idx],
output[box_idx + 2],
)
t = tvm.te.min(
output[box_idx + 1],
output[box_idx + 3],
)
r = tvm.te.max(
output[box_idx],
output[box_idx + 2],
)
b = tvm.te.max(
output[box_idx + 1],
output[box_idx + 3],
)
return l, t, r, b
def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
"""Calculate overlap of two boxes."""
a_l, a_t, a_r, a_b = _get_boundaries(out_tensor, box_a_idx)
b_l, b_t, b_r, b_b = _get_boundaries(out_tensor, box_b_idx)
# Overlapping width and height
w = tvm.te.max(0.0, tvm.te.min(a_r, b_r) - tvm.te.max(a_l, b_l))
h = tvm.te.max(0.0, tvm.te.min(a_b, b_b) - tvm.te.max(a_t, b_t))
# Overlapping area
area = h * w
# total area of the figure formed by box a and box b
# except for overlapping area
u = (a_r - a_l) * (a_b - a_t) + (b_r - b_l) * (b_b - b_t) - area
return tvm.tir.Select(u <= 0.0, 0.0, area / u)
def binary_search(ib, y, num_boxes, scores, score_threshold, out):
"""Binary search for score_threshold on scores sorted in descending order"""
lo = ib.allocate("int32", (1,), name="lo", scope="local")
hi = ib.allocate("int32", (1,), name="hi", scope="local")
lo[0] = 0
hi[0] = num_boxes
with ib.while_loop(lo[0] < hi[0]):
mid = (hi[0] + lo[0]) >> 1
with ib.if_scope(scores[y, mid] > score_threshold):
lo[0] = mid + 1
with ib.else_scope():
hi[0] = mid
out[y] = lo[0]
def collect_selected_indices(num_class, selected_indices, num_detections, row_offsets, ir):
"""Collect selected indices from the core NMS loop into one linear output
Parameters
----------
num_class : int
selected_indices: tvm.te.Tensor
2-D tensor with shape (batch_size * num_classes, num_boxes), representing the indices
of selected boxes by the core NMS loop.
    num_detections : tvm.te.Tensor
        1-D tensor with shape (batch_size * num_classes,), representing
        the number of boxes selected by the core NMS loop, per batch and class
    row_offsets : tvm.te.Tensor
        1-D tensor with shape (batch_size * num_classes,), which should be the exclusive scan
        of num_detections
ir : function
A function to generate IR for CPU or GPU, see its usage in vision/nms.py and cuda/nms.py
Returns
-------
out : tvm.te.Tensor
        The output is indices of size (batch_size * num_class * num_boxes, 3).
        Rows of indices are ordered such that selected boxes from batch 0, class 0 come
        first, in descending order of scores, followed by boxes from batch 0, class 1 etc.
"""
batch_class, num_boxes = selected_indices.shape
return te.extern(
[(batch_class * num_boxes, 3)],
[selected_indices, num_detections, row_offsets],
lambda ins, outs: ir(num_class, ins[0], ins[1], ins[2], outs[0]),
dtype=["int64"],
name="collect_indices",
tag="collect_indices",
)
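# A small worked example of how num_detections and row_offsets linearize the
# per-(batch, class) selections; the numbers are illustrative only:
#
#     batch_size = 1, num_class = 3, num_detections = [2, 0, 1]
#     row_offsets = exclusive_scan(num_detections) = [0, 2, 2]
#     # class-0 boxes fill output rows 0..1 and the class-2 box fills row 2,
#     # each row holding a (batch_index, class_id, box_index) triple.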
def collect_selected_indices_and_scores(
selected_indices, selected_scores, num_detections, row_offsets, num_total_detections, ir
):
"""Collect selected indices and scores from the core NMS loop into one linear output
Parameters
----------
    selected_indices : tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes), representing the indices
        of selected boxes by the core NMS loop.
    selected_scores : tvm.te.Tensor
        2-D tensor with shape (batch_size * num_classes, num_boxes), representing the scores
        of selected boxes by the core NMS loop.
    num_detections : tvm.te.Tensor
        2-D tensor with shape (batch_size, num_classes), representing
        the number of boxes selected by the core NMS loop, per batch and class
    row_offsets : tvm.te.Tensor
        2-D tensor with shape (batch_size, num_classes), which should be the exclusive scan
        of num_detections along axis 1
    num_total_detections : tvm.te.Tensor
        1-D tensor with shape (batch_size,), representing the total number of selected boxes
        per batch, summed over classes
ir : function
A function to generate IR for CPU or GPU, see its usage in vision/nms.py and cuda/nms.py
Returns
-------
out : [tvm.te.Tensor, tvm.te.Tensor]
        The output is two tensors. The first is indices of size
        (batch_size, num_class * num_boxes, 2), and the second is scores of size
        (batch_size, num_class * num_boxes).
"""
batch_size, num_class = row_offsets.shape
num_boxes = selected_indices.shape[1]
return te.extern(
[(batch_size, num_class * num_boxes, 2), (batch_size, num_class * num_boxes)],
[selected_indices, selected_scores, num_detections, row_offsets, num_total_detections],
lambda ins, outs: ir(ins[0], ins[1], ins[2], ins[3], ins[4], outs[0], outs[1]),
dtype=["int64", "float32"],
name="collect_indices_and_scores",
tag="collect_indices_and_scores",
)
def _all_class_nms_ir(
boxes,
sorted_scores,
sorted_indices,
valid_count,
batch_class,
num_class,
num_anchors,
iou_threshold,
max_output_size_per_class,
box_indices,
selected_scores,
num_valid_boxes,
nms_loop,
):
ib = tvm.tir.ir_builder.create()
boxes = ib.buffer_ptr(boxes)
sorted_scores = ib.buffer_ptr(sorted_scores)
sorted_indices = ib.buffer_ptr(sorted_indices)
valid_count = ib.buffer_ptr(valid_count)
box_indices = ib.buffer_ptr(box_indices)
num_valid_boxes = ib.buffer_ptr(num_valid_boxes)
if selected_scores is not None:
selected_scores = ib.buffer_ptr(selected_scores)
if isinstance(iou_threshold, float):
iou_threshold = tvm.tir.FloatImm("float32", iou_threshold)
if isinstance(max_output_size_per_class, int):
max_output_size_per_class = tvm.tir.const(max_output_size_per_class)
def calc_overlap(i, j, k):
offset_j = sorted_indices[i, j] * 4
offset_k = sorted_indices[i, k] * 4
batch_id = i // num_class
base_bbox_idx = batch_id * num_anchors * 4
return calculate_overlap(
boxes,
base_bbox_idx + offset_j,
base_bbox_idx + offset_k,
)
def on_new_valid_box(ib, tid, num_current_valid_box, i, j):
with ib.if_scope(tid + 0 == 0):
box_indices[i, num_current_valid_box] = sorted_indices[i, j]
if selected_scores is not None:
selected_scores[i, num_current_valid_box] = sorted_scores[i, j]
def on_new_invalidated_box(*_):
pass
def needs_bbox_check(*_):
return tvm.tir.const(True)
return nms_loop(
ib,
batch_class,
tvm.tir.IntImm("int32", -1), # top_k
iou_threshold,
max_output_size_per_class,
valid_count,
on_new_valid_box,
on_new_invalidated_box,
needs_bbox_check,
calc_overlap,
sorted_scores,
num_valid_boxes,
)
def run_all_class_nms(
boxes,
sorted_scores,
sorted_indices,
valid_count,
max_output_size_per_class,
iou_threshold,
nms_loop,
return_scores=False,
):
"""The core all class NMS routine
Parameters
----------
boxes : tvm.te.Tensor
3-D tensor with shape (batch_size, num_boxes, 4)
sorted_scores: tvm.te.Tensor
2-D tensor with shape (batch_size * num_classes, num_boxes)
One of the outputs from argsort
sorted_indices: tvm.te.Tensor
2-D tensor with shape (batch_size * num_classes, num_boxes)
The other output from argsort
valid_count: tvm.te.Tensor
1-D tensor with shape (batch_size * num_classes,), representing
the number of boxes whose score is above score_threshold, per batch and class
    max_output_size_per_class : int or tvm.te.Tensor, optional
        The maximum number of selected output boxes per class
    iou_threshold : float or tvm.te.Tensor, optional
        IoU test threshold
nms_loop : function
A core NMS loop, see its usage in vision/nms.py and cuda/nms.py
return_scores : bool, optional
Whether or not to return selected scores, needed by the tensorflow output format.
Returns
-------
out : a list of tvm.te.Tensor
The output is three tensors, the first and second are indices and scores of size
(batch_size * num_class, num_boxes), and the third is a tensor
num_selected_boxes of shape (batch_size * num_class,) representing the total number of
selected boxes per batch and class. If return_scores is False, the second output is
None.
"""
batch, num_boxes, _ = boxes.shape
batch_class = sorted_scores.shape[0]
num_class = batch_class // batch
if return_scores is False:
selected_indices, num_detections = te.extern(
[(batch_class, num_boxes), (1, batch_class)],
[boxes, sorted_scores, sorted_indices, valid_count],
lambda ins, outs: _all_class_nms_ir(
ins[0], # boxes
ins[1], # sorted_scores
ins[2], # sorted_indices
ins[3], # valid_count
batch_class,
num_class,
num_boxes,
iou_threshold,
max_output_size_per_class,
outs[0], # box_indices
None, # scores
outs[1], # num_selected_boxes
nms_loop,
),
dtype=["int32", "int32"],
name="all_class_nms",
tag="all_class_nms",
)
return selected_indices, None, num_detections
return te.extern(
[(batch_class, num_boxes), (batch_class, num_boxes), (1, batch_class)],
[boxes, sorted_scores, sorted_indices, valid_count],
lambda ins, outs: _all_class_nms_ir(
ins[0], # boxes
ins[1], # sorted_scores
ins[2], # sorted_indices
ins[3], # valid_count
batch_class,
num_class,
num_boxes,
iou_threshold,
max_output_size_per_class,
outs[0], # box_indices
outs[1], # selected scores
outs[2], # num_selected_boxes
nms_loop,
),
dtype=["int32", "float32", "int32"],
name="all_class_nms",
tag="all_class_nms",
)
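# Shape summary for run_all_class_nms with illustrative numbers (batch=2,
# num_class=4, num_boxes=100), restating the docstring above:
#
#     boxes          : (2, 100, 4)
#     sorted_scores  : (8, 100)    # batch * num_class rows
#     sorted_indices : (8, 100)
#     valid_count    : (8,)
#
# The outputs are (8, 100) indices, (8, 100) scores (or None when
# return_scores is False), and a (1, 8) num_detections tensor, matching the
# extern declarations above.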
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/rcnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Faster R-CNN and Mask R-CNN operators"""
from .roi_align import *
from .roi_pool import *
from .proposal import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/rcnn/proposal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, singleton-comparison, bad-continuation
"""Proposal operator"""
import math
import tvm
from tvm import te
from ...utils import get_const_tuple, get_const_int
from ...sort import argsort
def generate_anchor(ratio, scale, base_size):
"""Generate anchor"""
w = h = float(base_size)
x_ctr = 0.5 * (w - 1.0)
y_ctr = 0.5 * (h - 1.0)
size = w * h
size_ratios = math.floor(size / ratio)
new_w = math.floor(math.sqrt(size_ratios) + 0.5) * scale
new_h = math.floor((new_w / scale * ratio) + 0.5) * scale
return (
x_ctr - 0.5 * (new_w - 1.0),
y_ctr - 0.5 * (new_h - 1.0),
x_ctr + 0.5 * (new_w - 1.0),
y_ctr + 0.5 * (new_h - 1.0),
)
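# A worked example of generate_anchor with illustrative values. For ratio=0.5,
# scale=8.0, base_size=16 (typical RPN settings):
#
#     x1, y1, x2, y2 = generate_anchor(0.5, 8.0, 16)
#     # size = 256, size_ratios = floor(256 / 0.5) = 512
#     # new_w = floor(sqrt(512) + 0.5) * 8 = 184, new_h = floor(11.5 + 0.5) * 8 = 96
#     # -> (-84.0, -40.0, 99.0, 55.0): a roughly 2:1 wide box centered at (7.5, 7.5)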
def reg_bbox(x1, y1, x2, y2, dx, dy, dw, dh):
"""Bounding box regression function"""
bbox_w = x2 - x1 + 1.0
bbox_h = y2 - y1 + 1.0
ctr_x = x1 + 0.5 * (bbox_w - 1.0)
ctr_y = y1 + 0.5 * (bbox_h - 1.0)
pred_ctr_x = dx * bbox_w + ctr_x
pred_ctr_y = dy * bbox_h + ctr_y
pred_w = te.exp(dw) * bbox_w
pred_h = te.exp(dh) * bbox_h
pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0)
pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0)
pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0)
pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0)
return pred_x1, pred_y1, pred_x2, pred_y2
def reg_iou(x1, y1, x2, y2, dx1, dy1, dx2, dy2):
"""Bounding box regression function"""
pred_x1 = x1 + dx1
pred_y1 = y1 + dy1
pred_x2 = x2 + dx2
pred_y2 = y2 + dy2
return pred_x1, pred_y1, pred_x2, pred_y2
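# Illustrative sketch, not used by the implementation: the reg_bbox decoding
# above in NumPy form, making the center/size parameterization explicit.
# reg_iou, selected when iou_loss=True, instead adds the deltas directly to
# the four corners:
#
#     import numpy as np
#     def decode(box, delta):
#         x1, y1, x2, y2 = box
#         dx, dy, dw, dh = delta
#         w, h = x2 - x1 + 1.0, y2 - y1 + 1.0
#         cx, cy = x1 + 0.5 * (w - 1.0), y1 + 0.5 * (h - 1.0)
#         pcx, pcy = dx * w + cx, dy * h + cy
#         pw, ph = np.exp(dw) * w, np.exp(dh) * h
#         return (pcx - 0.5 * (pw - 1.0), pcy - 0.5 * (ph - 1.0),
#                 pcx + 0.5 * (pw - 1.0), pcy + 0.5 * (ph - 1.0))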
def predict_bbox_ir(
cls_prob_buf,
bbox_pred_buf,
im_info_buf,
out_buf,
scales,
ratios,
feature_stride,
rpn_min_size,
iou_loss,
):
"""Predict bounding boxes based on anchors, scores and deltas.
Parameters
----------
cls_prob_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred_buf : tvm.te.schedule.Buffer
4-D with shape [batch, 4 * num_anchors, height, width]
im_info_buf : tvm.te.schedule.Buffer
2-D with shape [batch, 3]
out_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]
The last dimension is in format of [w_start, h_start, w_end, h_end, score]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field of each unit in the convolution layer of the RPN,
        for example the product of all strides prior to this layer.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_anchors, height, width = get_const_tuple(cls_prob_buf.shape)
num_anchors //= 2
ib = tvm.tir.ir_builder.create()
p_score = ib.buffer_ptr(cls_prob_buf)
p_delta = ib.buffer_ptr(bbox_pred_buf)
p_im_info = ib.buffer_ptr(im_info_buf)
p_out = ib.buffer_ptr(out_buf)
idxm = tvm.tir.indexmod
idxd = tvm.tir.indexdiv
with ib.for_range(0, batch * height * width) as tid:
w = idxm(tid, width)
h = idxm(idxd(tid, width), height)
b = idxd(idxd(tid, width), height)
for k in range(num_anchors):
out_index = tid * num_anchors + k
ratio = ratios[k // len(scales)]
scale = scales[k % len(scales)]
anchor = generate_anchor(ratio, scale, feature_stride)
im_height = p_im_info[b * 3]
im_width = p_im_info[b * 3 + 1]
x1 = anchor[0] + w * feature_stride
y1 = anchor[1] + h * feature_stride
x2 = anchor[2] + w * feature_stride
y2 = anchor[3] + h * feature_stride
delta = [
p_delta[((((b * num_anchors + k) * 4 + i) * height + h) * width + w)]
for i in range(4)
]
regression_func = reg_iou if iou_loss else reg_bbox
pred_x1, pred_y1, pred_x2, pred_y2 = regression_func(x1, y1, x2, y2, *delta)
pred_x1 = tvm.te.max(tvm.te.min(pred_x1, im_width - 1.0), 0.0)
pred_y1 = tvm.te.max(tvm.te.min(pred_y1, im_height - 1.0), 0.0)
pred_x2 = tvm.te.max(tvm.te.min(pred_x2, im_width - 1.0), 0.0)
pred_y2 = tvm.te.max(tvm.te.min(pred_y2, im_height - 1.0), 0.0)
real_height = (im_height / feature_stride).astype("int32")
real_width = (im_width / feature_stride).astype("int32")
bbox_w = pred_x2 - pred_x1 + 1.0
bbox_h = pred_y2 - pred_y1 + 1.0
min_size = p_im_info[b * 3 + 2] * rpn_min_size
pred_score = p_score[((b * num_anchors * 2 + num_anchors + k) * height + h) * width + w]
pred_score = tvm.tir.Select(
tvm.tir.any(h >= real_height, w >= real_width), -1.0, pred_score
)
p_out[out_index * 5 + 0] = pred_x1
p_out[out_index * 5 + 1] = pred_y1
p_out[out_index * 5 + 2] = pred_x2
p_out[out_index * 5 + 3] = pred_y2
p_out[out_index * 5 + 4] = pred_score
with ib.if_scope(tvm.tir.any(bbox_w < min_size, bbox_h < min_size)):
p_out[out_index * 5 + 0] -= min_size / 2.0
p_out[out_index * 5 + 1] -= min_size / 2.0
p_out[out_index * 5 + 2] += min_size / 2.0
p_out[out_index * 5 + 3] += min_size / 2.0
p_out[out_index * 5 + 4] = -1.0
return ib.get()
def argsort_ir(data_buf, out_index_buf):
"""Batched odd-even transposition sort.
Parameters
----------
data_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]
out_index_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Indices of data in sorted order.
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox = get_const_tuple(data_buf.shape)
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(data_buf)
index_out = ib.buffer_ptr(out_index_buf)
temp_data = ib.allocate("float32", (1,), name="temp_data", scope="local")
temp_index = ib.allocate("int32", (1,), name="temp_index", scope="local")
idxm = tvm.tir.indexmod
with ib.for_range(0, batch, kind="unroll") as b:
start = b * num_bbox
for i in range(2):
with ib.for_range(0, (num_bbox + 1) // 2) as tid:
bbox_id = tid * 2 + i
with ib.if_scope(bbox_id < num_bbox):
index_out[start + bbox_id] = bbox_id
with ib.for_range(0, num_bbox) as k:
with ib.for_range(0, (num_bbox + 1) // 2) as tid:
offset = start + 2 * tid + idxm(k, 2)
with ib.if_scope(
tvm.tir.all(offset + 1 < num_bbox, p_data[offset] < p_data[offset + 1])
):
temp_data[0] = p_data[offset]
p_data[offset] = p_data[offset + 1]
p_data[offset + 1] = temp_data[0]
temp_index[0] = index_out[offset]
index_out[offset] = index_out[offset + 1]
index_out[offset + 1] = temp_index[0]
return ib.get()
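# Illustrative sketch, not used by the implementation: the same batched
# odd-even transposition sort in plain Python (descending order, tracking
# original indices), mirroring the offset = 2 * tid + (k % 2) pattern above:
#
#     def odd_even_sort(data):
#         idx = list(range(len(data)))
#         for k in range(len(data)):
#             for off in range(k % 2, len(data) - 1, 2):
#                 if data[off] < data[off + 1]:
#                     data[off], data[off + 1] = data[off + 1], data[off]
#                     idx[off], idx[off + 1] = idx[off + 1], idx[off]
#         return idx
#
#     odd_even_sort([0.1, 0.9, 0.5])  # -> [1, 2, 0]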
def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
"""Non-maximum suppression.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
nms_threshold : float
Non-maximum suppression threshold.
Returns
-------
stmt : Stmt
The result IR statement.
"""
def calculate_overlap(out_tensor, box_a_idx, box_b_idx):
"""Calculate overlap of two boxes."""
w = tvm.te.max(
0.0,
tvm.te.min(out_tensor[box_a_idx + 2], out_tensor[box_b_idx + 2])
- tvm.te.max(out_tensor[box_a_idx], out_tensor[box_b_idx])
+ 1.0,
)
h = tvm.te.max(
0.0,
tvm.te.min(out_tensor[box_a_idx + 3], out_tensor[box_b_idx + 3])
- tvm.te.max(out_tensor[box_a_idx + 1], out_tensor[box_b_idx + 1])
+ 1.0,
)
i = w * h
u = (
(out_tensor[box_a_idx + 2] - out_tensor[box_a_idx] + 1.0)
* (out_tensor[box_a_idx + 3] - out_tensor[box_a_idx + 1] + 1.0)
+ (out_tensor[box_b_idx + 2] - out_tensor[box_b_idx] + 1.0)
* (out_tensor[box_b_idx + 3] - out_tensor[box_b_idx + 1] + 1.0)
- i
)
return i / u
batch, num_bbox = get_const_tuple(out_buf.shape)
ib = tvm.tir.ir_builder.create()
p_data = ib.buffer_ptr(sorted_bbox_buf)
p_out = ib.buffer_ptr(out_buf)
with ib.for_range(0, batch, kind="unroll", name="n") as b:
base_idx = b * num_bbox
for i in range(num_bbox):
p_out[base_idx + i] = False
with ib.for_range(0, num_bbox - 1) as l:
with ib.for_range(0, num_bbox) as i:
with ib.if_scope(tvm.tir.all(i < num_bbox, i > l, p_out[base_idx + l] == False)):
iou = calculate_overlap(p_data, (base_idx + l) * 5, (base_idx + i) * 5)
with ib.if_scope(iou > nms_threshold):
p_out[base_idx + i] = True
return ib.get()
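# Illustrative sketch, not used by the implementation: the suppression logic
# above restated in plain Python over boxes already sorted by score (True in
# the mask means "remove"); iou stands for the +1.0 pixel-convention overlap
# defined in calculate_overlap above:
#
#     def nms_mask(boxes, iou, threshold):
#         removed = [False] * len(boxes)
#         for l in range(len(boxes) - 1):
#             if removed[l]:
#                 continue
#             for i in range(l + 1, len(boxes)):
#                 if iou(boxes[l], boxes[i]) > threshold:
#                     removed[i] = True
#         return removed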
def prepare_output_ir(sorted_bbox_buf, remove_mask_buf, out_buf):
"""Copy output after applying nms to continuous memory.
Parameters
----------
sorted_bbox_buf : tvm.te.schedule.Buffer
3-D with shape [batch, num_bbox, 5]. The last dimension is in format of
[w_start, h_start, w_end, h_end, score].
remove_mask_buf : tvm.te.schedule.Buffer
2-D with shape [batch, num_bbox]. Boolean mask of whether a bounding box should be removed.
out_buf : tvm.te.schedule.Buffer
2-D with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
Returns
-------
stmt : Stmt
The result IR statement.
"""
batch, num_bbox, _ = get_const_tuple(sorted_bbox_buf.shape)
rpn_post_nms_top_n = get_const_int(out_buf.shape[0]) // batch
ib = tvm.tir.ir_builder.create()
i = ib.allocate("int32", (batch,), "i", scope="local")
p_sorted_bbox = ib.buffer_ptr(sorted_bbox_buf)
p_remove = ib.buffer_ptr(remove_mask_buf)
p_out = ib.buffer_ptr(out_buf)
nkeep = ib.allocate("int32", (batch,), "nkeep", scope="local")
with ib.for_range(0, batch) as b:
nkeep[b] = 0
i[b] = 0
with ib.for_range(0, num_bbox) as j:
with ib.for_range(0, batch) as b:
with ib.if_scope(p_remove[b * num_bbox + j] == False):
nkeep[b] += 1
with ib.for_range(0, batch) as b:
with ib.if_scope(nkeep[b] > 0):
with ib.for_range(
0, te.ceil(tvm.tir.const(rpn_post_nms_top_n, "float32") / nkeep[b]).astype("int32")
):
with ib.for_range(0, num_bbox) as j:
offset_j = (b * num_bbox + j) * 5
offset_i = (b * rpn_post_nms_top_n + i[b]) * 5
with ib.if_scope(
tvm.tir.all(
i[b] < rpn_post_nms_top_n, p_remove[(b * num_bbox + j)] == False
)
):
p_out[offset_i] = tvm.tir.Cast("float32", b)
with ib.for_range(0, 4, kind="unroll") as k:
p_out[offset_i + k + 1] = p_sorted_bbox[offset_j + k]
i[b] = i[b] + 1
body = ib.get()
return body
def proposal(
cls_prob,
bbox_pred,
im_info,
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
):
"""Proposal operator.
Parameters
----------
cls_prob : tvm.te.Tensor
4-D with shape [batch, 2 * num_anchors, height, width]
bbox_pred : tvm.te.Tensor
4-D with shape [batch, 4 * num_anchors, height, width]
im_info : tvm.te.Tensor
2-D with shape [batch, 3]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
    feature_stride : int
        The size of the receptive field of each unit in the convolution layer of the RPN,
        for example the product of all strides prior to this layer.
threshold : float
Non-maximum suppression threshold.
    rpn_pre_nms_top_n : int
        Number of top scoring boxes to keep before applying NMS; -1 to use all boxes.
rpn_post_nms_top_n : int
Number of top scoring boxes to keep after applying NMS to RPN proposals.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
out : tvm.te.Tensor
2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
"""
# pylint: disable=unused-argument
batch, _, height, width = get_const_tuple(cls_prob.shape)
num_anchors = len(scales) * len(ratios)
num_bbox = height * width * num_anchors
rpn_pre_nms_top_n = min(rpn_pre_nms_top_n, num_bbox) if rpn_pre_nms_top_n > 0 else num_bbox
bbox = te.extern(
(batch, num_bbox, 5),
[cls_prob, bbox_pred, im_info],
lambda ins, outs: predict_bbox_ir(
ins[0], ins[1], ins[2], outs[0], scales, ratios, feature_stride, rpn_min_size, iou_loss
),
dtype=bbox_pred.dtype,
)
score = te.compute((batch, num_bbox), lambda b, i: bbox[b, i, 4], tag="bbox_score")
valid_count_shape = (1,)
valid_count = te.compute(valid_count_shape, lambda i: num_bbox)
sorted_index = argsort(score, valid_count=valid_count, axis=1, is_ascend=False)
sorted_bbox = te.compute(
(batch, rpn_pre_nms_top_n, 5),
lambda b, i, j: bbox[b, sorted_index[b, i], j],
tag="sorted_bbox",
)
nms_remove_mask = te.extern(
(batch, rpn_pre_nms_top_n),
[sorted_bbox],
lambda ins, outs: nms_ir(ins[0], outs[0], threshold),
dtype="bool",
)
nms_out = te.extern(
(batch * rpn_post_nms_top_n, 5),
[sorted_bbox, nms_remove_mask],
lambda ins, outs: prepare_output_ir(ins[0], ins[1], outs[0]),
dtype=sorted_bbox.dtype,
)
return nms_out
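# An illustrative way to instantiate the proposal op (shapes only; the scale,
# ratio and stride values below are typical Faster R-CNN settings, not values
# mandated by this file):
#
#     from tvm import te
#     scales, ratios = (8, 16, 32), (0.5, 1, 2)  # 9 anchors per location
#     cls_prob = te.placeholder((1, 2 * 9, 14, 14), "float32")
#     bbox_pred = te.placeholder((1, 4 * 9, 14, 14), "float32")
#     im_info = te.placeholder((1, 3), "float32")
#     out = proposal(cls_prob, bbox_pred, im_info, scales, ratios,
#                    feature_stride=16, threshold=0.7, rpn_pre_nms_top_n=6000,
#                    rpn_post_nms_top_n=300, rpn_min_size=16, iou_loss=False)
#     # out: (1 * 300, 5) rows of [batch_index, w_start, h_start, w_end, h_end]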
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/rcnn/roi_align.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Roi align operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
from ...cpp.utils import bilinear_sample_nchw, bilinear_sample_nhwc
def _sample_common(
i,
c,
ph,
pw,
rois,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
dtype,
avg_mode,
bilinear_func,
):
roi = rois[i]
batch_index = roi[0].astype("int32")
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
roi_start_h *= spatial_scale
roi_end_h *= spatial_scale
roi_start_w *= spatial_scale
roi_end_w *= spatial_scale
# force malformed ROIs to be 1x1
roi_h = tvm.te.max(roi_end_h - roi_start_h, tvm.tir.const(1.0, dtype))
roi_w = tvm.te.max(roi_end_w - roi_start_w, tvm.tir.const(1.0, dtype))
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
if sample_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = tvm.tir.const(sample_ratio, "int32")
else:
roi_bin_grid_h = te.ceil(roi_h / pooled_size_h).astype("int32")
roi_bin_grid_w = te.ceil(roi_w / pooled_size_w).astype("int32")
count = roi_bin_grid_h * roi_bin_grid_w
rh = te.reduce_axis((0, roi_bin_grid_h), name="rh")
rw = te.reduce_axis((0, roi_bin_grid_w), name="rw")
roi_start_h += ph * bin_h
roi_start_w += pw * bin_w
if avg_mode:
return te.sum(
bilinear_func(
batch_index,
c,
roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
)
/ count,
axis=[rh, rw],
)
# max mode
return te.max(
bilinear_func(
batch_index,
c,
roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
),
axis=[rh, rw],
)
def roi_align_nchw(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
"""ROI align operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
rois : tvm.te.Tensor
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : int or list/tuple of two ints
output size, or [out_height, out_width]
spatial_scale : float
        Ratio of input feature map height (or width) to raw image height (or width). Equals the
        reciprocal of the total stride in convolutional layers, which should be in range (0.0, 1.0]
mode : int or str
There are two modes, average and max. For the average mode, you can pass b'avg' or 0, and
for the max mode, you can pass b'max' or 1.
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_roi, channel, pooled_size, pooled_size]
"""
avg_mode = mode in (b"avg", 0)
max_mode = mode in (b"max", 1)
assert avg_mode or max_mode, "Mode must be avg or max. Please pass in a valid mode."
dtype = rois.dtype
_, channel, height, width = get_const_tuple(data.shape)
num_roi, _ = get_const_tuple(rois.shape)
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
def _bilinear(i, c, y, x):
outside = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
y = tvm.te.min(tvm.te.max(y, 0.0), height - 1)
x = tvm.te.min(tvm.te.max(x, 0.0), width - 1)
val = bilinear_sample_nchw(data, (i, c, y, x), height - 1, width - 1)
return tvm.tir.if_then_else(outside, 0.0, val)
def _sample(i, c, ph, pw):
return _sample_common(
i,
c,
ph,
pw,
rois,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
dtype,
avg_mode,
_bilinear,
)
return te.compute(
(num_roi, channel, pooled_size_h, pooled_size_w), _sample, tag="pool,roi_align_nchw"
)
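# A numeric illustration of the adaptive sampling grid in _sample_common when
# sample_ratio <= 0 (values are illustrative):
#
#     # an roi of 21.0 x 14.0 (after spatial_scale), pooled to 7 x 7:
#     #   bin_h = 21.0 / 7 = 3.0, bin_w = 14.0 / 7 = 2.0
#     #   roi_bin_grid_h = ceil(21.0 / 7) = 3, roi_bin_grid_w = ceil(14.0 / 7) = 2
#     # each output bin then averages (or maxes) 3 * 2 = 6 bilinear samples taken
#     # at the centers of a 3 x 2 sub-grid inside the bin.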
def roi_align_nhwc(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
"""ROI align operator in NHWC layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, height, width, channel]
rois : tvm.te.Tensor
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : int or list/tuple of two ints
output size, or [out_height, out_width]
spatial_scale : float
        Ratio of input feature map height (or width) to raw image height (or width). Equals the
        reciprocal of the total stride in convolutional layers, which should be in range (0.0, 1.0]
mode : int or str
There are two modes, average and max. For the average mode, you can pass b'avg' or 0, and
for the max mode, you can pass b'max' or 1.
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_roi, pooled_size, pooled_size, channel]
"""
avg_mode = mode in (b"avg", 0)
max_mode = mode in (b"max", 1)
assert avg_mode or max_mode, "Mode must be avg or max. Please pass in a valid mode."
dtype = rois.dtype
_, height, width, channel = get_const_tuple(data.shape)
num_roi, _ = get_const_tuple(rois.shape)
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
def _bilinear(i, c, y, x):
outside = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
y = tvm.te.min(tvm.te.max(y, 0.0), height - 1)
x = tvm.te.min(tvm.te.max(x, 0.0), width - 1)
val = bilinear_sample_nhwc(data, (i, y, x, c), height - 1, width - 1)
return tvm.tir.if_then_else(outside, 0.0, val)
def _sample(i, ph, pw, c):
return _sample_common(
i,
c,
ph,
pw,
rois,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
dtype,
avg_mode,
_bilinear,
)
    return te.compute(
        (num_roi, pooled_size_h, pooled_size_w, channel), _sample, tag="pool,roi_align_nhwc"
    )
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/rcnn/roi_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""ROI pool operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
def roi_pool_nchw(data, rois, pooled_size, spatial_scale):
"""ROI pool operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
rois : tvm.te.Tensor
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : int or list/tuple of two ints
output size, or [out_height, out_width]
spatial_scale : float
        Ratio of input feature map height (or width) to raw image height (or width). Equals the
        reciprocal of the total stride in convolutional layers, which should be in range (0.0, 1.0]
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_roi, channel, pooled_size, pooled_size]
"""
dtype = rois.dtype
_, channel, height, width = get_const_tuple(data.shape)
num_roi, _ = get_const_tuple(rois.shape)
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
def _pool(i, c, ph, pw):
roi = rois[i]
batch_index = roi[0].astype("int32")
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
roi_start_h = te.round(roi_start_h * spatial_scale).astype("int32")
roi_start_w = te.round(roi_start_w * spatial_scale).astype("int32")
roi_end_h = te.round(roi_end_h * spatial_scale).astype("int32")
roi_end_w = te.round(roi_end_w * spatial_scale).astype("int32")
# force malformed ROIs to be 1x1
roi_h = tvm.te.max(roi_end_h - roi_start_h + 1, tvm.tir.const(1, "int32"))
roi_w = tvm.te.max(roi_end_w - roi_start_w + 1, tvm.tir.const(1, "int32"))
bin_h = roi_h.astype(dtype) / pooled_size_h
bin_w = roi_w.astype(dtype) / pooled_size_w
# use epsilon to prevent floating point precision loss in floor/ceil
epsilon = tvm.tir.const(0.00001, dtype)
hstart = te.floor(ph * bin_h + epsilon).astype("int32")
wstart = te.floor(pw * bin_w + epsilon).astype("int32")
hend = te.ceil((ph + 1) * bin_h - epsilon).astype("int32")
wend = te.ceil((pw + 1) * bin_w - epsilon).astype("int32")
hstart = tvm.te.min(tvm.te.max(hstart + roi_start_h, 0), height)
wstart = tvm.te.min(tvm.te.max(wstart + roi_start_w, 0), width)
hend = tvm.te.min(tvm.te.max(hend + roi_start_h, 0), height)
wend = tvm.te.min(tvm.te.max(wend + roi_start_w, 0), width)
non_empty = tvm.tir.all(hstart < hend, wstart < wend)
min_value = lambda dtype: tvm.tir.if_then_else(
non_empty, tvm.te.min_value(dtype), tvm.tir.const(0.0, dtype)
)
# pylint: disable=unnecessary-lambda
_max = te.comm_reducer(lambda x, y: tvm.te.max(x, y), min_value, name="max")
rh = te.reduce_axis((0, hend - hstart), "rh")
rw = te.reduce_axis((0, wend - wstart), "rw")
return _max(data[batch_index, c, hstart + rh, wstart + rw], axis=[rh, rw])
return te.compute((num_roi, channel, pooled_size_h, pooled_size_w), _pool, tag="pool,roi_pool")
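# A worked example of the bin-boundary arithmetic in _pool (illustrative
# numbers). For a 7-pixel-high roi pooled to 2 rows, bin_h = 3.5:
#
#     # ph = 0: hstart = floor(0.0 + eps) = 0, hend = ceil(3.5 - eps) = 4
#     # ph = 1: hstart = floor(3.5 + eps) = 3, hend = ceil(7.0 - eps) = 7
#     # adjacent bins share row 3; the epsilon keeps floor/ceil stable when
#     # ph * bin_h happens to be integral.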
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/reorg.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
REORG Operator
====================
Reorg operator, used in darknet.
"""
from __future__ import absolute_import as _abs
from .. import cpp
def reorg(data, stride):
"""Reorg forward operators.
Parameters
----------
    data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
stride : int
Stride value for reorganization
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return cpp.vision.reorg(data, stride)
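# An illustrative shape check for reorg (the element mapping itself lives in
# the C++ implementation behind cpp.vision.reorg):
#
#     from tvm import te
#     data = te.placeholder((1, 4, 8, 8), "float32")
#     out = reorg(data, stride=2)
#     # out.shape: (1, 16, 4, 4) -- channels grow by stride**2 while both
#     # spatial dimensions shrink by stride.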
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/ssd/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""VISION network operators"""
from __future__ import absolute_import as _abs
from .multibox import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/vision/ssd/multibox.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable
"""SSD multibox operators"""
import tvm
from tvm.te import hybrid
from tvm.tir import exp, sqrt
from tvm import topi
from ..nms import non_max_suppression
@hybrid.script
def hybrid_multibox_prior(data, sizes, ratios, steps, offsets):
"""Hybrid routing for multibox_prior operator.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
        4-D tensor with shape [batch, channel, height, width]
    sizes : tvm ConstExpr
        Sizes for anchor boxes.
    ratios : tvm ConstExpr
        Ratios for anchor boxes.
    steps : tvm ConstExpr
        Priorbox step across y and x, -1 for auto calculation.
    offsets : tvm ConstExpr
        Priorbox center offsets, y and x respectively.
Returns
-------
output : tvm.te.Tensor or numpy NDArray
3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
"""
in_height = data.shape[2]
in_width = data.shape[3]
num_sizes = len(sizes)
num_ratios = len(ratios)
num_boxes = in_height * in_width * (num_sizes + num_ratios - 1)
output = output_tensor((1, num_boxes, 4), "float32")
steps_h = steps[0] * 1.0 if steps[0] > 0 else 1.0 / in_height
steps_w = steps[1] * 1.0 if steps[1] > 0 else 1.0 / in_width
offset_h = offsets[0]
offset_w = offsets[1]
# Need to define var out of const_range + if
w = 0.0
h = 0.0
for i in parallel(in_height):
center_h = (i + offset_h) * steps_h
for j in range(in_width):
center_w = (j + offset_w) * steps_w
for k in const_range(num_sizes + num_ratios - 1):
if k < num_sizes:
w = float32(sizes[k] * in_height) / in_width / 2.0
h = sizes[k] / 2.0
else:
w = (
float32(sizes[0] * in_height)
/ in_width
* sqrt(ratios[k - num_sizes + 1] * 1.0)
/ 2.0
)
h = sizes[0] / sqrt(ratios[k - num_sizes + 1] * 1.0) / 2.0
count = (
i * in_width * (num_sizes + num_ratios - 1)
+ j * (num_sizes + num_ratios - 1)
+ k
)
output[0, count, 0] = center_w - w
output[0, count, 1] = center_h - h
output[0, count, 2] = center_w + w
output[0, count, 3] = center_h + h
return output
def multibox_prior(data, sizes=(1,), ratios=(1,), steps=(-1, -1), offsets=(0.5, 0.5), clip=False):
"""Generate prior(anchor) boxes from data, sizes and ratios.
Parameters
----------
data : tvm.te.Tensor
        4-D with shape [batch, c_in, h_in, w_in]
sizes : tuple of float
Tuple of sizes for anchor boxes.
ratios : tuple of float
Tuple of ratios for anchor boxes.
steps : Tuple of float
Priorbox step across y and x, -1 for auto calculation.
    offsets : tuple of float
Priorbox center offsets, y and x respectively.
clip : boolean
Whether to clip out-of-boundary boxes.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
"""
out = hybrid_multibox_prior(
data,
tvm.runtime.convert(sizes),
tvm.runtime.convert(ratios),
tvm.runtime.convert(steps),
tvm.runtime.convert(offsets),
)
if clip:
out = topi.clip(out, 0, 1)
return out
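# A worked example of the anchor count produced above (illustrative values).
# For a 14 x 14 feature map with sizes=(0.2, 0.272) and ratios=(1, 2, 0.5):
#
#     # boxes per location = num_sizes + num_ratios - 1 = 2 + 3 - 1 = 4
#     # total boxes        = 14 * 14 * 4 = 784, so the output shape is (1, 784, 4)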
@hybrid.script
def _hybridy_transform_loc(box, pred_loc, variance, clip):
"""Transform prior anchor box to output box through location predictions."""
al = box[0]
at = box[1]
ar = box[2]
ab = box[3]
px = pred_loc[0]
py = pred_loc[1]
pw = pred_loc[2]
ph = pred_loc[3]
vx = variance[0]
vy = variance[1]
vw = variance[2]
vh = variance[3]
output = output_tensor((4,), pred_loc.dtype)
aw = ar - al
ah = ab - at
ax = (al + ar) / 2.0
ay = (at + ab) / 2.0
ox = px * vx * aw + ax
oy = py * vy * ah + ay
ow = exp(pw * vw) * aw / 2.0
oh = exp(ph * vh) * ah / 2.0
output[0] = max(0.0, min(1.0, ox - ow)) if clip else ox - ow
output[1] = max(0.0, min(1.0, oy - oh)) if clip else oy - oh
output[2] = max(0.0, min(1.0, ox + ow)) if clip else ox + ow
output[3] = max(0.0, min(1.0, oy + oh)) if clip else oy + oh
return output
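# Illustrative sketch, not used by the implementation: the decoding above in
# NumPy form (clipping omitted), showing the variance-scaled center/size
# transform:
#
#     import numpy as np
#     def transform_loc(anchor, pred, var):
#         al, at, ar, ab = anchor
#         px, py, pw, ph = pred
#         aw, ah = ar - al, ab - at
#         ax, ay = (al + ar) / 2.0, (at + ab) / 2.0
#         ox, oy = px * var[0] * aw + ax, py * var[1] * ah + ay
#         ow, oh = np.exp(pw * var[2]) * aw / 2.0, np.exp(ph * var[3]) * ah / 2.0
#         return ox - ow, oy - oh, ox + ow, oy + oh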
@hybrid.script
def hybrid_multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances):
"""Hybrid routing for transform location in multibox_detection operator.
Parameters
----------
cls_prob : tvm.te.Tensor or numpy NDArray
3-D tensor of class probabilities.
loc_pred : tvm.te.Tensor or numpy NDArray
2-D tensor of location regression predictions.
anchor : tvm.te.Tensor or numpy NDArray
3-D tensor of prior anchor boxes.
clip : tvm.tir.const
Whether to clip out-of-boundary boxes.
threshold : tvm.tir.const
Threshold to be a positive prediction.
variances : tvm.nd.NDArray
Variances to be decoded from box regression output.
Returns
-------
out_loc : tvm.te.Tensor or numpy NDArray
3-D tensor of transformed location.
valid_count : tvm.te.Tensor or numpy NDArray
        1-D tensor of valid counts for boxes.
"""
batch_size = cls_prob.shape[0]
num_classes = cls_prob.shape[1]
num_anchors = cls_prob.shape[2]
box_coord = allocate((4,), loc_pred.dtype)
pred_coord = allocate((4,), loc_pred.dtype)
out_loc = output_tensor((batch_size, num_anchors, 6), loc_pred.dtype)
valid_count = output_tensor((batch_size,), "int32")
for i in parallel(batch_size):
valid_count[i] = 0
for j in range(num_anchors):
# Find the predicted class id and probability
score = -1.0
cls_id = 0
for k in range(num_classes):
if k > 0:
temp = cls_prob[i, k, j]
cls_id = k if temp > score else cls_id
score = max(temp, score)
if cls_id > 0 and score < threshold:
cls_id = 0
# [id, prob, xmin, ymin, xmax, ymax]
# Remove background, restore original id
if cls_id > 0:
out_loc[i, valid_count[i], 0] = cls_id - 1.0
out_loc[i, valid_count[i], 1] = score
for l in range(4):
box_coord[l] = anchor[0, j, l]
pred_coord[l] = loc_pred[i, j * 4 + l]
out_coord = _hybridy_transform_loc(box_coord, pred_coord, variances, clip)
out_loc[i, valid_count[i], 2] = out_coord[0]
out_loc[i, valid_count[i], 3] = out_coord[1]
out_loc[i, valid_count[i], 4] = out_coord[2]
out_loc[i, valid_count[i], 5] = out_coord[3]
valid_count[i] += 1
return out_loc, valid_count
def multibox_transform_loc(
cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
"""Location transformation for multibox detection
Parameters
----------
cls_prob : tvm.te.Tensor
Class probabilities.
loc_pred : tvm.te.Tensor
Location regression predictions.
anchor : tvm.te.Tensor
Prior anchor boxes.
clip : boolean
Whether to clip out-of-boundary boxes.
threshold : float
Threshold to be a positive prediction.
variances : tuple of float
Variances to be decoded from box regression output.
Returns
-------
ret : tuple of tvm.te.Tensor
"""
return hybrid_multibox_transform_loc(
cls_prob,
loc_pred,
anchor,
tvm.tir.const(clip, "bool"),
tvm.tir.const(threshold, "float32"),
tvm.runtime.convert(variances),
)
def multibox_detection(
cls_prob,
loc_pred,
anchor,
clip=True,
threshold=0.01,
nms_threshold=0.5,
force_suppress=False,
variances=(0.1, 0.1, 0.2, 0.2),
nms_topk=-1,
):
"""Convert multibox detection predictions.
Parameters
----------
cls_prob : tvm.te.Tensor
Class probabilities.
loc_pred : tvm.te.Tensor
Location regression predictions.
anchor : tvm.te.Tensor
Prior anchor boxes.
    clip : boolean
        Whether to clip out-of-boundary boxes.
    threshold : float
        Threshold to be a positive prediction.
    nms_threshold : float
        Non-maximum suppression threshold.
    force_suppress : boolean
        Whether to suppress all detections regardless of class_id.
    variances : tuple of float
        Variances to be decoded from box regression output.
    nms_topk : int
        Keep maximum top k detections before NMS, -1 for no limit.
Returns
-------
out : tvm.te.Tensor
3-D tensor with shape (batch_size, num_anchors, 6)
"""
inter_out = multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances)
out = non_max_suppression(
inter_out[0],
inter_out[1],
inter_out[1],
max_output_size=-1,
iou_threshold=nms_threshold,
force_suppress=force_suppress,
top_k=nms_topk,
return_indices=False,
)
return out
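# Illustrative input shapes for the full multibox pipeline above (batch=1,
# 21 classes including background, 5000 anchors; example values only):
#
#     from tvm import te
#     cls_prob = te.placeholder((1, 21, 5000), "float32")
#     loc_pred = te.placeholder((1, 5000 * 4), "float32")
#     anchor = te.placeholder((1, 5000, 4), "float32")
#     out = multibox_detection(cls_prob, loc_pred, anchor)
#     # out: (1, 5000, 6) rows of [class_id, score, xmin, ymin, xmax, ymax]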
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""x86 specific declaration and schedules."""
from __future__ import absolute_import as _abs
from .conv1d import *
from .conv2d import *
from .conv3d import *
from .binarize_pack import schedule_binarize_pack
from .binary_dense import schedule_binary_dense
from .nn import *
from .conv2d_int8 import *
from .injective import *
from .reduction import *
from .pooling import schedule_pool, schedule_adaptive_pool
from .bitserial_conv2d import *
from .bitserial_dense import *
from .depthwise_conv2d import *
from .dense import *
from .batch_matmul import *
from .roi_align import roi_align_nchw
from .conv2d_transpose import *
from .conv3d_transpose import *
from .sparse import *
from .conv2d_alter_op import *
from .dense_alter_op import *
from .scatter import *
from .group_conv2d import *
from .math_alter_op import *
from .concat import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable
"""x86 batch_matmul operators"""
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from tvm.contrib import cblas, mkl
from .. import generic, nn
from ..transform import layout_transform
from ..utils import get_const_tuple, get_max_power2_factor, traverse_inline
from .dense import dense_vnni_schedule
from .injective import schedule_injective_from_existing
@autotvm.register_topi_compute("batch_matmul_vnni.x86")
def batch_matmul_vnni_compute(cfg, x, y, *_):
"""Compute for uint8 x int8 -> int32 batch_matmul"""
batch, m, k = x.shape
packed_y_layout = "BNK16n4k"
packed_y = layout_transform(y, "BNK", packed_y_layout)
_, n_o, _, n_i, _ = packed_y.shape
ak = te.reduce_axis((0, k), name="k")
z = te.compute(
(batch, m, n_o * n_i),
lambda b, i, j: te.sum(
x[b, i, ak].astype("int32")
* packed_y[b, tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
"int32"
),
axis=ak,
),
tag="batch_matmul_vnni",
attrs={"schedule_rule": "batch_matmul_vnni"},
)
_, a_y, _ = z.op.axis
cfg.define_split("tile_y", a_y, num_outputs=2)
cfg.define_knob("layout_trans_compute_root", [0, 1])
return z
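# The BNK16n4k packing above tiles the (N, K) weight matrix so that one VNNI
# vpdpbusd instruction covers 16 output columns and 4 reduction elements; the
# compute indexes the packed tensor such that, illustratively:
#
#     # packed_y[b, j // 16, k // 4, j % 16, k % 4] == y[b, j, k]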
def batch_matmul_vnni_schedule(cfg, s, C, O, layout_trans):
"""Schedule batch_matmul compute using VNNI vpdpbusd instruction"""
# C: The output of batched GEMM
# O: The output of the fused op
# Schedule the GEMM part
s, fused_inner = dense_vnni_schedule(cfg, s, C, O, do_parallel=False)
# Parallelize over batch
fused = s[O].fuse(O.op.axis[0], fused_inner)
s[O].parallel(fused)
if cfg["layout_trans_compute_root"].val:
s[layout_trans].compute_root()
schedule_injective_from_existing(s, layout_trans)
else:
s[layout_trans].compute_at(s[O], fused)
_, _, _, ni, ki = s[layout_trans].op.axis
s[layout_trans].vectorize(ki)
s[layout_trans].unroll(ni)
return s
@autotvm.register_topi_compute("batch_matmul.x86")
def batch_matmul(
cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""Compute batch matrix multiplication of `tensor_a` and `tensor_b`.
    Both `tensor_a` and `tensor_b` can be transposed. For legacy reasons, we use the NT format
(transpose_a=False, transpose_b=True) by default.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file.
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M].
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K].
out_shape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul.
transpose_a : Optional[bool] = False
Whether the first tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the second tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
if cfg.is_fallback:
if transpose_a:
_, K, M = get_const_tuple(tensor_a.shape)
else:
_, M, K = get_const_tuple(tensor_a.shape)
if transpose_b:
_, N, _ = get_const_tuple(tensor_b.shape)
else:
_, _, N = get_const_tuple(tensor_b.shape)
_default_batch_matmul_config(cfg, M, N, K)
return nn.batch_matmul(
tensor_a,
tensor_b,
out_shape,
out_dtype,
transpose_a,
transpose_b,
)
@autotvm.register_topi_schedule("batch_matmul.x86")
def schedule_batch_matmul(cfg, outs):
"""Schedule for batch_matmul
Parameters
----------
cfg : ConfigSpace
AutoTVM tuning space config file.
outs : Array of Tensor
The computation graph description of batch_matmul
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "batch_matmul" in op.tag:
C = op.output(0)
A, B = op.input_tensors
if len(B.op.input_tensors) == 1 and B.op.input_tensors[0] == A:
s[B].compute_inline()
_, M, K = get_const_tuple(A.shape)
_, _, N = get_const_tuple(C.shape)
if op not in s.outputs:
s[C].compute_inline()
O = outs[0]
else:
O = C
CC = s.cache_write(C, "global")
# create tuning space
cfg.define_split("tile_y", M, num_outputs=2)
cfg.define_split("tile_x", N, num_outputs=2)
cfg.define_split("tile_k", K, num_outputs=2)
b, y, x = s[O].op.axis
yo, yi = cfg["tile_y"].apply(s, O, y)
xo, xi = cfg["tile_x"].apply(s, O, x)
s[O].reorder(b, yo, xo, yi, xi)
bxyo = s[O].fuse(b, yo, xo)
s[O].parallel(bxyo)
s[CC].compute_at(s[O], bxyo)
(k,) = s[CC].op.reduce_axis
ko, ki = cfg["tile_k"].apply(s, CC, k)
Crf = s.rfactor(CC, ki)
s[Crf].compute_at(s[CC], s[CC].op.axis[0])
_, _, y, x = s[Crf].op.axis
s[Crf].fuse(y, x)
s[Crf].vectorize(s[Crf].op.axis[0])
s[O].pragma(bxyo, "auto_unroll_max_step", 16)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_schedule("batch_matmul_vnni.x86")
def schedule_batch_matmul_vnni(cfg, outs):
"""Schedule for batch_matmul_vnni"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "batch_matmul_vnni" in op.tag:
layout_trans = op.input_tensors[1]
batch_matmul_vnni_schedule(cfg, s, op.output(0), outs[0], layout_trans)
traverse_inline(s, outs[0].op, _callback)
return s
def _default_batch_matmul_config(cfg, M, N, K):
cfg["tile_k"] = SplitEntity([K // 16, 16])
x_bn = get_max_power2_factor(N, 8)
cfg["tile_x"] = SplitEntity([N // x_bn, x_bn])
y_bn = get_max_power2_factor(M, 8)
cfg["tile_y"] = SplitEntity([M // y_bn, y_bn])
def batch_matmul_blas_common(cfg, tensor_a, tensor_b, out_shape, trans_a, trans_b, lib):
"""Computes batch matrix multiplication of `tensor_a` and `tensor_b` when `tensor_a` and
`tensor_b` are data in batch, using one of BLAS libraries. Supports broadcasting in batch
dimension.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M].
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K].
out_shape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
trans_a : Optional[bool] = False
Whether the first tensor is in transposed format.
trans_b : Optional[bool] = True
Whether the second tensor is in transposed format.
    lib : module
        A contrib module which implements a batch_matmul function; cblas and mkl are supported.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(tensor_a.shape) == 3 and len(tensor_b.shape) == 3, "only support 3-dim batch_matmul"
if trans_a:
XB, XK, M = get_const_tuple(tensor_a.shape)
else:
XB, M, XK = get_const_tuple(tensor_a.shape)
if trans_b:
YB, N, YK = get_const_tuple(tensor_b.shape)
else:
        YB, YK, N = get_const_tuple(tensor_b.shape)
    assert (XB == YB) or (YB == 1) or (XB == 1), "batch dimension doesn't match"
    assert XK == YK, "shapes of x and y are inconsistent"
if out_shape is not None:
assert out_shape[0] in (XB, YB), "got invalid output shape"
assert out_shape[1] == M, "got invalid output shape"
assert out_shape[2] == N, "got invalid output shape"
cfg.add_flop(XB * M * N * XK * 2)
return lib.batch_matmul(tensor_a, tensor_b, trans_a, trans_b)
@autotvm.register_topi_compute("batch_matmul_cblas.x86")
def batch_matmul_cblas(
cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""Compute batch_matmul using cblas"""
del out_dtype # Unused argument
return batch_matmul_blas_common(
cfg, tensor_a, tensor_b, out_shape, transpose_a, transpose_b, cblas
)
@autotvm.register_topi_schedule("batch_matmul_cblas.x86")
def schedule_batch_matmul_cblas(_, outs):
"""Create schedule for batch_matmul_cblas"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("batch_matmul_mkl.x86")
def batch_matmul_mkl(
cfg, tensor_a, tensor_b, out_shape=None, out_dtype=None, transpose_a=False, transpose_b=True
):
"""Compute batch_matmul using mkl"""
del out_dtype # Unused argument
return batch_matmul_blas_common(
cfg, tensor_a, tensor_b, out_shape, transpose_a, transpose_b, mkl
)
@autotvm.register_topi_schedule("batch_matmul_mkl.x86")
def schedule_batch_matmul_mkl(_, outs):
"""Create schedule for batch_matmul_mul"""
return generic.schedule_extern(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/binarize_pack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Schedule for binarization and bit-packing."""
from tvm import te
def schedule_binarize_pack(outs):
"""Schedule for binarize_pack.
Parameters
----------
outs: Array of Tensor
The computation graph description of binarize_pack
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for binarize_pack.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(Out):
s[Out].parallel(Out.op.axis[0])
def traverse(OP):
# schedule binarize_pack
if OP.tag == "binarize_pack":
Out = OP.output(0)
_schedule(Out)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/binary_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for binary dense operator."""
from tvm import te
from .. import tag
def schedule_binary_dense(outs):
"""Schedule for binary_dense.
Parameters
----------
outs: Array of Tensor
The computation graph description of binary_dense
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for binary_dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def _schedule(A, B, C):
s[C].split(s[C].op.reduce_axis[0], factor=8)
s[C].parallel(s[C].op.axis[0])
if C.op in s.outputs:
Out = C
else:
Out = outs[0].op.output(0)
xo, xi = s[Out].split(Out.op.axis[1], factor=8)
s[Out].vectorize(xi)
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule binary_dense
elif OP.tag == "binary_dense":
output = OP.output(0)
data = OP.input_tensors[0]
weight = OP.input_tensors[1]
_schedule(data, weight, output)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""Bitserial conv2d schedule on x86"""
import tvm
from tvm import te
from tvm import autotvm
from .. import tag
from ..utils import get_const_int, get_const_tuple
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_conv2d_nchw.x86")
def bitserial_conv2d_nchw(
cfg,
data,
kernel,
stride,
padding,
in_bits,
weight_bits,
pack_dtype="uint32",
out_dtype="int16",
unipolar=True,
):
"""Compute convolution with pack on spatial axes."""
    assert data.shape[0].value == 1, "spatial pack convolution only supports batch size=1"
data_q = bitpack(data, in_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype)
# Check if kernel is already bitpacked
if len(kernel.shape) == 4:
kernel_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype)
KB, CO, _, KH, KW = get_const_tuple(kernel_q.shape)
else:
kernel_vec = kernel
OCO, _, KH, KW, KB, VC = get_const_tuple(kernel_vec.shape)
CO = OCO * VC
IB, N, CI, H, W = get_const_tuple(data_q.shape)
if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        # get_pad_tuple expects the kernel spatial size, not the kernel tensor
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, (KH, KW))
else:
TPAD, LPAD, DPAD, RPAD = padding
pad_before = [0, 0, 0, TPAD, LPAD]
pad_after = [0, 0, 0, DPAD, RPAD]
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH - 1, KW - 1
TH = H + TPAD + DPAD
TW = W + LPAD + RPAD
OH = (H + TPAD + DPAD - KH) // HSTR + 1
OW = (W + LPAD + RPAD - KW) // WSTR + 1
# ==================== define configuration space ====================
n, co, oh, ow = cfg.axis(N), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits)
co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
cfg.define_annotate("ann_reduce", [ib, kb, kh, kw], policy="try_unroll")
cfg.define_reorder(
"reorder_0",
[n, co, oh, ow, vc, vh, vw, kh, kw, kb, ib, ci],
policy="interval_all",
interval=(6, 11),
)
# binary ops
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
# ====================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
dvshape = (1, TH // (VH * HSTR), TW // (VW * WSTR), CI, VH * HSTR + HCAT, VW * WSTR + WCAT, IB)
kvshape = (CO // VC, CI, KH, KW, KB, VC)
ovshape = (1, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (1, CO, OH, OW)
    # pad if any side needs it, not only when both top and right are nonzero
    if TPAD != 0 or LPAD != 0 or DPAD != 0 or RPAD != 0:
data_pad = pad(data_q, pad_before, pad_after, name="data_pad")
else:
data_pad = data_q
data_vec = te.compute(
dvshape,
lambda n, h, w, ci, vh, vw, b: data_pad[b][n][ci][h * VH * HSTR + vh][w * VW * WSTR + vw],
name="data_vec",
)
if len(kernel.shape) == 4:
kernel_vec = te.compute(
kvshape,
lambda co, ci, dh, dw, b, vc: kernel_q[b][co * VC + vc][ci][dh][dw],
name="kernel_vec",
)
ci = te.reduce_axis((0, CI), name="ci")
dh = te.reduce_axis((0, KH), name="dh")
dw = te.reduce_axis((0, KW), name="dw")
b1 = te.reduce_axis((0, IB), name="ib")
b2 = te.reduce_axis((0, KB), name="kb")
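    # The unipolar reduction below realizes a {+1, -1}-weight dot product with
    # bit tricks: for data bit-plane b1 and weight bit-plane b2,
    #     popcount(x & w) - popcount(x & ~w)
    # counts matching bits as +1 and mismatching bits as -1, and the partial
    # sum is shifted by (b1 + b2) to restore each bit-plane's place value.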
def _conv(n, co, h, w, vh, vw, vc):
b1b2 = (b1 + b2).astype(out_dtype)
if unipolar:
return te.sum(
(
tvm.tir.popcount(
data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1].astype(out_dtype)
& kernel_vec[co, ci, dh, dw, b2, vc].astype(out_dtype)
)
- tvm.tir.popcount(
data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1].astype(out_dtype)
& ~kernel_vec[co, ci, dh, dw, b2, vc]
).astype(out_dtype)
)
<< b1b2,
axis=[ci, dh, dw, b1, b2],
)
return te.sum(
(
tvm.tir.popcount(
data_vec[n, h, w, ci, vh * HSTR + dh, vw * WSTR + dw, b1]
& kernel_vec[co, ci, dh, dw, b2, vc]
)
).astype(out_dtype)
<< b1b2,
axis=[ci, dh, dw, b1, b2],
)
conv = te.compute(ovshape, _conv, name="conv_out")
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
return te.compute(
oshape,
lambda n, co, h, w: conv[
n, idxd(co, VC), idxd(h, VH), idxd(w, VW), idxm(h, VH), idxm(w, VW), idxm(co, VC)
],
name="conv_vec",
tag="spatial_bitserial_conv_nchw",
)
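# Illustrative sketch (added for exposition; shapes and dtypes are
# assumptions): instantiating this AutoTVM template directly, letting the
# dispatch context fall back to a default configuration, as the relay
# strategy does when no tuning log is available.
def _example_bitserial_conv2d_nchw():
    data = te.placeholder((1, 64, 56, 56), name="data", dtype="uint32")
    kernel = te.placeholder((64, 64, 3, 3), name="kernel", dtype="uint32")
    with tvm.target.Target("llvm"):
        out = bitserial_conv2d_nchw(data, kernel, 1, 1, in_bits=1, weight_bits=1)
    return out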
@autotvm.register_topi_compute("bitserial_conv2d_nhwc.x86")
def bitserial_conv2d_nhwc(
cfg,
data,
kernel,
stride,
padding,
in_bits,
weight_bits,
pack_dtype="uint32",
out_dtype="int16",
unipolar=True,
):
"""Compute convolution with pack on spatial axes."""
    assert data.shape[0].value == 1, "spatial pack convolution only supports batch size=1"
data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
pack_kernel = len(kernel.shape) == 4
if pack_kernel:
kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
else:
kernel_q = kernel
KH, KW, _, CO, KB = get_const_tuple(kernel_q.shape)
N, H, W, CI, IB = get_const_tuple(data_q.shape)
if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        # get_pad_tuple expects the kernel spatial size, not the kernel tensor
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, (KH, KW))
else:
TPAD, LPAD, DPAD, RPAD = padding
pad_before = [0, TPAD, LPAD, 0, 0]
pad_after = [0, DPAD, RPAD, 0, 0]
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH - 1, KW - 1
PAD_H = H + (TPAD + DPAD)
PAD_W = W + (LPAD + RPAD)
OH = (PAD_H - KH) // HSTR + 1
OW = (PAD_W - KW) // WSTR + 1
oshape = (1, OH, OW, CO)
# ==================== define configuration space ====================
n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
ib, kb = cfg.reduce_axis(in_bits), cfg.reduce_axis(weight_bits)
co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: max(x.size[1:]) <= 16)
cfg.define_annotate("ann_reduce", [ib, kb, kh, kw], policy="try_unroll")
cfg.define_reorder(
"reorder_0",
[n, oh, ow, co, vh, vw, kh, kw, kb, ib, vc, ci],
policy="interval_all",
interval=(3, 7),
)
# binary ops
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
# ====================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
dvshape = (
1,
PAD_H // (VH * HSTR),
PAD_W // (VW * WSTR),
VH * HSTR + HCAT,
VW * WSTR + WCAT,
CI,
IB,
)
kvshape = (CO, KH, KW, CI, VC, KB)
ovshape = (1, OH, OW, CO, VH, VW, VC)
oshape = (1, OH, OW, CO)
    # pad if any side needs it, not only when both bottom and right are nonzero
    if TPAD != 0 or LPAD != 0 or DPAD != 0 or RPAD != 0:
data_pad = pad(data_q, pad_before, pad_after, name="data_pad")
else:
data_pad = data_q
data_vec = te.compute(
dvshape,
lambda n, h, w, vh, vw, ci, b: data_pad[n][h * VH * HSTR + vh][w * VW * WSTR + vw][ci][b],
name="data_vec",
)
kernel_vec = te.compute(
kvshape,
lambda co, dh, dw, ci, vc, b: kernel_q[dh][dw][ci][co * VC + vc][b],
name="kernel_vec",
)
ci = te.reduce_axis((0, CI), name="ci")
dh = te.reduce_axis((0, KH), name="dh")
dw = te.reduce_axis((0, KW), name="dw")
b1 = te.reduce_axis((0, IB), name="ib")
b2 = te.reduce_axis((0, KB), name="kb")
def _conv(n, h, w, co, vh, vw, vc):
b1b2 = (b1 + b2).astype(out_dtype)
if unipolar:
return te.sum(
(
(
tvm.tir.popcount(
data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
& kernel_vec[co, dh, dw, ci, vc, b2]
).astype(out_dtype)
- tvm.tir.popcount(
data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
& ~kernel_vec[co, dh, dw, ci, vc, b2]
).astype(out_dtype)
)
<< b1b2
),
axis=[dh, dw, ci, b1, b2],
)
return te.sum(
tvm.tir.popcount(
data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ci, b1]
& kernel_vec[co, dh, dw, ci, vc, b2]
).astype(out_dtype)
<< b1b2,
axis=[dh, dw, ci, b1, b2],
)
conv = te.compute(ovshape, _conv, name="conv")
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
return te.compute(
oshape,
lambda n, h, w, co: conv[
n, idxd(h, VH), idxd(w, VW), idxd(co, VC), idxm(h, VH), idxm(w, VW), idxm(co, VC)
],
name="output_unpack",
tag="spatial_bitserial_conv_nhwc",
)
@autotvm.register_topi_schedule("bitserial_conv2d_nchw.x86")
def schedule_bitserial_conv2d_nchw(cfg, outs):
return _schedule_bitserial_conv2d(cfg, outs)
@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.x86")
def schedule_bitserial_conv2d_nhwc(cfg, outs):
return _schedule_bitserial_conv2d(cfg, outs)
def _schedule_bitserial_conv2d(cfg, outs):
"""CPU schedule for bitserial convolutions NCHW and NHWC"""
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
output = op.output(0)
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors and (tensor.op not in scheduled_ops):
if isinstance(tensor.op, tvm.te.ComputeOp):
traverse(tensor.op)
elif "spatial_bitserial_conv_nchw" in op.tag or "spatial_bitserial_conv_nhwc" in op.tag:
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel_q = kernel_vec.op.input_tensors[0]
data_vec = conv_out.op.input_tensors[0]
data_q = data_vec.op.input_tensors[0]
data = data_q.op.input_tensors[0]
data_pad = None
if isinstance(data_q.op, tvm.te.ComputeOp) and "pad" in data_q.op.tag:
data_pad = data_q
data_q = data
data = data_q.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
# Need to go up 1 further, from the combine in bitpack
data = data.op.input_tensors[0]
if "spatial_bitserial_conv_nchw" in op.tag:
_schedule_bitserial_conv2d_nchw(
cfg,
s,
data_q,
data_pad,
data_vec,
kernel_q,
kernel_vec,
conv_out,
output,
outs[0],
)
elif "spatial_bitserial_conv_nhwc" in op.tag:
_schedule_bitserial_conv2d_nhwc(
cfg,
s,
data_q,
data_pad,
data_vec,
kernel_q,
kernel_vec,
conv_out,
output,
outs[0],
)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
def _schedule_bitserial_conv2d_nchw(
cfg, s, data_q, data_pad, data_vec, kernel_q, kernel_vec, conv_out, output, last
):
IB, _, CI, IH, IW = data_q.shape
KB, CO, _, KH, KW = kernel_q.shape
_, _, OH, OW = output.shape
# Infer padding and stride
if data_pad is None:
padding = (0, 0)
TH, TW = IH, IW
else:
_, _, _, TH, TW = data_pad.shape
hpad = get_const_int((TH - IH) // 2)
wpad = get_const_int((TW - IW) // 2)
padding = (hpad, wpad)
hstride = get_const_int((TH - KH) // (OH - 1))
wstride = get_const_int((TW - KW) // (OW - 1))
stride = (hstride, wstride)
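    # The inference above inverts OH = (TH - KH) // HSTR + 1: the OH - 1
    # output steps span TH - KH padded rows, so HSTR = (TH - KH) // (OH - 1)
    # (assuming OH > 1); WSTR follows analogously.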
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
##### Schedule Data padding, and bitpacking
if data_pad is not None:
s[data_pad].compute_inline()
_, _, h, _, _, _, _ = s[data_vec].op.axis
cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
if cfg["tile_ah"].size[1] == 1:
oaxis = oh
paxis = oh
else:
oaxis = oh
paxis = ih
s[data_vec].parallel(paxis)
s[data_vec].pragma(oaxis, "parallel_launch_point")
s[data_vec].pragma(paxis, "parallel_stride_pattern")
s[data_vec].pragma(oaxis, "parallel_barrier_when_finish")
    ##### Schedule kernel bitpacking
co, _, _, _, _, _ = s[kernel_vec].op.axis
cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
if cfg["tile_bco"].size[1] == 1:
oaxis = oco
paxis = oco
else:
oaxis = oco
paxis = ico
s[kernel_vec].parallel(paxis)
s[kernel_vec].pragma(oaxis, "parallel_launch_point")
s[kernel_vec].pragma(paxis, "parallel_stride_pattern")
s[kernel_vec].pragma(oaxis, "parallel_barrier_when_finish")
##### Schedule Convolution
n, co, oh, ow, vh, vw, vc = s[conv_out].op.axis
ci, dh, dw, ib, kb = s[conv_out].op.reduce_axis
# s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2)
cfg["reorder_0"].apply(s, conv_out, [n, co, oh, ow, vc, vh, vw, dh, dw, kb, ib, ci])
cfg["ann_reduce"].apply(
s,
conv_out,
[kb, ib, dh, dw],
axis_lens=[
get_const_int(kb.dom.extent),
get_const_int(ib.dom.extent),
get_const_int(dh.dom.extent),
get_const_int(dw.dom.extent),
],
max_unroll=16,
cfg=cfg,
)
s[conv_out].vectorize(vc)
# # Schedule output
n, co, h, w = s[last].op.axis
co, vc = s[last].split(co, VC)
oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
s[last].reorder(n, co, oh, ow, vh, vw, vc)
if last != output:
s[output].compute_inline()
s[conv_out].compute_at(s[last], ow)
oco, ico = cfg["tile_oh"].apply(s, last, co)
if cfg["tile_oh"].size[1] == 1:
oaxis = oco
paxis = oco
else:
oco, ico = s[last].split(co, bc)
oaxis = oco
paxis = ico
s[last].parallel(oco)
return s
def _schedule_bitserial_conv2d_nhwc(
cfg, s, data_q, data_pad, data_vec, kernel_q, kernel_vec, conv_out, output, last
):
# no stride and padding info here
_, IH, IW, CI, IB = data_q.shape
KH, KW, _, CO, KB = kernel_q.shape
_, OH, OW, _ = output.shape
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
##### Schedule data padding and packing
if data_pad is not None:
s[data_pad].compute_inline()
_, h, _, _, _, _, _ = s[data_vec].op.axis
cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
s[data_vec].parallel(oh)
##### Schedule kernel packing
co, _, _, _, _, _ = s[kernel_vec].op.axis
cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
s[kernel_vec].parallel(oco)
##### Schedule Convolution
n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
dh, dw, ci, b1, b2 = s[conv_out].op.reduce_axis
# s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2)
cfg["reorder_0"].apply(s, conv_out, [n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2])
cfg["ann_reduce"].apply(
s,
conv_out,
[b1, b2, dh, dw],
axis_lens=[
get_const_int(b1.dom.extent),
get_const_int(b2.dom.extent),
get_const_int(dh.dom.extent),
get_const_int(dw.dom.extent),
],
max_unroll=16,
cfg=cfg,
)
s[conv_out].unroll(b1)
s[conv_out].unroll(b2)
s[conv_out].vectorize(vc)
# # Schedule output
n, h, w, co = s[last].op.axis
co, vc = s[last].split(co, VC)
oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
s[last].reorder(n, oh, ow, co, vh, vw, vc)
s[last].vectorize(vc)
if last != output:
s[output].compute_inline()
s[conv_out].compute_at(s[last], ow)
oho, iho = cfg["tile_oh"].apply(s, last, oh) # reuse parameter
s[last].parallel(oho)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Schedule for bitserial dense operator."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.utils import get_const_int, get_const_tuple
from .. import tag
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_dense.x86")
def bitserial_dense(
cfg, data, weight, data_bits, weight_bits, pack_dtype="uint32", out_dtype="int16", unipolar=True
):
"""Bitserial dense implementation. TODO: Why are these separate
Parameters
----------
data : tvm.te.Tensor
2-D with shape [batch, in_dim]
weight : tvm.te.Tensor
2-D with shape [out_dim, in_dim] or
3-D with shape [out_dim, weight_bits, in_dim]
Returns
-------
output : tvm.te.Tensor
2-D with shape [batch, out_dim]
"""
data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
if len(weight.shape) == 2:
weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
else:
weight_packed = weight
Y, DB, K = get_const_tuple(data_packed.shape)
X, WB, _ = get_const_tuple(weight_packed.shape)
######## Search space
x, y = cfg.axis(X), cfg.axis(Y)
db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(K)
ko, ki = cfg.define_split("tile_k", k, num_outputs=2)
yo, yi = cfg.define_split("tile_y", y, num_outputs=2)
xo, xi = cfg.define_split("tile_x", x, num_outputs=2)
cfg.define_reorder(
"reorder_0",
[yo, xo, ko, yi, wb, db, ki, xi],
policy="candidate",
candidate=[[yo, xo, ko, yi, wb, db, ki, xi], [yo, xo, yi, ko, wb, db, ki, xi]],
)
cfg.define_annotate("ann_reduce", [db, wb], policy="try_unroll")
cfg.define_annotate("ann_spatial", [yi, xi], policy="try_unroll_vec")
###### Compute rule
VX = cfg["tile_x"].size[-1]
wvshape = (X // VX, WB, VX, K)
oshape = (Y, X)
k = te.reduce_axis((0, K), name="k")
db = te.reduce_axis((0, DB), name="db")
wb = te.reduce_axis((0, WB), name="wb")
# Tile data and weights
weight_vec = te.compute(
wvshape, lambda xo, wb, vx, k: weight_packed[xo * VX + vx][wb][k], name="weight_vec"
)
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
matmul_unipolar = te.compute(
oshape,
lambda i, j: te.sum(
(
tvm.tir.popcount(
weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
)
- tvm.tir.popcount(
~weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
)
).astype(out_dtype)
<< (db + wb).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense_unipolar",
)
matmul = te.compute(
oshape,
lambda i, j: te.sum(
tvm.tir.popcount(
weight_vec[idxdiv(j, VX), wb, idxmod(j, VX), k] & data_packed[i, db, k]
).astype(out_dtype)
<< (db + wb).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense",
)
# binary ops
cfg.add_flop(2 * Y * X * K * binary_op_multiplier(pack_dtype))
if unipolar:
return matmul_unipolar
return matmul
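# Illustrative sketch (added for exposition; shapes and dtypes are
# assumptions): direct instantiation of the template under the fallback
# dispatch context.
def _example_bitserial_dense():
    data = te.placeholder((16, 1024), name="data", dtype="uint32")
    weight = te.placeholder((64, 1024), name="weight", dtype="uint32")
    with tvm.target.Target("llvm"):
        out = bitserial_dense(data, weight, data_bits=1, weight_bits=1)
    return out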
@autotvm.register_topi_schedule("bitserial_dense.x86")
def schedule_bitserial_dense(cfg, outs):
"""Schedule for bitserial_dense.
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial dense operator.
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for bitserial_dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data_vec, weight_vec, output):
s[data_vec].parallel(s[data_vec].op.axis[0])
s[weight_vec].parallel(s[weight_vec].op.axis[0])
y, x = s[output].op.axis
wb, db, k = s[output].op.reduce_axis
yo, yi = cfg["tile_y"].apply(s, output, y)
xo, xi = cfg["tile_x"].apply(s, output, x)
ko, ki = cfg["tile_k"].apply(s, output, k)
cfg["reorder_0"].apply(s, output, [yo, xo, ko, yi, wb, db, ki, xi])
cfg["ann_reduce"].apply(
s,
output,
[db, wb],
axis_lens=[get_const_int(db.dom.extent), get_const_int(wb.dom.extent)],
max_unroll=8,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s,
output,
[yi, xi],
axis_lens=[cfg["tile_y"].size[-1], cfg["tile_x"].size[-1]],
max_unroll=8,
cfg=cfg,
)
s[output].vectorize(xi)
s[output].parallel(yo)
return s
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp):
traverse(tensor.op)
elif op.tag == "bitserial_dense" or "bitserial_dense_unipolar":
output = op.output(0)
weight_vec = op.input_tensors[0]
data_vec = op.input_tensors[1]
data = data_vec.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
data = data.op.input_tensors[0]
_schedule(cfg, s, data_vec, weight_vec, output)
else:
raise RuntimeError("Unsupported operator: %s" % op.tag)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/concat.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"concatenate related operators"
from typing import Optional
import tvm
from tvm import te
import numpy as np
from ..utils import get_const_int
def concatenate(data: Sequence[tvm.te.Tensor], axis: Optional[int] = 0):
"""Join a sequence of arrays along an existing axis.
Optimized for CPU execution.
Parameters
----------
data : tuple of tvm.te.Tensor
The arrays to concatenate
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
ret : tvm.te.Tensor
"""
in_outers = [int(np.prod(i.shape[axis:])) for i in data]
in_outers_cumsum = [0, *np.cumsum(in_outers, dtype="int64")[0:-1]]
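    # Offset bookkeeping example: for inputs with flattened tail sizes
    # in_outers = [6, 3], in_outers_cumsum = [0, 6], so input i is copied into
    # the output starting at flattened offset in_outers_cumsum[i].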
def gen_ir_1d(data_bufs, out_buf):
"""Custom concatenation execution."""
i_b = tvm.tir.ir_builder.create()
data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
out_buf = i_b.buffer_ptr(out_buf)
for i in range(len(data)):
with i_b.for_range(0, in_outers[i], name="j") as j:
out_buf[in_outers_cumsum[i] + j] = data_bufs1[i][j]
return i_b.get()
def gen_ir(data_bufs, out_buf, inner, outer):
"""Common case of concatenation execution."""
i_b = tvm.tir.ir_builder.create()
data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
out_buf = i_b.buffer_ptr(out_buf)
if inner > 1:
with i_b.for_range(0, inner, name="inn", kind="parallel") as inn:
pos = inn * outer
for i in range(len(data)):
offset = inn * in_outers[i]
with i_b.for_range(0, in_outers[i], name="j") as j:
out_buf[pos + in_outers_cumsum[i] + j] = data_bufs1[i][offset + j]
else:
for i in range(len(data)):
with i_b.for_range(0, in_outers[i], name="j", kind="parallel") as j:
out_buf[in_outers_cumsum[i] + j] = data_bufs1[i][j]
return i_b.get()
if axis < 0:
axis += len(data[0].shape)
concat_axis_sizes = [int(t.shape[axis]) for t in data]
join_size = int(np.sum(concat_axis_sizes))
dtype = data[0].dtype
out_shape = data[0].shape[:axis] + [join_size] + data[0].shape[axis + 1 :]
right_val = np.prod(out_shape[axis:])
left_val = np.prod(out_shape[:axis])
if (
len(data[0].shape) == 1
or (left_val == 1 and axis == len(data[0].shape) - 1)
or (left_val == 1 and right_val == 1)
):
# badly parallelized case
return te.extern(
[out_shape],
list(data),
lambda ins, outs: gen_ir_1d(ins, outs[0]),
dtype=dtype,
name="concatenate_ext",
)
inner = get_const_int(int(left_val))
outer = get_const_int(int(right_val))
return te.extern(
[out_shape],
list(data),
lambda ins, outs: gen_ir(ins, outs[0], inner, outer),
dtype=dtype,
name="concatenate_ext",
)
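# Illustrative usage sketch (added for exposition; shapes are assumptions):
def _example_concatenate():
    a = te.placeholder((2, 3), name="a")
    b = te.placeholder((4, 3), name="b")
    return concatenate((a, b), axis=0)  # -> shape (6, 3)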
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv1D schedule for Intel CPU"""
from tvm import te
from .. import tag
def schedule_conv1d_ncw(outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 3: # schedule bias + bn + relu
n, c, w = op.axis
fused = s[op].fuse(n, c)
s[op].parallel(fused)
s[op].vectorize(w)
for tensor in op.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if "conv1d_ncw" in op.tag:
conv = op.output(0)
kernel = op.input_tensors[1]
if isinstance(kernel.op, te.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
n_pad, c_pad, w_pad = data_pad.op.axis
pad_fused = s[data_pad].fuse(n_pad, c_pad)
s[data_pad].parallel(pad_fused)
C = conv
n, c, w = C.op.axis
rc, rw = C.op.reduce_axis
n_out, c_out, w_out = output_op.axis
s[C].vectorize(w)
if op != output_op: # fuse bias + bn + relu into conv
s[C].compute_at(s[output_op], w_out)
else:
fused = s[C].fuse(n, c)
s[C].parallel(fused)
scheduled_ops.append(op)
traverse(output_op)
return s
def schedule_conv1d_nwc(outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 3: # schedule bias + bn + relu
n, w, c = op.axis
fused = s[op].fuse(n, w)
s[op].parallel(fused)
s[op].vectorize(c)
for tensor in op.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if "conv1d_nwc" in op.tag:
conv = op.output(0)
kernel = op.input_tensors[1]
if isinstance(kernel.op, te.tensor.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
n_pad, w_pad, c_pad = data_pad.op.axis
pad_fused = s[data_pad].fuse(n_pad, w_pad)
s[data_pad].parallel(pad_fused)
C = conv
n, w, c = C.op.axis
rc, rw = C.op.reduce_axis
n_out, w_out, c_out = output_op.axis
s[C].vectorize(c)
if op != output_op: # fuse bias + bn + relu into conv
s[C].compute_at(s[output_op], c_out)
else:
fused = s[C].fuse(n, w)
s[C].parallel(fused)
scheduled_ops.append(op)
traverse(output_op)
return s
def schedule_group_conv1d_ncw(outs):
return schedule_conv1d_ncw(outs)
def schedule_group_conv1d_nwc(outs):
return schedule_conv1d_nwc(outs)
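# Illustrative usage sketch (added for exposition; shapes are assumptions).
# `tvm.topi.nn.conv1d_ncw` tags its output "conv1d_ncw", which the traversal
# in schedule_conv1d_ncw dispatches on.
def _example_conv1d_ncw():
    from tvm import topi

    data = te.placeholder((1, 16, 256), name="data")
    kernel = te.placeholder((32, 16, 3), name="kernel")
    out = topi.nn.conv1d_ncw(data, kernel, strides=1, padding=1, dilation=1)
    s = schedule_conv1d_ncw([out])
    return s, [data, kernel, out]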
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Conv2D schedule on x86"""
import logging
import tvm
from tvm import te
from tvm import autotvm
from tvm.contrib import dnnl
from .. import nn
from ..generic import schedule_extern
from ..nn.conv2d import conv2d_infer_layout, _get_workload as _get_conv2d_workload
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from ..nn.utils import get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from . import conv2d_avx_1x1, conv2d_avx_common
logger = logging.getLogger("topi")
def _get_default_config(
cfg, data, kernel, strides, padding, dilation, out_dtype, is_depthwise=False, layout="NCHW"
):
"""
Get default schedule config for the workload
"""
static_data_shape = []
for dim in get_const_tuple(data.shape):
if isinstance(dim, tvm.tir.Var):
static_data_shape.append(1)
else:
static_data_shape.append(dim)
data = te.placeholder(static_data_shape, dtype=data.dtype)
if is_depthwise:
wkl = _get_depthwise_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype)
from .depthwise_conv2d import _fallback_schedule
_fallback_schedule(cfg, wkl)
else:
wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
is_kernel_1x1 = wkl.kernel_h == 1 and wkl.kernel_w == 1
if is_kernel_1x1:
conv2d_avx_1x1._fallback_schedule(cfg, wkl)
else:
conv2d_avx_common._fallback_schedule(cfg, wkl)
@conv2d_infer_layout.register("cpu")
def _conv2d_infer_layout(workload, cfg):
_, data, kernel, strides, padding, dilation, layout, _, dtype = workload
batch_size, in_channel, in_height, in_width = data[1]
out_channel, _, k_height, k_width = kernel[1]
idxdiv = tvm.tir.indexdiv
pt, pl, pb, pr = get_pad_tuple(padding, (k_height, k_width))
hdilation, wdilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
dilated_kernel_h = (k_height - 1) * hdilation + 1
dilated_kernel_w = (k_width - 1) * wdilation + 1
out_height = idxdiv(in_height + pt + pb - dilated_kernel_h, strides[0]) + 1
out_width = idxdiv(in_width + pl + pr - dilated_kernel_w, strides[1]) + 1
tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
in_shape = (batch_size, idxdiv(in_channel, tile_ic), in_height, in_width, tile_ic)
in_layout = "NCHW%dc" % tile_ic
out_shape = (batch_size, idxdiv(out_channel, tile_oc), out_height, out_width, tile_oc)
out_layout = "NCHW%dc" % tile_oc
return ((in_shape, in_layout),), ((out_shape, out_layout),)
def schedule_conv2d_nhwc(outs):
"""Create schedule for conv2d_nhwc"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
output_op = outs[0].op
def _callback(op):
if "conv2d_nhwc" in op.tag:
conv = op.output(0)
kernel = op.input_tensors[1]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
n_pad, h_pad, w_pad, c_pad = data_pad.op.axis
pad_fused = s[data_pad].fuse(n_pad, h_pad)
s[data_pad].parallel(pad_fused)
C = conv
n, h, w, c = C.op.axis
s[C].vectorize(c)
O = output_op.output(0)
if len(O.op.axis) == 4: # schedule bias + bn + relu
n, h, w, c = O.op.axis
fused = s[O].fuse(n, h, w)
s[O].parallel(fused)
channels = int(O.shape[-1])
if channels % 64 == 0:
c, ci = s[O].split(c, 64)
s[O].vectorize(ci)
if C != O:
s[C].compute_at(s[O], c)
traverse_inline(s, output_op, _callback)
return s
def conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype):
layout = "NCHW"
packed_out = conv2d_NCHWc(data, kernel, strides, padding, dilation, layout, layout, out_dtype)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
def schedule_conv2d_nchw(outs):
"""Create schedule for tensors"""
return schedule_conv2d_NCHWc(outs)
def _pack_data(cfg, data, kernel):
n, _, ih, iw = get_const_tuple(data.shape)
oc, ic, kh, kw = get_const_tuple(kernel.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
ic_chunk = ic // ic_bn
oc_chunk = oc // oc_bn
# Handle dynamic shape to pass tuning dispatch.
if isinstance(n, tvm.tir.Any):
n = tvm.te.size_var("n")
if isinstance(ih, tvm.tir.Any):
ih = tvm.te.size_var("ih")
if isinstance(iw, tvm.tir.Any):
iw = tvm.te.size_var("iw")
if isinstance(ic, tvm.tir.Any):
raise RuntimeError("Dynamic input channel is not supported for conv2d.")
data = te.compute(
(n, ic_chunk, ih, iw, ic_bn),
lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
name="data_vec",
)
kernel = te.compute(
(oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn),
lambda occ, icc, k_h, k_w, icb, ocb: kernel[occ * oc_bn + ocb, icc * ic_bn + icb, k_h, k_w],
name="kernel_vec",
)
return data, kernel
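# Packing example (exposition only): with tile_ic = tile_oc = 16, data of
# shape (1, 64, 56, 56) becomes (1, 4, 56, 56, 16) ("NCHW16c") and kernel
# (64, 64, 3, 3) becomes (4, 4, 3, 3, 16, 16) ("OIHW16i16o"), so the
# innermost channel blocks are contiguous for vectorization.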
@autotvm.register_topi_compute("conv2d_NCHWc.x86")
def conv2d_NCHWc(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
"""Compute conv2d with NCHWc layout."""
# layout and out_layout are not used here,
# we keep them for debug convenience when dumping autotvm workload
if len(data.shape) == 5:
n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn = get_const_tuple(
kernel.shape
)
in_channel = ic_chunk * ic_bn
num_filter = oc_chunk * oc_bn
else:
n, in_channel, ih, iw = get_const_tuple(data.shape)
num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)
# Define autotvm tuning space
is_kernel_1x1 = kernel_height == 1 and kernel_width == 1
pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width))
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
oh = (ih - kernel_height + pt + pb) // sh + 1
ow = (iw - kernel_width + pl + pr) // sw + 1
cfg.define_split("tile_ic", in_channel, num_outputs=2)
cfg.define_split("tile_oc", num_filter, num_outputs=2)
cfg.define_split(
"tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64, policy="verbose"
)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
# If no config was set, we can fallback to default config.
if cfg.is_fallback:
_get_default_config(
cfg,
te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
te.placeholder(
(num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype
),
strides,
padding,
dilation,
out_dtype,
)
# Pack data if raw 4-D data is provided.
# This can only happen when autotuning.
if len(data.shape) == 4:
if autotvm.GLOBAL_SCOPE.in_tuning:
# Directly use modified data layout placeholder.
dshape = (n, in_channel // cfg["tile_ic"].size[-1], ih, iw, cfg["tile_ic"].size[-1])
data = tvm.te.placeholder(dshape, data.dtype, name="data")
kshape = (
num_filter // cfg["tile_oc"].size[-1],
in_channel // cfg["tile_ic"].size[-1],
kernel_height,
kernel_width,
cfg["tile_ic"].size[-1],
cfg["tile_oc"].size[-1],
)
kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel")
else:
data, kernel = _pack_data(cfg, data, kernel)
return nn.conv2d_NCHWc(data, kernel, strides, padding, dilation, layout, out_layout, out_dtype)
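# Illustrative sketch (added for exposition; shapes are assumptions): driving
# the compute/schedule pair directly under a fallback config, as the relay
# strategy does. The layout arguments are recorded in the workload but unused
# by the compute above.
def _example_conv2d_NCHWc():
    data = te.placeholder((1, 64, 56, 56), name="data")
    kernel = te.placeholder((64, 64, 3, 3), name="kernel")
    with tvm.target.Target("llvm"):
        out = conv2d_NCHWc(
            data, kernel, (1, 1), (1, 1), (1, 1), "NCHW", "NCHW16c", "float32"
        )
        s = schedule_conv2d_NCHWc([out])
    return s, out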
@autotvm.register_topi_schedule("conv2d_NCHWc.x86")
def schedule_conv2d_NCHWc(cfg, outs):
"""Create schedule for tensors"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_NCHWc" in op.tag:
conv_out = op.output(0)
kernel_vec = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
(
_,
_,
kh,
kw,
_,
_,
) = get_const_tuple(kernel_vec.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc(*args)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nchw_dnnl.x86")
def conv2d_nchw_dnnl(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d in NCHW format using dnnl."""
groups = 1
_out = dnnl.dnnl_conv2d(data, kernel, strides, padding, dilation, groups, False, out_dtype)
return _out
@autotvm.register_topi_schedule("conv2d_nchw_dnnl.x86")
def schedule_conv2d_nchw_dnnl(_, outs):
"""Create schedule for conv2d_nchw_dnnl"""
return schedule_extern(outs)
@autotvm.register_topi_compute("conv2d_nhwc_dnnl.x86")
def conv2d_nhwc_dnnl(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d in NHWC format using dnnl."""
groups = 1
_out = dnnl.dnnl_conv2d(data, kernel, strides, padding, dilation, groups, True, out_dtype)
return _out
@autotvm.register_topi_schedule("conv2d_nhwc_dnnl.x86")
def schedule_conv2d_nhwc_dnnl(_, outs):
"""Create schedule for conv2d_nhwc_dnnl"""
return schedule_extern(outs)
# FIXME - https://github.com/apache/tvm/issues/4122
# _declaration_conv_nhwc_pack expects kernel layout to be HWOI. However, the tests use HWIO
# layout. Commenting until we have clarity about the nhwc_pack implementation from the author.
# elif layout == 'NHWC' and kh == 1 and kw == 1 and kernel.dtype == "int8":
# if cfg.is_fallback:
# _get_default_config(cfg, data, kernel, strides, padding, out_dtype, False, layout)
# # specialize for INT8 1X1 conv on X86
# return conv2d_avx_1x1._declaration_conv_nhwc_pack(cfg, data, kernel, strides,
# padding, dilation, out_dtype)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for x86"""
import logging
import re
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .conv2d import _get_default_config
from .conv2d_int8 import is_int8_hw_support, _get_default_config_int8
from ..utils import get_const_tuple
from ..nn import conv2d_legalize, conv2d_alter_layout
from ..generic.conv2d import conv2d_alter_int8_common
logger = logging.getLogger("topi")
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWio_matcher = re.compile("^OIHW[0-9]+i[0-9]+o$")
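# e.g. _NCHWc_matcher matches "NCHW8c" or "NCHW16c"; _OIHWio_matcher matches
# "OIHW8i8o" or "OIHW16i16o" (the layouts produced by the transforms below).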
@conv2d_alter_layout.register("cpu")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
new_attrs = {k: attrs[k] for k in attrs.keys()}
# Parse the attributes.
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data_tensor, kernel_tensor = tinfos
data_dtype = data_tensor.dtype
kernel_dtype = kernel_tensor.dtype
out_dtype = out_type.dtype
if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest):
cfg = dispatch_ctx.query(target, None)
workload = cfg.workload
else:
impl, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
# The best implementation is not an AutoTVM template.
# It may be from the auto-scheduler
if impl.name.find("winograd") != -1:
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
assert data_layout == "NHWC" and kernel_layout == "HWIO"
N, H, W, CI = get_const_tuple(data_tensor.shape)
KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)
# Pre-compute weight transformation in winograd
tile_size = 4
# HWIO -> OIHW
kernel_transform = relay.transpose(inputs[1], axes=[3, 2, 0, 1])
# alpha, alpha, CO, CI
weight = relay.nn.contrib_conv2d_winograd_weight_transform(
kernel_transform, tile_size=tile_size
)
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
return None
cfg = dispatch_ctx.query(target, workload)
topi_tmpl = workload[0]
if topi_tmpl == "conv2d_NCHWc.x86":
# we only convert conv2d_NCHW to conv2d_NCHWc for x86
if data_layout == "NCHW" and kernel_layout == "OIHW":
if cfg.is_fallback:
_get_default_config(
cfg,
data_tensor,
kernel_tensor,
strides,
padding,
dilation,
out_dtype,
False,
data_layout,
)
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
# update new attrs
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
# (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
new_attrs["kernel_layout"] = "OIHW%di%do" % (ic_bn, oc_bn)
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder(
(out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn, oc_bn),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
else:
assert _NCHWc_matcher.match(data_layout)
assert _OIHWio_matcher.match(kernel_layout)
return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
if topi_tmpl == "conv2d_NCHWc_int8.x86":
# TODO(@icemelon9, @anijain2305): Need to support data layout NHWC with kernel layout HWIO
assert data_layout == "NCHW" and kernel_layout == "OIHW"
if cfg.is_fallback:
_get_default_config_int8(
cfg,
data_tensor,
kernel_tensor,
strides,
padding,
dilation,
out_dtype,
False,
data_layout,
int32_lanes=16,
)
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
# update new attrs
n_elems = 4
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
new_attrs["kernel_layout"] = "OIHW{:n}i{:n}o{:n}i".format(ic_bn // n_elems, oc_bn, n_elems)
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config.
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder(
(out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn // n_elems, oc_bn, n_elems),
dtype=kernel_dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
if topi_tmpl == "depthwise_conv2d_NCHWc.x86":
if data_layout == "NCHW" and kernel_layout == "OIHW":
if cfg.is_fallback:
_get_default_config(
cfg,
data_tensor,
kernel_tensor,
strides,
padding,
dilation,
out_dtype,
True,
data_layout,
)
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
assert channel_multiplier == 1
# update new attrs
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
new_attrs["kernel_layout"] = "OIHW1i%do" % oc_bn
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config.
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder(
(out_channel // oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
else:
assert _NCHWc_matcher.match(data_layout)
assert _OIHWio_matcher.match(kernel_layout)
return relay.nn.contrib_depthwise_conv2d_nchwc(*inputs, **new_attrs)
return None
@conv2d_legalize.register("cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
"""Legalizes Conv2D op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the input tensors.
data_tensor, kernel_tensor = arg_types[0], arg_types[1]
data_dtype = data_tensor.dtype
kernel_dtype = kernel_tensor.dtype
# Collect the output tensor.
output_tensor = arg_types[2]
# Collect the input exprs.
data, kernel = inputs
    # Intel vector instructions require data and kernel to have different dtypes.
if data_tensor.dtype == "int8" and kernel_tensor.dtype == "int8":
data_dtype = "uint8"
if is_int8_hw_support(data_dtype, kernel_dtype):
return conv2d_alter_int8_common(
data, data_tensor, kernel, kernel_tensor, output_tensor, attrs, data_dtype, 4, 16
)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d_avx_1x1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""1x1 Conv2D schedule for Intel CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..nn.pad import pad
from ..nn.utils import get_pad_tuple
from ..generic import conv2d as conv2d_generic
from ..utils import get_const_tuple, simplify
from .tensor_intrin import dot_16x1x16_uint8_int8_int32
from .utils import get_simd_32bit_lanes
def _fallback_schedule(cfg, wkl):
simd_width = get_simd_32bit_lanes()
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
dilated_kernel_h = (wkl.kernel_h - 1) * wkl.dilation_h + 1
dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
out_height = (wkl.height + pt + pb - dilated_kernel_h) // HSTR + 1
out_width = (wkl.width + pl + pr - dilated_kernel_w) // WSTR + 1
oc_bn = 1
for bn in range(simd_width, 0, -1):
if wkl.out_filter % bn == 0:
oc_bn = bn
break
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
for ow_factor in range(out_width, 0, -1):
if out_width % ow_factor == 0:
for oh_factor in range(out_height, 0, -1):
if out_height % oh_factor == 0 and ow_factor * oh_factor < 32:
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_oh"] = OtherOptionEntity(oh_factor)
cfg["tile_ow"] = SplitEntity([out_width // ow_factor, ow_factor])
return
raise ValueError("cannot decide default schedule for workload: {}".format(wkl))
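# Example of the fallback choice above (exposition only): with AVX2
# (simd_width = 8) and out_filter = 96, oc_bn = 8, and ic_bn becomes the
# largest divisor of in_filter not exceeding oc_bn. The oh/ow search then
# keeps the largest ow divisor that still admits an oh divisor with
# oh_factor * ow_factor < 32.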
def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last):
# fetch schedule
oh_factor, ow_factor = cfg["tile_oh"].val, cfg["tile_ow"].size[-1]
_, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
# schedule pad
if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
s[data_vec].vectorize(ic_block)
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
data_vec = data_vec.op.input_tensors[0]
oc_bn = cfg["tile_oc"].size[-1]
if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
# data and kernel are not pre-computed, schedule layout transform here.
# this should only be used by x86 conv2d_nchw, which is for
# testing purpose.
batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis
s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[kernel_vec].vectorize(oc_block)
parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
s[kernel_vec].parallel(parallel_axis)
C, O = conv_out, last
CC = s.cache_write(C, "global")
batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
oh_outer, oh_inner = s[C].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[C].split(ow, factor=ow_factor)
s[C].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
s[C].vectorize(oc_block)
parallel_axis = s[C].fuse(batch, oc_chunk, oh_outer)
s[CC].compute_at(s[C], parallel_axis)
if C == O:
s[C].parallel(parallel_axis)
_, oc_chunk, oh, ow, oc_block = s[CC].op.axis
ic, _, _ = s[CC].op.reduce_axis
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
oh_outer, oh_inner = s[CC].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[CC].split(ow, factor=ow_factor)
s[CC].reorder(oc_chunk, oh_outer, ow_outer, ic_chunk, ic_block, oh_inner, ow_inner, oc_block)
s[CC].fuse(oc_chunk, oh_outer)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_inner)
s[CC].unroll(oh_inner)
if C != O:
out_ndim = len(s[O].op.axis)
if out_ndim == 5:
batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
elif out_ndim == 4:
batch, oc, oh, ow = s[O].op.axis
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
oh_outer, oh_inner = s[O].split(oh, factor=oh_factor)
ow_outer, ow_inner = s[O].split(ow, factor=ow_factor)
s[O].reorder(oc_chunk, oh_outer, ow_outer, oh_inner, ow_inner, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, oh_outer)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
else:
raise ValueError("Unsupported output ndim: %s" % out_ndim)
return s
def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last):
return conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(
s,
cfg,
data_vec,
kernel_vec,
conv_out,
last,
int32_lanes=get_simd_32bit_lanes(),
intrin=dot_16x1x16_uint8_int8_int32(),
)
def _declaration_conv_nhwc_pack(cfg, Input, Filter, stride, padding, dilation, out_dtype):
# more assertion for the shapes
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_height, in_width, in_channel = Input.shape
kernel_h, kernel_w, num_filter, channel = Filter.shape
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
# todo: padding filter to accommodate the intrinsic
# packing the Filter to let memory access be consecutive for AVX512 intrinsic
# Done in pre-compute stage
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
packw_shape = (kernel_h, kernel_w, idxd(num_filter, 16), 16 * idxd(channel, 4), 4)
PackW = te.compute(
packw_shape,
lambda a, b, c, d, e: Filter[a, b, c * 16 + idxm(d, 16), idxd(d, 16) * 4 + e],
name="packed_filter",
)
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
Output = te.compute(
(batch, out_height, out_width, out_channel),
lambda nn, yy, xx, ff: te.sum(
PaddedInput[
nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
].astype(out_dtype)
* PackW[ry, rx, idxd(ff, 16), idxd(rc, 4) * 16 + idxm(ff, 16), idxm(rc, 4)].astype(
out_dtype
),
axis=[ry, rx, rc],
),
name="Conv2d_1x1_Output_int8",
tag="conv2d_nhwc_pack_int8",
)
return Output
def _schedule_conv_nhwc_pack_int8(s, cfg, data, conv_out, last):
"""
Defines the schedule for the int8 nhwc layout. For 1x1 conv, it
is a matrix-multiply operation by using nhwc layout. We will do
packing of weight to make the address access be friendly to int8
intrinsic
"""
# FIXME - https://github.com/apache/tvm/issues/3598
# pylint: disable=unreachable
return s
int32_lanes = 16
# assertion to fail the unhandled case
_, _, _, ic_num = get_const_tuple(data.shape)
_, _, _, oc_num = get_const_tuple(conv_out.shape)
assert ic_num % 4 == 0
assert oc_num % 16 == 0
ic_factor, oc_factor = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
# schedule data
A = data
if isinstance(s[A].op, tvm.te.ComputeOp):
batch, ih, iw, ic = s[A].op.axis
d_ic_chunk, d_ic_block = s[A].split(ic, factor=4)
s[A].vectorize(d_ic_block)
C, O = conv_out, last
batch, oh, ow, oc = s[C].op.axis
kh, kw, ic = s[C].op.reduce_axis
# match the x86 intrinsic
ic_outer, ic_inner = s[C].split(ic, factor=4)
oc_outer, oc_inner = s[C].split(oc, factor=int32_lanes)
ic_f_outer, ic_s_outer = s[C].split(ic_outer, factor=ic_factor)
s[C].reorder(oc_outer, oh, ow, ic_f_outer, ic_s_outer, kh, kw, oc_inner, ic_inner)
pc = dot_16x1x16_uint8_int8_int32()
s[C].tensorize(oc_inner, pc)
if C != O:
batch, last_oh, last_ow, last_oc = s[O].op.axis
        oc_chunk, oc_block = s[O].split(last_oc, 16)
# not saw perf improvement to split oh/ow here
s[O].vectorize(oc_block)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d_avx_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Conv2D schedule for Intel CPU"""
import tvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..generic import conv2d as conv2d_generic
from ..utils import get_const_tuple
from .tensor_intrin import dot_16x1x16_uint8_int8_int32
from .utils import get_simd_32bit_lanes
def _fallback_schedule(cfg, wkl):
simd_width = get_simd_32bit_lanes()
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
out_width = (wkl.width + pl + pr - dilated_kernel_w) // WSTR + 1
oc_bn = 1
for bn in range(simd_width, 0, -1):
if wkl.out_filter % bn == 0:
oc_bn = bn
break
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
def _fallback_schedule_int8(cfg, wkl):
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
out_width = (wkl.width + pl + pr - wkl.kernel_w) // WSTR + 1
oc_bn = 16
assert wkl.out_filter % oc_bn == 0
ic_bn = 1
for bn in range(oc_bn, 0, -4):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
assert wkl.in_filter % 4 == 0
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last):
# fetch schedule
reg_n, unroll_kw = cfg["tile_ow"].size[-1], cfg["unroll_kw"].val
_, _, _, _, ic_bn = get_const_tuple(data_vec.shape)
# schedule pad
if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
s[data_vec].vectorize(ic_block)
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
data_vec = data_vec.op.input_tensors[0]
oc_bn = cfg["tile_oc"].size[-1]
if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == "kernel_vec":
        # data and kernel are not pre-computed; schedule the layout transform here.
        # This should only be used by x86 conv2d_nchw, which exists for testing
        # purposes.
batch, ic_chunk, ih, ic_block, iw = s[data_vec].op.axis
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[kernel_vec].op.axis
s[kernel_vec].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[kernel_vec].vectorize(oc_block)
parallel_axis = s[kernel_vec].fuse(oc_chunk, oh)
s[kernel_vec].parallel(parallel_axis)
# schedule 5-D NCHW[x]c conv
C, O = conv_out, last
CC = s.cache_write(C, "global")
batch, oc_chunk, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[C].fuse(batch, oc_chunk, oh)
s[C].vectorize(oc_block)
if C == O:
s[C].parallel(parallel_axis)
s[CC].compute_at(s[C], ow_chunk)
_, oc_chunk, oh, ow, oc_block = s[CC].op.axis
ic, kh, kw = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
if unroll_kw:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
s[CC].unroll(kw)
else:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_block)
if C != O:
out_ndim = len(s[O].op.axis)
if out_ndim == 5:
batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
elif out_ndim == 4:
batch, oc, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
else:
raise ValueError("Unsupported output ndim: %s" % out_ndim)
return s
def _schedule_conv_NCHWc_int8(s, cfg, data_vec, kernel_vec, conv_out, last):
return conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
s,
cfg,
data_vec,
kernel_vec,
conv_out,
last,
int32_lanes=get_simd_32bit_lanes(),
intrin=dot_16x1x16_uint8_int8_int32(),
inline_fused=True,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Conv2D int8 schedule on x86"""
import tvm
from tvm import te
from tvm import autotvm
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from .. import tag
from ..generic import conv2d as conv2d_generic
from ..nn.utils import get_pad_tuple
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload
from ..utils import get_const_tuple, traverse_inline
from .. import nn
from . import conv2d_avx_1x1, conv2d_avx_common
from .utils import target_has_sse42
def _get_default_config_int8(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
is_depthwise=False,
layout="NCHW",
int32_lanes=4,
):
"""
Get default schedule config for the workload
"""
if is_depthwise:
# Fallback to FP32 default config until a VNNI schedule is defined.
wkl = _get_depthwise_conv2d_workload(data, kernel, strides, padding, out_dtype)
from .depthwise_conv2d import _fallback_schedule
_fallback_schedule(cfg, wkl)
else:
wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
is_kernel_1x1 = wkl.kernel_h == 1 and wkl.kernel_w == 1
if is_kernel_1x1:
conv2d_generic.fallback_schedule_cpu_1x1_int8(
cfg, wkl, int32_lanes=int32_lanes, num_int8_elements=4
)
else:
conv2d_generic.fallback_schedule_cpu_common_int8(
cfg, wkl, int32_lanes=int32_lanes, num_int8_elements=4
)
def is_int8_hw_support(data_dtype, kernel_dtype):
"""
    Checks whether Intel DLBoost instructions can be used:
    1) The datatypes are correct.
    2) The LLVM version supports the instructions.
    3) The target is Skylake or above.
"""
# 1) Check datatypes
is_dtype_support = data_dtype == "uint8" and kernel_dtype == "int8"
# 2) Check LLVM support
llvm_version = tvm.target.codegen.llvm_version_major()
is_llvm_support = llvm_version >= 8
# 3) Check target
mcpu = tvm.target.Target.current().mcpu
is_target_support = target_has_sse42(mcpu)
return is_dtype_support and is_llvm_support and is_target_support
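# Usage sketch (illustrative, not part of the original module). The mcpu string
# below is an assumption for the example; any x86 target with SSE4.2 or newer
# passes the target check.
def _example_is_int8_hw_support():
    with tvm.target.Target("llvm -mcpu=skylake-avx512"):
        return is_int8_hw_support("uint8", "int8")  # True when LLVM >= 8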
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NCHW layout and int8 dtype"""
layout = "NCHW"
packed_out = conv2d_NCHWc_int8(
data, kernel, strides, padding, dilation, layout, layout, out_dtype
)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
def schedule_conv2d_nchw_int8(outs):
"""Create the schedule for conv2d_nchw_int8"""
return schedule_conv2d_NCHWc_int8(outs)
def _pack_data(cfg, data, kernel):
n_elems = 4
n, _, ih, iw = get_const_tuple(data.shape)
oc, ic, kh, kw = get_const_tuple(kernel.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
ic_chunk = ic // ic_bn
oc_chunk = oc // oc_bn
data = te.compute(
(n, ic_chunk, ih, iw, ic_bn),
lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
name="data_vec",
)
kernel = te.compute(
(oc_chunk, ic_chunk, kh, kw, ic_bn // n_elems, oc_bn, n_elems),
lambda occ, icc, k_h, k_w, icbc, ocb, icbb: kernel[
occ * oc_bn + ocb, icc * ic_bn + icbc * n_elems + icbb, k_h, k_w
],
name="kernel_vec",
)
return data, kernel
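# Shape sketch (illustrative, not part of the original module): with ic_bn=4,
# oc_bn=16 and n_elems=4, a (128, 64, 3, 3) OIHW kernel packs to
# (oc_chunk, ic_chunk, kh, kw, ic_bn // n_elems, oc_bn, n_elems)
# = (8, 16, 3, 3, 1, 16, 4), placing groups of 4 consecutive input channels
# next to each of the 16 output-channel lanes consumed by the int8 intrinsic.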
@autotvm.register_topi_compute("conv2d_NCHWc_int8.x86")
def conv2d_NCHWc_int8(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
"""Compute conv2d with NCHWc layout and int8 dtype"""
if len(data.shape) == 5:
n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
in_channel = ic_chunk * ic_bn
oc_chunk, ic_chunk_group, kernel_height, kernel_width, _, oc_bn, _ = get_const_tuple(
kernel.shape
)
num_filter = oc_chunk * oc_bn
else:
n, in_channel, ih, iw = get_const_tuple(data.shape)
num_filter, _, kernel_height, kernel_width = get_const_tuple(kernel.shape)
# Define autotvm tuning space
is_kernel_1x1 = kernel_height == 1 and kernel_width == 1
pt, pl, pb, pr = get_pad_tuple(padding, (kernel_height, kernel_width))
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
dilated_kernel_h = (kernel_height - 1) * dh + 1
dilated_kernel_w = (kernel_width - 1) * dw + 1
oh = (ih - dilated_kernel_h + pt + pb) // sh + 1
ow = (iw - dilated_kernel_w + pl + pr) // sw + 1
cfg.define_split("tile_ic", in_channel, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)
cfg.define_split("tile_oc", num_filter, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
# If no config was set, we can fallback to default config.
if cfg.is_fallback:
_get_default_config_int8(
cfg,
te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
te.placeholder(
(num_filter, in_channel, kernel_height, kernel_width), dtype=kernel.dtype
),
strides,
padding,
dilation,
out_dtype,
int32_lanes=16,
)
# Pack data if raw 4-D data is provided.
# This can only happen when autotuning.
if len(data.shape) == 4:
data, kernel = _pack_data(cfg, data, kernel)
return nn.conv2d_NCHWc_int8(
data, kernel, strides, padding, dilation, layout, out_layout, out_dtype
)
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.x86")
def schedule_conv2d_NCHWc_int8(cfg, outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
"""Traverse operators from computation graph"""
if "conv2d_NCHWc_int8" in op.tag:
conv_out = op.output(0)
kernel_vec = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
# int8 conv kernel is 7-dim
_, _, kh, kw, _, _, _ = get_const_tuple(kernel_vec.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc_int8(*args)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_schedule("conv2d_nhwc_pack_int8.x86")
def schedule_conv2d_nhwc_pack_int8(cfg, outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 4: # schedule bias + bn + relu
n, h, w, c = op.axis
fused = s[op].fuse(n, h, w)
s[op].parallel(fused)
s[op].vectorize(c)
for tensor in op.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if "conv2d_nhwc_pack_int8" in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = (
data_vec.op.input_tensors[0]
if isinstance(data_vec.op, te.tensor.ComputeOp) and "pad" not in data_vec.op.tag
else data_vec
)
if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
if data.dtype == "uint8":
kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)
else:
raise ValueError("Only support 1x1 kernel with " "schedule_conv2d_nhwc_pack.")
else:
                raise ValueError(
                    "Unsupported data type {} for schedule_conv2d_nhwc_pack; "
                    "only uint8 data is supported.".format(data.dtype)
                )
scheduled_ops.append(op)
traverse(output_op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D Transpose schedule on x86"""
from tvm import te
from ..utils import traverse_inline
from .. import nn
from .conv2d import conv2d_nchw, schedule_conv2d_nchw
def conv2d_transpose_nchw(data, kernel, strides, padding, out_dtype, output_padding):
data_pad, kernel_transform = nn.conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
# reuse conv2d_nchw implementation
return conv2d_nchw(
data_pad,
kernel_transform,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
out_dtype=out_dtype,
)
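# Note (illustrative): conv2d_transpose_nchw_preprocess dilates the input by
# `strides`, pads it, and flips the kernel spatially while swapping its channel
# axes, so the transposed convolution reduces to the unit-stride direct
# convolution invoked above.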
def schedule_conv2d_transpose_nchw(outs):
"""Create schedule for tensors"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = schedule_conv2d_nchw(outs)
def _callback(op):
if "unpack_nchwc" in op.tag:
conv_out = op.input_tensors[0]
# retrieve data
data_vec = conv_out.op.input_tensors[0]
            if isinstance(data_vec.op, te.ComputeOp):
data_pad = data_vec.op.input_tensors[0]
data_dilate = data_pad.op.input_tensors[0]
s[data_dilate].compute_inline()
s[data_pad].compute_inline()
# retrieve kernel
kernel_vec = conv_out.op.input_tensors[1]
            if isinstance(kernel_vec.op, te.ComputeOp):
kernel_transform = kernel_vec.op.input_tensors[0]
s[kernel_transform].compute_inline()
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin, no-else-return
"""Conv3D operators"""
from collections import namedtuple
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..utils import traverse_inline
from ..nn.utils import get_pad_tuple3d, infer_pad3d
from ..nn.pad import pad
from ..utils import get_const_tuple, simplify, get_const_int
from .utils import get_simd_32bit_lanes
Workload3D = namedtuple(
"Workload",
[
"in_dtype",
"out_dtype",
"depth",
"height",
"width",
"in_filter",
"groups",
"out_filter",
"dkernel",
"hkernel",
"wkernel",
"dpad",
"hpad",
"wpad",
"dstride",
"hstride",
"wstride",
],
)
@autotvm.register_topi_compute("conv3d_ndhwc.x86")
def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
"""3D convolution forward operator.
Parameters
----------
input : tvm.te.Tensor
5-D input data with shapes:
[batch, in_depth, in_height, in_width, in_channel] for NDHWC layout
filter : tvm.te.Tensor
5-D filter with shape [kernel_depth, kernel_height, kernel_width, in_channels, out_channels]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of three ints
padding size, or [pad_depth, pad_height, pad_width]
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
groups: int
Number of groups
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, out_depth, out_height, out_width, out_channel] for NDHWC layout
"""
layout = "NDHWC"
out_dtype = data.dtype if out_dtype is None else out_dtype
strides = strides if isinstance(strides, (tuple, list)) else (strides, strides, strides)
dilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation, dilation)
_create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout)
if cfg.is_fallback:
_get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout)
return _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype)
@autotvm.register_topi_compute("conv3d_ncdhw.x86")
def conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
"""3D convolution forward operator.
Parameters
----------
input : tvm.te.Tensor
5-D input data with shapes:
[batch, in_channel, in_depth, in_height, in_width] for NCDHW layout
filter : tvm.te.Tensor
5-D filter with shape [out_channels, in_channels, kernel_depth, kernel_height, kernel_width]
strides : int or a list/tuple of three ints
stride size, or [stride_depth, stride_height, stride_width]
padding : int or a list/tuple of three ints
padding size, or [pad_depth, pad_height, pad_width]
dilation: int or a list/tuple of three ints
dilation size, or [dilation_depth, dilation_height, dilation_width]
groups: int
Number of groups
Returns
-------
output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width] for NCDHW layout
"""
# assert groups == 1, "conv3d_ncdhw.x86 does not support groups"
layout = "NCDHW"
out_dtype = data.dtype if out_dtype is None else out_dtype
strides = strides if isinstance(strides, (tuple, list)) else (strides, strides, strides)
dilation = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation, dilation)
_create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout)
if cfg.is_fallback:
_get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout)
return _conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, layout, groups, out_dtype)
@autotvm.register_topi_schedule("conv3d_ndhwc.x86")
def schedule_conv3d_ndhwc(cfg, outs):
"""TOPI schedule callback for conv3d
Parameters
----------
outs: Array of Tensor
The computation graph description of conv3d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv3d.
"""
s = te.create_schedule([x.op for x in outs])
def _traverse(op):
if "conv3d_ndhwc" in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel = kernel_vec.op.input_tensors[0]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
kd, kh, kw, i, o = get_const_tuple(kernel.shape)
args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
_schedule_conv3d_ndhwc(*args)
traverse_inline(s, outs[0].op, _traverse)
return s
@autotvm.register_topi_schedule("conv3d_ncdhw.x86")
def schedule_conv3d_ncdhw(cfg, outs):
"""TOPI schedule callback for conv3d
Parameters
----------
outs: Array of Tensor
The computation graph description of conv3d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv3d.
"""
s = te.create_schedule([x.op for x in outs])
def _traverse(op):
if "conv3d_ncdhw" in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel = kernel_vec.op.input_tensors[0]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
kd, kh, kw, i, o = get_const_tuple(kernel.shape)
args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
_schedule_conv3d_ncdhw(*args)
traverse_inline(s, outs[0].op, _traverse)
return s
def _conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation, groups, out_dtype):
out_dtype = data.dtype if out_dtype is None else out_dtype
assert isinstance(dilation, int) or len(dilation) == 3
if isinstance(dilation, int):
dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation)
else:
dilation_d, dilation_h, dilation_w = dilation
DSTR, HSTR, WSTR = strides
batch_size, in_depth, in_height, in_width, in_channel = get_const_tuple(data.shape)
kernel_depth, kernel_height, kernel_width, _, num_filter = get_const_tuple(kernel.shape)
assert in_channel % groups == 0, "input channels must be a multiple of group size"
assert num_filter % groups == 0, "number of filters must be a multiple of group size"
dilated_kernel_d = (kernel_depth - 1) * dilation_d + 1
dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
)
pad_d = pad_front + pad_back
pad_h = pad_top + pad_down
pad_w = pad_left + pad_right
pad_depth = in_depth + pad_d
pad_height = in_height + pad_h
pad_width = in_width + pad_w
out_depth = simplify((in_depth + pad_d - dilated_kernel_d) // DSTR + 1)
out_height = simplify((in_height + pad_h - dilated_kernel_h) // HSTR + 1)
out_width = simplify((in_width + pad_w - dilated_kernel_w) // WSTR + 1)
# pack data
DOPAD = pad_d != 0 or pad_h != 0 or pad_w != 0
if DOPAD:
data_pad = pad(
data,
(0, pad_front, pad_top, pad_left, 0),
(0, pad_back, pad_down, pad_right, 0),
name="data_pad",
)
else:
data_pad = data
# fetch schedule
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
assert groups == 1 or ic_bn <= groups
assert groups == 1 or oc_bn <= groups
shape = (batch_size, in_channel // ic_bn, pad_depth, pad_height, ic_bn, pad_width)
data_vec = te.compute(
shape, lambda n, C, d, h, c, w: data_pad[n, d, h, w, C * ic_bn + c], name="data_vec"
)
ci_tile = in_channel // groups // ic_bn
if ci_tile == 0 or ci_tile * ic_bn * groups < in_channel:
ci_tile += 1
# pack kernel
shape = (
num_filter // oc_bn,
ci_tile,
kernel_depth,
kernel_height,
kernel_width,
ic_bn,
oc_bn,
)
kernel_vec = te.compute(
shape,
lambda CO, CI, d, h, w, ci, co: kernel[d, h, w, CI * ic_bn + ci, CO * oc_bn + co],
name="kernel_vec",
)
# convolution
oshape = (batch_size, num_filter // oc_bn, out_depth, out_height, out_width, oc_bn)
unpack_shape = (batch_size, out_depth, out_height, out_width, num_filter)
ic = te.reduce_axis((0, in_channel // groups), name="ic")
kh = te.reduce_axis((0, kernel_height), name="kh")
kw = te.reduce_axis((0, kernel_width), name="kw")
kd = te.reduce_axis((0, kernel_depth), name="kd")
idxmod = tvm.tir.indexmod
idxdiv = tvm.tir.indexdiv
conv = te.compute(
oshape,
lambda n, oc_chunk, od, oh, ow, oc_block: te.sum(
data_vec[
n,
idxdiv(
(oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
+ ic,
ic_bn,
),
od * DSTR + kd * dilation_d,
oh * HSTR + kh * dilation_h,
idxmod(
(oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
+ ic,
ic_bn,
),
ow * WSTR + kw * dilation_w,
].astype(out_dtype)
* kernel_vec[
oc_chunk, idxdiv(ic, ic_bn), kd, kh, kw, idxmod(ic, ic_bn), oc_block
].astype(out_dtype),
axis=[kd, kh, kw, ic],
),
name="conv",
)
conv_unpacked = te.compute(
unpack_shape,
lambda n, d, h, w, c: conv[n, idxdiv(c, oc_bn), d, h, w, idxmod(c, oc_bn)].astype(
out_dtype
),
name="output_unpack",
tag="conv3d_ndhwc",
)
return conv_unpacked
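# Index sketch (illustrative): the long data_vec channel expressions above
# select the input-channel group that owns the current output channel. With
# groups == 1, (oc_chunk * oc_bn + oc_block) // num_filter is 0, so they
# reduce to the ungrouped pair idxdiv(ic, ic_bn), idxmod(ic, ic_bn).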
def _conv3d_ncdhw(cfg, data, kernel, strides, padding, dilation, layout, groups, out_dtype):
out_dtype = data.dtype if out_dtype is None else out_dtype
assert isinstance(dilation, int) or len(dilation) == 3
if isinstance(dilation, int):
dilation_d, dilation_h, dilation_w = (dilation, dilation, dilation)
else:
dilation_d, dilation_h, dilation_w = dilation
DSTR, HSTR, WSTR = strides
batch_size, in_channel, in_depth, in_height, in_width = get_const_tuple(data.shape)
num_filter, _, kernel_depth, kernel_height, kernel_width = get_const_tuple(kernel.shape)
dilated_kernel_d = (kernel_depth - 1) * dilation_d + 1
dilated_kernel_h = (kernel_height - 1) * dilation_h + 1
dilated_kernel_w = (kernel_width - 1) * dilation_w + 1
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (dilated_kernel_d, dilated_kernel_h, dilated_kernel_w)
)
pad_d = pad_front + pad_back
pad_h = pad_top + pad_down
pad_w = pad_left + pad_right
pad_depth = in_depth + pad_d
pad_height = in_height + pad_h
pad_width = in_width + pad_w
out_depth = simplify((in_depth + pad_d - dilated_kernel_d) // DSTR + 1)
out_height = simplify((in_height + pad_h - dilated_kernel_h) // HSTR + 1)
out_width = simplify((in_width + pad_w - dilated_kernel_w) // WSTR + 1)
# pack data
DOPAD = pad_d != 0 or pad_h != 0 or pad_w != 0
if DOPAD:
data_pad = pad(
data,
(0, 0, pad_front, pad_top, pad_left),
(0, 0, pad_back, pad_down, pad_right),
name="data_pad",
)
else:
data_pad = data
# fetch schedule
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
shape = (batch_size, in_channel // ic_bn, pad_depth, pad_height, ic_bn, pad_width)
data_vec = te.compute(
shape, lambda n, C, d, h, c, w: data_pad[n, C * ic_bn + c, d, h, w], name="data_vec"
)
ci_tile = in_channel // groups // ic_bn
if ci_tile == 0 or ci_tile * ic_bn * groups < in_channel:
ci_tile += 1
# pack kernel
shape = (
num_filter // oc_bn,
ci_tile,
kernel_depth,
kernel_height,
kernel_width,
ic_bn,
oc_bn,
)
kernel_vec = te.compute(
shape,
lambda CO, CI, d, h, w, ci, co: kernel[CO * oc_bn + co, CI * ic_bn + ci, d, h, w],
name="kernel_vec",
)
# convolution
oshape = (batch_size, num_filter // oc_bn, out_depth, out_height, out_width, oc_bn)
unpack_shape = (batch_size, num_filter, out_depth, out_height, out_width)
ic = te.reduce_axis((0, in_channel // groups), name="ic")
kh = te.reduce_axis((0, kernel_height), name="kh")
kw = te.reduce_axis((0, kernel_width), name="kw")
kd = te.reduce_axis((0, kernel_depth), name="kd")
idxmod = tvm.tir.indexmod
idxdiv = tvm.tir.indexdiv
conv = te.compute(
oshape,
lambda n, oc_chunk, od, oh, ow, oc_block: te.sum(
data_vec[
n,
idxdiv(
(oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
+ ic,
ic_bn,
),
od * DSTR + kd * dilation_d,
oh * HSTR + kh * dilation_h,
idxmod(
(oc_chunk * oc_bn + oc_block) // (num_filter // groups) * (in_channel // groups)
+ ic,
ic_bn,
),
ow * WSTR + kw * dilation_w,
].astype(out_dtype)
* kernel_vec[
oc_chunk, idxdiv(ic, ic_bn), kd, kh, kw, idxmod(ic, ic_bn), oc_block
].astype(out_dtype),
axis=[ic, kd, kh, kw],
),
name="conv",
)
conv_unpacked = te.compute(
unpack_shape,
lambda n, c, d, h, w: conv[n, idxdiv(c, oc_bn), d, h, w, idxmod(c, oc_bn)].astype(
out_dtype
),
name="output_unpack",
tag="conv3d_ncdhw",
)
return conv_unpacked
def _create_tuning_space(cfg, data, kernel, strides, padding, dilation, groups, layout):
"""Create schedule configuration from input arguments"""
dshape = get_const_tuple(data.shape)
kshape = get_const_tuple(kernel.shape)
if layout == "NDHWC":
n, d, h, w, ic = dshape
kd, kh, kw, _, oc = kshape
elif layout == "NCDHW":
n, ic, d, h, w = dshape
oc, _, kd, kh, kw = kshape
else:
raise ValueError("Not support this layout {} with " "schedule template.".format(layout))
# pad_front, pad_top, pad_left, pad_back, pad_down(bottom), pad_right
pf, pt, pl, pb, pd, pr = get_pad_tuple3d(padding, (kd, kh, kw))
sd, sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides, strides)
od = (d - kd + pf + pb) // sd + 1
oh = (h - kh + pt + pd) // sh + 1
ow = (w - kw + pl + pr) // sw + 1
# Create schedule config
cfg.define_split("tile_ic", ic, num_outputs=2)
cfg.define_split("tile_oc", oc, num_outputs=2)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 8)
cfg.define_knob("unroll_kw", [True, False])
def _get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype, layout):
"""
Get default schedule config for the workload
"""
if layout not in ["NDHWC", "NCDHW"]:
raise ValueError("Layout {} is not supported".format(layout))
static_data_shape = []
for dim in get_const_tuple(data.shape):
if isinstance(dim, tvm.tir.Var):
static_data_shape.append(1)
else:
static_data_shape.append(dim)
data = te.placeholder(static_data_shape, dtype=data.dtype)
wkl = _get_conv3d_workload(data, kernel, strides, padding, groups, out_dtype, layout)
_fallback_schedule(cfg, wkl)
def _get_conv3d_workload(data, kernel, stride, padding, groups, out_dtype, data_layout="NCDHW"):
"""Get the workload structure."""
if data_layout == "NCDHW":
_, CI, ID, IH, IW = get_const_tuple(data.shape)
CO, CIG, KD, KH, KW = get_const_tuple(kernel.shape)
elif data_layout == "NDHWC":
_, ID, IH, IW, CI = get_const_tuple(data.shape)
KD, KH, KW, CIG, CO = get_const_tuple(kernel.shape)
else:
raise ValueError("not support this layout {} yet".format(data_layout))
pad_front, pad_top, pad_left, pad_back, pad_down, pad_right = get_pad_tuple3d(
padding, (get_const_int(KD), get_const_int(KH), get_const_int(KW))
)
DPAD = pad_front + pad_back
HPAD = pad_top + pad_down
WPAD = pad_left + pad_right
if isinstance(stride, (tuple, list)):
DSTR, HSTR, WSTR = stride
else:
DSTR, HSTR, WSTR = stride, stride, stride
    assert (data.dtype == kernel.dtype) or (
        data.dtype == "uint8" and kernel.dtype == "int8"
    ), "Do not support inputs with different data types now. {} vs. {}".format(
        data.dtype, kernel.dtype
    )
return Workload3D(
data.dtype,
out_dtype,
ID,
IH,
IW,
CI,
groups,
CO,
KD,
KH,
KW,
DPAD,
HPAD,
WPAD,
DSTR,
HSTR,
WSTR,
)
def _fallback_schedule(cfg, wkl):
simd_width = get_simd_32bit_lanes()
DPAD, HPAD, WPAD = wkl.dpad, wkl.hpad, wkl.wpad
DSTR, HSTR, WSTR = wkl.dstride, wkl.hstride, wkl.wstride
out_width = (wkl.width + 2 * WPAD - wkl.wkernel) // WSTR + 1
oc_bn = 1
for bn in range(simd_width, 0, -1):
if wkl.out_filter % bn == 0:
oc_bn = bn
break
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
reg_n = 1
for n in range(7, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
def _schedule_conv3d_ndhwc(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
# fetch schedule
ic_bn, oc_bn, reg_n, unroll_kw = (
cfg["tile_ic"].size[-1],
cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1],
cfg["unroll_kw"].val,
)
# get padding size
padding = infer_pad3d(data, data_pad, "NDHWC")
DPAD, HPAD, WPAD = padding
DOPAD = DPAD != 0 or HPAD != 0 or WPAD != 0
A, W = data, kernel_vec
A0, A1 = data_pad, data_vec
# schedule data
if DOPAD:
s[A0].compute_inline()
batch, ic_chunk, idd, ih, ic_block, iw = s[A1].op.axis
parallel_axis = s[A1].fuse(batch, ic_chunk, idd, ih)
s[A1].parallel(parallel_axis)
# schedule kernel pack
oc_chunk, ic_chunk, od, oh, ow, ic_block, oc_block = s[W].op.axis
s[W].reorder(oc_chunk, od, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[W].vectorize(oc_block)
parallel_axis = s[W].fuse(oc_chunk, od, oh)
s[W].parallel(parallel_axis)
# schedule conv
C, O0, O = conv_out, output, last
CC = s.cache_write(C, "global")
_, oc_chunk, od, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
s[C].fuse(oc_chunk, od, oh)
s[C].vectorize(oc_block)
s[CC].compute_at(s[C], ow_chunk)
_, oc_chunk, od, oh, ow, oc_block = s[CC].op.axis
kd, kh, kw, ic = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
if unroll_kw:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, ic_block, kw, ow_block, oc_block)
s[CC].unroll(kw)
else:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, kw, ic_block, ow_block, oc_block)
s[CC].fuse(oc_chunk, od, oh)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_block)
if O0 != O:
s[O0].compute_inline()
# unpacking
batch, od, oh, ow, oc = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, od, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
def _schedule_conv3d_ncdhw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
# fetch schedule
ic_bn, oc_bn, reg_n, unroll_kw = (
cfg["tile_ic"].size[-1],
cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1],
cfg["unroll_kw"].val,
)
# get padding size
padding = infer_pad3d(data, data_pad, "NCDHW")
DPAD, HPAD, WPAD = padding
DOPAD = DPAD != 0 or HPAD != 0 or WPAD != 0
A, W = data, kernel_vec
A0, A1 = data_pad, data_vec
# schedule data
if DOPAD:
s[A0].compute_inline()
batch, ic_chunk, idd, ih, ic_block, iw = s[A1].op.axis
parallel_axis = s[A1].fuse(batch, ic_chunk, idd, ih)
s[A1].parallel(parallel_axis)
# schedule kernel pack
oc_chunk, ic_chunk, od, oh, ow, ic_block, oc_block = s[W].op.axis
s[W].reorder(oc_chunk, od, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[W].vectorize(oc_block)
parallel_axis = s[W].fuse(oc_chunk, od, oh)
s[W].parallel(parallel_axis)
# schedule conv
C, O0, O = conv_out, output, last
CC = s.cache_write(C, "global")
_, oc_chunk, od, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
s[C].fuse(oc_chunk, od, oh)
s[C].vectorize(oc_block)
s[CC].compute_at(s[C], ow_chunk)
_, oc_chunk, od, oh, ow, oc_block = s[CC].op.axis
ic, kd, kh, kw = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
if unroll_kw:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, ic_block, kw, ow_block, oc_block)
s[CC].unroll(kw)
else:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kd, kh, kw, ic_block, ow_block, oc_block)
s[CC].fuse(oc_chunk, od, oh)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_block)
if O0 != O:
s[O0].compute_inline()
# unpacking
batch, oc, od, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(oc_chunk, od, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(batch, oc_chunk, od, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/conv3d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Conv3D Transpose schedule on x86"""
from tvm import te
from ..utils import traverse_inline
from .. import nn
from .conv3d import conv3d_ncdhw, schedule_conv3d_ncdhw
def conv3d_transpose_ncdhw(data, kernel, strides, padding, out_dtype, output_padding):
data_pad, kernel_transform = nn.conv3d_transpose_ncdhw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
# reuse conv3d_ncdhw implementation
return conv3d_ncdhw(data_pad, kernel_transform, (1, 1, 1), (0, 0, 0), (1, 1, 1), 1, out_dtype)
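# Note (illustrative): the preprocess step dilates the NCDHW input by `strides`
# (a depth of D becomes (D - 1) * stride_d + 1) before padding, which is why
# the conv3d_ncdhw call above can use unit strides and zero padding.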
def schedule_conv3d_transpose_ncdhw(outs):
"""Create schedule for tensors"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = schedule_conv3d_ncdhw(outs)
def _callback(op):
if "unpack_ncdhwc" in op.tag:
conv_out = op.input_tensors[0]
# retrieve data
data_vec = conv_out.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
data_dilate = data_pad.op.input_tensors[0]
s[data_dilate].compute_inline()
s[data_pad].compute_inline()
# retrieve kernel
kernel_vec = conv_out.op.input_tensors[1]
kernel_transform = kernel_vec.op.input_tensors[0]
s[kernel_transform].compute_inline()
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable
# pylint: disable=no-value-for-parameter
"""x86 dense operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from tvm.contrib import cblas, dnnl, mkl
from .. import generic, tag
from ..utils import get_const_tuple, traverse_inline
from .tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
from .utils import get_simd_32bit_lanes
def _schedule_dense_pack_template(cfg, s, C, O):
A, packedB = s[C].op.input_tensors
CC = s.cache_write(C, "global")
y, x = s[C].op.axis
(k,) = s[CC].op.reduce_axis
yt, yo, yi = cfg["tile_y"].apply(s, C, y)
xt, xo, xi = cfg["tile_x"].apply(s, C, x)
s[C].reorder(xt, yt, yo, xo, yi, xi)
xyt = s[C].fuse(xt, yt)
if C == O:
s[C].parallel(xyt)
xyo = s[C].fuse(yo, xo)
s[C].unroll(yi)
s[C].vectorize(xi)
s[CC].compute_at(s[C], xyo)
y, x = s[CC].op.axis
ko, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].reorder(ko, ki, y, x)
s[CC].vectorize(x)
tile_inner = cfg["tile_inner"].size[-1]
if tile_inner > 1:
yo, yi = s[CC].split(y, tile_inner)
s[CC].reorder(ko, yo, ki, yi, x)
s[CC].unroll(yo)
s[CC].unroll(ki)
s[CC].unroll(yi)
else:
s[CC].unroll(ki)
s[CC].unroll(y)
if C != O:
y, x = s[O].op.axis
yt, yo, yi = cfg["tile_y"].apply(s, O, y)
xt, xo, xi = cfg["tile_x"].apply(s, O, x)
s[O].reorder(xt, yt, yo, xo, yi, xi)
xyt = s[O].fuse(xt, yt)
s[C].compute_at(s[O], xyt)
s[O].vectorize(xi)
s[O].parallel(xyt)
return s
def _schedule_dense_nopack_template(cfg, s, C):
y, x = s[C].op.axis
(kk,) = s[C].op.reduce_axis
yo, yi = cfg["tile_y"].apply(s, C, y)
xo, xi = cfg["tile_x"].apply(s, C, x)
s[C].reorder(yo, xo, yi, xi)
xyo = s[C].fuse(yo, xo)
s[C].parallel(xyo)
s[C].unroll(kk)
(CC,) = s[C].op.input_tensors
s[CC].compute_at(s[C], xyo)
z, y, x = s[CC].op.axis
(k,) = s[CC].op.reduce_axis
yz = s[CC].fuse(z, y)
s[CC].reorder(k, yz, x)
s[CC].unroll(yz)
s[CC].vectorize(x)
return s
def _default_dense_pack_config(cfg, M, N, K):
# Generate default schedule for dynamic shape.
if isinstance(M, (tvm.tir.Var, tvm.tir.Any)):
M = 16
if isinstance(N, (tvm.tir.Var, tvm.tir.Any)):
N = 16
if isinstance(K, (tvm.tir.Var, tvm.tir.Any)):
K = 16
vec_width = get_simd_32bit_lanes()
tilex_ii = 1
for bn in range(vec_width * 2, 0, -1):
if N % bn == 0:
tilex_ii = bn
break
NN = N // tilex_ii
tilex_oi = 1
while NN // tilex_oi > 4:
if (NN // tilex_oi) % 2 == 1:
break
tilex_oi *= 2
tiley_ii = 8
while M % tiley_ii != 0:
tiley_ii //= 2
MM = M // tiley_ii
tiley_oi = 1
while MM // tiley_oi > 4:
if (MM // tiley_oi) % 2 == 1:
break
tiley_oi *= 2
cfg["tile_y"] = SplitEntity([MM // tiley_oi, tiley_oi, tiley_ii])
cfg["tile_x"] = SplitEntity([NN // tilex_oi, tilex_oi, tilex_ii])
cfg["tile_k"] = SplitEntity([K, 1])
cfg["tile_inner"] = SplitEntity([M // tiley_ii, tiley_ii])
def _default_dense_nopack_config(cfg, M, N, K):
# Generate default schedule for dynamic shape.
if isinstance(M, (tvm.tir.Var, tvm.tir.Any)):
M = 16
if isinstance(N, (tvm.tir.Var, tvm.tir.Any)):
N = 16
if isinstance(K, (tvm.tir.Var, tvm.tir.Any)):
K = 16
vec_width = get_simd_32bit_lanes()
tilek_bn = 1
for bn in range(vec_width * 2, 0, -1):
if K % bn == 0:
tilek_bn = bn
break
cfg["tile_k"] = SplitEntity([K // tilek_bn, tilek_bn])
cfg["tile_x"] = SplitEntity([N, 1])
cfg["tile_y"] = SplitEntity([1, M])
@autotvm.register_topi_compute("dense_nopack.x86")
def dense_nopack(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense without packing"""
if out_dtype is None:
out_dtype = data.dtype
M, K = get_const_tuple(data.shape)
N, _ = get_const_tuple(weight.shape)
# create tuning space
cfg.define_split(
"tile_y", 32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M, num_outputs=2
)
cfg.define_split(
"tile_x", 32 if isinstance(N, (tvm.tir.Var, tvm.tir.Any)) else N, num_outputs=2
)
cfg.define_split(
"tile_k", 32 if isinstance(K, (tvm.tir.Var, tvm.tir.Any)) else K, num_outputs=2
)
if cfg.is_fallback:
_default_dense_nopack_config(cfg, M, N, K)
vec = cfg["tile_k"].size[-1]
k = te.reduce_axis((0, K // vec), "k")
CC = te.compute(
(M, N, vec),
lambda z, y, x: te.sum(
data[z, k * vec + x].astype(out_dtype) * weight[y, k * vec + x].astype(out_dtype),
axis=k,
),
)
kk = te.reduce_axis((0, vec), "kk")
C = te.compute((M, N), lambda y, x: te.sum(CC[y, x, kk], axis=kk), tag="dense_nopack")
if bias is not None:
C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
return C
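# Reference sketch (illustrative, not part of the original module): a plain
# numpy rendering of the two-stage reduction above, where CC keeps `vec`
# partial-sum lanes that vectorize cleanly and C reduces those lanes.
def _example_dense_nopack_reference(a, b, vec=4):
    import numpy as np

    m, k = a.shape
    n, _ = b.shape
    # stage 1: partial sums over strided k chunks, keeping `vec` lanes
    cc = np.einsum(
        "mrv,nrv->mnv", a.reshape(m, k // vec, vec), b.reshape(n, k // vec, vec)
    )
    # stage 2: reduce the vector lanes; equals a @ b.T when vec divides k
    return cc.sum(axis=-1)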
@autotvm.register_topi_schedule("dense_nopack.x86")
def schedule_dense_nopack(cfg, outs):
"""Create the schedule for dense_nopack"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_nopack" in op.tag:
_schedule_dense_nopack_template(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("dense_pack.x86")
def dense_pack(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense with transformed weight."""
if out_dtype is None:
out_dtype = data.dtype
M, K = get_const_tuple(data.shape) # batch, in_dim
if len(weight.shape) == 3:
N, _, packw_bn = get_const_tuple(weight.shape) # out_dim
N = N * packw_bn
else:
N, _ = get_const_tuple(weight.shape) # out_dim
# create tuning space
cfg.define_split(
"tile_y", 32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M, num_outputs=3
)
cfg.define_split(
"tile_x", 32 if isinstance(N, (tvm.tir.Var, tvm.tir.Any)) else N, num_outputs=3
)
cfg.define_split(
"tile_k", 32 if isinstance(K, (tvm.tir.Var, tvm.tir.Any)) else K, num_outputs=2
)
cfg.define_split(
"tile_inner",
32 if isinstance(M, (tvm.tir.Var, tvm.tir.Any)) else M,
num_outputs=2,
filter=lambda y: y.size[-1] <= 16,
)
if cfg.is_fallback:
_default_dense_pack_config(cfg, M, N, K)
if len(weight.shape) == 2:
packw_bn = cfg["tile_x"].size[-1]
packw_shape = (N // packw_bn, K, packw_bn)
if autotvm.GLOBAL_SCOPE.in_tuning:
# Directly use modified data layout placeholder.
packw = tvm.te.placeholder(packw_shape, weight.dtype, name="packed_weight")
else:
packw = te.compute(
packw_shape, lambda z, y, x: weight[z * packw_bn + x, y], name="packed_weight"
)
else:
packw = weight
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(M, N),
lambda y, x: te.sum(
data[y, k].astype(out_dtype)
* packw[idxdiv(x, packw_bn), k, idxmod(x, packw_bn)].astype(out_dtype),
axis=k,
),
tag="dense_pack",
)
if bias is not None:
C = te.compute((M, N), lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
return C
@autotvm.register_topi_schedule("dense_pack.x86")
def schedule_dense_pack(cfg, outs):
"""Create the schedule for dense_pack"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_pack" in op.tag:
_schedule_dense_pack_template(cfg, s, op.output(0), outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def dense_vnni_compute(cfg, X, packed_w, bias=None):
"""Compute for uint8 x int8 -> int32 dense"""
m, k = X.shape
n_o, _, n_i, _ = packed_w.shape
ak = te.reduce_axis((0, k), name="k")
C = te.compute(
(m, n_o * n_i),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* packed_w[tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
"int32"
),
axis=ak,
),
tag="dense_vnni",
attrs={"schedule_rule": "dense_vnni"},
)
if bias is not None:
C = te.compute(C.shape, lambda i, j: C[i, j] + bias[j], tag=tag.BROADCAST)
a_y, _ = C.op.axis
cfg.define_split("tile_y", a_y, num_outputs=2)
return C
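# Packing sketch (illustrative, not part of the original module): dense_vnni
# expects the (N, K) int8 weight pre-packed into NC16n4c layout, i.e. shape
# (N // 16, K // 4, 16, 4), matching the packed_w indexing above. A numpy
# equivalent of that packing:
def _example_pack_weight_nc16n4c(weight):
    import numpy as np

    n, k = weight.shape  # n % 16 == 0 and k % 4 == 0 assumed
    # packed[no, ko, ni, ki] == weight[no * 16 + ni, ko * 4 + ki]
    return np.ascontiguousarray(
        weight.reshape(n // 16, 16, k // 4, 4).transpose(0, 2, 1, 3)
    )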
def dense_vnni_schedule(cfg, s, C, O, do_parallel=True):
"""Schedule dense compute using VNNI vpdpbusd instruction"""
# C: The output of GEMM
# O: The output of the fused op
def split_y(out):
default_y_split_factor = 32
a_y = out.op.axis[-2]
if cfg.is_fallback:
return s[out].split(a_y, factor=default_y_split_factor)
return cfg["tile_y"].apply(s, out, a_y)
(a_k,) = C.op.reduce_axis
a_yo, a_yi = split_y(C)
a_xo, a_xi = s[C].split(C.op.axis[-1], factor=16)
a_ko, a_ki = s[C].split(a_k, factor=4)
s[C].reorder(a_yo, a_xo, a_yi, a_ko, a_xi, a_ki)
pc = dot_16x1x16_uint8_int8_int32_cascadelake()
s[C].tensorize(a_xi, pc)
if C == O:
fused = s[O].fuse(a_yo, a_xo)
else:
a_yo, a_yi = split_y(O)
a_xo, a_xi = s[O].split(O.op.axis[-1], factor=16)
s[O].reorder(a_yo, a_xo, a_yi, a_xi)
s[O].vectorize(a_xi)
s[C].compute_at(s[O], a_yi)
fused = s[O].fuse(a_yo, a_xo)
if do_parallel:
s[O].parallel(fused)
return s, fused
@autotvm.register_topi_compute("dense_vnni.x86")
def dense_vnni(cfg, data, weight, bias=None, out_dtype=None):
"""Compute for uint8 x int8 -> int32 dense"""
if out_dtype is None:
out_dtype = data.dtype
assert len(weight.shape) == 4
assert data.dtype == "uint8" and weight.dtype == "int8"
_, _, n_inner, k_inner = get_const_tuple(weight.shape) # out_dim
assert n_inner == 16 and k_inner == 4
return dense_vnni_compute(cfg, data, weight, bias)
@autotvm.register_topi_schedule("dense_vnni.x86")
def schedule_dense_vnni(cfg, outs):
"""Create a schedule for dense_vnni"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_vnni" in op.tag:
dense_vnni_schedule(cfg, s, op.output(0), outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def matmul_blas_common(cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, lib):
"""Compute matmul/dense using a BLAS library"""
M, K = get_const_tuple(tensor_a.shape)
N, _ = get_const_tuple(tensor_b.shape)
if isinstance(M, int) and isinstance(K, int) and isinstance(N, int):
cfg.add_flop(M * K * N * 2)
if tensor_a.dtype == "uint8" and tensor_b.dtype == "int8" and out_dtype == "int32":
if not hasattr(lib, "matmul_u8s8s32"):
raise NotImplementedError(
f"Matmul/Dense with {lib.__name__} for {tensor_a.dtype} is not supported "
"(matmulu8s8s32 not imlemented)"
)
C = lib.matmul_u8s8s32(tensor_a, tensor_b, transpose_a, transpose_b, dtype=out_dtype)
elif tensor_a.dtype == "float32" or tensor_a.dtype == "float64":
C = lib.matmul(tensor_a, tensor_b, transpose_a, transpose_b)
else:
raise NotImplementedError(
f"Matmul/Dense with {lib.__name__} for {tensor_a.dtype} is not supported"
)
if bias is not None:
C = te.compute(C.shape, lambda i, j: C[i, j] + bias[j].astype(out_dtype), tag=tag.BROADCAST)
return C
@autotvm.register_topi_compute("dense_cblas.x86")
def dense_cblas(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense using cblas. This is an alias of matmul_nt operator."""
return matmul_blas_common(cfg, data, weight, bias, out_dtype, False, True, cblas)
@autotvm.register_topi_schedule("dense_cblas.x86")
def schedule_dense_cblas(_, outs):
"""Create schedule for dense_cblas. This is an alias of matmul_nt operator."""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_mkl.x86")
def dense_mkl(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense using mkl. This is an alias of matmul_nt operator."""
return matmul_blas_common(cfg, data, weight, bias, out_dtype, False, True, mkl)
@autotvm.register_topi_schedule("dense_mkl.x86")
def schedule_dense_mkl(_, outs):
"""Create schedule for dense_mkl. This is an alias of matmul_nt operator."""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_dnnl.x86")
def dense_dnnl(cfg, data, weight, bias=None, out_dtype=None):
"""Compute dense using dnnl. This is an alias of matmul_nt operator."""
return matmul_blas_common(cfg, data, weight, bias, out_dtype, False, True, dnnl)
@autotvm.register_topi_schedule("dense_dnnl.x86")
def schedule_dense_dnnl(_, outs):
"""Create schedule for dense_dnnl. This is an alias of matmul_nt operator."""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("matmul_cblas.x86")
def matmul_cblas(
cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
"""Compute matmul using cblas."""
return matmul_blas_common(
cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, cblas
)
@autotvm.register_topi_schedule("matmul_cblas.x86")
def schedule_matmul_cblas(_, outs):
"""Create schedule for matmul_cblas."""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("matmul_mkl.x86")
def matmul_mkl(
cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
"""Compute matmul using mkl."""
return matmul_blas_common(
cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, mkl
)
@autotvm.register_topi_schedule("matmul_mkl.x86")
def schedule_matmul_mkl(_, outs):
"""Create schedule for matmul_mkl."""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("matmul_dnnl.x86")
def matmul_dnnl(
cfg, tensor_a, tensor_b, bias=None, out_dtype=None, transpose_a=False, transpose_b=False
):
"""Compute matmul using dnnl."""
return matmul_blas_common(
cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b, dnnl
)
@autotvm.register_topi_schedule("matmul_dnnl.x86")
def schedule_matmul_dnnl(_, outs):
"""Create schedule for matmul_dnnl."""
return generic.schedule_extern(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/dense_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Dense alter op functions for x86"""
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from .dense import _default_dense_pack_config
from ..utils import get_const_tuple
from ..nn import dense_alter_layout
from .utils import target_has_vnni
from .. import nn
def check_vnni_applicable(x, y):
mcpu = tvm.target.Target.current().mcpu
return (
target_has_vnni(mcpu)
and "int8" in x.dtype
and "int8" in y.dtype
and y.shape[-2] % 16 == 0
and y.shape[-1] % 4 == 0
)
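# Example (illustrative): for a dense weight of shape (N, K), the last two
# checks require N % 16 == 0 and K % 4 == 0, matching the NC16n4c packing the
# VNNI kernel consumes; e.g. (48, 64) passes while (40, 64) does not.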
@dense_alter_layout.register(["cpu", "arm_cpu"])
def _alter_dense_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
data_tensor, weight_tensor = tinfos
out_dtype = out_type.dtype
M, K = get_const_tuple(data_tensor.shape)
N, _ = get_const_tuple(weight_tensor.shape)
if check_vnni_applicable(data_tensor, weight_tensor) and data_tensor.dtype == "uint8":
weight_layout = "NC16n4c"
return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype)
_, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.dense"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload:
cfg = dispatch_ctx.query(target, workload)
topi_impl = workload[0]
if topi_impl == "dense_pack.x86":
if cfg.is_fallback:
_default_dense_pack_config(cfg, M, N, K)
packw_bn = cfg["tile_x"].size[-1]
weight_layout = "NC%dn" % packw_bn
new_weight = te.placeholder(
(N // packw_bn, K, packw_bn),
dtype=weight_tensor.dtype,
)
# Relay dense doesn't have bias.
new_workload = autotvm.task.args_to_workload(
[
data_tensor,
new_weight,
None,
out_dtype,
],
topi_impl,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype)
return None
def vnni_legalize(inputs, arg_types, op, attrs, need_expand=False):
"""Legalizes s8, s8 -> s32 GEMM op for VNNI."""
if check_vnni_applicable(arg_types[0], arg_types[1]) and arg_types[0].dtype == "int8":
x, y = inputs
x = relay.cast(x, "int32")
x = relay.add(x, relay.const(128, "int32"))
x = relay.cast(x, "uint8")
adjust_shift = relay.const(128, "int32") * relay.sum(relay.cast(y, "int32"), axis=[-1])
if need_expand:
adjust_shift = relay.expand_dims(adjust_shift, axis=1)
out = op(x, y, **attrs)
return relay.subtract(out, adjust_shift)
return None
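# Arithmetic behind the legalization (illustrative, not part of the original
# module): VNNI consumes uint8 x int8, so int8 activations x are shifted by
# +128 into uint8. Because
#   sum_k (x_k + 128) * y_k = sum_k x_k * y_k + 128 * sum_k y_k,
# subtracting adjust_shift = 128 * sum(y, axis=-1) restores the s8 x s8 result:
def _example_vnni_shift_identity():
    import numpy as np

    x = np.random.randint(-128, 128, (4, 16)).astype("int32")
    y = np.random.randint(-128, 128, (8, 16)).astype("int32")
    ref = x @ y.T
    shifted = (x + 128) @ y.T - 128 * y.sum(axis=-1)
    assert np.array_equal(ref, shifted)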
@nn.dense_legalize.register("cpu")
def _dense_legalize(attrs, inputs, arg_types):
"""Legalizes s8, s8 -> s32 dense for VNNI."""
return vnni_legalize(inputs, arg_types, relay.nn.dense, attrs)
@nn.batch_matmul_legalize.register("cpu")
def _batch_matmul_legalize(attrs, inputs, arg_types):
"""Legalizes s8, s8 -> s32 batch_matmul for VNNI."""
if attrs["transpose_a"] or not attrs["transpose_b"]:
return None
return vnni_legalize(inputs, arg_types, relay.nn.batch_matmul, attrs, need_expand=True)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Depthwise Conv2D schedule on x86"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..nn.pad import pad
from ..utils import get_const_tuple
from ..nn.utils import get_pad_tuple
from ..nn.depthwise_conv2d import _get_workload, depthwise_conv2d_infer_layout
from ..nn.conv2d import unpack_NCHWc_to_nchw
from ..utils import traverse_inline
from .utils import get_simd_32bit_lanes
def _fallback_schedule(cfg, wkl):
"""
Get default schedule for the workload
Parameters
----------
cfg : tvm.autotvm.task.space.FallbackConfigEntity
Fallback config to be updated
wkl : topi.nn.depthwise_conv2d.Workload
        Depthwise convolution workload
"""
simd_width = get_simd_32bit_lanes()
pt, pl, pb, pr = wkl.padt, wkl.padl, wkl.padb, wkl.padr
HSTR, WSTR = wkl.stride_h, wkl.stride_w
dilated_kernel_w = (wkl.kernel_w - 1) * wkl.dilation_w + 1
out_width = (wkl.width - dilated_kernel_w + pl + pr) // WSTR + 1
oc_bn = 1
for bn in range(simd_width, 0, -1):
if wkl.out_filter % bn == 0:
oc_bn = bn
break
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if wkl.in_filter % bn == 0:
ic_bn = bn
break
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
def depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise conv2d with NCHW layout."""
layout = "NCHW"
packed_out = depthwise_conv2d_NCHWc(
data, kernel, strides, padding, dilation, layout, layout, out_dtype
)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
def schedule_depthwise_conv2d_nchw(outs):
"""Create schedule for depthwise_conv2d_nchw."""
return schedule_depthwise_conv2d_NCHWc(outs)
def _pack_data(cfg, data, kernel):
n, ic, ih, iw = get_const_tuple(data.shape)
filters, cm, kh, kw = get_const_tuple(kernel.shape)
oc = filters * cm
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
ic_chunk = ic // ic_bn
oc_chunk = oc // oc_bn
data = te.compute(
(n, ic_chunk, ih, iw, ic_bn),
lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
name="data_vec",
)
kernel = te.compute(
(oc_chunk, 1, kh, kw, 1, oc_bn),
lambda occ, icc, k_h, k_w, icb, ocb: kernel[
(occ * oc_bn + ocb) // cm, (occ * oc_bn + ocb) % cm, k_h, k_w
],
name="kernel_vec",
)
return data, kernel
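# The packing above is the usual NCHW -> NCHWc relayout. A numpy sketch of the
# data half (illustration only, not part of TVM):
#
#   import numpy as np
#   n, ic, ih, iw, ic_bn = 1, 8, 5, 5, 4
#   a = np.arange(n * ic * ih * iw).reshape(n, ic, ih, iw)
#   packed = a.reshape(n, ic // ic_bn, ic_bn, ih, iw).transpose(0, 1, 3, 4, 2)
#   # packed[bs, c, h, w, vc] == a[bs, c * ic_bn + vc, h, w]
#   assert packed[0, 1, 2, 3, 1] == a[0, 1 * ic_bn + 1, 2, 3]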
@autotvm.register_topi_compute("depthwise_conv2d_NCHWc.x86")
def depthwise_conv2d_NCHWc(
cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype=None
):
"""Compute depthwise conv2d with NCHWc layout"""
out_dtype = data.dtype if out_dtype is None else out_dtype
if len(data.shape) == 5:
batch, in_channel_chunk, in_height, in_width, in_channel_block = get_const_tuple(data.shape)
(
out_channel_chunk,
cm_chunk,
filter_height,
filter_width,
cm_block,
out_channel_block,
) = get_const_tuple(kernel.shape)
in_channel = in_channel_chunk * in_channel_block
out_channel = out_channel_chunk * out_channel_block
channel_multiplier = cm_chunk * cm_block
assert channel_multiplier * in_channel == out_channel
else:
batch, in_channel, in_height, in_width = get_const_tuple(data.shape)
out_channel, channel_multiplier, filter_height, filter_width = get_const_tuple(kernel.shape)
assert channel_multiplier == 1
strides = strides if isinstance(strides, (tuple, list)) else (strides, strides)
HSTR, WSTR = strides
dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
dilated_kernel_h = (filter_height - 1) * dh + 1
dilated_kernel_w = (filter_width - 1) * dw + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HPAD = pad_top + pad_down
WPAD = pad_left + pad_right
out_height = (in_height + HPAD - dilated_kernel_h) // HSTR + 1
out_width = (in_width + WPAD - dilated_kernel_w) // WSTR + 1
cfg.define_split("tile_ic", in_channel, num_outputs=2)
cfg.define_split("tile_oc", out_channel, num_outputs=2)
cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
cfg.define_knob("unroll_kw", [True, False])
# get workload and related schedule config
wkl = _get_workload(
te.placeholder((batch, in_channel, in_height, in_width), dtype=data.dtype),
te.placeholder(
(out_channel, channel_multiplier, filter_height, filter_width), dtype=kernel.dtype
),
strides,
(pad_top, pad_down),
dilation,
out_dtype,
)
if cfg.is_fallback:
_fallback_schedule(cfg, wkl)
# Pack data if raw 4-D data is provided.
# This can only happen when autotuning.
if len(data.shape) == 4:
if autotvm.GLOBAL_SCOPE.in_tuning:
# Directly use modified data layout placeholder.
in_channel_block = cfg["tile_ic"].size[-1]
in_channel_chunk = in_channel // in_channel_block
out_channel_block = cfg["tile_oc"].size[-1]
out_channel_chunk = out_channel // out_channel_block
dshape = (batch, in_channel_chunk, in_height, in_width, in_channel_block)
data = tvm.te.placeholder(dshape, data.dtype, name="data")
kshape = (out_channel_chunk, 1, filter_height, filter_width, 1, out_channel_block)
kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel")
else:
data, kernel = _pack_data(cfg, data, kernel)
_, _, _, _, in_channel_block = get_const_tuple(data.shape)
out_channel_chunk, _, _, _, _, out_channel_block = get_const_tuple(kernel.shape)
# padding stage
DOPAD = pad_top != 0 or pad_left != 0 or pad_down != 0 or pad_right != 0
if DOPAD:
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
data_pad = pad(data, pad_before, pad_after, name="PaddedInput")
else:
data_pad = data
# depthconv stage
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
kh = te.reduce_axis((0, filter_height), name="kh")
kw = te.reduce_axis((0, filter_width), name="kw")
Output = te.compute(
(batch, out_channel_chunk, out_height, out_width, out_channel_block),
lambda b, oco, oh, ow, oci: te.sum(
(
data_pad[
b,
idxdiv(
idxdiv(oco * out_channel_block + oci, channel_multiplier), in_channel_block
),
oh * HSTR + kh * dh,
ow * WSTR + kw * dw,
idxmod(
idxdiv(oco * out_channel_block + oci, channel_multiplier), in_channel_block
),
].astype(out_dtype)
* kernel[oco, 0, kh, kw, 0, oci].astype(out_dtype)
),
axis=[kh, kw],
),
name="DepthwiseConv2d",
tag="depthwise_conv2d_NCHWc",
)
return Output
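# Worked example (illustration only) of the channel index math in the compute
# above: with channel_multiplier = 2 and in_channel_block = 8, output channel
# oc = oco * out_channel_block + oci = 13 consumes input channel 13 // 2 = 6,
# which lives at chunk 6 // 8 = 0, block 6 % 8 = 6 of the packed NCHWc input,
# matching the idxdiv/idxmod pair passed to data_pad.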
@autotvm.register_topi_schedule("depthwise_conv2d_NCHWc.x86")
def schedule_depthwise_conv2d_NCHWc(cfg, outs):
"""CPU schedule for depthwise conv2d in NCHW[x]c layout"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
"""Traverse operators from computation graph"""
if "depthwise_conv2d_NCHWc" in op.tag:
conv_out = op.output(0)
data = conv_out.op.input_tensors[0]
kernel = conv_out.op.input_tensors[1]
_schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data, kernel, conv_out, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data_vec, kernel_vec, conv_out, output):
tile_ow, oc_bn = cfg["tile_ow"].size[-1], cfg["tile_oc"].size[-1]
unroll_kw = cfg["unroll_kw"].val
# schedule pad
if isinstance(s[data_vec].op, tvm.te.ComputeOp) and "pad" in data_vec.op.tag:
batch, ic_chunk, ih, iw, ic_block = s[data_vec].op.axis
s[data_vec].vectorize(ic_block)
parallel_axis = s[data_vec].fuse(batch, ic_chunk, ih)
s[data_vec].parallel(parallel_axis)
C, O = conv_out, output
CC = s.cache_write(C, "global")
_, ic_chunk, oh, ow, ic_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=tile_ow)
s[C].reorder(ic_chunk, oh, ow_chunk, ow_block, ic_block)
s[C].vectorize(ic_block)
parallel_axis = s[C].fuse(ic_chunk, oh)
s[C].parallel(parallel_axis)
s[CC].compute_at(s[C], ow_chunk)
# the ow axis in the cached block CC is the ow_block in C
_, ic_chunk, oh, ow, ic_block = s[CC].op.axis
kh, kw = s[CC].op.reduce_axis
s[CC].reorder(ic_chunk, oh, kh, kw, ow, ic_block)
if unroll_kw:
s[CC].unroll(kw)
s[CC].vectorize(ic_block)
s[CC].unroll(ow)
if C != O:
out_ndim = len(s[O].op.axis)
if out_ndim == 5:
batch, oc_chunk, oh, ow, oc_block = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=tile_ow)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(oc_chunk, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
elif out_ndim == 4:
batch, oc, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=tile_ow)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(oc_chunk, oh)
s[C].compute_at(s[O], parallel_axis)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
else:
raise ValueError("Unsupported output ndim: %s" % out_ndim)
return s
@depthwise_conv2d_infer_layout.register("cpu")
def _depthwise_conv2d_infer_layout(workload, cfg):
_, data, kernel, strides, padding, dilation, _, _, dtype = workload
batch_size, in_channel, in_height, in_width = data[1]
filter_channel, channel_multiplier, k_height, k_width = kernel[1]
out_channel = filter_channel * channel_multiplier
out_height = (in_height + padding[0] + padding[2] - k_height) // strides[0] + 1
out_width = (in_width + padding[1] + padding[3] - k_width) // strides[1] + 1
tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic)
in_layout = "NCHW%dc" % tile_ic
out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc)
out_layout = "NCHW%dc" % tile_oc
return ((in_shape, in_layout),), ((out_shape, out_layout),)
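# Example (illustration only): for a workload with data (1, 32, 56, 56) and
# tile_ic = 8, tile_oc = 16, this reports in_shape (1, 4, 56, 56, 8) with
# layout "NCHW8c" and an output packed as "NCHW16c".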
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Grouped Spatial Pack Convolution (Group Conv2D) schedule on x86"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .utils import get_simd_32bit_lanes
from ..utils import get_const_tuple
from ..nn.pad import pad
from .. import tag
from ..nn.conv2d import _get_workload as _get_conv2d_workload
def group_conv2d_nchw(data, kernel, strides, padding, dilation, groups, out_dtype):
"""Compute group_conv2d with NCHW layout"""
return group_conv2d_nchw_spatial_pack(
data, kernel, strides, padding, dilation, groups, out_dtype
)
def schedule_group_conv2d_nchw(outs):
"""Compute group_conv2d with NCHW layout"""
return schedule_group_conv2d_nchwc(outs)
def _get_default_config(
cfg, data, kernel, strides, padding, dilation, groups, out_dtype, layout="NCHW"
):
"""
Get default schedule config for the workload
"""
static_data_shape = []
for dim in get_const_tuple(data.shape):
if isinstance(dim, tvm.tir.Var):
static_data_shape.append(1)
else:
static_data_shape.append(dim)
data = te.placeholder(static_data_shape, dtype=data.dtype)
wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
_fallback_schedule(cfg, wkl)
def _fallback_schedule(cfg, wkl):
simd_width = get_simd_32bit_lanes()
pad_left, pad_right = wkl.padl, wkl.padr
stride_w = wkl.stride_w
out_width = (wkl.width + pad_left + pad_right - wkl.kernel_w) // stride_w + 1
groups = wkl.groups
kernels_per_group = wkl.out_filter // groups
kernel_depth = wkl.in_filter // groups
    oc_bn = 1
for bn in range(simd_width, 0, -1):
if kernels_per_group % bn == 0:
oc_bn = bn
break
if oc_bn > kernels_per_group:
oc_bn = kernels_per_group
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if kernel_depth % bn == 0:
ic_bn = bn
break
if ic_bn > kernel_depth:
ic_bn = kernel_depth
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
@autotvm.register_topi_compute("group_conv2d_nchw.x86")
def group_conv2d_nchw_spatial_pack(
cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"
):
"""
Compute group conv2d with NCHW layout, using GSPC algorithm.
https://arxiv.org/abs/2006.09791
"""
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(dilation, int):
dilation_h, dilation_w = dilation, dilation
else:
dilation_h, dilation_w = dilation
assert isinstance(padding, int) or len(padding) == 2 or len(padding) == 4
if isinstance(padding, int):
pad_top, pad_left, pad_bottom, pad_right = padding, padding, padding, padding
elif len(padding) == 2:
hpad, wpad = padding
pad_top, pad_bottom = hpad, hpad
pad_left, pad_right = wpad, wpad
else:
pad_top, pad_left, pad_bottom, pad_right = padding
hpad = pad_top + pad_bottom
wpad = pad_left + pad_right
assert isinstance(strides, int) or len(strides) == 2
if isinstance(strides, int):
stride_h, stride_w = strides, strides
else:
stride_h, stride_w = strides
batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape)
out_channel, kernel_depth, k_height, k_width = get_const_tuple(kernel.shape)
pad_height = in_height + pad_top + pad_bottom
pad_width = in_width + pad_left + pad_right
dilated_kernel_h = (k_height - 1) * dilation_h + 1
dilated_kernel_w = (k_width - 1) * dilation_w + 1
out_height = (in_height + pad_top + pad_bottom - dilated_kernel_h) // stride_h + 1
out_width = (in_width + pad_left + pad_right - dilated_kernel_w) // stride_w + 1
kernels_per_group = out_channel // groups
cfg.define_split("tile_ic", in_channel, num_outputs=2)
cfg.define_split("tile_oc", out_channel, num_outputs=2)
cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
cfg.define_knob("unroll_kw", [True, False])
# If no config was set, we can fallback to default config.
if cfg.is_fallback:
_get_default_config(
cfg,
te.placeholder((batch_size, in_channel, in_height, in_width), dtype=data.dtype),
te.placeholder(
(out_channel, in_channel // groups, k_height, k_width), dtype=kernel.dtype
),
strides,
padding,
dilation,
groups,
out_dtype,
)
oc_bn = cfg["tile_oc"].size[-1]
ic_bn = cfg["tile_ic"].size[-1]
# pack data
DOPAD = hpad != 0 or wpad != 0
if DOPAD:
data_pad = pad(
data, (0, 0, pad_top, pad_left), (0, 0, pad_bottom, pad_right), name="data_pad"
)
else:
data_pad = data
shape = (groups, batch_size, kernel_depth // ic_bn, pad_height, ic_bn, pad_width)
data_vec = te.compute(
shape,
lambda g, n, C, h, c, w: data_pad[n, C * ic_bn + c + kernel_depth * g, h, w],
name="data_vec",
)
# pack kernel
shape = (
groups,
kernels_per_group // oc_bn,
kernel_depth // ic_bn,
k_height,
k_width,
ic_bn,
oc_bn,
)
kernel_vec = te.compute(
shape,
lambda g, out_channel, in_channel, h, w, ci, co: kernel[
(out_channel * oc_bn + co + g * kernels_per_group), in_channel * ic_bn + ci, h, w
],
name="kernel_vec",
)
# convolution
oshape = (groups, batch_size, kernels_per_group // oc_bn, out_height, out_width, oc_bn)
unpack_shape = (batch_size, out_channel, out_height, out_width)
ic = te.reduce_axis((0, (kernel_depth)), name="ic")
kh = te.reduce_axis((0, k_height), name="kh")
kw = te.reduce_axis((0, k_width), name="kw")
idxmod = tvm.tir.indexmod
idxdiv = tvm.tir.indexdiv
conv = te.compute(
oshape,
lambda g, n, oc_chunk, oh, ow, oc_block: te.sum(
data_vec[
g,
n,
idxdiv(ic, ic_bn),
oh * stride_h + kh * dilation_h,
idxmod(ic, ic_bn),
ow * stride_w + kw * dilation_w,
].astype(out_dtype)
* kernel_vec[
g, oc_chunk, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block
].astype(out_dtype),
axis=[ic, kh, kw],
),
name="conv",
)
unpack = te.compute(
unpack_shape,
lambda n, c, h, w: conv[
idxdiv(c, kernels_per_group),
n,
idxmod(idxdiv(c, oc_bn), (kernels_per_group // oc_bn)),
h,
w,
idxmod(idxmod(c, oc_bn), kernels_per_group),
].astype(out_dtype),
name="output_unpack",
tag="group_conv2d_nchw",
)
return unpack
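# Worked example (illustration only) of the unpack index math above: with
# groups = 2, out_channel = 16 (kernels_per_group = 8) and oc_bn = 4, output
# channel c = 13 maps to group 13 // 8 = 1, chunk (13 // 4) % 2 = 1 and block
# 13 % 4 = 1, i.e. channel 1 * 8 + 1 * 4 + 1 = 13 of conv -- the flat NCHW
# channel is recovered exactly.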
@autotvm.register_topi_schedule("group_conv2d_nchw.x86")
def schedule_group_conv2d_nchwc(cfg, outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if "group_conv2d_nchw" in op.tag:
output = op.output(0)
if "tile_ic" not in cfg:
return
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel = kernel_vec.op.input_tensors[0]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
_schedule_gspc_nchw(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
def _schedule_gspc_nchw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
"""Schedule GSPC"""
ic_bn, oc_bn, reg_n, unroll_kw = (
cfg["tile_ic"].size[-1],
cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1],
cfg["unroll_kw"].val,
)
_, W = data, kernel_vec
A0, A1 = data_pad, data_vec
# schedule data
if (
data_pad is not None
and isinstance(data_pad.op, tvm.te.ComputeOp)
and "pad" in data_pad.op.tag
):
s[A0].compute_inline()
groups, batch, ic_chunk, ih, ic_block, _ = s[A1].op.axis
parallel_axis = s[A1].fuse(batch, ic_chunk, ih)
s[A1].parallel(parallel_axis)
# schedule kernel pack
groups, oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis
s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[W].vectorize(oc_block)
parallel_axis = s[W].fuse(groups, oc_chunk, oh)
s[W].parallel(parallel_axis)
# schedule conv
C, O0, O = conv_out, output, last
CC = s.cache_write(C, "global")
_, _, oc_chunk, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
s[C].fuse(oc_chunk, oh)
s[C].vectorize(oc_block)
groups, batch, oc_chunk, oh, ow, oc_block = s[CC].op.axis
ic, kh, kw = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
if unroll_kw:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
s[CC].unroll(kw)
else:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)
parallel_axis = s[CC].fuse(groups, batch, oc_chunk, oh)
s[CC].parallel(parallel_axis)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_block)
if O0 != O:
s[O0].compute_inline()
batch, oc, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(batch, oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(oc_chunk, oh)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
from tvm import te
from tvm.topi import tag
from tvm.tir import IntImm
from tvm.topi.generic.injective import (
schedule_injective_from_existing as schedule_injective_for_concat,
)
from ..utils import is_empty_shape
def schedule_injective_from_existing(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
if len(sch[out].op.axis) >= 5:
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
sch[out].parallel(fused)
elif len(sch[out].op.axis) >= 3:
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1])
sch[out].parallel(fused)
elif len(sch[out].op.axis) >= 1:
sch[out].parallel(sch[out].op.axis[0])
# Vectorize the inner most for loop. Tiling first to get a const extent
if len(sch[out].op.axis) >= 1:
l = sch[out].op.axis[-1]
lo, li = sch[out].split(l, factor=16)
sch[out].vectorize(li)
# for 1D loop, the above split will break the parallel axis
# Need to make the outer loop parallel again
if len(sch[out].op.axis) == 1:
sch[out].parallel(lo)
return sch
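# A hypothetical usage sketch (names are illustrative, not a TVM test): for a
# 2-D elementwise op this parallelizes the outer axis and vectorizes a
# 16-element inner split of the last axis.
#
#   import tvm
#   from tvm import te
#   A = te.placeholder((64, 64), name="A")
#   B = te.compute(A.shape, lambda i, j: A[i, j] * 2.0, name="B")
#   s = te.create_schedule(B.op)
#   schedule_injective_from_existing(s, B)
#   print(tvm.lower(s, [A, B], simple_mode=True))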
def schedule_injective(outs):
"""X86 schedule for injective op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
te.schedule.AutoInlineInjective(s)
for x in outs:
if not is_empty_shape(x.shape):
schedule_injective_from_existing(s, x)
return s
def schedule_concatenate(outs):
"""X86 schedule for concatenate op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
def vectorize(sch, tensor, vectorize_limit):
"""Internal vectorization function for concatenate."""
        inner_axis = sch[tensor].op.axis[len(sch[tensor].op.axis) - 1]
# Check that the tensor shape is static. Otherwise skip vectorization.
if isinstance(tensor.shape[len(tensor.shape) - 1], IntImm):
inner_length = tensor.shape[len(tensor.shape) - 1].value
if inner_length <= vectorize_limit:
sch[tensor].vectorize(inner_axis)
else:
split_factor = 1
for i in range(vectorize_limit, 1, -1):
if inner_length % i == 0:
split_factor = i
break
if split_factor > 1:
_, inner_i = sch[tensor].split(inner_axis, split_factor)
sch[tensor].vectorize(inner_i)
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
x = outs[0]
s = te.create_schedule([x.op for x in outs])
te.schedule.AutoInlineInjective(s)
if len(s[x].op.axis) >= 5:
fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1], s[x].op.axis[2])
vectorize(s, x, 64)
s[x].parallel(fused)
elif len(s[x].op.axis) >= 3:
fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1])
s[x].parallel(fused)
else:
s[x].parallel(s[x].op.axis[0])
return s
def schedule_concatenate_cpu(outs):
"""X86 schedule for concatenate op.
Parameters
----------
outs: Array of Tensor
The computation graph description in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_injective(op.tag):
schedule_injective_for_concat(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
for out in outs:
traverse(out.op)
return s
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/math_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Legalization transforms for math operations on x86"""
import logging
from tvm import relay
from ..math import erf_legalize
logger = logging.getLogger("topi")
@erf_legalize.register("cpu")
def _erf_legalize(attrs, inputs, arg_types):
"""Legalizes ERF op if needed.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Extract types and expressions.
data = inputs[0]
data_tensor = arg_types[0]
# Check if the input type is supported.
data_dtype = data_tensor.dtype
# If input is not fp32, we must cast to it.
if data_dtype != "float32":
data = relay.cast(data, "float32")
output = relay.erf(data)
return relay.cast(output, data_dtype)
# Otherwise do nothing.
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable
"""x86 nn operators"""
from tvm import te
from ..utils import traverse_inline
from .injective import schedule_injective_from_existing
def _schedule_softmax(softmax_op, s, outs):
op_tag = softmax_op.tag
if op_tag == "softmax_output":
exp = softmax_op.input_tensors[0]
expsum = softmax_op.input_tensors[1]
max_elem = s[exp].op.input_tensors[1]
delta = None
axis = int(softmax_op.attrs["axis"])
elif op_tag == "fast_softmax_output":
exp = softmax_op.input_tensors[0]
expsum = softmax_op.input_tensors[1]
delta = s[exp].op.input_tensors[0]
max_elem = s[delta].op.input_tensors[1]
axis = int(softmax_op.attrs["axis"])
elif op_tag == "log_softmax_output":
exp = None
delta = None
max_elem = softmax_op.input_tensors[1]
expsum = softmax_op.input_tensors[2]
axis = int(softmax_op.attrs["axis"])
else:
        raise ValueError(
            "Tag is expected to be softmax_output, fast_softmax_output or "
            "log_softmax_output. Got {0}".format(op_tag)
        )
output = outs[0]
def _schedule(output_op, softmax_op):
# only parallelize outer dimensions up to axis
outer_axes = [output_op.axis[i] for i in range(0, axis)]
fused_outer_axes = s[output_op].fuse(*outer_axes)
s[output_op].parallel(fused_outer_axes)
if softmax_op != output_op:
# fuse softmax output with following elemwise ops.
s[softmax_op].compute_at(s[output_op], fused_outer_axes)
# move computations with the same outer dimensions under the same root
s[max_elem].compute_at(s[output_op], fused_outer_axes)
s[expsum].compute_at(s[output_op], fused_outer_axes)
if delta is not None:
s[exp].compute_inline()
s[delta].compute_inline()
if exp is not None:
s[exp].compute_at(s[output_op], fused_outer_axes)
if list(output.shape) == list(softmax_op.output(0).shape):
_schedule(output.op, softmax_op)
else:
# This case can happen, for example, if the 4D input to softmax
# is in the NCHW layout while the fused elemwise op takes the NCHWc layout.
# Since we parallelize over outer axes up to the "axis" parameter of softmax,
# softmax and the fused op need to be in the same layout if we want to
# fuse them under the same parallel loop.
# This case can be removed if softmax supported AlterLayout.
schedule_injective_from_existing(s, output)
_schedule(softmax_op, softmax_op)
def schedule_softmax(outs):
"""Schedule for softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "softmax" in op.tag:
_schedule_softmax(op, s, outs)
traverse_inline(s, outs[0].op, _callback)
return s
def schedule_batch_norm(outs):
"""Schedule for batch_norm
Parameters
----------
outs: Array of Tensor
The computation graph description of batch_norm
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
s = te.create_schedule([x.op for x in outs])
# only parallelize outer dimensions up to axis
output_op = outs[0].op
axis = output_op.axis
outer_axes = [output_op.axis[i] for i in range(0, len(axis) - 1)]
fused_outer_axes = s[output_op].fuse(*outer_axes)
s[output_op].parallel(fused_outer_axes)
# when scale or center is enabled
if "divide" not in output_op.name:
div = output_op.input_tensors[0]
        subtract = s[div].op.input_tensors[0]
        s[div].compute_inline()
        s[subtract].compute_inline()
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Schedule for pooling operators"""
from tvm import te
from .. import tag
def _parallel_sch(sch, oshape, do_vectorize=False):
def vectorize(fused_axis, num_parallel_axis, vectorize_limit=64):
"""Internal vectorization utility function."""
reorder_axis = [fused_axis]
for i in range(num_parallel_axis, len(sch.op.axis) - 1):
reorder_axis.append(sch.op.axis[i])
k = sch.op.reduce_axis
fuse_k = sch.fuse(*k)
c = sch.op.axis[len(sch.op.axis) - 1]
reorder_axis += [fuse_k, c]
sch.reorder(*reorder_axis)
inner_length = oshape[len(oshape) - 1].value
if inner_length <= vectorize_limit:
sch.vectorize(c)
else:
split_factor = 1
for i in range(vectorize_limit, 1, -1):
if inner_length % i == 0:
split_factor = i
break
if split_factor > 1:
_, c_i = sch.split(c, split_factor)
sch.vectorize(c_i)
if len(sch.op.axis) >= 5:
fused = sch.fuse(sch.op.axis[0], sch.op.axis[1], sch.op.axis[2])
if do_vectorize:
vectorize(fused, 3)
elif len(sch.op.axis) >= 3:
fused = sch.fuse(sch.op.axis[0], sch.op.axis[1])
if do_vectorize:
vectorize(fused, 2)
else:
sch.parallel(sch.op.axis[0])
return
sch.parallel(fused)
def schedule_pool(outs, layout):
"""Schedule for pool
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def _schedule(PaddedInput, Pool):
if isinstance(PaddedInput.op, te.tensor.ComputeOp):
s[PaddedInput].compute_inline()
do_vectorize = layout[-1] not in "DHWdhw"
_parallel_sch(s[Pool], outs[0].shape, do_vectorize)
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_injective(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule pool
elif OP.tag.startswith("pool"):
# Average pool accumulation and division happens in different for loops (#3607).
# To ensure good parallel support, apply multi-threading on the second loop.
if OP != outs[0].op:
output = outs[0]
output_fused = s[output].fuse(output.op.axis[0], output.op.axis[1])
s[output].parallel(output_fused)
PaddedInput = OP.input_tensors[0]
Pool = OP.output(0)
_schedule(PaddedInput, Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
def schedule_adaptive_pool(outs):
"""Schedule for adaptive pool
Parameters
----------
outs: Array of Tensor
The computation graph description of adaptive pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_injective(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule pool
elif OP.tag.startswith("adaptive_pool"):
if OP != outs[0].op:
output = outs[0]
output_fused = s[output].fuse(output.op.axis[0], output.op.axis[1])
s[output].parallel(output_fused)
Pool = OP.output(0)
_parallel_sch(s[Pool], outs[0].shape)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
import tvm
from tvm import te
from .injective import schedule_injective_from_existing
from .. import tag
from ..utils import get_const_tuple
def _schedule_reduce(sch, op, is_idx_reduce=False):
if is_idx_reduce:
real_out = op.output(0)
fused = sch[real_out].fuse(*sch[real_out].op.axis)
out = op.input_tensors[0]
else:
out = op.output(0)
const_shape = True
out_shape = get_const_tuple(out.shape)
for d in out_shape:
if not isinstance(d, int):
const_shape = False
break
if const_shape:
naxes = len(sch[out].op.axis)
parallelism = 1
fuse_axes = []
# We choose a heuristic number 128 to limit the maximum parallelism
while len(fuse_axes) < naxes and parallelism < 128:
ivar = sch[out].op.axis[len(fuse_axes)]
parallelism *= int(ivar.dom.extent)
fuse_axes.append(ivar)
fused = sch[out].fuse(*fuse_axes)
sch[out].parallel(fused)
else:
if len(sch[out].op.axis) >= 5:
# avoid too many parallelism
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
sch[out].parallel(fused)
else:
fused = sch[out].fuse(*sch[out].op.axis)
sch[out].parallel(fused)
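# Worked example (illustration only) of the 128-way heuristic above: for a
# static output shape (4, 8, 16, 32), the loop fuses axes until the product of
# extents reaches 128, i.e. it takes 4, then 4 * 8 = 32, then 4 * 8 * 16 = 512
# and stops, so the first three axes are fused into one 512-iteration
# parallel loop.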
def schedule_reduce(outs):
"""X86 schedule for reduction op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
sch = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse_before_reduce(operator):
"""Internal traverse function"""
if isinstance(operator, tvm.te.PlaceholderOp):
return
if tag.is_injective(operator.tag):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
def traverse_after_reduce(operator):
"""Internal traverse function"""
if tag.is_broadcast(operator.tag):
if operator not in scheduled_ops:
schedule_injective_from_existing(sch, operator)
for tensor in operator.input_tensors:
traverse_after_reduce(tensor.op)
elif operator.tag == "comm_reduce":
_schedule_reduce(sch, operator, is_idx_reduce=False)
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif operator.tag == "comm_reduce_idx":
_schedule_reduce(sch, operator, is_idx_reduce=True)
input_tensors = operator.input_tensors[0].op.input_tensors
for tensor in input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif isinstance(operator, tvm.te.PlaceholderOp):
pass
else:
raise RuntimeError("Unsupported operator: %s (tag: %s)" % (operator, operator.tag))
scheduled_ops.append(operator)
traverse_after_reduce(outs[0].op)
return sch
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/roi_align.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable, too-many-nested-blocks, too-many-branches, too-many-statements
"""Non-maximum suppression operator for intel cpu"""
import math
import tvm
from tvm.te import hybrid
from ..tensor import full
from ..utils import get_const_tuple
@hybrid.script
def roi_align_nchw_ir(
data, rois, num_rois, w_pc, pos_pc, pooled_size, spatial_scale, sample_ratio, mode
):
"""Hybrid routing fo ROI align operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor or numpy NDArray
4-D with shape [batch, channel, height, width]
rois : tvm.te.Tensor or numpy NDArray
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
num_rois : tvm.tir.IntImm or tvm.tir.Var
Number of roi. We need to pass it in since hybrid script doesn't support
binding variable to symbolic dim.
w_pc : tvm.te.Tensor or numpy NDArray
3-D weight pre-calculation buffer
pos_pc : tvm.te.Tensor or numpy NDArray
3-D position pre-calculation buffer
pooled_size : tvm ConsExpr
[out_height, out_width]
spatial_scale : tvm.tir.const
Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
of total stride in convolutional layers, which should be in range (0.0, 1.0]
sample_ratio : tvm.tir.const
Sampling ratio of ROI align, using adaptive size by default.
mode : tvm.tir.const
        Mode of RoiAlign. A value of 0 corresponds to b'avg', while a value of 1 corresponds to
b'max'.
Returns
-------
output : tvm.te.Tensor or numpy NDArray
4-D with shape [num_roi, channel, pooled_size, pooled_size]
"""
channels = data.shape[1]
height = data.shape[2]
width = data.shape[3]
pooled_size_h = pooled_size[0]
pooled_size_w = pooled_size[1]
output = output_tensor((num_rois, channels, pooled_size_h, pooled_size_w), data.dtype)
for n in parallel(num_rois):
roi_batch_index = int32(rois[n, 0])
roi_start_w = rois[n, 1] * spatial_scale
roi_start_h = rois[n, 2] * spatial_scale
roi_end_w = rois[n, 3] * spatial_scale
roi_end_h = rois[n, 4] * spatial_scale
roi_h = max(roi_end_h - roi_start_h, 1.0)
roi_w = max(roi_end_w - roi_start_w, 1.0)
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
roi_bin_grid_h = sample_ratio
roi_bin_grid_w = roi_bin_grid_h
rounded_bin_h = int32(bin_h) * 1.0
rounded_bin_w = int32(bin_w) * 1.0
if sample_ratio <= 0:
# Cannot use ceil function since hybrid script
# doesn't support Call as indexing
roi_bin_grid_h = int32(bin_h)
roi_bin_grid_w = int32(bin_w)
if rounded_bin_h < bin_h:
roi_bin_grid_h += 1
if rounded_bin_w < bin_w:
roi_bin_grid_w += 1
count = roi_bin_grid_h * roi_bin_grid_w
# Pre-calculate indices and weights shared by all channels.
# This is the key point of optimization.
pre_calc_index = 0
iy_upper = roi_bin_grid_h
ix_upper = roi_bin_grid_w
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
for iy in range(iy_upper):
yy = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
for ix in range(ix_upper):
xx = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
x = xx
y = yy
if y < -1.0 or y > height or x < -1.0 or x > width:
for i in range(4):
w_pc[n, pre_calc_index, i] = 0.0
pos_pc[n, pre_calc_index, i] = 0
else:
if y < 0.0:
y = 0.0
if x < 0.0:
x = 0.0
y_low = int32(y)
x_low = int32(x)
x_high = x_low + 1
y_high = y_low + 1
if y_low >= height - 1:
y_high = height - 1
y_low = y_high
y = float32(y_low)
if x_low >= width - 1:
x_high = width - 1
x_low = x_high
x = float32(x_low)
ly = y - y_low
lx = x - x_low
hy = 1.0 - ly
hx = 1.0 - lx
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
pos_pc[n, pre_calc_index, 0] = x_low
pos_pc[n, pre_calc_index, 1] = x_high
pos_pc[n, pre_calc_index, 2] = y_low
pos_pc[n, pre_calc_index, 3] = y_high
w_pc[n, pre_calc_index, 0] = w1
w_pc[n, pre_calc_index, 1] = w2
w_pc[n, pre_calc_index, 2] = w3
w_pc[n, pre_calc_index, 3] = w4
pre_calc_index += 1
for c in range(channels):
pre_calc_index = 0
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
output_val = 0.0 # Avg mode
if mode == 1: # Max mode
output_val = ninf("float32")
for iy in range(roi_bin_grid_h):
for ix in range(roi_bin_grid_w):
bilinear_val = (
w_pc[n, pre_calc_index, 0]
* data[
roi_batch_index,
c,
pos_pc[n, pre_calc_index, 2],
pos_pc[n, pre_calc_index, 0],
]
+ w_pc[n, pre_calc_index, 1]
* data[
roi_batch_index,
c,
pos_pc[n, pre_calc_index, 2],
pos_pc[n, pre_calc_index, 1],
]
+ w_pc[n, pre_calc_index, 2]
* data[
roi_batch_index,
c,
pos_pc[n, pre_calc_index, 3],
pos_pc[n, pre_calc_index, 0],
]
+ w_pc[n, pre_calc_index, 3]
* data[
roi_batch_index,
c,
pos_pc[n, pre_calc_index, 3],
pos_pc[n, pre_calc_index, 1],
]
)
pre_calc_index += 1
if mode == 0: # Avg mode
output_val += bilinear_val / count
if mode == 1: # Max mode
output_val = max(output_val, bilinear_val)
output[n, c, ph, pw] = output_val
return output
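# Note on the bilinear weights above (illustration only): with fractional
# offsets lx = x - x_low and ly = y - y_low, the corner weights
# w1 = hy*hx, w2 = hy*lx, w3 = ly*hx, w4 = ly*lx always sum to 1. For
# (lx, ly) = (0.25, 0.5): w1 = 0.375, w2 = 0.125, w3 = 0.375, w4 = 0.125.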
def roi_align_nchw(data, rois, pooled_size, spatial_scale, mode, sample_ratio=-1):
"""ROI align operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
rois : tvm.te.Tensor
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : int or list/tuple of two ints
output size, or [out_height, out_width]
spatial_scale : float
Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
of total stride in convolutional layers, which should be in range (0.0, 1.0]
mode : str
Mode of RoiAlign. Should be b'max' or b'avg'.
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_roi, channel, pooled_size, pooled_size]
"""
if not isinstance(pooled_size, (tuple, list)):
pooled_size = (pooled_size, pooled_size)
# Pre-allocate intermediate buffer
if sample_ratio > 0:
max_roi_bin_grid_w = max_roi_bin_grid_h = sample_ratio
else:
_, _, height, width = get_const_tuple(data.shape)
max_roi_bin_grid_h = math.ceil(height / pooled_size[0])
max_roi_bin_grid_w = math.ceil(width / pooled_size[1])
num_rois = rois.shape[0]
max_pc_shape = (
rois.shape[0],
max_roi_bin_grid_h * max_roi_bin_grid_w * pooled_size[0] * pooled_size[1],
4,
)
w_pc_buffer = full(max_pc_shape, data.dtype, 0)
pos_pc_buffer = full(max_pc_shape, "int32", 0)
pooled_size = tvm.runtime.convert(pooled_size)
spatial_scale = tvm.tir.const(spatial_scale, "float32")
sample_ratio = tvm.tir.const(sample_ratio, "int32")
if mode in (b"avg", 0):
mode = tvm.tir.const(0, dtype="float32")
elif mode in (b"max", 1):
mode = tvm.tir.const(1, dtype="float32")
else:
        raise ValueError("Value %s passed in for mode not supported" % mode)
return roi_align_nchw_ir(
data,
rois,
num_rois,
w_pc_buffer,
pos_pc_buffer,
pooled_size,
spatial_scale,
sample_ratio,
mode,
)
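# A minimal usage sketch (shapes are illustrative):
#
#   import tvm
#   from tvm import te
#   data = te.placeholder((1, 256, 32, 32), name="data")
#   rois = te.placeholder((8, 5), name="rois")
#   out = roi_align_nchw(data, rois, pooled_size=7, spatial_scale=1.0 / 16,
#                        mode=b"avg", sample_ratio=2)
#   # out has shape (8, 256, 7, 7): one 7x7 map per ROI and channel.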
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Scatter operators for x86"""
import tvm
from tvm import te
from ..scatter import _verify_scatter_nd_inputs
def scatter_nd(data, indices, updates, mode):
"""Scatter elements from a n-dimension array.
Given updates with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}), indices with shape
(M, Y_0, ..., Y_{K-1}), and output copied from data with shape (X_0, X_1, ..., X_{N-1}),
scatter_nd computes
.. code-block::
output[indices[0, y_0, ..., y_{K-1}],
...,
indices[M-1, y_0, ..., y_{K-1}],
x_M,
...,
x_{N-1}
] = f(output[...], updates[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}])
    where the update function f is determined by the mode.
Parameters
----------
data : tvm.te.Tensor
The source array.
indices : tvm.te.Tensor
The indices of the values to extract.
updates : tvm.te.Tensor
The updates to apply at the Indices
mode : string
The update mode for the algorithm, either "update" or "add"
If update, the update values will replace the input data
If add, the update values will be added to the input data
Returns
-------
ret : tvm.te.Tensor
"""
_verify_scatter_nd_inputs(data, indices, updates)
def gen_ir(data_ptr, indices_ptr, updates_ptr, out_ptr):
# pylint: disable=invalid-name
ib = tvm.tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
indices = ib.buffer_ptr(indices_ptr)
updates = ib.buffer_ptr(updates_ptr)
out = ib.buffer_ptr(out_ptr)
# We combine all the indices dimensions but the first one into a single
# dimension so we can iterate it in single loop instead of an arbitrary
# number of loops. We do the same thing for all the update dimensions.
fused_indices_dimension = 1
for i in indices_ptr.shape[1:]:
fused_indices_dimension *= i
fused_updates_dimension = 1
for i in updates_ptr.shape[len(indices_ptr.shape) - 1 :]:
fused_updates_dimension *= i
fused_shape = 1
for i in data_ptr.shape:
fused_shape *= i
with ib.for_range(0, fused_shape) as i:
out[i] = data[i]
with ib.for_range(0, fused_indices_dimension) as i:
with ib.for_range(0, fused_updates_dimension, kind="parallel") as j:
offset = fused_updates_dimension
index = j # This is x_M, .. x_{N-1} part of the index into out.
# Build up the indices[0, y_0, .. y_{K-1}], .. indices[M-1, y_0, .. y_{K-1}] part
# of the index into out.
for l in reversed(range(indices_ptr.shape[0].value)):
                    # indices[i + l * fused_indices_dimension] == indices[l, y_0, ... y_{k-1}]
index += offset * indices[i + l * fused_indices_dimension]
offset *= data_ptr.shape[l]
if mode == "update":
out[index] = updates[i * fused_updates_dimension + j]
elif mode == "add":
out[index] += updates[i * fused_updates_dimension + j]
else:
raise NotImplementedError("scatter_nd mode not in [update, add]:", mode)
return ib.get()
out_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "out_buf")
return te.extern(
[data.shape],
[data, indices, updates],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], outs[0]),
dtype=data.dtype,
out_buffers=[out_buf],
name="scatter_nd_x86",
tag="scatter_nd_x86",
)
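# A small numpy reference (illustration only) of the semantics the IR above
# implements, for data shape (4, 4), M = 1 (indices address axis 0 only) and
# mode = "add":
#
#   import numpy as np
#   data = np.zeros((4, 4), np.float32)
#   indices = np.array([[0, 2, 2]])           # shape (M, Y_0) with M = 1
#   updates = np.ones((3, 4), np.float32)     # shape (Y_0, X_1)
#   out = data.copy()
#   for y in range(indices.shape[1]):
#       out[indices[0, y]] += updates[y]
#   # rows 0 and 2 of out are now all ones and all twos, respectively.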
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/x86/sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""sparse_dense schedule on x86"""
from functools import partial, reduce
from tvm import te, tir, autotvm
from ..transform import reshape
from ..utils import traverse_inline, get_const_int
from .utils import get_simd_32bit_lanes
def schedule_sparse_dense(outs):
"""Create schedule for sparse dense"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
simd_width = get_simd_32bit_lanes()
if op.tag == "sparse_dense_sp_lhs_csrmm" or op.tag == "sparse_dense_sp_lhs_csrmm":
(y_o, y_i) = s[op].split(s[op].op.axis[1], 2)
fused = s[op].fuse(s[op].op.axis[0], y_o)
s[op].parallel(fused)
s[op].vectorize(y_i)
elif op.tag == "sparse_dense_sp_rhs_bsrmm" or op.tag == "sparse_dense_sp_rhs_bsrmm":
y_bsrmm = op.input_tensors[0]
assert (
y_bsrmm.op.tag == "sparse_dense_sp_rhs_bsrmm_block"
or y_bsrmm.op.tag == "sparse_dense_sp_lhs_bsrmm_block"
)
y_reshape = op
(m, num_blocks, b_r) = s[y_bsrmm].op.axis
bs_r = get_const_int(b_r.dom.extent)
(elem_idx, c) = s[y_bsrmm].op.reduce_axis
s[y_bsrmm].reorder(num_blocks, m, elem_idx, b_r, c)
s[y_bsrmm].vectorize(b_r)
(m_o, n_o) = s[y_reshape].op.axis
(noo, noi) = s[y_reshape].split(n_o, bs_r)
s[y_bsrmm].compute_at(s[y_reshape], noi)
s[y_reshape].vectorize(noi)
if op != s[outs[0]].op:
(y_o, y_i) = s[outs[0].op].split(s[outs[0].op].op.axis[1], 2 * simd_width)
s[y_reshape].compute_at(s[outs[0]], y_o)
s[outs[0].op].parallel(y_o)
s[outs[0].op].vectorize(y_i)
else:
m_o_noo = s[y_reshape].fuse(m_o, noo)
s[y_reshape].parallel(m_o_noo)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv3x3_spNHWC.x86")
def spconv2d_3x3_nhwc(cfg, data, wdat, wind, wptr, layout="NHWC"):
"""Sparse Conv2d 3x3 compute (NHWC)."""
assert layout == "NHWC"
nsamples, imh, imw, chanin = [i.value for i in data.shape]
nelems, bsrr, bsrc = [i.value for i in wdat.shape]
chanout = (wptr.shape[0].value - 1) * bsrr
imglen, chanlen = nsamples * imh * imw, 9 * chanin
cfg.define_split("tile_y", imglen, num_outputs=3)
cfg.define_split("tile_x", chanout // bsrr, num_outputs=2)
cfg.add_flop(imglen * (nelems * bsrc * bsrr * 2 - chanout))
if cfg.is_fallback:
cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 160, 8])
cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 4])
idxsplit = lambda x, y: reduce(lambda a, b: a[:-1] + [a[-1] % b, a[-1] // b], y, [x])
@partial(te.compute, (imglen, chanlen), name="Im2Col")
def im2col(row, col):
j_w, j_h, j_n = idxsplit(row, [imw, imh])
j_c, k_w, k_h = idxsplit(col, [chanin, 3])
i_h, i_w = j_h + k_h - 1, j_w + k_w - 1
return tir.if_then_else(
tir.all(i_h >= 0, i_h < imh, i_w >= 0, i_w < imw), data[j_n, i_h, i_w, j_c], 0
)
@partial(te.compute, (imglen, chanout // bsrr, bsrr, bsrc), name="CC")
def matmul(drow, wrow, brow, bcol):
row_start, row_end = wptr[wrow], wptr[wrow + 1]
elem_idx = te.reduce_axis((0, row_end - row_start), name="elem_idx")
elem = row_start + elem_idx
return te.sum(
im2col[drow, wind[elem] * bsrc + bcol] * wdat[elem, brow, bcol], axis=elem_idx
)
sum_bsrc = te.reduce_axis((0, bsrc), name="k")
ret = te.compute(
(imglen, chanout),
lambda y, x: te.sum(matmul[y, x // bsrr, x % bsrr, sum_bsrc], axis=sum_bsrc),
name="C",
tag="conv3x3_spNHWC",
)
return reshape(ret, (nsamples, imh, imw, chanout))
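# Note on idxsplit above (illustration only): idxsplit(row, [imw, imh]) peels
# mixed-radix digits off `row`, returning
#   [row % imw, (row // imw) % imh, row // (imw * imh)] == [j_w, j_h, j_n].
# E.g. with imw = imh = 4, row = 22 gives j_w = 2, j_h = 1, j_n = 1.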
@autotvm.register_topi_schedule("conv3x3_spNHWC.x86")
def schedule_spconv2d_3x3_nhwc(cfg, outs):
"""Sparse Conv2d 3x3 schedule (NHWC)."""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv3x3_spNHWC":
(matmul,) = op.input_tensors
# wptr, wind, im2col, wdat
_, _, im2col, _ = matmul.op.input_tensors
(data,) = im2col.op.input_tensors
bsrr = matmul.shape[-2].value
chanin = data.shape[-1].value
mm_y, mm_x = s[op].op.axis
y_t, y_o, y_i = cfg["tile_y"].apply(s, op, mm_y)
x_o, x_i = s[op].split(mm_x, factor=bsrr)
x_t, x_o = cfg["tile_x"].apply(s, op, x_o)
(sum_ax,) = s[op].op.reduce_axis
s[op].reorder(y_t, x_t, y_o, x_o, y_i, x_i, sum_ax)
s[op].unroll(sum_ax)
s[op].vectorize(x_i)
s[op].unroll(y_i)
s[matmul].compute_at(s[op], x_o)
y_i, x_i, bsrr, bsrc = s[matmul].op.axis
(sum_ax,) = s[matmul].op.reduce_axis
s[matmul].reorder(x_i, sum_ax, y_i, bsrr, bsrc)
s[matmul].unroll(bsrc)
s[matmul].vectorize(bsrr)
s[matmul].unroll(y_i)
s[im2col].compute_at(s[op], y_o)
y_i, sum_ax = s[im2col].op.axis
_, k_i = s[im2col].split(sum_ax, factor=chanin)
s[im2col].vectorize(k_i)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv3x3_spNCHW.x86")
def spconv2d_3x3_nchw(cfg, data, wdat, wind, wptr, layout="NCHW"):
"""Sparse Conv2d 3x3 compute (NCHW)."""
nsamples, chanin, imgh, imgw = [i.value for i in data.shape]
nelems, veclen, bsrc = [i.value for i in wdat.shape]
chanout = (wptr.shape[0].value - 1) * veclen
assert bsrc == 1 and layout == "NCHW"
cfg.add_flop(nsamples * imgh * imgw * (nelems * veclen * bsrc * 2 - chanout))
cfg.define_split("tile_hw", imgh * imgw, num_outputs=3)
cfg.define_split("tile_ckk", chanin * 9, num_outputs=3)
@partial(te.compute, (nsamples, chanin * 3 * 3, imgh * imgw), name="im2col")
def im2col(nsamples, ckk, imglen):
j_h, j_w = imglen // imgw, imglen % imgw
i_c, k_h, k_w = ckk // 9, ckk // 3 % 3, ckk % 3
i_h, i_w = j_h + k_h - 1, j_w + k_w - 1
return tir.if_then_else(
tir.all(i_h >= 0, i_h < imgh, i_w >= 0, i_w < imgw), data[nsamples, i_c, i_h, i_w], 0
)
@partial(
te.compute,
(nsamples, chanout // veclen, veclen, bsrc, imgh * imgw),
name="CC",
tag="conv3x3_spNCHW",
)
def matmul(nsamples, f_o, f_i, bsrk, imglen):
row_start, row_end = wptr[f_o], wptr[f_o + 1]
elem_idx = te.reduce_axis((0, row_end - row_start), name="elem_idx")
elem = row_start + elem_idx
return te.sum(
im2col[nsamples, wind[elem] * bsrc + bsrk, imglen] * wdat[elem, f_i, bsrk],
axis=elem_idx,
)
return reshape(matmul, [nsamples, chanout, imgh, imgw])
@autotvm.register_topi_schedule("conv3x3_spNCHW.x86")
def schedule_spconv2d_3x3_nchw(cfg, outs):
"""Sparse Conv2d 3x3 schedule (NCHW)."""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "conv3x3_spNCHW":
# wptr, wind, im2col, wdat
_, _, im2col, _ = op.input_tensors
n_samples, f_o, f_i, b_c, imglen = s[op].op.axis
(sum_ax,) = s[op].op.reduce_axis
hw1, hw2, hw3 = cfg["tile_hw"].apply(s, op, imglen)
s[op].reorder(n_samples, hw1, f_o, hw2, sum_ax, f_i, b_c, hw3)
s[op].unroll(f_i)
s[op].unroll(b_c)
s[op].vectorize(hw3)
s[im2col].compute_at(s[op], hw1)
n_samples, ckk, imglen = s[im2col].op.axis
ckk1, ckk2, ckk3 = cfg["tile_ckk"].apply(s, im2col, ckk)
hw2, hw3 = s[im2col].split(imglen, factor=cfg["tile_hw"].size[-1])
s[im2col].reorder(n_samples, ckk1, ckk2, hw2, ckk3, hw3)
s[im2col].unroll(ckk3)
s[im2col].vectorize(hw3)
traverse_inline(s, outs[0].op, _callback)
return s
| https://github.com/zk-ml/tachikoma |