file_path | content | repo
---|---|---|
tests/python/topi/python/test_fifo_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for FIFO buffer"""
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
def verify_fifo_buffer(buffer_shape, data_shape, axis, dtype="float32"):
buffer = te.placeholder(buffer_shape, name="buffer", dtype=dtype)
data = te.placeholder(data_shape, name="data", dtype=dtype)
# Use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_fifo_buffer")
def get_ref_data():
buffer_np = np.random.uniform(size=buffer_shape).astype(dtype)
data_np = np.random.uniform(size=data_shape).astype(dtype)
# Reference implementation of FIFO queue
begin = data_np.shape[axis]
end = buffer_np.shape[axis] + data_np.shape[axis]
ndim = len(buffer_np.shape)
ss = tuple((slice(begin, end, 1) if x == axis else slice(None)) for x in range(ndim))
out_np = np.concatenate((buffer_np, data_np), axis=axis)[ss]
return (buffer_np, data_np, out_np)
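# Worked example of the reference above (illustrative values, not the actual test data):
# with axis=0, buffer_np=[1,2,3,4,5] and data_np=[6,7], the concatenation is
# [1,2,3,4,5,6,7]; the slice [begin:end] = [2:7] keeps the newest five elements,
# [3,4,5,6,7], so the oldest entries are evicted exactly as a FIFO queue would.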
# Get the test data
buffer_np, data_np, out_np = get_ref_data()
def check_device(target, dev):
print(" Running on target: {}".format(target))
with tvm.target.Target(target):
out = topi.nn.fifo_buffer(data, buffer, axis=axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
buffer_tvm = tvm.nd.array(buffer_np, device=dev)
data_tvm = tvm.nd.array(data_np, device=dev)
out_tvm = tvm.nd.empty(shape=buffer_shape, device=dev, dtype=dtype)
f = tvm.build(s, [data, buffer, out], target, name="fifo")
f(data_tvm, buffer_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.numpy(), out_np)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_conv1d_integration():
batch_size = 1
num_channel = 1
num_filter = 1
# Note: TVM doesn't have a separate op for 1D convolution, so we use conv2d instead.
# We set height=1 to indicate that convolution is really 1D.
stride = (1, 1)
dilate = (1, 1)
padding = (0, 0)
kernel_size = (1, 3)
input_window_size = (1, 10)
inc_input_size = (1, 2)
context_size = (1, 4)
inc_output_size = (1, 2)
output_window_size = (1, 8)
num_iteration = 20
buffer_axis = 3
kernel_shape = (num_filter, num_channel, kernel_size[0], kernel_size[1])
input_window_shape = (batch_size, num_channel, input_window_size[0], input_window_size[1])
inc_input_shape = (batch_size, num_channel, inc_input_size[0], inc_input_size[1])
inc_output_shape = (batch_size, num_filter, inc_output_size[0], inc_output_size[1])
context_shape = (batch_size, num_channel, context_size[0], context_size[1])
output_window_shape = (batch_size, num_filter, output_window_size[0], output_window_size[1])
# Rule: Convolution of Tensor[context_shape] and Tensor[kernel_shape]
# produces Tensor[inc_output_shape]
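# Shape check for the rule above (added for clarity): context width 4 convolved with a
# kernel of width 3, stride 1 and no padding gives output width 4 - 3 + 1 = 2, which is
# exactly inc_output_size, so each update of the context buffer emits one inc_output slice.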
dtype = "float32"
inc_input = te.placeholder(inc_input_shape, name="inc_input", dtype=dtype)
input_window = te.placeholder(input_window_shape, name="input_window", dtype=dtype)
context = te.placeholder(context_shape, name="context", dtype=dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=dtype)
inc_output = te.placeholder(inc_output_shape, name="inc_output", dtype=dtype)
output_window = te.placeholder(output_window_shape, name="output_window", dtype=dtype)
# Use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_fifo_buffer_conv1d_integration")
def get_data():
# Generate [num_iteration] slices of input
inc_input_np = np.random.uniform(
size=tuple([num_iteration] + list(inc_input_shape))
).astype(dtype)
input_window_np = np.zeros(input_window_shape, dtype=dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
context_np = np.zeros(context_shape, dtype=dtype)
output_window_np = np.zeros(output_window_shape, dtype=dtype)
return (inc_input_np, input_window_np, kernel_np, context_np, output_window_np)
# Get the test data
inc_input_np, input_window_np, kernel_np, context_np, output_window_np = get_data()
def check_device(target, dev):
print(" Running on target: {}".format(target))
conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(target)
with tvm.target.Target(target):
out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_context = tvm.build(s, [inc_input, context, out], target, name="update_context")
out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
s = schedule_conv2d_nchw([out])
conv2d_inc = tvm.build(s, [context, kernel, out], target, name="conv2d_inc")
out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_output_window = tvm.build(
s, [inc_output, output_window, out], target, name="update_output_window"
)
out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_input_window = tvm.build(
s, [inc_input, input_window, out], target, name="update_input_window"
)
out = conv2d_nchw(input_window, kernel, stride, padding, dilate, dtype)
s = schedule_conv2d_nchw([out])
conv2d = tvm.build(s, [input_window, kernel, out], target, name="conv2d")
input_window_tvm = tvm.nd.array(input_window_np, device=dev)
new_input_window_tvm = tvm.nd.empty(shape=input_window_shape, device=dev, dtype=dtype)
kernel_tvm = tvm.nd.array(kernel_np, device=dev)
context_tvm = tvm.nd.array(context_np, device=dev)
new_context_tvm = tvm.nd.empty(shape=context_shape, device=dev, dtype=dtype)
inc_output_tvm = tvm.nd.empty(shape=inc_output_shape, device=dev, dtype=dtype)
output_window_tvm = tvm.nd.array(output_window_np, device=dev)
new_output_window_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)
output_window_ref_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)
for i in range(num_iteration):
# Take i-th slice of inc_input_np
inc_input_tvm = tvm.nd.array(inc_input_np[i], device=dev)
# Compute new output window incrementally, using the FIFO buffer op
update_context(inc_input_tvm, context_tvm, new_context_tvm)
conv2d_inc(new_context_tvm, kernel_tvm, inc_output_tvm)
update_output_window(inc_output_tvm, output_window_tvm, new_output_window_tvm)
context_tvm = new_context_tvm
output_window_tvm = new_output_window_tvm
# Compute full input window, so that we have a baseline
update_input_window(inc_input_tvm, input_window_tvm, new_input_window_tvm)
input_window_tvm = new_input_window_tvm
conv2d(input_window_tvm, kernel_tvm, output_window_ref_tvm)
# Incrementally updating the output window should be equivalent to computing it from
# scratch using the input window
tvm.testing.assert_allclose(output_window_tvm.numpy(), output_window_ref_tvm.numpy())
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
@tvm.testing.uses_gpu
def test_fifo_buffer():
for ndim in [1, 2, 3, 4, 5, 6]:
for axis in range(ndim):
buffer_shape = tuple(7 for _ in range(ndim))
data_shape = tuple((2 if i == axis else 7) for i in range(ndim))
print(
"Testing FIFO buffer op: buffer_shape = {}, data_shape = {}, axis = {}".format(
buffer_shape, data_shape, axis
)
)
verify_fifo_buffer(buffer_shape, data_shape, axis)
@tvm.testing.uses_gpu
def test_conv1d_integration():
print("Testing FIFO buffer with 1D convolution")
verify_conv1d_integration()
if __name__ == "__main__":
test_fifo_buffer()
test_conv1d_integration()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for argwhere operator"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
import tvm.topi.testing
_argwhere_schedule = {
"generic": topi.generic.schedule_argwhere,
"gpu": topi.cuda.schedule_argwhere,
}
_argwhere_compute = {"llvm": topi.argwhere, "cuda": topi.cuda.argwhere}
data_shape = tvm.testing.parameter(
(1,),
(100,),
(1, 1),
(5, 3),
(32, 64),
(128, 65),
(200, 500),
(6, 5, 3),
(1, 1, 1),
(1, 1, 1, 1),
(6, 4, 5, 3),
(1, 1, 1, 1, 1),
(6, 4, 5, 3, 7),
)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_argwhere(target, dev, data_shape):
dtype = "int32"
np_data = np.random.choice([0, 1, 2, 3], size=data_shape).astype(dtype)
np_out = np.argwhere(np_data)
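# np.argwhere returns the indices of all non-zero elements, one row per hit, so its shape
# is (num_nonzero, ndim); e.g. np.argwhere([[0, 1], [2, 0]]) == [[0, 1], [1, 0]].
# Because that first dimension is data-dependent, the test sizes the out_shape placeholder
# from this numpy reference before building the TVM kernel.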
out_shape = np_out.shape[0]
np_shape = np.ones(shape=(out_shape, len(data_shape)), dtype=dtype)
out_shape = te.placeholder(shape=(out_shape, len(data_shape)), name="out_shape", dtype=dtype)
condition = te.placeholder(shape=data_shape, name="condition", dtype=dtype)
with tvm.target.Target(target):
out = _argwhere_compute[target](out_shape, condition)
s_func = tvm.topi.testing.dispatch(target, _argwhere_schedule)
sch = s_func(out)
func = tvm.build(sch, [out_shape, condition, out], target, name="argwhere")
args = [tvm.nd.array(np_shape, dev)]
args.append(tvm.nd.array(np_data, dev))
args.append(tvm.nd.empty(out.shape, device=dev, dtype=condition.dtype))
func(*args)
np.set_printoptions(threshold=np.inf)
tvm_out = args[-1].numpy()
tvm.testing.assert_allclose(tvm_out, np_out)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_basic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
from tvm.topi import utils
def test_util():
x = tvm.tir.const(100, "int32")
assert utils.get_const_int(x) == 100
assert utils.get_const_tuple((x, x)) == (100, 100)
def test_ewise():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
def test_apply(func, name):
B = func(A)
assert tuple(B.shape) == tuple(A.shape)
assert B.op.body[0].op.name == "tir." + name
test_apply(topi.exp, "exp")
test_apply(topi.erf, "erf")
test_apply(topi.tanh, "tanh")
test_apply(topi.sigmoid, "sigmoid")
test_apply(topi.log, "log")
test_apply(topi.sqrt, "sqrt")
test_apply(topi.rsqrt, "rsqrt")
test_apply(topi.sin, "sin")
test_apply(topi.cos, "cos")
test_apply(topi.tan, "tan")
test_apply(topi.atan, "atan")
if __name__ == "__main__":
test_util()
test_ewise()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
from common import Int8Fallback
_batch_matmul_implement = {
"generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
"cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
"gpu": (topi.cuda.batch_matmul, topi.cuda.schedule_batch_matmul),
}
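# Note (added): both operands are laid out with the reduction axis last -- x is (batch, M, K)
# and y is (batch, N, K) -- so the op computes C[b, m, n] = sum_k x[b, m, k] * y[b, n, k],
# i.e. a batched matmul with the second matrix already transposed, matching the reference
# tvm.topi.testing.batch_matmul used below.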
def verify_batch_matmul(x_batch, y_batch, M, N, K, dynamic=False, debug=False):
if not dynamic:
x = te.placeholder((x_batch, M, K), name="x")
y = te.placeholder((y_batch, N, K), name="y")
dtype = x.dtype
else:
assert x_batch == y_batch or x_batch == 1 or y_batch == 1
batch_size = max(x_batch, y_batch)
dynamic_batch_size = te.var("dynamic_batch_size")
dynamic_M = te.var("dynamic_M")
dynamic_N = te.var("dynamic_N")
dynamic_K = te.var("dynamic_K")
x = te.placeholder((dynamic_batch_size, dynamic_M, dynamic_K), name="x")
y = te.placeholder((dynamic_batch_size, dynamic_N, dynamic_K), name="y")
dtype = x.dtype
# use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_topi_batch_matmul")
def get_ref_data():
a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _batch_matmul_implement)
out = fcompute(x, y)
if not dynamic:
s = fschedule([out])
out_shape = out.shape
else:
s = te.create_schedule(out.op)
out_shape = (batch_size, M, N)
if debug:
print(tvm.lower(s, [x, y, out], simple_mode=True))
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(out_shape), dtype=dtype), dev)
f = tvm.build(s, [x, y, out], target, name="dense")
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
target_kind = tvm.target.Target(target).kind.name
if dynamic and target_kind in ["cuda", "nvptx", "vulkan", "opencl"]:
print("Dynamic batch matmul test is skippped on %s" % target)
continue
check_device(target, dev)
def verify_batch_matmul_int8(x_batch, y_batch, M, N, K):
dtype = "int8"
out_dtype = "int32"
assert x_batch == y_batch or x_batch == 1 or y_batch == 1
x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)
# use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_topi_batch_matmul")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype=out_dtype)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if device == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
out = topi.cuda.batch_matmul_int8(x, y, None, out_dtype)
s = topi.cuda.schedule_batch_matmul_int8([out])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
f = tvm.build(s, [x, y, out], device, name="batch_matmul_int8")
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["cuda"]:
check_device(device)
@tvm.testing.uses_gpu
def test_batch_matmul():
verify_batch_matmul(1, 1, 16, 16, 32)
verify_batch_matmul(5, 5, 16, 16, 32)
verify_batch_matmul(5, 5, 16, 20, 32)
verify_batch_matmul(30, 30, 16, 20, 32)
# Test batch broadcasting.
verify_batch_matmul(1, 5, 16, 16, 32)
verify_batch_matmul(5, 1, 16, 16, 32)
# Test dynamic batch
verify_batch_matmul(1, 1, 16, 16, 32, dynamic=True)
verify_batch_matmul(5, 5, 16, 16, 32, dynamic=True)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_batch_matmul_int8():
with Int8Fallback():
verify_batch_matmul_int8(1, 1, 2, 3, 1)
verify_batch_matmul_int8(1, 1, 16, 24, 32)
verify_batch_matmul_int8(5, 5, 24, 16, 32)
verify_batch_matmul_int8(30, 30, 16, 20, 32)
verify_batch_matmul_int8(1, 5, 16, 16, 32)
verify_batch_matmul_int8(5, 1, 16, 16, 32)
if __name__ == "__main__":
test_batch_matmul()
test_batch_matmul_int8()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_batch_matmul_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
_batch_matmul_implement = {
"gpu": (topi.cuda.batch_matmul_tensorcore, topi.cuda.schedule_batch_matmul_tensorcore),
}
def convert_int32_into_int4(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
B, K, L = a_int32.shape
assert L % 8 == 0
a_int4 = np.zeros(shape=(B, K, L // 8), dtype=np.int32)
for b in range(B):
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[b, k, l] = a_int4[b, k, l] | (
(a_int32[b, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
)
return a_int4
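# Packing layout (illustrative): every int32 word holds eight signed 4-bit lanes with
# element l*8 + 0 in the most significant nibble, so packing the eight values
# [1, 0, 0, 0, 0, 0, 0, 0] produces the single word 0x10000000.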
def verify_batch_matmul(x_batch, y_batch, M, N, K, dtype):
x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)
assert dtype in ["int4", "int8", "float16"]
out_dtype = "float32"
if dtype in ["int8", "int4"]:
out_dtype = "int32"
# use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_topi_batch_matmul_tensorcore")
def get_ref_data():
if dtype == "int4":
a_np = np.random.randint(low=-8, high=7, size=(x_batch, M, K))
b_np = np.random.randint(low=-8, high=7, size=(y_batch, N, K))
elif dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
else:
a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
if dtype == "int4":
a_np = convert_int32_into_int4(a_np)
b_np = convert_int32_into_int4(b_np)
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
out = fcompute(x, y, None, out_dtype)
s = fschedule([out])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
f = tvm.build(s, [x, y, out], device, name="batch_matmul")
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
check_device("cuda")
@tvm.testing.requires_tensorcore
def test_batch_matmul():
for dtype in ["float16", "int8", "int4"]:
verify_batch_matmul(1, 1, 16, 16, 32, dtype)
verify_batch_matmul(5, 5, 16, 16, 32, dtype)
verify_batch_matmul(5, 5, 16, 32, 32, dtype)
verify_batch_matmul(30, 30, 16, 32, 32, dtype)
if __name__ == "__main__":
test_batch_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_batch_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for the batch_norm operator."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
_DEVICE = "llvm"
_BATCH_NORM_IMPLEMENT = {
"generic": (topi.nn.batch_norm, topi.generic.schedule_batch_norm),
"cpu": (topi.nn.batch_norm, topi.x86.schedule_batch_norm),
}
@pytest.mark.parametrize(
"shape, axis, epsilon, center, scale",
[
((1,), 0, 0.1, True, True),
((2, 3), 0, 0.1, True, True),
((1, 2, 4), 0, 0.1, True, True),
((1, 2, 3, 4), 0, 0.001, False, False),
((2, 3, 4, 1), 1, 0.01, False, True),
((3, 4, 1, 2), 2, 0.1, True, False),
((4, 1, 2, 3), 3, 1.0, True, True),
((1, 2, 4, 4, 5), 0, 0.1, True, True),
],
)
def test_batch_norm(shape, axis, epsilon, center, scale):
x_np = np.random.random(shape).astype("float32")
gamma_np = np.random.random(shape[axis]).astype("float32")
beta_np = np.random.random(shape[axis]).astype("float32")
moving_mean_np = np.random.random(shape[axis]).astype("float32")
moving_var_np = np.random.random(shape[axis]).astype("float32")
out_x_np, out_moving_mean_np, out_moving_var_np = tvm.topi.testing.batch_norm(
x_np, gamma_np, beta_np, moving_mean_np, moving_var_np, axis, epsilon, center, scale
)
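# Assuming the standard inference-mode definition, the reference computes
# out_x = (x - moving_mean) / sqrt(moving_var + epsilon) * gamma + beta along `axis`
# (gamma and beta applied only when scale/center are True) and passes the moving statistics
# through unchanged; the TVM build below is checked against exactly these three outputs.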
x_te = te.placeholder(shape, name="x", dtype="float32")
gamma_te = te.placeholder((shape[axis],), name="gamma", dtype="float32")
beta_te = te.placeholder((shape[axis],), name="beta", dtype="float32")
moving_mean_te = te.placeholder((shape[axis],), name="moving_mean", dtype="float32")
moving_var_te = te.placeholder((shape[axis],), name="moving_var", dtype="float32")
with tvm.target.Target(_DEVICE):
fcompute, fschedule = tvm.topi.testing.dispatch(_DEVICE, _BATCH_NORM_IMPLEMENT)
out_x, out_moving_mean, out_moving_var = fcompute(
x_te, gamma_te, beta_te, moving_mean_te, moving_var_te, axis, epsilon, center, scale
)
s = fschedule([out_x, out_moving_mean, out_moving_var])
dev = tvm.device(_DEVICE, 0)
x_tvm = tvm.nd.array(x_np, dev)
gamma_tvm = tvm.nd.array(gamma_np, dev)
beta_tvm = tvm.nd.array(beta_np, dev)
moving_mean_tvm = tvm.nd.array(moving_mean_np, dev)
moving_var_tvm = tvm.nd.array(moving_var_np, dev)
out_x_tvm = tvm.nd.array(np.zeros(shape, dtype=out_x.dtype), dev)
out_moving_mean_tvm = tvm.nd.array(
np.zeros((shape[axis],), dtype=out_moving_mean.dtype), dev
)
out_moving_var_tvm = tvm.nd.array(np.zeros((shape[axis],), dtype=out_moving_var.dtype), dev)
f = tvm.build(
s,
[
x_te,
gamma_te,
beta_te,
moving_mean_te,
moving_var_te,
out_x,
out_moving_mean,
out_moving_var,
],
_DEVICE,
)
f(
x_tvm,
gamma_tvm,
beta_tvm,
moving_mean_tvm,
moving_var_tvm,
out_x_tvm,
out_moving_mean_tvm,
out_moving_var_tvm,
)
tvm.testing.assert_allclose(out_x_tvm.numpy(), out_x_np, rtol=1e-3)
tvm.testing.assert_allclose(out_moving_mean_tvm.numpy(), out_moving_mean_np, rtol=1e-3)
tvm.testing.assert_allclose(out_moving_var_tvm.numpy(), out_moving_var_np, rtol=1e-3)
if __name__ == "__main__":
test_batch_norm()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_batch_to_space_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch to space"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_batch_to_space_nd(input_shape, block_shape, crop_begin_list, crop_end_list):
out_shape = []
out_shape.append(int((input_shape[0] / np.prod(block_shape))))
for i in range(1, len(block_shape) + 1):
crop = crop_begin_list[i - 1] + crop_end_list[i - 1]
out_shape.append(input_shape[i] * block_shape[i - 1] - crop)
for i in range(len(block_shape) + 1, len(input_shape)):
out_shape.append(input_shape[i])
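# Worked example (illustrative): input_shape=[4, 1, 1, 1] with block_shape=[2, 2] and no
# crops gives batch 4 / (2 * 2) = 1 and spatial dims 1 * 2 - 0 = 2, so
# out_shape = [1, 2, 2, 1], matching the first case exercised in test_batch_to_space.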
A = te.placeholder(input_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=input_shape).astype(dtype)
B = topi.nn.batch_to_space_nd(A, block_shape, crop_begin_list, crop_end_list)
b_np = tvm.topi.testing.batch_to_space_nd_python(
a_np, block_shape, crop_begin_list, crop_end_list
)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
@tvm.testing.uses_gpu
def test_batch_to_space():
# Without crops
verify_batch_to_space_nd([4, 1, 1, 1], [2, 2], [0, 0], [0, 0])
# With crops
verify_batch_to_space_nd([8, 1, 3, 1], [2, 2], [0, 2], [0, 0])
verify_batch_to_space_nd([18, 2, 1, 2], [2, 3], [1, 1], [0, 0])
verify_batch_to_space_nd([20, 5, 8, 7], [2, 2], [1, 1], [1, 1])
if __name__ == "__main__":
test_batch_to_space()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
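# generate_quantized_np draws unsigned values in [0, 2**bits), i.e. the full range of a
# `bits`-wide quantized activation or weight; with bits=1 this yields {0, 1}.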
def verify_bitserial_conv2d_nchw(
batch,
in_size,
in_channel,
num_filter,
kernel,
stride,
padding,
activation_bits,
weight_bits,
unipolar,
):
in_height = in_width = in_size
input_dtype = "uint32"
out_dtype = "int32"
with tvm.target.Target("llvm"):
A = te.placeholder((batch, in_channel, in_height, in_width), dtype=input_dtype, name="A")
W = te.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_dtype, name="W")
B = topi.x86.bitserial_conv2d_nchw(
A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
)
s = topi.x86.schedule_bitserial_conv2d_nchw([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_bitseral_conv2d_nchw")
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
if unipolar:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=["readwrite"]):
x[...] = 1 if x == 1 else -1
b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
else:
b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
return a_np, w_np, b_np
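# Note on the unipolar branch above: the 1-bit weights {0, 1} are reinterpreted as {-1, +1}
# before running the full-precision conv2d reference, which is what the bitserial kernel is
# expected to compute when unipolar=True.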
a_np, w_np, b_np = get_ref_data()
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], "llvm")
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def verify_bitserial_conv2d_nhwc(
batch,
in_size,
in_channel,
num_filter,
kernel,
stride,
padding,
activation_bits,
weight_bits,
unipolar,
):
in_height = in_width = in_size
input_dtype = "uint32"
out_dtype = "int32"
with tvm.target.Target("llvm"):
A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_dtype, name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_dtype, name="W")
B = topi.x86.bitserial_conv2d_nhwc(
A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
)
s = topi.x86.schedule_bitserial_conv2d_nhwc([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_bitseral_conv2d_nhwc")
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
if unipolar:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=["readwrite"]):
x[...] = 1 if x == 1 else -1
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
out_dtype
)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], "llvm")
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
in_size = 56
ic, oc = 64, 64
k = 3
stride = 1
pad = 1
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
if __name__ == "__main__":
test_bitserial_conv2d()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
def generate_quantized_np(shape, bits, out_dtype):
np.random.seed(0)
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
# Verify that certain special instructions from the tensorize pass exist
def verify_bitserial_conv2d_nhwc(
batch,
in_size,
in_channel,
num_filter,
kernel,
stride,
padding,
activation_bits,
weight_bits,
unipolar,
use_relu=False,
):
in_height = in_width = in_size
input_type = "uint32"
out_dtype = "int16"
device = "llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon"
with tvm.target.Target(device):
A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name="W")
B = topi.arm_cpu.bitserial_conv2d_nhwc(
A, W, stride, padding, activation_bits, weight_bits, "uint8", out_dtype, unipolar
)
if use_relu:
B = topi.nn.relu(B)
s = topi.arm_cpu.schedule_bitserial_conv2d_nhwc([B])
func = tvm.build(s, [A, W, B], device)
assembly = func.get_source("asm")
matches = re.findall("vpadal", assembly)
assert len(matches) > 0
matches = re.findall("vcnt", assembly)
assert len(matches) > 0
matches = re.findall("vpadd", assembly)
assert len(matches) > 0
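# The three patterns above are ARM NEON instructions: vcnt is the per-lane population count,
# and vpadal / vpadd accumulate those popcounts pairwise. Their presence in the generated
# assembly indicates the bitserial tensorize intrinsic was actually emitted.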
dev = tvm.device(device, 0)
if "arm" not in os.uname()[4]:
print("Skipped running code, not an arm device")
return
print("Running on target: %s" % device)
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
if unipolar:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=["readwrite"]):
x[...] = 1 if x == 1 else -1
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
out_dtype
)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], device)
func(a, w, b)
np.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
in_size = 56
ic, oc = 64, 64
k = 3
stride = 1
pad = 1
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True, True)
if __name__ == "__main__":
test_bitserial_conv2d()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for bitserial_dense operator"""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
_bitserial_dense_implement = {
"generic": (topi.nn.bitserial_dense, topi.generic.schedule_bitserial_dense),
"cpu": (topi.x86.bitserial_dense, topi.x86.schedule_bitserial_dense),
"arm_cpu": (topi.arm_cpu.bitserial_dense, topi.arm_cpu.schedule_bitserial_dense),
}
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits, unipolar):
out_dtype = "int16"
def get_ref_data(a_shape, b_shape, input_dtype):
a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
if unipolar:
b_ = np.copy(b_np).astype(out_dtype)
for x in np.nditer(b_, op_flags=["readwrite"]):
x[...] = 1 if x == 1 else -1
c_np = np.dot(a_np, b_.T)
else:
c_np = np.dot(a_np, b_np.T)
return a_np, b_np, c_np
for target in ["llvm", "llvm -device=arm_cpu"]:
if "arm_cpu" in target and "arm" not in os.uname()[4]:
print("Skipped running code, not an arm device")
continue
input_dtype = "uint8" if "arm_cpu" in target else "uint32"
A = te.placeholder((batch, in_dim), dtype=input_dtype, name="A")
B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name="B")
fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
C = fcompute(A, B, activation_bits, weight_bits, input_dtype, out_dtype, unipolar)
s = fschedule([C])
a_shape = get_const_tuple(A.shape)
b_shape = get_const_tuple(B.shape)
a_np, b_np, c_np = get_ref_data(a_shape, b_shape, input_dtype)
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(s, [A, B, C], target)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
def test_bitserial_dense():
verify_bitserial_dense(1, 1024, 1000, 1, 1, True)
verify_bitserial_dense(1, 1024, 1000, 2, 1, True)
verify_bitserial_dense(1, 1024, 1000, 1, 1, False)
verify_bitserial_dense(1, 1024, 1000, 2, 1, False)
if __name__ == "__main__":
test_bitserial_dense()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_bnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for binary neural network operators."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_binary_dense(batch, in_dim, out_dim):
A = te.placeholder((batch, in_dim), name="A")
B = te.placeholder((out_dim, in_dim), name="B")
bnn_A = topi.nn.binarize_pack(A)
bnn_B = topi.nn.binarize_pack(B)
# binary dense
bnn_A1 = te.placeholder(bnn_A.shape, dtype=bnn_A.dtype)
bnn_B1 = te.placeholder(bnn_B.shape, dtype=bnn_B.dtype)
bnn_C = topi.nn.binary_dense(bnn_A1, bnn_B1)
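# Sketch of the pipeline above (as this test assumes it): binarize_pack packs the sign bits
# of the float inputs into uint32 words along the reduction axis, and binary_dense then
# evaluates the +1/-1 dot product with XOR + popcount on the packed words, which is why the
# result can be compared against np.dot of the original +1/-1 matrices.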
# schedule
with tvm.target.Target("llvm"):
s1 = topi.x86.schedule_binarize_pack(bnn_A)
s2 = topi.x86.schedule_binarize_pack(bnn_B)
s3 = topi.x86.schedule_binary_dense(bnn_C)
dtype = A.dtype
@memoize("topi.tests.test_topi_binary_dense")
def get_ref_data():
# generate random matrices of +1 or -1 values
a_np = (np.random.randint(2, size=(batch, in_dim)) * 2 - 1).astype(dtype)
b_np = (np.random.randint(2, size=(out_dim, in_dim)) * 2 - 1).astype(dtype)
c_np = np.dot(a_np, b_np.T)
return a_np, b_np, c_np
a_np, b_np, c_np = get_ref_data()
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
bnn_a = tvm.nd.array(np.zeros(get_const_tuple(bnn_A.shape), dtype=bnn_A.dtype), dev)
bnn_b = tvm.nd.array(np.zeros(get_const_tuple(bnn_B.shape), dtype=bnn_B.dtype), dev)
bnn_c = tvm.nd.array(np.zeros(get_const_tuple(bnn_C.shape), dtype=bnn_C.dtype), dev)
f1 = tvm.build(s1, [A, bnn_A], "llvm")
f2 = tvm.build(s2, [B, bnn_B], "llvm")
f3 = tvm.build(s3, [bnn_A1, bnn_B1, bnn_C], "llvm")
f1(a, bnn_a)
f2(b, bnn_b)
f3(bnn_a, bnn_b, bnn_c)
tvm.testing.assert_allclose(bnn_c.numpy(), c_np, rtol=1e-5)
def test_binary_dense():
verify_binary_dense(1, 4096, 1024)
verify_binary_dense(1, 1024, 1000)
if __name__ == "__main__":
test_binary_dense()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_broadcast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for broadcasting operators."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_broadcast_to_ele(in_shape, out_shape, fbcast):
# Build the logic and compile the function
A = te.placeholder(shape=in_shape, name="A")
B = fbcast(A, out_shape)
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="broadcast_to")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.broadcast_to(data_npy, out_shape)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_target(target)
check_target("sdaccel")
def verify_broadcast_binary_ele(
lhs_shape,
rhs_shape,
ftopi,
fnumpy,
lhs_min=-100,
lhs_max=100,
rhs_min=-100,
rhs_max=100,
dtype="float32",
):
# Build the logic and compile the function
A = (
te.var("A", dtype=dtype)
if lhs_shape is None
else te.placeholder(shape=lhs_shape, name="A", dtype=dtype)
)
B = (
te.var("B", dtype=dtype)
if rhs_shape is None
else te.placeholder(shape=rhs_shape, name="B", dtype=dtype)
)
C = ftopi(A, B)
if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
assert isinstance(C, tvm.tir.PrimExpr)
return
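# When both shapes are None the operands are scalar te.var expressions, so the topi call
# folds to a PrimExpr at trace time and there is nothing to schedule or run; the early
# return above covers that purely symbolic case.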
def gen_operand(shape, low, high, dev):
if shape is None:
npy = float(np.random.uniform(low=low, high=high))
if dtype.startswith("int"):
npy = int(npy)
nd = npy
else:
npy = np.random.uniform(low=low, high=high, size=shape).astype(dtype)
nd = tvm.nd.array(npy, dev)
return npy, nd
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(C)
foo = tvm.build(s, [A, B, C], target, name="broadcast_binary" + "_" + ftopi.__name__)
lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, dev)
rhs_npy, rhs_nd = gen_operand(rhs_shape, rhs_min, rhs_max, dev)
out_npy = fnumpy(lhs_npy, rhs_npy)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
foo(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)
for target, dev in tvm.testing.enabled_targets():
check_target(target)
check_target("sdaccel")
@tvm.testing.uses_gpu
def test_broadcast_to():
verify_broadcast_to_ele((1,), (10,), topi.broadcast_to)
verify_broadcast_to_ele((), (10,), topi.broadcast_to)
verify_broadcast_to_ele((1, 1, 5, 4), (3, 4, 4, 4, 5, 4), topi.broadcast_to)
verify_broadcast_to_ele((1, 128, 1, 32), (64, 128, 64, 32), topi.broadcast_to)
@tvm.testing.uses_gpu
def test_add():
verify_broadcast_binary_ele((), (), topi.add, np.add)
verify_broadcast_binary_ele((5, 2, 3), (2, 1), topi.add, np.add)
@tvm.testing.uses_gpu
def test_subtract():
verify_broadcast_binary_ele((5, 2, 3), (), topi.subtract, np.subtract)
verify_broadcast_binary_ele((5, 2, 3), None, topi.subtract, np.subtract)
verify_broadcast_binary_ele(None, None, topi.subtract, np.subtract)
verify_broadcast_binary_ele((1, 32), (64, 32), topi.subtract, np.subtract)
@tvm.testing.uses_gpu
def test_multiply():
verify_broadcast_binary_ele((5, 64, 128), (2, 5, 64, 1), topi.multiply, np.multiply)
@tvm.testing.uses_gpu
def test_divide():
verify_broadcast_binary_ele(None, (10,), topi.divide, np.divide, rhs_min=0.0001)
verify_broadcast_binary_ele((), None, topi.divide, np.divide, rhs_min=0.0001)
verify_broadcast_binary_ele((2, 3, 1, 32), (64, 32), topi.divide, np.divide, rhs_min=0.0001)
@tvm.testing.uses_gpu
def test_floor_divide():
def _canonical_floor_div(a, b):
return np.floor(a / b)
verify_broadcast_binary_ele(
None, (10,), topi.floor_divide, _canonical_floor_div, rhs_min=0.0001
)
verify_broadcast_binary_ele((), None, topi.floor_divide, _canonical_floor_div, rhs_min=0.0001)
verify_broadcast_binary_ele(
(2, 3, 64, 32), (64, 32), topi.floor_divide, _canonical_floor_div, rhs_min=0.0001
)
@tvm.testing.uses_gpu
def test_maximum_minimum():
verify_broadcast_binary_ele((32,), (64, 32), topi.maximum, np.maximum)
verify_broadcast_binary_ele((1, 2, 2, 1, 32), (64, 32), topi.minimum, np.minimum)
@tvm.testing.uses_gpu
def test_power():
verify_broadcast_binary_ele(
(1, 2, 2), (2,), topi.power, np.power, lhs_min=0.001, rhs_min=0.001, rhs_max=2
)
@tvm.testing.uses_gpu
def test_mod():
verify_broadcast_binary_ele(
(1, 2, 2), (2,), topi.mod, np.mod, lhs_min=0.001, rhs_min=1, dtype="int32"
)
@tvm.testing.uses_gpu
def test_floor_mod():
def _canonical_floor_mod(a, b):
return a - np.floor(a / b) * b
verify_broadcast_binary_ele(
(1, 2, 2),
(2,),
topi.floor_mod,
_canonical_floor_mod,
lhs_min=0.001,
rhs_min=1,
dtype="int32",
)
verify_broadcast_binary_ele(
(3, 4, 5),
(3, 4, 5),
topi.floor_mod,
_canonical_floor_mod,
lhs_min=0.001,
rhs_min=1,
dtype="float32",
)
@tvm.testing.uses_gpu
def test_cmp():
# explicitly specify the output type
def greater(x, y):
return topi.greater(x, y).astype("int8")
def less(x, y):
return topi.less(x, y).astype("int8")
def equal(x, y):
return topi.equal(x, y).astype("int8")
def not_equal(x, y):
return topi.not_equal(x, y).astype("int8")
def greater_equal(x, y):
return topi.greater_equal(x, y).astype("int8")
def less_equal(x, y):
return topi.less_equal(x, y).astype("int8")
verify_broadcast_binary_ele((1, 2, 2), (2,), greater, np.greater)
verify_broadcast_binary_ele((2, 1, 2), (2, 3, 1), less, np.less)
verify_broadcast_binary_ele(
(2, 1, 2),
(2, 3, 1),
equal,
np.equal,
lhs_min=-2,
lhs_max=2,
rhs_min=-2,
rhs_max=2,
dtype="int32",
)
verify_broadcast_binary_ele(
(2, 1, 2),
(2, 3, 1),
not_equal,
np.not_equal,
lhs_min=-2,
lhs_max=2,
rhs_min=-2,
rhs_max=2,
dtype="int32",
)
verify_broadcast_binary_ele(
(7, 1, 5),
(7, 3, 1),
greater_equal,
np.greater_equal,
lhs_min=-3,
lhs_max=3,
rhs_min=-3,
rhs_max=3,
dtype="int32",
)
verify_broadcast_binary_ele(
(7, 1, 5),
(7, 3, 1),
less_equal,
np.less_equal,
lhs_min=-3,
lhs_max=3,
rhs_min=-3,
rhs_max=3,
dtype="int32",
)
@tvm.testing.uses_gpu
def test_shift():
# explicitly specify the output type
verify_broadcast_binary_ele(
(2, 1, 2), None, topi.right_shift, np.right_shift, dtype="int32", rhs_min=0, rhs_max=32
)
verify_broadcast_binary_ele(
(1, 2, 2), (2,), topi.left_shift, np.left_shift, dtype="int32", rhs_min=0, rhs_max=32
)
verify_broadcast_binary_ele(
(1, 2, 2), (2,), topi.left_shift, np.left_shift, dtype="int32", rhs_min=0, rhs_max=32
)
@tvm.testing.uses_gpu
def test_logical_single_ele():
def test_apply(
func,
name,
f_numpy,
indata,
dtype="bool",
):
# Build the logic and compile the function
A = te.placeholder(shape=indata.shape, name="A", dtype=dtype)
B = func(A)
if isinstance(A, tvm.tir.PrimExpr):
assert isinstance(B, tvm.tir.PrimExpr)
return
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=name)
data_npy = indata.astype(A.dtype)
data_nd = tvm.nd.array(data_npy, dev)
out_npy = f_numpy(indata)
out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
test_apply(topi.logical_not, "logical_not", np.logical_not, np.array([True, False, 0, 1]))
test_apply(topi.logical_not, "logical_not", np.logical_not, np.array(np.arange(5) < 3))
@tvm.testing.uses_gpu
def test_bitwise_not():
def test_apply(
func,
name,
f_numpy,
shape,
dtype="int32",
):
# Build the logic and compile the function
A = te.placeholder(shape=shape, name="A", dtype=dtype)
B = func(A)
if isinstance(A, tvm.tir.PrimExpr):
assert isinstance(B, tvm.tir.PrimExpr)
return
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=name)
data_npy = np.random.uniform(size=shape).astype(A.dtype)
data_nd = tvm.nd.array(data_npy, dev)
out_npy = f_numpy(data_npy)
out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
test_apply(topi.bitwise_not, "bitwise_not", np.bitwise_not, ())
test_apply(topi.bitwise_not, "bitwise_not", np.bitwise_not, (2, 1, 2))
@tvm.testing.uses_gpu
def test_logical_binary_ele():
def test_apply(
func,
name,
f_numpy,
lhs,
rhs,
dtype="bool",
):
# Build the logic and compile the function
A = te.var("A", dtype=dtype)
B = te.var("B", dtype=dtype)
C = func(A, B)
if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
assert isinstance(C, tvm.tir.PrimExpr)
return
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(C)
foo = tvm.build(s, [A, B, C], target, name=name)
lhs_nd = tvm.nd.array(lhs, dev)
rhs_nd = tvm.nd.array(rhs, dev)
out_npy = f_numpy(lhs, rhs)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
foo(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
test_apply(topi.logical_and, "logical_and", np.logical_and, True, False)
test_apply(topi.logical_and, "logical_and", np.logical_and, [True, False], [False, False])
test_apply(topi.logical_or, "logical_or", np.logical_or, True, False)
test_apply(topi.logical_or, "logical_or", np.logical_or, [True, False], [False, False])
test_apply(topi.logical_xor, "logical_xor", np.logical_xor, True, False)
test_apply(topi.logical_xor, "logical_xor", np.logical_xor, [True, False], [False, False])
@tvm.testing.uses_gpu
def test_bitwise_and():
verify_broadcast_binary_ele(None, None, topi.bitwise_and, np.bitwise_and, dtype="int32")
verify_broadcast_binary_ele(
(2, 1, 2), (2, 1, 2), topi.bitwise_and, np.bitwise_and, dtype="int32"
)
@tvm.testing.uses_gpu
def test_bitwise_or():
verify_broadcast_binary_ele(None, None, topi.bitwise_or, np.bitwise_or, dtype="int32")
verify_broadcast_binary_ele((2, 1, 2), (2, 1, 2), topi.bitwise_or, np.bitwise_or, dtype="int32")
@tvm.testing.uses_gpu
def test_bitwise_xor():
verify_broadcast_binary_ele(None, None, topi.bitwise_xor, np.bitwise_xor, dtype="int32")
verify_broadcast_binary_ele(
(2, 1, 2), (2, 1, 2), topi.bitwise_xor, np.bitwise_xor, dtype="int32"
)
if __name__ == "__main__":
test_add()
test_shift()
test_cmp()
test_mod()
test_floor_mod()
test_subtract()
test_multiply()
test_divide()
test_floor_divide()
test_maximum_minimum()
test_power()
test_broadcast_to()
test_logical_single_ele()
test_bitwise_not()
test_logical_binary_ele()
test_bitwise_and()
test_bitwise_or()
test_bitwise_xor()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_clip.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for clip operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_clip(N, a_min, a_max, dtype):
A = te.placeholder((N, N), dtype=dtype, name="A")
B = topi.clip(A, a_min, a_max)
s = te.create_schedule([B.op])
# use memoize to pickle the test data for reuse across runs
@memoize("topi.tests.test_topi_clip")
def get_ref_data():
a_np = np.random.uniform(a_min * 2, a_max * 2, size=(N, N)).astype(dtype)
b_np = np.clip(a_np, a_min, a_max)
return a_np, b_np
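# np.clip saturates values to the closed interval [a_min, a_max]; e.g.
# np.clip([-200, 0, 200], -127, 127) -> [-127, 0, 127]. Inputs are drawn from
# (2 * a_min, 2 * a_max) above so both saturation branches are exercised.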
a_np, b_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], target, name="clip")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_clip():
verify_clip(1024, -127, 127, "float32")
verify_clip(1024, -127, 127, "int16")
verify_clip(1024, -127, 127, "int8")
if __name__ == "__main__":
test_clip()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import itertools
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_conv1d_ncw_implement = {
"generic": (topi.nn.conv1d_ncw, topi.generic.schedule_conv1d_ncw),
"cpu": (topi.nn.conv1d_ncw, topi.x86.schedule_conv1d_ncw),
"gpu": (topi.cuda.conv1d_ncw, topi.cuda.schedule_conv1d_ncw),
}
_conv1d_nwc_implement = {
"generic": (topi.nn.conv1d_nwc, topi.generic.schedule_conv1d_nwc),
"cpu": (topi.nn.conv1d_nwc, topi.x86.schedule_conv1d_nwc),
"gpu": (topi.cuda.conv1d_nwc, topi.cuda.schedule_conv1d_nwc),
}
_group_conv1d_implementations = {
"NCW": {
"generic": (topi.nn.group_conv1d_ncw, topi.generic.schedule_group_conv1d_ncw),
"cpu": (topi.nn.group_conv1d_ncw, topi.x86.schedule_group_conv1d_ncw),
"gpu": (topi.cuda.group_conv1d_ncw, topi.cuda.schedule_group_conv1d_ncw),
},
"NWC": {
"generic": (topi.nn.group_conv1d_nwc, topi.generic.schedule_group_conv1d_nwc),
"cpu": (topi.nn.group_conv1d_nwc, topi.x86.schedule_group_conv1d_nwc),
"gpu": (topi.cuda.group_conv1d_nwc, topi.cuda.schedule_group_conv1d_nwc),
},
}
def verify_conv1d(
batch,
in_channels,
in_width,
filters,
kernel_size=3,
stride=1,
dilation=1,
padding="VALID",
layout="NCW",
):
if layout == "NCW":
in_shape = [batch, in_channels, in_width]
kernel_shape = [filters, in_channels, kernel_size]
else:
in_shape = [batch, in_width, in_channels]
kernel_shape = [kernel_size, in_channels, filters]
dtype = "float32"
A = te.placeholder(in_shape, name="A", dtype=dtype)
W = te.placeholder(kernel_shape, name="W", dtype=dtype)
def get_ref_data(layout):
a_np = np.random.uniform(size=in_shape).astype(dtype)
w_np = np.random.uniform(size=kernel_shape).astype(dtype)
if layout == "NWC":
np_in = np.transpose(a_np, [0, 2, 1])
np_w = np.transpose(w_np, [2, 1, 0])
else:
np_in = a_np
np_w = w_np
b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
if layout == "NWC":
b_np = np.transpose(b_np, [0, 2, 1])
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data(layout)
def check_target(target, dev):
if layout == "NCW":
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_ncw_implement)
else:
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_nwc_implement)
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, "float32")
s = fschedule([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv1d():
for layout in ["NCW", "NWC"]:
# Most basic test case
verify_conv1d(1, 1, 8, 1, 3, 1, 1, "VALID", layout)
# With padding
verify_conv1d(1, 1, 8, 1, 3, 1, 1, "SAME", layout)
# Realistic dimensions
verify_conv1d(1, 16, 32, 16, 3, 1, 1, "SAME", layout)
# With stride
verify_conv1d(1, 16, 32, 16, 3, 2, 1, "SAME", layout)
# With dilation
verify_conv1d(1, 16, 32, 16, 3, 1, 2, "SAME", layout)
# Large batch size
verify_conv1d(8, 16, 32, 16, 3, 1, 1, "SAME", layout)
# Other kernel sizes
verify_conv1d(1, 16, 32, 16, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 2, 1, 1, "SAME", layout)
verify_conv1d(1, 16, 32, 16, 1, 1, 1, "SAME", layout)
# Non-power-of-two shape
verify_conv1d(1, 17, 12, 21, 3, 1, 1, "SAME", layout)
verify_conv1d(1, 5, 27, 18, 3, 1, 1, "VALID", layout)
layout = tvm.testing.parameter("NCW", "NWC")
padding = tvm.testing.parameter("SAME", "VALID")
dtype = tvm.testing.parameter("float32")
# batch, in_channels, in_width, filters, kernel_size, stride, dilation, groups
shape = tvm.testing.parameter(
[1, 4, 8, 4, 3, 1, 1, 4],
[1, 4, 8, 4, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 2, 1, 4],
[1, 16, 32, 16, 3, 1, 2, 4],
[8, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 3, 1, 1, 4],
[1, 16, 32, 16, 2, 1, 1, 4],
[1, 16, 32, 16, 1, 1, 1, 4],
[1, 21, 12, 21, 3, 1, 1, 3],
[1, 20, 27, 20, 3, 1, 1, 5],
)
def test_group_conv1d(shape, layout, padding, target, dev, dtype):
batch, in_channels, in_width, filters, kernel_size, stride, dilation, groups = shape
if layout == "NCW":
in_shape = [batch, in_channels, in_width]
kernel_shape = [filters, in_channels // groups, kernel_size]
else:
in_shape = [batch, in_width, in_channels]
kernel_shape = [kernel_size, in_channels // groups, filters]
# reference data
a_np = np.random.uniform(size=in_shape).astype(dtype)
w_np = np.random.uniform(size=kernel_shape).astype(dtype)
if layout == "NWC":
np_in = np.transpose(a_np, [0, 2, 1])
np_w = np.transpose(w_np, [2, 1, 0])
else:
np_in = a_np
np_w = w_np
b_np = tvm.topi.testing.group_conv1d_ncw_python(np_in, np_w, stride, padding, dilation, groups)
if layout == "NWC":
b_np = np.transpose(b_np, [0, 2, 1])
A = te.placeholder(in_shape, name="A", dtype=dtype)
W = te.placeholder(kernel_shape, name="W", dtype=dtype)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv1d_implementations[layout])
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, groups, "float32")
s = fschedule([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
print(tvm.lower(s, [A, W, B], target))
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
test_conv1d()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv1d_transpose_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import itertools
import os
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
_conv1d_transpose_ncw_implement = {
"generic": (topi.nn.conv1d_transpose_ncw, topi.generic.schedule_conv1d_transpose_ncw),
"gpu": (topi.cuda.conv1d_transpose_ncw, topi.cuda.schedule_conv1d_transpose_ncw),
}
(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
output_padding,
) = tvm.testing.parameters(
(1, 3, 224, 32, 5, 1, 0, (0,)),
(1, 3, 224, 32, 7, 1, 2, (0,)),
(1, 3, 224, 32, 5, 2, 1, (0,)),
(1, 3, 224, 32, 5, 2, 1, (1,)),
(1, 3, 224, 32, 5, 2, 0, (0,)),
(1, 32, 32, 128, 5, 1, 0, (0,)),
(1, 32, 32, 128, 5, 2, 1, (0,)),
(1, 1, 1024, 1, 512, 1, 256, (0,)),
(1, 1, 1024, 1, 512, 2, 256, (0,)),
(1, 1, 1024, 1, 512, 5, 256, (0,)),
(1, 1, 1024, 1, 512, 5, 256, (3,)),
(1, 2, 1024, 1, 128, 128, 0, (0,)),
(1, 1, 1024, 2, 128, 128, 0, (0,)),
(1, 1, 1024, 2, 2, 2, 0, (0,)),
(1, 1, 10, 1, 5, 1, (0, 3), (0,)),
(1, 1, 10, 1, 5, 1, (1, 3), (0,)),
(1, 1, 10, 1, 5, 1, (2, 3), (0,)),
(1, 257, 128, 1, 512, 128, 256, (0,)),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
dtype = "float32"
a_shape = (batch, in_channel, in_size)
w_shape = (in_channel, num_filter, kernel)
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
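# Rough sketch of the expected NCW transposed-convolution output width (assuming
# symmetric padding p and dilation 1, i.e. the usual formula):
#   out_w = (in_w - 1) * stride - 2 * p + kernel + output_padding
# e.g. the (1, 3, 224, 32, 5, 1, 0, (0,)) workload gives 223 * 1 + 5 = 228, and the
# (1, 3, 224, 32, 5, 2, 1, (1,)) workload gives 223 * 2 - 2 + 5 + 1 = 450.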
@tvm.testing.known_failing_targets("vulkan")
def test_conv1d_transpose_ncw(
target,
dev,
ref_data,
dtype,
stride,
padding,
output_padding,
):
a_np, w_np, b_np, c_np = ref_data
A = te.placeholder(a_np.shape, name="A", dtype=dtype)
W = te.placeholder(w_np.shape, name="W", dtype=dtype)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_transpose_ncw_implement)
B = fcompute(A, W, stride, padding, A.dtype, output_padding)
C = topi.nn.relu(B)
s1 = fschedule([B])
s2 = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func1 = tvm.build(s1, [A, W, B], target)
func2 = tvm.build(s2, [A, W, C], target)
func1(a, w, b)
func2(a, w, c)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_NCHWc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]i[x]o
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1))
return kernel
def _transform_bias(bias, bn):
# [num_filter, 1, 1] -> [num_filter//bn, 1, 1, bn]
num_filter, h, w = bias.shape
bias = np.reshape(bias, (num_filter // bn, bn, h, w))
bias = np.transpose(bias, (0, 2, 3, 1))
return bias
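# Shape sketch for the layout helpers above (hypothetical sizes, bn = ic_bn = oc_bn = 8):
#   _transform_data:   (1, 16, 56, 56) -> (1, 2, 56, 56, 8)    # NCHW -> NCHW8c
#   _transform_kernel: (32, 16, 3, 3)  -> (4, 2, 3, 3, 8, 8)   # OIHW -> OIHW8i8o
#   _transform_bias:   (32, 1, 1)      -> (4, 1, 1, 8)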
def verify_conv2d_NCHWc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
dtype="float32",
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
in_height = in_width = in_size
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum)
)
# For testing functionality, we choose an arbitrary block size
# that divides the channel count, regardless of performance.
oc_block = 1
for bn in range(16, 0, -1):
if num_filter % bn == 0:
oc_block = bn
break
ic_block = 1
for bn in range(oc_block, 0, -1):
if in_channel % bn == 0:
ic_block = bn
break
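# For instance (illustrative values): num_filter=64 and in_channel=3 give oc_block=16
# (the largest divisor of 64 not exceeding 16) and ic_block=3 (the largest divisor of 3
# not exceeding oc_block).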
A = te.placeholder((batch, in_channel // ic_block, in_height, in_width, ic_block), name="A")
W = te.placeholder(
(num_filter // oc_block, in_channel // ic_block, kernel, kernel, ic_block, oc_block),
name="W",
)
bias = te.placeholder((num_filter // oc_block, 1, 1, oc_block), name="bias")
@memoize("topi.tests.test_topi_conv2d_NCHWc.verify_conv2d_NCHWc")
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
w_np = np.random.uniform(size=(num_filter, in_channel, kernel, kernel)).astype(dtype)
b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return (
_transform_data(a_np, ic_block),
_transform_kernel(w_np, ic_block, oc_block),
_transform_bias(b_np, oc_block),
_transform_data(c_np, oc_block),
)
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.x86.conv2d_NCHWc(
A,
W,
(stride, stride),
padding,
(dilation, dilation),
"NCHW%dc" % ic_block,
"NCHW%dc" % oc_block,
dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.x86.schedule_conv2d_NCHWc([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
# test llvm only for now since the conv2d_NCHWc implementation is missing in other backends.
for device in ["llvm"]:
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device)
def test_conv2d_NCHWc():
# ResNet18 workloads
verify_conv2d_NCHWc(1, 3, 224, 64, 7, 2, 3)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc(1, 64, 56, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 64, 56, 128, 3, 2, 1)
verify_conv2d_NCHWc(1, 64, 56, 128, 1, 2, 0)
verify_conv2d_NCHWc(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_NCHWc(1, 128, 28, 256, 3, 2, 1)
verify_conv2d_NCHWc(1, 128, 28, 256, 1, 2, 0)
verify_conv2d_NCHWc(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_NCHWc(1, 256, 14, 512, 3, 2, 1)
verify_conv2d_NCHWc(1, 256, 14, 512, 1, 2, 0)
verify_conv2d_NCHWc(1, 512, 7, 512, 3, 1, 1)
# bias, relu
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True)
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)
# dilation
verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, dilation=2)
# batch size
verify_conv2d_NCHWc(4, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc(9, 64, 56, 64, 3, 1, 1)
# weird workloads
verify_conv2d_NCHWc(2, 2, 2, 2, 2, 2, 2)
verify_conv2d_NCHWc(3, 3, 3, 3, 3, 3, 3)
verify_conv2d_NCHWc(4, 4, 4, 4, 4, 4, 4)
verify_conv2d_NCHWc(5, 5, 5, 5, 5, 5, 5)
verify_conv2d_NCHWc(6, 6, 6, 6, 6, 6, 6)
# disable these tests due to some bugs of llvm with nvptx
# verify_conv2d_NCHWc(1, 1, 1, 1, 1, 1, 1, dilation=1)
# verify_conv2d_NCHWc(1, 1, 1, 1, 1, 1, 1, dilation=2)
# verify_conv2d_NCHWc(2, 13, 71, 59, 3, 1, 1)
# inception v3 workloads
verify_conv2d_NCHWc(1, 3, 299, 32, 3, 2, 0)
verify_conv2d_NCHWc(1, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc(1, 32, 147, 64, 3, 1, 1)
verify_conv2d_NCHWc(1, 64, 73, 80, 1, 1, 0)
verify_conv2d_NCHWc(1, 80, 73, 192, 3, 1, 0)
verify_conv2d_NCHWc(1, 192, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 48, 35, 64, 5, 1, 2)
verify_conv2d_NCHWc(1, 64, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc(1, 96, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc(1, 192, 35, 32, 1, 1, 0)
verify_conv2d_NCHWc(1, 256, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 256, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc(1, 288, 35, 384, 3, 2, 0)
verify_conv2d_NCHWc(1, 96, 35, 96, 3, 2, 0)
verify_conv2d_NCHWc(1, 768, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 768, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc(1, 128, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc(1, 128, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 128, 17, 128, 7, 1, 3)
verify_conv2d_NCHWc(1, 128, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 768, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc(1, 160, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc(1, 160, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 160, 17, 160, 7, 1, 3)
verify_conv2d_NCHWc(1, 160, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc(1, 192, 17, 320, 3, 2, 0)
verify_conv2d_NCHWc(1, 192, 17, 192, 3, 2, 0)
verify_conv2d_NCHWc(1, 1280, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc(1, 1280, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 384, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 384, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc(1, 1280, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc(1, 448, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc(1, 1280, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc(1, 2048, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc(1, 1024, 19, 84, 3, 1, 1)
verify_conv2d_NCHWc(1, 2048, 10, 126, 3, 1, 1)
verify_conv2d_NCHWc(1, 512, 5, 126, 3, 1, 1)
verify_conv2d_NCHWc(1, 256, 3, 126, 3, 1, 1)
# Asymmetric padding
verify_conv2d_NCHWc(1, 32, 17, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_NCHWc(1, 32, 35, 128, 3, 1, (3, 3, 2, 2))
verify_conv2d_NCHWc(1, 32, 35, 32, 1, 1, (1, 2, 2, 1))
verify_conv2d_NCHWc(1, 32, 17, 192, 1, 1, (1, 2))
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, (3, 1))
verify_conv2d_NCHWc(1, 128, 8, 384, 3, 1, (0, 2))
verify_conv2d_NCHWc(1, 32, 8, 32, 1, 1, "VALID")
verify_conv2d_NCHWc(1, 388, 8, 32, 3, 1, "VALID")
verify_conv2d_NCHWc(1, 512, 19, 32, 1, 1, "SAME")
verify_conv2d_NCHWc(1, 32, 10, 32, 2, 1, "SAME")
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, (1, 2, 2, 1), add_relu=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 5, 2, (1, 3), add_bias=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, "VALID", add_bias=True, add_relu=True)
verify_conv2d_NCHWc(1, 32, 8, 32, 24, 1, "SAME", add_bias=True, add_relu=True)
if __name__ == "__main__":
test_conv2d_NCHWc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_hwcn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_hwcn_implement = {
"generic": (topi.nn.conv2d_hwcn, topi.generic.schedule_conv2d_hwcn),
"gpu": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
"opencl": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
}
def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
in_height = in_width = in_size
A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
B = te.placeholder((1, num_filter, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
b_shape = get_const_tuple(B.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_hwcn.verify_hwcn")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
c2_np = c1_np + b_np
c3_np = np.maximum(c2_np, 0)
return a_np, w_np, b_np, c1_np, c2_np, c3_np
a_np, w_np, b_np, c1_np, c2_np, c3_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_hwcn_implement)
t_conv = fcompute(A, W, stride, padding, dilation)
t_bias = topi.add(t_conv, B)
t_relu = topi.nn.relu(t_bias)
s1 = fschedule([t_conv])
s2 = fschedule([t_bias])
s3 = fschedule([t_relu])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
conv_out = tvm.nd.array(np.zeros(get_const_tuple(t_conv.shape), dtype=t_conv.dtype), dev)
bias_out = tvm.nd.array(np.zeros(get_const_tuple(t_bias.shape), dtype=t_bias.dtype), dev)
relu_out = tvm.nd.array(np.zeros(get_const_tuple(t_relu.shape), dtype=t_relu.dtype), dev)
func1 = tvm.build(s1, [A, W, t_conv], target)
func2 = tvm.build(s2, [A, W, B, t_bias], target)
func3 = tvm.build(s3, [A, W, B, t_relu], target)
func1(a, w, conv_out)
func2(a, w, b, bias_out)
func3(a, w, b, relu_out)
tvm.testing.assert_allclose(conv_out.numpy(), c1_np, rtol=1e-5)
tvm.testing.assert_allclose(bias_out.numpy(), c2_np, rtol=1e-5)
tvm.testing.assert_allclose(relu_out.numpy(), c3_np, rtol=1e-5)
for target in ["cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
check_target(target)
@tvm.testing.requires_gpu
def test_conv2d_hwcn():
verify_conv2d_hwcn(1, 256, 32, 128, 3, 1, "SAME")
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "SAME")
verify_conv2d_hwcn(4, 128, 16, 128, 5, 2, "SAME")
verify_conv2d_hwcn(4, 128, 16, 256, 5, 2, "SAME")
verify_conv2d_hwcn(1, 256, 32, 128, 3, 1, "VALID")
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "VALID")
verify_conv2d_hwcn(4, 128, 16, 128, 5, 2, "VALID")
verify_conv2d_hwcn(4, 128, 16, 256, 5, 2, "VALID")
# dilation = 2
verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "SAME", dilation=2)
# Pass stride as tuple
verify_conv2d_hwcn(1, 256, 32, 128, 3, (1, 1), "SAME")
verify_conv2d_hwcn(1, 256, 32, 256, 3, (1, 1), "SAME")
verify_conv2d_hwcn(4, 128, 16, 128, 5, (2, 2), "SAME")
verify_conv2d_hwcn(4, 128, 16, 256, 5, (2, 2), "SAME")
verify_conv2d_hwcn(1, 256, 32, 128, 3, (1, 1), "VALID")
verify_conv2d_hwcn(1, 256, 32, 256, 3, (1, 1), "VALID")
verify_conv2d_hwcn(4, 128, 16, 128, 5, (2, 2), "VALID")
verify_conv2d_hwcn(4, 128, 16, 256, 5, (2, 2), "VALID")
if __name__ == "__main__":
test_conv2d_hwcn()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
import os
import tvm.testing
import tvm.topi.testing
from tvm import te, autotvm, topi, relay
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
_conv2d_hwnc_tensorcore_implement = {
"cuda": (topi.cuda.conv2d_hwnc_tensorcore, topi.cuda.schedule_conv2d_hwnc_tensorcore)
}
def verify_conv2d_hwnc(
batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, dtype="int4"
):
"""Test the conv2d with tensorcore for hwnc layout"""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
# choose dtype from int4, int8
assert dtype in ["int4", "int8"]
in_height = in_width = in_size
A = te.placeholder((in_height, in_width, batch, in_channel), name="A", dtype=dtype)
W = te.placeholder((kernel, kernel, num_filter, in_channel), name="W", dtype=dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
@memoize("topi.tests.test_topi_conv2d_hwnc.verify_conv2d_hwnc")
def get_ref_data():
if dtype == "int4":
a_np = np.random.randint(low=-8, high=7, size=a_shape).transpose((2, 0, 1, 3))
w_np = np.random.randint(low=-8, high=7, size=w_shape)
dw_np = topi.testing.dilate_python(
w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
)
elif dtype == "int8":
a_np = (
np.random.randint(low=-128, high=127, size=a_shape)
.transpose((2, 0, 1, 3))
.astype(dtype)
)
w_np = np.random.randint(low=-128, high=127, size=w_shape).astype(dtype)
dw_np = topi.testing.dilate_python(
w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
)
c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, c_np
def convert_int32_into_int4(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
I, J, K, L = a_int32.shape
a_int4 = np.zeros(shape=(I, J, K, L // 8), dtype=np.int32)
for i in range(I):
for j in range(J):
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[i, j, k, l] = a_int4[i, j, k, l] | (
(a_int32[i, j, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
)
return a_int4
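# Packing sketch (values chosen purely for illustration): eight int4 values
# [1, 2, 3, 4, 5, 6, 7, -8] along the last axis pack into the single int32 word
# 0x12345678, with element 0 occupying the most-significant nibble.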
a_np, w_np, c_np = get_ref_data()
if dtype == "int4":
a_np = convert_int32_into_int4(a_np)
w_np = convert_int32_into_int4(w_np)
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if not nvcc.have_tensorcore(dev.compute_version):
print("skip because gpu does not support Tensor Cores")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = topi.testing.dispatch(target, _conv2d_hwnc_tensorcore_implement)
C = fcompute(A, W, stride, padding, dilation, dtype, "int32")
s = fschedule([C])
a = tvm.nd.array(a_np.transpose((1, 2, 0, 3)), dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
rtol = 1e-3
tvm.testing.assert_allclose(c.numpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol)
check_target("cuda")
def verify_feature_length():
np.random.seed(123)
target = "cuda"
ctx = tvm.device(target)
batch_size = 32
input_shape = (32, 512, 7, 7)
kernel_shape = (512, 512, 3, 3)
def get_mod():
x = relay.var("x", relay.TensorType(input_shape, "float32"))
y = relay.var("y", relay.TensorType(kernel_shape, "float32"))
f = relay.Function(
[x, y], relay.nn.conv2d(x, y, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3])
)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
return mod, {}
mod, params = get_mod()
layout_config = relay.transform.LayoutConfig()
desired_layouts = {"nn.conv2d": ["HWNC", "default"]}
with layout_config:
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
mod = relay.transform.recast(mod, "int4", "int32")
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
)
assert len(tasks) == 1
task = tasks[0]
space = task.config_space
idx1 = space.get_rand_index()
idx2 = space.get_rand_index()
cfg = space.get(idx1)
sch, arg_bufs = task.instantiate(cfg)
fea1 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
cfg = space.get(idx2)
sch, arg_bufs = task.instantiate(cfg)
fea2 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
assert len(fea1) == len(fea2)
@tvm.testing.requires_tensorcore
def test_conv2d_hwnc_tensorcore():
"""Test the conv2d with tensorcore for hwnc layout"""
verify_conv2d_hwnc(8, 64, 56, 64, 3, 1, 1, dtype="int8")
verify_conv2d_hwnc(8, 64, 56, 64, 1, 1, 0, dtype="int4")
verify_conv2d_hwnc(8, 64, 56, 128, 3, 2, 1)
verify_conv2d_hwnc(8, 64, 56, 64, 1, 2, 0)
verify_conv2d_hwnc(8, 128, 28, 128, 3, 1, 1)
verify_conv2d_hwnc(8, 128, 28, 256, 3, 2, 1)
verify_conv2d_hwnc(8, 128, 28, 256, 1, 2, 0)
verify_conv2d_hwnc(8, 256, 14, 256, 3, 1, 1)
verify_conv2d_hwnc(8, 256, 14, 512, 3, 2, 1)
verify_conv2d_hwnc(8, 256, 14, 512, 1, 2, 0)
verify_conv2d_hwnc(8, 512, 9, 512, 3, 1, 1)
verify_feature_length()
if __name__ == "__main__":
test_conv2d_hwnc_tensorcore()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.generic.conv2d import fallback_schedule_cpu_common_int8
from common import Int8Fallback
import tvm.testing
import pytest
import platform
def compile_conv2d_NHWC_gemm_int8_arm(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="int8")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
bias = te.placeholder((num_filter,), name="bias", dtype="int8")
dtype = "int32"
devices = [
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu",
topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
),
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
),
(
"llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
topi.arm_cpu.compute_conv2d_NHWC_quantized_native,
topi.arm_cpu.schedule_conv2d_NHWC_quantized_native,
),
# TODO(giuseros) Need LLVM-11 in order to compile with +i8mm extension
# (
# "llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+i8mm",
# topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
# topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
# ),
]
for device_tuple in devices:
target = device_tuple[0]
compute = device_tuple[1]
schedule = device_tuple[2]
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Compiling on arm AArch64 target: %s" % target)
with tvm.target.Target(target) as tvm_target:
assert tvm_target.features.is_aarch64, "AArch64 target not recognized"
C = compute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = schedule([C])
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%dnnn_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
def verify_conv2d_NHWC_gemm_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="int8")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
bias = te.placeholder((num_filter,), name="bias", dtype="int8")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved(
A, W, (stride, stride), padding, (dilation, dilation), dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
check_target("llvm")
def verify_conv2d_NCHWc_int8(
in_dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
out_dtype = "int32" if in_dtype == "int8" else "uint32"
lo = -128 if in_dtype == "int8" else 0
hi = 127 if in_dtype == "int8" else 255
def check_target(target, compute, schedule, oc_block_factor, build_only):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype=out_dtype
)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=lo, high=hi, size=a_shape).astype(out_dtype)
w_np = np.random.randint(low=lo, high=hi, size=w_shape).astype(out_dtype)
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(
out_dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
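# Illustrative shape (assuming oc_block_factor=4): a (1, 64, 56, 56) NCHW result
# becomes (1, 16, 56, 56, 4) in the NCHW4c layout checked below.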
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
with tvm.target.Target(target):
C = compute(
A,
W,
(stride, stride),
padding,
(dilation, dilation),
"NCHW",
"NCHW",
out_dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = schedule([C])
a = tvm.nd.array(a_np.astype(dtype), dev)
w = tvm.nd.array(w_np.astype(dtype), dev)
b = tvm.nd.array(b_np.astype(out_dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
compile_args = [A, W, bias, C]
run_args = [a, w, b, c]
else:
compile_args = [A, W, C]
run_args = [a, w, c]
func = tvm.build(
s,
compile_args,
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
if build_only:
return
print("Running on target: %s" % target)
func(*run_args)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
targets = [
(
"cuda",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
# Disable on CI since it does not support spirv int8 dot product
# (
# "vulkan -from_device=0",
# lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
# topi.cuda.schedule_conv2d_NCHWc_int8,
# 4,
# False,
# ),
]
build_only_aarch64 = platform.machine() != "aarch64"
targets.append(
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
)
)
if in_dtype == "int8":
targets += [
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
),
(
"rocm -mattr=+dotprod",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
]
for target, compute, schedule, oc_block_factor, build_only in targets:
check_target(target, compute, schedule, oc_block_factor, build_only)
def verify_conv2d_nchw_int8(
in_dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
bias = te.placeholder((num_filter, 1, 1), name="bias", dtype=in_dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_int8.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def verify_workload_padding():
_, _, out_height, out_width = get_const_tuple(c_np.shape)
wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)
# For testing functionality, we choose arbitrary int32_lanes and
# num_int8_elements that divide the channel count, regardless of performance.
int32_lanes, num_int8_elements = num_filter, in_channel
# check that the tile_ow candidates are factors of the output width.
cfg = autotvm.get_config()
fallback_schedule_cpu_common_int8(cfg, wkl, int32_lanes, num_int8_elements)
ow_tile = np.prod(cfg["tile_ow"].size)
tvm.testing.assert_allclose(ow_tile, out_width)
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.conv2d_nchw_int8(
A, W, (stride, stride), padding, (dilation, dilation), dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_conv2d_nchw_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
verify_workload_padding()
for target in ["cuda"]:
check_target(target)
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
def test_conv2d_nchw(in_dtype):
with Int8Fallback():
# ResNet18 workloads where channels in / out are multiple of oc_block_factor
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 128, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 128, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 128, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 256, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 28, 256, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 256, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 512, 3, 2, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 14, 512, 1, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 512, 7, 512, 3, 1, 1)
# bias, relu
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_bias=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)
# dilation = 2
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, dilation=2)
# batch size
verify_conv2d_NCHWc_int8(in_dtype, 4, 64, 56, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 9, 64, 56, 64, 3, 1, 1)
# weird workloads
verify_conv2d_NCHWc_int8(in_dtype, 4, 4, 4, 8, 4, 4, 4)
# inception v3 workloads where channels in / out are multiple of oc_block_factor
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 147, 64, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 73, 80, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 80, 73, 192, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 48, 35, 64, 5, 1, 2)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 96, 35, 96, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 35, 32, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 256, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 64, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 48, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 288, 35, 384, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 96, 35, 96, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 128, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 128, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 768, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 160, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 160, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 160, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 7, 1, 3)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 320, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 192, 17, 192, 3, 2, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 384, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 384, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 448, 8, 384, 3, 1, 1)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1280, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 320, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 384, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 448, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 2048, 8, 192, 1, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 1, 1024, 19, 88, 3, 1, 1)
# batch > 1
verify_conv2d_NCHWc_int8(in_dtype, 7, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 8, 32, 149, 32, 3, 1, 0)
verify_conv2d_NCHWc_int8(in_dtype, 32, 32, 149, 32, 3, 1, 0)
# Asymmetric padding
verify_conv2d_NCHWc_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 128, 3, 1, (3, 3, 2, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 1, 1, (1, 2, 2, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 17, 192, 1, 1, (1, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 3, 1, (3, 1))
verify_conv2d_NCHWc_int8(in_dtype, 1, 128, 8, 384, 3, 1, (0, 2))
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 1, 1, "VALID")
verify_conv2d_NCHWc_int8(in_dtype, 1, 392, 8, 64, 3, 1, "VALID")
verify_conv2d_NCHWc_int8(in_dtype, 1, 512, 19, 64, 1, 1, "SAME")
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 16, 32, 2, 1, "SAME")
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 3, 1, (1, 2, 2, 1), add_relu=True)
verify_conv2d_NCHWc_int8(in_dtype, 1, 64, 8, 64, 5, 2, (1, 3), add_bias=True)
verify_conv2d_NCHWc_int8(
in_dtype, 1, 64, 56, 64, 3, 1, "VALID", add_bias=True, add_relu=True
)
verify_conv2d_NCHWc_int8(
in_dtype, 1, 64, 56, 64, 24, 1, "SAME", add_bias=True, add_relu=True
)
# Conv2d NCHW int8 schedule testing. Internally it uses the NCHWc schedule, so we only
# perform basic testing - one test covering the different scenarios (batch, dilation, etc.).
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1)
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_nchw_int8(in_dtype, 1, 64, 56, 64, 3, 1, 1, dilation=2)
verify_conv2d_nchw_int8(in_dtype, 9, 64, 56, 64, 3, 1, 1)
verify_conv2d_nchw_int8(in_dtype, 4, 4, 4, 4, 4, 4, 4)
verify_conv2d_nchw_int8(in_dtype, 1, 32, 149, 32, 3, 1, 0)
verify_conv2d_nchw_int8(in_dtype, 7, 32, 149, 32, 3, 1, 0)
verify_conv2d_nchw_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 1, 1))
verify_conv2d_nchw_int8(in_dtype, 1, 32, 35, 64, 7, 2, (0, 0, 2, 2))
def test_conv2d_nhwc():
with Int8Fallback():
# Subset of inception v3 expanded (dilation > 1, batch > 1, 'VALID' padding)
verify_conv2d_NHWC_gemm_int8(1, 3, 299, 32, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 32, 149, 32, 3, 1, "SAME", dilation=2)
verify_conv2d_NHWC_gemm_int8(4, 32, 147, 64, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 64, 73, 80, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 80, 73, 192, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 64, 1, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 192, 35, 32, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 48, 35, 64, 5, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 96, 35, 96, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 256, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 256, 35, 64, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 288, 35, 64, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 288, 35, 48, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 96, 35, 96, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 128, 17, 192, 7, 1, "SAME", dilation=2)
verify_conv2d_NHWC_gemm_int8(1, 160, 17, 160, 7, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 160, 17, 192, 1, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 192, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 768, 5, 128, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 320, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 192, 17, 192, 3, 2, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 192, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 384, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 320, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 1280, 8, 448, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 384, 8, 384, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 384, 8, 384, 3, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 448, 8, 384, 3, 1, "VALID")
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 320, 1, 1, "SAME")
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 448, 1, 1, "SAME", add_bias=True, add_relu=True)
verify_conv2d_NHWC_gemm_int8(1, 2048, 8, 192, 1, 1, "SAME", add_bias=True)
# Let's also verify that it compiles fine on AArch64 targets
compile_conv2d_NHWC_gemm_int8_arm(1, 3, 299, 32, 3, 2, "SAME")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import sys
import pytest
import numpy as np
import tvm
from tvm import autotvm, te, topi
import tvm.topi.testing
from tvm.contrib import cudnn
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.x86.conv2d_avx_common import _fallback_schedule
import tvm.testing
dtype = tvm.testing.parameter("float16", "float32")
random_seed = tvm.testing.parameter(0)
@tvm.testing.fixture
def input_shape(batch, in_channel, in_size):
return (batch, in_channel, in_size, in_size)
@tvm.testing.fixture
def weight_shape(num_filter, in_channel, kernel):
return (num_filter, in_channel, kernel, kernel)
@tvm.testing.fixture
def bias_shape(num_filter):
return (num_filter, 1, 1)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
random_seed,
input_shape,
weight_shape,
bias_shape,
dtype,
stride,
padding,
dilation,
add_bias,
apply_relu,
):
np.random.seed(random_seed)
# scipy.signal.convolve2d does not support float16 data types, and
# the python fallback is too slow for general use. Computing
# ref_data in float32 will have fewer rounding errors than the TVM
# float16 compute, but those vary based on the schedule anyway.
conv_dtype = "float32" if dtype == "float16" else dtype
a_np = np.random.uniform(size=input_shape).astype(dtype)
w_np = np.random.uniform(size=weight_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding
).astype(dtype)
if add_bias:
c_np = c_np + b_np
if apply_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
class BaseConv2DTests:
add_bias = tvm.testing.parameter(False)
apply_relu = tvm.testing.parameter(False)
dilation = tvm.testing.parameter(1)
batch = tvm.testing.parameter(1)
def test_conv2d_nchw(
self,
target,
dev,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dtype,
ref_data,
dilation,
add_bias,
apply_relu,
):
target = tvm.target.Target(target)
is_cudnn_target = target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", [])
if target.kind.name == "vulkan" and dtype == "float16":
if not target.attrs.get("supports_float16", False) or not target.attrs.get(
"supports_16bit_buffer", False
):
pytest.xfail("Vulkan device does not support float16")
if (
target.kind.name == "cuda"
and dtype == "float16"
and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
):
pytest.xfail("CUDA float16 intrinsics not available")
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
has_asymmetric_padding = (pad_top != pad_bottom) or (pad_left != pad_right)
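        # For example (illustrative only): padding=1 with a 3x3 kernel expands to
        # (1, 1, 1, 1), which is symmetric, while padding=(0, 0, 1, 1) gives
        # pad_top=0, pad_left=0, pad_bottom=1, pad_right=1 and is asymmetric.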
if is_cudnn_target and has_asymmetric_padding:
pytest.xfail("CuDNN does not support asymmetric padding")
a_np, w_np, b_np, c_np = ref_data
A = te.placeholder(a_np.shape, name="A", dtype=dtype)
W = te.placeholder(w_np.shape, name="W", dtype=dtype)
bias = te.placeholder(b_np.shape, name="bias", dtype=dtype)
if "int" in dtype:
tol = {"atol": 0, "rtol": 0}
elif dtype == "float32":
tol = {"rtol": 1e-4, "atol": 2e-4}
elif dtype == "float16":
            # A summation in float16 with a single accumulator very
            # quickly runs into large rounding errors.  At some point,
            # this tolerance should be made schedule-dependent to avoid
            # false negatives.
num_values_summed = in_channel * kernel * kernel
gap_size = np.nextafter(c_np.max(), np.inf, dtype=c_np.dtype) - c_np.max()
tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
with autotvm.tophub.context(target): # load tophub pre-tuned parameters
if is_cudnn_target:
fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
else:
fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(target)
with target:
if is_cudnn_target:
C = fcompute(
A, W, (stride, stride), padding, (dilation, dilation), 1, "NCHW", dtype
)
else:
C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
if add_bias:
C = topi.add(C, bias)
if apply_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, bias, C],
target,
name="conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding_sum,
dilation,
),
)
func(a, w, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, **tol)
@tvm.testing.parametrize_targets("llvm")
def test_workload_padding(
self,
target,
input_shape,
weight_shape,
stride,
padding,
dilation,
dtype,
ref_data,
):
a_np, w_np, b_np, c_np = ref_data
_, _, out_height, out_width = c_np.shape
A = te.placeholder(input_shape, name="A", dtype=dtype)
W = te.placeholder(weight_shape, name="W", dtype=dtype)
with tvm.target.Target(target):
wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)
            # Check that the tile_ow candidates are factors of the output width.
cfg = autotvm.get_config()
_fallback_schedule(cfg, wkl)
ow_tile = np.prod(cfg["tile_ow"].size)
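            # Worked example (illustrative): for in_size=56, kernel=3, stride=1,
            # padding=1, the output width is (56 + 2 - 3) // 1 + 1 = 56, so the
            # product of the fallback tile_ow split sizes must equal 56.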
tvm.testing.assert_allclose(ow_tile, out_width)
class TestResNet18Workloads(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(3, 224, 64, 7, 2, 3),
(64, 56, 64, 3, 1, 1),
(64, 56, 64, 1, 1, 0),
(64, 56, 128, 3, 2, 1),
(64, 56, 128, 1, 2, 0),
(128, 28, 128, 3, 1, 1),
(128, 28, 256, 3, 2, 1),
(128, 28, 256, 1, 2, 0),
(256, 14, 256, 3, 1, 1),
(256, 14, 512, 3, 2, 1),
(256, 14, 512, 1, 2, 0),
(512, 7, 512, 3, 1, 1),
)
class TestInceptionV3Workloads(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(3, 299, 32, 3, 2, 0),
(32, 149, 32, 3, 1, 0),
(32, 147, 64, 3, 1, 1),
(64, 73, 80, 1, 1, 0),
(80, 73, 192, 3, 1, 0),
(192, 35, 64, 1, 1, 0),
(192, 35, 48, 1, 1, 0),
(48, 35, 64, 5, 1, 2),
(64, 35, 96, 3, 1, 1),
(96, 35, 96, 3, 1, 1),
(192, 35, 32, 1, 1, 0),
(256, 35, 64, 1, 1, 0),
(256, 35, 48, 1, 1, 0),
(288, 35, 64, 1, 1, 0),
(288, 35, 48, 1, 1, 0),
(288, 35, 384, 3, 2, 0),
(96, 35, 96, 3, 2, 0),
(768, 17, 192, 1, 1, 0),
(768, 17, 128, 1, 1, 0),
(128, 17, 128, 1, 1, 0),
(128, 17, 192, 7, 1, 3),
(128, 17, 128, 7, 1, 3),
(128, 17, 192, 1, 1, 0),
(768, 17, 160, 1, 1, 0),
        # disabled due to LLVM bugs with the NVPTX backend
# (160, 17, 160, 1, 1, 0),
(160, 17, 192, 7, 1, 3),
(160, 17, 160, 7, 1, 3),
(160, 17, 192, 1, 1, 0),
(192, 17, 192, 1, 1, 0),
(192, 17, 192, 7, 1, 3),
(192, 17, 320, 3, 2, 0),
(192, 17, 192, 3, 2, 0),
(1280, 8, 320, 1, 1, 0),
(1280, 8, 384, 1, 1, 0),
(384, 8, 384, 1, 1, 0),
(384, 8, 384, 3, 1, 1),
(1280, 8, 448, 1, 1, 0),
(448, 8, 384, 3, 1, 1),
(1280, 8, 192, 1, 1, 0),
(2048, 8, 320, 1, 1, 0),
(2048, 8, 384, 1, 1, 0),
(2048, 8, 448, 1, 1, 0),
(2048, 8, 192, 1, 1, 0),
(1024, 19, 84, 3, 1, 1),
(2048, 10, 126, 3, 1, 1),
(512, 5, 126, 3, 1, 1),
(256, 3, 126, 3, 1, 1),
)
class TestWeirdWorkloads(BaseConv2DTests):
batch, in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(2, 2, 2, 2, 2, 2, 2),
(3, 3, 3, 3, 3, 3, 3),
(4, 4, 4, 4, 4, 4, 4),
(5, 5, 5, 5, 5, 5, 5),
(6, 6, 6, 6, 6, 6, 6),
        # disabled due to LLVM bugs with the NVPTX backend
# (1, 1, 1, 1, 1, 1, 1),
# (2, 13, 71, 59, 3, 1, 1),
)
class TestAsymmetricPadding(BaseConv2DTests):
dilation = tvm.testing.parameter(1, 2)
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(3, 35, 64, 7, 2, (0, 0, 1, 1)),
(64, 8, 128, 3, 1, (3, 3, 2, 2)),
(64, 8, 64, 1, 1, (1, 2, 2, 1)),
(64, 17, 192, 1, 1, (1, 2)),
(64, 8, 64, 3, 1, (3, 1)),
(128, 8, 384, 3, 1, (0, 2)),
(64, 35, 64, 3, 1, (1, 2)),
(64, 8, 64, 1, 1, "VALID"),
(388, 8, 64, 3, 1, "VALID"),
(64, 10, 48, 3, 1, "VALID"),
(512, 19, 64, 1, 1, "SAME"),
(64, 5, 32, 2, 1, "SAME"),
(64, 8, 64, 3, 1, "SAME"),
(64, 8, 64, 3, 1, (1, 2, 2, 1)),
(64, 8, 64, 5, 2, (1, 3)),
(64, 8, 64, 3, 1, "VALID"),
(64, 8, 64, 24, 1, "SAME"),
(32, 35, 64, 7, 2, (0, 0, 2, 2)),
)
class TestBatchSize(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(64, 56, 64, 3, 1, 1),
)
batch = tvm.testing.parameter(1, 4, 9)
class TestBiasRelu(BaseConv2DTests):
apply_relu = tvm.testing.parameter(True, False, ids=["relu", "no_relu"])
add_bias = tvm.testing.parameter(True, False, ids=["bias", "no_bias"])
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(64, 56, 64, 3, 1, 1),
(64, 8, 64, 3, 1, (1, 2, 2, 1)),
(64, 8, 64, 5, 2, (1, 3)),
(64, 8, 64, 3, 1, "VALID"),
(64, 8, 64, 24, 1, "SAME"),
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_nhwc_implement = {
"generic": (topi.nn.conv2d_nhwc, topi.generic.schedule_conv2d_nhwc),
"gpu": (topi.gpu.conv2d_nhwc, topi.gpu.schedule_conv2d_nhwc),
"cpu": (topi.nn.conv2d_nhwc, topi.x86.schedule_conv2d_nhwc),
"arm_cpu": (
topi.arm_cpu.conv2d_nhwc_spatial_pack,
topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack,
),
"mali": (
topi.mali.conv2d_nhwc_spatial_pack,
topi.mali.schedule_conv2d_nhwc_spatial_pack,
),
"bifrost": (
topi.mali.conv2d_nhwc_spatial_pack,
topi.mali.schedule_conv2d_nhwc_spatial_pack,
),
"hls": (topi.nn.conv2d_nhwc, topi.hls.schedule_conv2d_nhwc),
}
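# Note on the table above (describing assumed dispatch behaviour): the keys are
# target keys such as "cpu" for llvm or "gpu" for cuda; tvm.topi.testing.dispatch
# picks the first matching entry and is expected to fall back to "generic" when
# no specialised entry matches.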
dtype = tvm.testing.parameter("float32")
batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation = tvm.testing.parameters(
(1, 256, 32, 256, 3, 1, "SAME", 1),
(4, 128, 16, 128, 5, 2, "SAME", 1),
(4, 128, 16, 256, 5, 2, "SAME", 1),
(1, 256, 32, 256, 3, 1, "VALID", 1),
(1, 256, 32, 256, 3, 1, "VALID", 1),
(4, 128, 16, 128, 5, 2, "VALID", 1),
(4, 128, 16, 256, 5, 2, "VALID", 1),
(1, 128, 16, 256, 3, 2, (0, 0, 1, 1), 1),
(1, 128, 16, 256, 3, 2, (1, 1, 2, 2), 1),
(1, 128, 16, 128, 5, 2, (3, 3, 2, 2), 1),
(1, 128, 16, 256, 3, 2, (0, 1, 2, 3), 1),
(1, 256, 32, 256, 3, 1, "SAME", 2),
(1, 256, 32, 256, 3, 1, (1, 1, 2, 2), 2),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation):
in_height = in_width = in_size
a_shape = (batch, in_height, in_width, in_channel)
w_shape = (kernel, kernel, in_channel, num_filter)
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
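    # Note: despite the name, b_np is the expected convolution output used as the
    # reference result, not a bias tensor.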
return a_np, w_np, b_np
def test_conv2d_nhwc_hwio(target, dev, ref_data, dtype, stride, padding, dilation):
a_np, w_np, b_np = ref_data
A = te.placeholder(a_np.shape, name="A", dtype=dtype)
W = te.placeholder(w_np.shape, name="W", dtype=dtype)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_nhwc_implement)
B = fcompute(A, W, stride, padding, dilation, dtype)
s = fschedule([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_conv2d_nhwc_ohwi(ref_data, dtype, stride, padding, dilation):
# only test on CPU target because topi doesn't have schedules for this layout
target = "llvm"
dev = tvm.device(target, 0)
a_np, w_np_hwio, b_np = ref_data
w_np_ohwi = w_np_hwio.transpose(3, 0, 1, 2) # HWIO -> OHWI
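    # Axis mapping (illustrative): HWIO axes are (0=H, 1=W, 2=I, 3=O), so
    # transpose(3, 0, 1, 2) reorders them to (O, H, W, I), i.e. OHWI.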
A = te.placeholder(a_np.shape, name="A", dtype=dtype)
W = te.placeholder(w_np_ohwi.shape, name="W", dtype=dtype)
B = topi.nn.conv2d(
A,
W,
stride,
padding,
dilation,
data_layout="NHWC",
kernel_layout="OHWI",
out_dtype="float32",
)
s = tvm.te.create_schedule(B.op)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np_ohwi, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import pytest
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
def verify_conv2d_1x1_nhwc_pack_int8(
batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1
):
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="uint8")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
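    # Typical quantized-conv setup: uint8 activations and int8 weights, with the
    # accumulation done in int32 (out_dtype="int32" below).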
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
adtype = A.dtype
wdtype = W.dtype
@memoize("topi.tests.test_topi_conv2d_1x1_nhwc_pack_int8.verify_nhwc.v2")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(adtype)
w_np = np.random.uniform(size=w_shape).astype(wdtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
B = topi.nn.conv2d(A, W, stride, padding, dilation, layout="NHWC", out_dtype="int32")
s = topi.x86.schedule_conv2d_nhwc_pack_int8([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], device)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
# for device in ['llvm -mcpu=skylake-avx512']:
for device in ["llvm"]:
check_device(device)
# TODO(@llyfacebook): Please fix https://github.com/apache/tvm/issues/4122 to enable this test.
@pytest.mark.skip
def test_conv2d_nhwc():
verify_conv2d_1x1_nhwc_pack_int8(1, 256, 32, 256, 1, 1, 0)
if __name__ == "__main__":
# test_conv2d_nhwc()
pass
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_nhwc_tensorcore_implement = {
"cuda": (topi.cuda.conv2d_nhwc_tensorcore, topi.cuda.schedule_conv2d_nhwc_tensorcore)
}
def verify_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices="cuda",
):
"""Test the conv2d with tensorcore for nhwc layout"""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
bias = te.placeholder((1, 1, 1, num_filter), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))  # dilate H and W of the HWIO kernel
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
if not nvcc.have_tensorcore(dev.compute_version):
print("skip because gpu does not support Tensor Cores")
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv2d_nhwc_tensorcore_implement
)
C = fcompute(A, W, stride, padding, dilation, "float32")
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
rtol = 1e-3
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)
check_device(devices)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_tensorcore():
"""Test the conv2d with tensorcore for nhwc layout"""
verify_conv2d_nhwc(16, 16, 14, 16, 3, 1, 1)
verify_conv2d_nhwc(16, 128, 7, 128, 7, 1, 3)
verify_conv2d_nhwc(16, 160, 7, 160, 7, 1, 3)
verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_bias=True)
verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True)
verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv2d_nhwc(16, 64, 17, 64, 7, 1, (3, 3, 2, 2))
verify_conv2d_nhwc(16, 64, 17, 64, 7, 1, "SAME")
verify_conv2d_nhwc(16, 48, 35, 48, 5, 1, "VALID")
verify_conv2d_nhwc(16, 48, 56, 48, 3, 1, (1, 1, 1, 1))
verify_conv2d_nhwc(16, 64, 28, 64, 3, 1, (1, 1, 1, 1))
if __name__ == "__main__":
test_conv2d_nhwc_tensorcore()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
# pylint: disable=bad-whitespace
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_nhwc_winograd_tensorcore = {
"cuda": (
topi.cuda.conv2d_nhwc_winograd_tensorcore,
topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore,
)
}
_conv2d_nhwc_winograd_direct = {
"cuda": (topi.cuda.conv2d_nhwc_winograd_direct, topi.cuda.schedule_conv2d_nhwc_winograd_direct)
}
def verify_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices="cuda",
bgemm="direct",
):
"""Test the conv2d with winograd for nhwc layout"""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
bias = te.placeholder((1, 1, 1, num_filter), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_nhwc_winograd.verify_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
if bgemm == "direct":
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv2d_nhwc_winograd_direct
)
elif bgemm == "tensorcore":
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv2d_nhwc_winograd_tensorcore
)
C = fcompute(A, W, stride, padding, dilation, "float32")
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=2e-3)
check_device(devices)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_winograd_direct():
"""Test the conv2d with winograd for nhwc layout"""
# resnet 18 workloads
print("test_winograd_direct...")
verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1, bgemm="direct")
verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
verify_conv2d_nhwc(1, 48, 35, 64, 5, 1, 2)
# weird workloads
verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
# Asymmetric padding
verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, "SAME")
verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
verify_conv2d_nhwc(1, 48, 35, 48, 5, 1, "VALID")
@tvm.testing.requires_cuda
@tvm.testing.requires_tensorcore
def test_conv2d_nhwc_winograd_tensorcore():
"""Test the conv2d with winograd for nhwc layout"""
verify_conv2d_nhwc(8, 64, 56, 64, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(8, 128, 28, 128, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(8, 256, 14, 256, 3, 1, 1, bgemm="tensorcore")
verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, (1, 1), add_relu=True, bgemm="tensorcore")
verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, "SAME", add_relu=True, bgemm="tensorcore")
if __name__ == "__main__":
test_conv2d_nhwc_winograd_direct()
test_conv2d_nhwc_winograd_tensorcore()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_transpose_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_transpose_nchw_implement = {
"generic": (topi.nn.conv2d_transpose_nchw, topi.generic.schedule_conv2d_transpose_nchw),
"cpu": (topi.x86.conv2d_transpose_nchw, topi.x86.schedule_conv2d_transpose_nchw),
"arm_cpu": (topi.arm_cpu.conv2d_transpose_nchw, topi.arm_cpu.schedule_conv2d_transpose_nchw),
"gpu": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
"hls": (topi.nn.conv2d_transpose_nchw, topi.hls.schedule_conv2d_transpose_nchw),
}
def verify_conv2d_transpose_nchw(
batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
in_height, in_width = in_size
kernel_height, kernel_width = kernel
stride_height, stride_width = stride
pad_top, pad_left, pad_bottom, pad_right = padding
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((in_channel, num_filter, kernel_height, kernel_width), name="W")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_transpose.verify_conv2d_transpose_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np, w_np, stride, padding, output_padding
)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
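    # Expected output size (illustrative, assuming the usual transposed-conv
    # relation): out = (in - 1) * stride - (pad_before + pad_after) + kernel
    # + output_padding.  E.g. in=224, kernel=3, stride=2, pads (1, 1),
    # output_padding=1 gives (224 - 1) * 2 - 2 + 3 + 1 = 448.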
def check(fcompute, fschedule, target, dev):
B = fcompute(
A,
W,
[stride_height, stride_width],
[pad_top, pad_left, pad_bottom, pad_right],
A.dtype,
output_padding,
)
C = topi.nn.relu(B)
s1 = fschedule([B])
s2 = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func1 = tvm.build(s1, [A, W, B], target)
func2 = tvm.build(s2, [A, W, C], target)
func1(a, w, b)
func2(a, w, c)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
def check_generic(target, dev):
print("Running generic on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = _conv2d_transpose_nchw_implement["generic"]
check(fcompute, fschedule, target, dev)
check_generic("llvm", tvm.cpu(0))
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(
target, _conv2d_transpose_nchw_implement
)
check(fcompute, fschedule, target, dev)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nchw():
verify_conv2d_transpose_nchw(1, 3, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (1, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (1, 1))
verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0))
verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0))
verify_conv2d_transpose_nchw(16, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0))
if __name__ == "__main__":
test_conv2d_transpose_nchw()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv2d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv2d_nchw_winograd_implement = {
"arm_cpu": (topi.arm_cpu.conv2d_nchw_winograd, topi.arm_cpu.schedule_conv2d_nchw_winograd),
"cuda": (topi.cuda.conv2d_nchw_winograd, topi.cuda.schedule_conv2d_nchw_winograd),
"mali": (topi.mali.conv2d_nchw_winograd, topi.mali.schedule_conv2d_nchw_winograd),
}
def verify_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices=["cuda", "llvm -device=arm_cpu", "opencl -device=mali"],
):
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
C = fcompute(A, W, stride, padding, dilation, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
rtol = 1e-3
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)
for device in devices:
check_device(device)
@tvm.testing.uses_gpu
def test_conv2d_nchw():
# inception v3 workloads
verify_conv2d_nchw(1, 128, 17, 192, 7, 1, 3, devices=["cuda"])
verify_conv2d_nchw(1, 128, 17, 128, 7, 1, 3, devices=["cuda"])
verify_conv2d_nchw(1, 160, 17, 160, 7, 1, 3, devices=["cuda"])
# resnet 18 workloads
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1)
verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1)
# batch size = 2
verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1)
# relu, bias
verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_bias=True)
verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True)
verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True)
# weird workloads
verify_conv2d_nchw(1, 1, 1, 1, 3, 1, 1)
verify_conv2d_nchw(3, 3, 3, 3, 3, 1, 1)
verify_conv2d_nchw(2, 13, 71, 59, 3, 1, 1)
verify_conv2d_nchw(1, 48, 35, 64, 5, 1, 2, devices=["cuda"])
# Asymmetric padding
verify_conv2d_nchw(1, 48, 56, 48, 3, 1, (1, 1, 1, 1))
verify_conv2d_nchw(1, 64, 28, 64, 3, 1, (1, 1, 1, 1))
verify_conv2d_nchw(1, 128, 14, 128, 3, 1, (1, 1))
verify_conv2d_nchw(1, 512, 7, 512, 3, 1, "SAME")
verify_conv2d_nchw(2, 13, 71, 59, 3, 1, (1, 1, 1, 1))
verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1, 1, 1), add_bias=True)
verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
verify_conv2d_nchw(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
verify_conv2d_nchw(1, 64, 17, 192, 7, 1, (3, 1), devices=["cuda"])
verify_conv2d_nchw(1, 64, 17, 64, 7, 1, (3, 3, 2, 2), devices=["cuda"])
verify_conv2d_nchw(1, 160, 17, 160, 7, 1, "SAME", devices=["cuda"])
verify_conv2d_nchw(1, 48, 35, 48, 5, 1, "VALID", devices=["cuda"])
def verify_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
):
    # This version is intended to be used by the auto-scheduler,
    # so we only test the correctness of the compute declaration
    # with the default naive schedule on CPU.
A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
bias = te.placeholder((1, 1, 1, num_filter), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
target = "llvm"
dev = tvm.device(target)
C = topi.nn.conv2d_winograd_nhwc(A, W, stride, padding, dilation, dtype)
s = te.create_schedule([C.op])
a = tvm.nd.array(a_np, device=dev)
w = tvm.nd.array(w_np, device=dev)
b = tvm.nd.array(b_np, device=dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), device=dev)
func = tvm.build(s, [A, W, C], target=target)
func(a, w, c)
rtol = 1e-3
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)
def test_conv2d_nhwc():
    # This version is intended to be used by the auto-scheduler,
    # so we only test the correctness of the compute declaration
    # with the default naive schedule on CPU.
# resnet 18 workloads
verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1)
verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
# more shapes
verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, 1)
verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
# Asymmetric padding
verify_conv2d_nhwc(1, 3, 7, 3, 3, 1, "SAME")
verify_conv2d_nhwc(1, 48, 35, 48, 3, 1, "VALID")
if __name__ == "__main__":
test_conv2d_nchw()
test_conv2d_nhwc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv3d_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
_conv3d_ncdhw_implement = {
"generic": (topi.nn.conv3d_ncdhw, topi.generic.schedule_conv3d_ncdhw),
"cpu": (topi.x86.conv3d_ncdhw, topi.x86.schedule_conv3d_ncdhw),
"gpu": (topi.cuda.conv3d_ncdhw, topi.cuda.schedule_conv3d_ncdhw),
}
def verify_conv3d_ncdhw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
groups=1,
add_bias=False,
add_relu=False,
):
if isinstance(kernel, (tuple, list)):
if len(kernel) == 3:
kernel_d = kernel[0]
kernel_h = kernel[1]
kernel_w = kernel[2]
else:
raise ValueError("Size of kernel can only be 3")
elif isinstance(kernel, int):
kernel_d = kernel_h = kernel_w = kernel
else:
raise ValueError("Unknown kernel option %s" % kernel)
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel_d, kernel_h, kernel_w)
)
padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
)
)
in_depth = in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel_d, kernel_h, kernel_w), name="W")
bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding, groups)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ncdhw_implement)
with tvm.target.Target(target):
C = fcompute(
A,
W,
(stride, stride, stride),
padding,
(dilation, dilation, dilation),
groups,
dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel_d,
kernel_h,
kernel_w,
stride,
padding_sum,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
for target, dev in tvm.testing.enabled_targets():
with autotvm.tophub.context(target): # load tophub pre-tuned parameters
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_ncdhw():
# 3DCNN workloads
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 0)
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 0)
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 1)
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 1)
# bias, relu
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_relu=True)
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True)
verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True, add_relu=True)
# dilation = 2
verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, 1, dilation=2)
# batch size
verify_conv3d_ncdhw(4, 64, 56, 5, 3, 1, 1)
# weird workloads
verify_conv3d_ncdhw(2, 2, 2, 2, 2, 2, 2)
verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 3)
# Asymmetric padding
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 0, 0, 1, 1, 1))
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 2, 1, 2, 1))
verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, (2, 2, 2, 1, 1, 1), dilation=2)
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 1, 1))
verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 0))
verify_conv3d_ncdhw(1, 32, 32, 1, 3, 1, "VALID")
verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, "VALID")
# DHW kernel layout
verify_conv3d_ncdhw(1, 32, 56, 16, (3, 5, 7), 2, (1, 2, 3))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 7), 2, (1, 2, 3, 0, 3, 2))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 3, 7), 2, (1, 2, 3))
verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 3), 2, (1, 3, 1))
# grouped workloads
verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 0, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 0, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 1, groups=4)
verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 1, groups=4)
if __name__ == "__main__":
test_conv3d_ncdhw()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv3d_ndhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_conv3d_ndhwc_implement = {
"generic": (topi.nn.conv3d_ndhwc, topi.generic.schedule_conv3d_ndhwc),
"cpu": (topi.x86.conv3d_ndhwc, topi.x86.schedule_conv3d_ndhwc),
"gpu": (topi.cuda.conv3d_ndhwc, topi.cuda.schedule_conv3d_ndhwc),
}
def verify_conv3d_ndhwc(
target,
dev,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
groups=1,
):
if isinstance(in_size, tuple):
in_depth, in_height, in_width = in_size
else:
in_depth = in_height = in_width = in_size
if isinstance(kernel, tuple):
kernel_depth, kernel_height, kernel_width = kernel
else:
kernel_depth = kernel_height = kernel_width = kernel
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
W = te.placeholder(
(kernel_depth, kernel_height, kernel_width, in_channel // groups, num_filter), name="W"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_ndhwc.verify_ndhwc.v2")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding, groups)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ndhwc_implement)
with tvm.target.Target(target):
B = fcompute(A, W, stride, padding, dilation, groups, dtype)
s = fschedule([B])
dev = tvm.device(target, 0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], target)
print(tvm.lower(s, [A, W, B], target))
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_conv3d_ndhwc(target, dev):
verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME")
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "VALID")
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "VALID")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "VALID")
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "VALID")
# dilation = 2
verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "SAME", dilation=2)
verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 3, 3), (1, 2, 2), "SAME")
verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 6, 6), (1, 2, 2), (0, 2, 2))
verify_conv3d_ndhwc(target, dev, 1, 4, (20, 256, 256), 8, (1, 5, 5), (1, 2, 2), (0, 2, 2))
verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME", groups=4)
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME", groups=4)
verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME", groups=4)
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
import tvm.testing
_conv3d_ndhwc_tensorcore_implement = {
"cuda": (topi.cuda.conv3d_ndhwc_tensorcore, topi.cuda.schedule_conv3d_ndhwc_tensorcore)
}
def verify_conv3d_ndhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
devices="cuda",
):
"""Test the conv3d with tensorcore for ndhwc layout"""
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (kernel, kernel, kernel)
)
padding_sum = pad_front + pad_top + pad_left + pad_back + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_depth = in_height = in_width = in_size
dtype = "float16"
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), dtype, name="A")
W = te.placeholder((kernel, kernel, kernel, in_channel, num_filter), dtype, name="W")
bias = te.placeholder((1, 1, 1, 1, num_filter), dtype, name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv3d_ndhwc.verify_conv3d_ndhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))  # dilate D, H, W of the DHWIO kernel
c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(
device, _conv3d_ndhwc_tensorcore_implement
)
C = fcompute(A, W, stride, padding, dilation, 1, "float16")
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
        # Tensor cores are fairly inaccurate: with large shapes the accumulated
        # error grows, especially for values far from 1.  The absolute tolerance
        # is effectively disabled (set very large) and the check relies on the
        # relative tolerance instead.
        tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e200, rtol=0.01)
check_device(devices)
@tvm.testing.requires_tensorcore
@tvm.testing.requires_cuda
def test_conv3d_ndhwc_tensorcore():
"""Test the conv3d with tensorcore for ndhwc layout"""
verify_conv3d_ndhwc(16, 16, 14, 16, 3, 1, 1)
verify_conv3d_ndhwc(16, 64, 7, 64, 7, 1, 3)
verify_conv3d_ndhwc(16, 32, 7, 32, 7, 1, 3)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_bias=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True)
verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, (3, 3, 3, 2, 2, 2))
verify_conv3d_ndhwc(16, 16, 17, 16, 7, 1, "SAME")
verify_conv3d_ndhwc(8, 16, 35, 32, 5, 1, "VALID")
verify_conv3d_ndhwc(16, 32, 16, 32, 3, 1, (1, 1, 1, 1, 1, 1))
verify_conv3d_ndhwc(16, 16, 12, 16, 3, 1, (1, 1, 1, 1, 1, 1))
if __name__ == "__main__":
test_conv3d_ndhwc_tensorcore()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_conv3d_transpose_ncdhw_implement = {
"generic": (topi.nn.conv3d_transpose_ncdhw, topi.generic.schedule_conv3d_transpose_ncdhw),
"cpu": (topi.x86.conv3d_transpose_ncdhw, topi.x86.schedule_conv3d_transpose_ncdhw),
"gpu": (topi.cuda.conv3d_transpose_ncdhw, topi.cuda.schedule_conv3d_transpose_ncdhw),
}
def verify_conv3d_transpose_ncdhw(
batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
in_depth, in_height, in_width = in_size
kernel_depth, kernel_height, kernel_width = kernel
stride_depth, stride_height, stride_width = stride
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = padding
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder(
(in_channel, num_filter, kernel_depth, kernel_height, kernel_width), name="W"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_transpose.verify_conv3d_transpose_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(
a_np, w_np, stride, padding, output_padding
)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(
target, _conv3d_transpose_ncdhw_implement
)
B = fcompute(
A,
W,
[stride_depth, stride_height, stride_width],
[pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right],
A.dtype,
output_padding,
)
C = topi.nn.relu(B)
s1 = fschedule([B])
s2 = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func1 = tvm.build(s1, [A, W, B], target)
func2 = tvm.build(s2, [A, W, C], target)
func1(a, w, b)
func2(a, w, c)
tvm.testing.assert_allclose(b.numpy(), b_np, atol=1e-4, rtol=1e-4)
tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e-4, rtol=1e-4)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_transpose_ncdhw():
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 1, (1, 1, 1), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 2, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (2, 2, 2)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (1, 0, 2)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (3, 3, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 3, (24, 24, 24), 16, (2, 2, 2), (2, 2, 2), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 32, (5, 5, 5), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (1, 1, 1)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 3, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
verify_conv3d_transpose_ncdhw(
1, 8, (32, 32, 32), 64, (3, 5, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)
)
if __name__ == "__main__":
test_conv3d_transpose_ncdhw()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_conv3d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for 3d convolution with winograd."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
_conv3d_ncdhw_implement = {
"gpu": (topi.cuda.conv3d_ncdhw_winograd, topi.cuda.schedule_conv3d_ncdhw_winograd),
}
def verify_conv3d_ncdhw(
batch,
in_channel,
in_size,
num_filter,
depth_kernel,
space_kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
):
pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
padding, (depth_kernel, space_kernel, space_kernel)
)
padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, space_kernel, stride, padding_sum, dilation)
)
in_depth = in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel, depth_kernel, space_kernel, space_kernel), name="W")
bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
if add_bias:
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
with tvm.target.Target(device):
C = fcompute(
A, W, (stride, stride, stride), padding, (dilation, dilation, dilation), 1, dtype
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
space_kernel,
stride,
padding_sum,
dilation,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
space_kernel,
stride,
padding_sum,
dilation,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
for device in ["cuda"]:
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device)
@tvm.testing.requires_gpu
def test_conv3d_ncdhw():
# Try without depth transformation
# 3DCNN workloads
verify_conv3d_ncdhw(1, 61, 20, 120, 3, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 1, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 5, 3, 1, 0)
verify_conv3d_ncdhw(1, 61, 20, 120, 5, 5, 1, 2)
verify_conv3d_ncdhw(1, 61, 20, 120, 1, 5, 1, 2)
verify_conv3d_ncdhw(1, 61, 20, 120, 7, 7, 1, 3)
verify_conv3d_ncdhw(1, 128, 12, 256, 3, 3, 1, 1)
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1)
# bias, relu
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1, add_relu=True)
verify_conv3d_ncdhw(1, 64, 12, 128, 3, 3, 1, 1, add_relu=True, add_bias=True)
verify_conv3d_ncdhw(1, 64, 12, 128, 1, 3, 1, 1, add_relu=True, add_bias=True)
# dilation = 2
verify_conv3d_ncdhw(1, 16, 12, 16, 3, 3, 1, "VALID", dilation=2)
verify_conv3d_ncdhw(1, 16, 12, 16, 1, 3, 1, "VALID", dilation=2)
# batch size
verify_conv3d_ncdhw(4, 32, 12, 64, 3, 3, 1, 1)
verify_conv3d_ncdhw(4, 32, 12, 64, 1, 3, 1, 1)
# weird workloads
verify_conv3d_ncdhw(2, 2, 2, 2, 3, 3, 1, 2)
verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 1, 3)
if __name__ == "__main__":
test_conv3d_ncdhw()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_correlation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""test of correlation operator in NCHW layout"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
_correlation_implement = {
"generic": (topi.nn.correlation_nchw, topi.generic.schedule_correlation_nchw),
"gpu": (topi.cuda.correlation_nchw, topi.cuda.schedule_correlation_nchw),
}
(
data_shape,
kernel_size,
max_displacement,
stride1,
stride2,
pad_size,
is_multiply,
) = tvm.testing.parameters(
((1, 3, 10, 10), 1, 4, 1, 1, 4, True),
((1, 3, 10, 10), 1, 5, 1, 1, 5, True),
((5, 1, 4, 4), 3, 1, 2, 1, 2, True),
((5, 1, 6, 4), 3, 1, 2, 2, 2, False),
((5, 1, 11, 11), 5, 1, 1, 1, 2, False),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
dtype, data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
):
a_np = np.random.uniform(size=data_shape).astype(dtype)
b_np = np.random.uniform(size=data_shape).astype(dtype)
c_np = tvm.topi.testing.correlation_nchw_python(
a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
)
return a_np, b_np, c_np
def test_correlation_nchw(
target,
dev,
ref_data,
dtype,
kernel_size,
max_displacement,
stride1,
stride2,
pad_size,
is_multiply,
):
a_np, b_np, c_np = ref_data
A = te.placeholder(a_np.shape, name="data1", dtype=dtype)
B = te.placeholder(b_np.shape, name="data2", dtype=dtype)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _correlation_implement)
with tvm.target.Target(target):
C = fcompute(A, B, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=dtype, device=dev)
func = tvm.build(s, [A, B, C], target)
func(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_deformable_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
_deformable_conv2d_nchw_implement = {
"generic": (topi.nn.deformable_conv2d_nchw, topi.generic.schedule_deformable_conv2d_nchw),
"cuda": (topi.cuda.deformable_conv2d_nchw, topi.cuda.schedule_deformable_conv2d_nchw),
}
_deformable_conv2d_nhwc_implement = {
"generic": (topi.nn.deformable_conv2d_nhwc, topi.generic.schedule_deformable_conv2d_nhwc),
}
def verify_deformable_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
deformable_groups=1,
groups=1,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
deformable_groups,
groups,
)
)
A = te.placeholder((batch, in_channel, in_size, in_size), name="A")
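    # Output size of a (possibly dilated) convolution:
    # out = (in + 2 * pad - ((kernel - 1) * dilation + 1)) // stride + 1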
out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
Offset = te.placeholder(
(batch, deformable_groups * kernel * kernel * 2, out_size, out_size), name="offset"
)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
offset_shape = get_const_tuple(Offset.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
offset_np = np.random.randn(*offset_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.deformable_conv2d_nchw_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
)
return a_np, offset_np, w_np, c_np
a_np, offset_np, w_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nchw_implement)
with tvm.target.Target(device):
C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
offset = tvm.nd.array(offset_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
func = tvm.build(s, [A, Offset, W, C], device)
func(a, offset, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
def verify_deformable_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
deformable_groups=1,
groups=1,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
deformable_groups,
groups,
)
)
A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
Offset = te.placeholder(
(batch, out_size, out_size, deformable_groups * kernel * kernel * 2), name="offset"
)
W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
bias = te.placeholder((num_filter,), name="bias")
a_shape = get_const_tuple(A.shape)
offset_shape = get_const_tuple(Offset.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
offset_np = np.random.randn(*offset_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.deformable_conv2d_nhwc_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
)
return a_np, offset_np, w_np, c_np
a_np, offset_np, w_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nhwc_implement)
with tvm.target.Target(device):
C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
offset = tvm.nd.array(offset_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
func = tvm.build(s, [A, Offset, W, C], device)
func(a, offset, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for device in ["llvm"]:
check_device(device)
@tvm.testing.uses_gpu
def test_deformable_conv2d_nchw():
verify_deformable_conv2d_nchw(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 2, dilation=2)
def test_deformable_conv2d_nhwc():
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 2, dilation=2)
if __name__ == "__main__":
test_deformable_conv2d_nchw()
test_deformable_conv2d_nhwc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for dense operator"""
import contextlib
import numpy as np
import pytest
import sys
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
from common import Int8Fallback
random_seed = tvm.testing.parameter(0)
use_bias = tvm.testing.parameter(True, False)
batch_size = tvm.testing.parameter(1, 2, 128)
in_dim, out_dim = tvm.testing.parameters((1024, 1000))
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
("float16", "float16"),
("int8", "int32"),
)
_dense_implementations = {
"generic": [(topi.nn.dense, topi.generic.schedule_dense)],
"cpu": [
(topi.x86.dense_nopack, topi.x86.schedule_dense_nopack),
(topi.x86.dense_pack, topi.x86.schedule_dense_pack),
],
"gpu": [
(topi.gpu.dense_small_batch, topi.gpu.schedule_dense_small_batch),
(topi.gpu.dense_large_batch, topi.gpu.schedule_dense_large_batch),
],
"mali": [(topi.mali.dense, topi.mali.schedule_dense)],
"bifrost": [(topi.bifrost.dense, topi.bifrost.schedule_dense)],
"hls": [(topi.nn.dense, topi.hls.schedule_dense)],
}
@tvm.testing.fixture(cache_return_value=True)
def dense_ref_data(random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype):
np.random.seed(random_seed)
if "float" in in_dtype:
a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype)
elif in_dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype)
else:
raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
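    # Reference result: dense computes A (batch, in_dim) @ B.T, with B stored as
    # (out_dim, in_dim); the bias is added when requested and relu is applied to
    # match the topi.nn.relu used in the test below.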
matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
if use_bias:
matmul += c_np
d_np = np.maximum(matmul, 0)
return (a_np, b_np, c_np, d_np)
def test_dense(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
implementations=None,
):
target = tvm.target.Target(target)
if target.kind.name == "cuda":
if in_dtype == "int8" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
pytest.xfail("CUDA int8 intrinsics not available")
if in_dtype == "float16" and not tvm.contrib.nvcc.have_fp16(dev.compute_version):
pytest.xfail("CUDA float16 intrinsics not available")
if target.kind.name == "vulkan":
if in_dtype == "int8" and (
not target.attrs.get("supports_int8", False)
or not target.attrs.get("supports_8bit_buffer", False)
):
pytest.xfail("Vulkan int8 driver support not available")
if in_dtype == "float16" and (
not target.attrs.get("supports_float16", False)
or not target.attrs.get("supports_16bit_buffer", False)
):
pytest.xfail("Vulkan float16 driver support not available")
if (
target.kind.name not in ["llvm", "c"]
and len(set(target.keys) & set(_dense_implementations)) == 0
):
pytest.xfail("No implementation for tvm.topi.testing.dispatch to find")
if "int" in in_dtype:
tol = {"atol": 0, "rtol": 0}
elif in_dtype == "float32":
tol = {"rtol": 1e-5, "atol": 1e-5}
elif in_dtype == "float16":
tol = {"rtol": 5e-2, "atol": 1e-5}
A = te.placeholder((batch_size, in_dim), name="A", dtype=in_dtype)
B = te.placeholder((out_dim, in_dim), name="B", dtype=in_dtype)
C = te.placeholder((out_dim,), name="C", dtype=out_dtype)
a_np, b_np, c_np, d_np = dense_ref_data
if implementations is None:
implementations = tvm.topi.testing.dispatch(target, _dense_implementations)
for fcompute, fschedule in implementations:
with tvm.target.Target(target):
D = fcompute(A, B, C if use_bias else None, out_dtype)
D = topi.nn.relu(D)
s = fschedule([D])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
f = tvm.build(s, [A, B, C, D], target, name="dense")
f(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, **tol)
@pytest.mark.parametrize("target,in_dtype,out_dtype", [("cuda", "int8", "int32")])
def test_dense_cuda_int8(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
):
implementations = [
(topi.cuda.dense_int8, topi.cuda.schedule_dense_int8),
]
with Int8Fallback():
test_dense(
target,
dev,
batch_size,
in_dim,
out_dim,
use_bias,
dense_ref_data,
in_dtype,
out_dtype,
implementations=implementations,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_dense_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Test code for dense tensorcore operator"""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm import te
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
_dense_implement = {"gpu": [(topi.cuda.dense_tensorcore, topi.cuda.schedule_dense_tensorcore)]}
def convert_int32_into_int4(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
K, L = a_int32.shape
assert L % 8 == 0
a_int4 = np.zeros(shape=(K, L // 8), dtype=np.int32)
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[k, l] = a_int4[k, l] | ((a_int32[k, l * 8 + m] & 0xF) << ((7 - m) * 4))
return a_int4
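# Packing illustration for the two helpers here: eight 4-bit values per int32 word,
# most-significant nibble first, e.g. a row [1, 2, 3, 4, 5, 6, 7, 8] becomes 0x12345678.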
def convert_int32_into_int4_bias(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
(L,) = a_int32.shape
assert L % 8 == 0
a_int4 = np.zeros(shape=(L // 8), dtype=np.int32)
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[l] = a_int4[l] | ((a_int32[l * 8 + m] & 0xF) << ((7 - m) * 4))
return a_int4
def verify_dense(batch, in_dim, out_dim, dtype, use_bias=True):
"""Dense tensorcore verify function"""
A = te.placeholder((batch, in_dim), name="A", dtype=dtype)
B = te.placeholder((out_dim, in_dim), name="B", dtype=dtype)
C = te.placeholder((out_dim,), name="C", dtype=dtype)
assert dtype in ["int4", "int8", "float16"]
out_dtype = "float32"
if dtype in ["int8", "int4"]:
out_dtype = "int32"
# use memoize to pickle the test data for next time use
@memoize("topi.tests.test_topi_dense_tensorcore")
def get_ref_data():
if dtype == "int4":
a_np = np.random.randint(low=-8, high=7, size=(batch, in_dim))
b_np = np.random.randint(low=-8, high=7, size=(out_dim, in_dim))
c_np = np.random.randint(low=-8, high=7, size=(out_dim,))
elif dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(batch, in_dim)).astype(dtype)
b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(dtype)
c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(dtype)
else:
a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)
b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)
c_np = np.random.uniform(size=(out_dim,)).astype(dtype)
d_np = tvm.topi.testing.dense(a_np, b_np, c_np, use_bias, True, out_dtype)
return (a_np, b_np, c_np, d_np)
# get the test data
a_np, b_np, c_np, d_np = get_ref_data()
if dtype == "int4":
a_np = convert_int32_into_int4(a_np)
b_np = convert_int32_into_int4(b_np)
c_np = convert_int32_into_int4_bias(c_np)
def check_device(device):
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
with tvm.target.Target(device):
D = fcompute(A, B, C if use_bias else None, out_dtype)
D = topi.nn.relu(D)
s = fschedule([D])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
f = tvm.build(s, [A, B, C, D], device, name="dense")
f(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-3)
check_device("cuda")
@tvm.testing.requires_tensorcore
def test_dense_tensorcore():
"""Test cases"""
for dtype in ["float16", "int8"]:
verify_dense(8, 16, 32, "float16", use_bias=True)
verify_dense(16, 32, 16, dtype, use_bias=True)
verify_dense(256, 1024, 1024, dtype, use_bias=True)
verify_dense(1000, 1024, 1024, dtype, use_bias=False)
verify_dense(256, 2048, 1000, dtype, use_bias=False)
    # TODO(wyc-ruiker): fix the int4 use_bias=True case
verify_dense(16, 32, 16, "int4", use_bias=False)
verify_dense(256, 1024, 1024, "int4", use_bias=False)
verify_dense(1000, 1024, 1024, "int4", use_bias=False)
verify_dense(256, 2048, 1000, "int4", use_bias=False)
if __name__ == "__main__":
test_dense_tensorcore()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for depth to space"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_depth_to_space(
block_size, batch, in_channel, in_height, in_width, layout="NCHW", mode="DCR"
):
out_channel = int(in_channel / (block_size * block_size))
out_height = int(in_height * block_size)
out_width = int(in_width * block_size)
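    # e.g. block_size=2 maps a (1, 4, 1, 1) NCHW input to a (1, 1, 2, 2) output.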
if layout == "NCHW":
in_shape = [batch, in_channel, in_height, in_width]
out_shape = [batch, out_channel, out_height, out_width]
elif layout == "NHWC":
in_shape = [batch, in_height, in_width, in_channel]
out_shape = [batch, out_height, out_width, out_channel]
else:
raise NotImplementedError("Layout not supported {}".format(layout))
A = te.placeholder(in_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=in_shape).astype(dtype)
B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
def check_device(device, dev):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for device, dev in tvm.testing.enabled_targets():
check_device(device, dev)
@tvm.testing.uses_gpu
def test_depth_to_space():
for layout in ["NCHW", "NHWC"]:
for mode in ["DCR", "CDR"]:
# Simplest possible case
verify_depth_to_space(2, 1, 4, 1, 1, layout=layout, mode=mode)
# Average input size
verify_depth_to_space(2, 1, 32, 32, 32, layout=layout, mode=mode)
# Large block size
verify_depth_to_space(8, 1, 256, 32, 32, layout=layout, mode=mode)
# Large batch size
verify_depth_to_space(4, 8, 32, 32, 32, layout=layout, mode=mode)
# Large input size
verify_depth_to_space(4, 8, 32, 128, 128, layout=layout, mode=mode)
if __name__ == "__main__":
test_depth_to_space()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.depthwise_conv2d import _get_workload
from tvm.topi.x86.depthwise_conv2d import _fallback_schedule
from tvm.topi.generic import conv2d as conv2d_generic
_depthwise_conv2d_implement = {
"NCHW": {
"generic": [(topi.nn.depthwise_conv2d_nchw, topi.generic.schedule_depthwise_conv2d_nchw)],
"arm_cpu": [
(topi.arm_cpu.depthwise_conv2d_nchw, topi.arm_cpu.schedule_depthwise_conv2d_nchw),
(
topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack,
),
],
"gpu": [(topi.cuda.depthwise_conv2d_nchw, topi.cuda.schedule_depthwise_conv2d_nchw)],
"mali": [(topi.mali.depthwise_conv2d_nchw, topi.mali.schedule_depthwise_conv2d_nchw)],
"bifrost": [(topi.nn.depthwise_conv2d_nchw, topi.bifrost.schedule_depthwise_conv2d_nchw)],
"intel_graphics": [
(
topi.intel_graphics.depthwise_conv2d_nchw,
topi.intel_graphics.schedule_depthwise_conv2d_nchw,
)
],
},
"NHWC": {
"generic": [
(topi.nn.depthwise_conv2d_nhwc, topi.generic.schedule_depthwise_conv2d_nhwc),
(topi.nn.depthwise_conv2d_nhwc, conv2d_generic.schedule_depthwise_conv2d_nhwc),
],
"arm_cpu": [
(
topi.arm_cpu.compute_depthwise_conv2d_nhwc,
topi.arm_cpu.schedule_depthwise_conv2d_nhwc,
)
],
"gpu": [(topi.nn.depthwise_conv2d_nhwc, topi.cuda.schedule_depthwise_conv2d_nhwc)],
"mali": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
"bifrost": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
},
"NCHWc": {
"generic": [(topi.x86.depthwise_conv2d_NCHWc, topi.x86.schedule_depthwise_conv2d_NCHWc)],
},
}
random_seed = tvm.testing.parameter(0)
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
("float16", "float16"),
)
@tvm.testing.fixture
def input_shape(layout, batch, in_channel, in_size, filter_shape):
if layout == "NCHW":
return (batch, in_channel, in_size, in_size)
elif layout == "NHWC":
return (batch, in_size, in_size, in_channel)
elif layout == "NCHWc":
oc_block = filter_shape[-1]
ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0)
return (batch, in_channel // ic_block, in_size, in_size, ic_block)
@tvm.testing.fixture
def filter_shape(layout, in_channel, channel_multiplier, kernel):
filter_channel = in_channel
if layout == "NCHW":
return (filter_channel, channel_multiplier, kernel, kernel)
elif layout == "NHWC":
return (kernel, kernel, filter_channel, channel_multiplier)
elif layout == "NCHWc":
out_channel = in_channel * channel_multiplier
# For testing the functionality, we choose an arbitrary block
# size that can divide out_channel, regardless of the
# performance.
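        # e.g. out_channel = 728 picks oc_block = 14 (the largest factor <= 16),
        # giving a filter shape of (52, 1, kernel, kernel, 1, 14).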
oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0)
return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block)
@tvm.testing.fixture
def scale_shape(layout, in_channel, channel_multiplier, filter_shape):
out_channel = in_channel * channel_multiplier
if layout in ("NCHW", "NHWC"):
return (out_channel,)
if layout == "NCHWc":
oc_block = filter_shape[-1]
return (out_channel // oc_block, oc_block)
raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def shift_shape(scale_shape):
return scale_shape
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
random_seed,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
dilation,
stride,
padding,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
):
np.random.seed(random_seed)
# scipy.signal.convolve2d does not support float16 data types, and
# the python fallback is too slow for general use. Computing
# ref_data in float32 will have fewer rounding errors than the TVM
# float16 compute, but those vary based on schedule anyways.
conv_dtype = "float32" if in_dtype == "float16" else in_dtype
input_np = np.random.uniform(size=input_shape).astype(in_dtype)
filter_np = np.random.uniform(size=filter_shape).astype(in_dtype)
scale_np = np.random.uniform(size=scale_shape).astype(out_dtype)
shift_np = np.random.uniform(size=shift_shape).astype(out_dtype)
if layout == "NCHW":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw
dilation = (1, 1, dilation, dilation)
reshape = (1, -1, 1, 1)
elif layout == "NHWC":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc
dilation = (dilation, dilation, 1, 1)
reshape = (1, 1, 1, -1)
elif layout == "NCHWc":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc
dilation = (1, 1, dilation, dilation, 1, 1)
reshape = (1, scale_shape[0], 1, 1, scale_shape[1])
dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation)
output_np = np_depthwise_conv2d(
input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding
).astype(out_dtype)
if use_scale_shift:
output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape)
if apply_relu:
output_np = np.maximum(output_np, 0)
return (
input_np,
filter_np,
scale_np,
shift_np,
output_np,
)
class BaseDepthwiseConv2D:
"""Provides the test_conv2d test function, to be used by other test classes.
Test parameter sets are split out into different classes for
readability (e.g. used for mobilenet), and for restrictions
(e.g. implemented only for llvm).
"""
layout = tvm.testing.parameter("NCHW", "NHWC")
(batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
(1, 728, 32, 1, 3, 1),
(4, 256, 64, 2, 5, 2),
)
padding = tvm.testing.parameter("SAME", "VALID")
dilation = tvm.testing.parameter(1, 2)
use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
apply_relu = tvm.testing.parameter(True, False, ids=["with_relu", "no_relu"])
run_after_compile = True
def test_conv2d(
self,
target,
dev,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
batch,
in_channel,
channel_multiplier,
kernel,
stride,
padding,
dilation,
ref_data,
):
target = tvm.target.Target(target)
if (
target.kind.name == "cuda"
and in_dtype == "float16"
and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
):
pytest.xfail("CUDA float16 intrinsics not available")
if (
target.kind.name == "vulkan"
and in_dtype == "float16"
and (
not target.attrs.get("supports_float16", False)
or not target.attrs.get("supports_16bit_buffer", False)
)
):
pytest.xfail("Vulkan float16 driver support not available")
# Transform the padding argument from 'str' to 'tuple' to
# match the "workload" tuple in TopHub. Which padding_args to
# use for each layout chosen to reproduce previous behavior.
if dilation == 1:
padding_args = get_pad_tuple(padding, (kernel, kernel))
padding_args_i = [0, 1, 2, 3] if layout == "NCHW" else [0, 1]
padding_args = [padding_args[i] for i in padding_args_i]
else:
padding_args = padding
# placeholder
Input = te.placeholder(input_shape, name="Input", dtype=in_dtype)
Filter = te.placeholder(filter_shape, name="Filter", dtype=in_dtype)
Scale = te.placeholder(scale_shape, name="Scale", dtype=out_dtype)
Shift = te.placeholder(shift_shape, name="Shift", dtype=out_dtype)
if layout == "NCHW":
topi_scale_shift = topi.nn.scale_shift_nchw
fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
elif layout == "NHWC":
topi_scale_shift = topi.nn.scale_shift_nhwc
fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
elif layout == "NCHWc":
topi_scale_shift = topi.nn.scale_shift_nchwc
in_layout = "NCHW{}c".format(input_shape[-1])
out_layout = "NCHW{}c".format(filter_shape[-1])
fcompute_args = (
Input,
Filter,
stride,
padding,
dilation,
in_layout,
out_layout,
out_dtype,
)
with autotvm.tophub.context(target): # load tophub pre-tuned parameters
impl_list = tvm.topi.testing.dispatch(target, _depthwise_conv2d_implement[layout])[:]
if target == "llvm" and layout == "NCHW" and channel_multiplier == 1 and dilation == 1:
impl_list.append(
(topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw)
)
for fcompute, fschedule in impl_list:
with tvm.target.Target(target):
# Declare, build schedule
C = fcompute(*fcompute_args)
if use_scale_shift:
C = topi_scale_shift(C, Scale, Shift)
if apply_relu:
C = topi.nn.relu(C)
s = fschedule(C)
# Build and run
f = tvm.build(s, [Input, Filter, Scale, Shift, C], target)
if self.run_after_compile:
input_np, filter_np, scale_np, shift_np, output_np = ref_data
if "int" in out_dtype:
tol = {"atol": 0, "rtol": 0}
elif out_dtype == "float32":
tol = {"rtol": 1e-4, "atol": 1e-5}
elif out_dtype == "float16":
# A summation in float16 with a single accumulator very
# quickly runs into large rounding errors. At some point,
                        # this tolerance should be schedule-dependent to avoid
# false negatives.
num_values_summed = kernel * kernel
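                        # np.nextafter yields one float16 ULP at the output's maximum,
                        # so atol scales with the worst-case accumulated rounding error.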
gap_size = (
np.nextafter(output_np.max(), np.inf, dtype=output_np.dtype)
- output_np.max()
)
tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
output_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(C.shape), dtype=C.dtype),
dev,
)
f(input_tvm, filter_tvm, scale_tvm, shift_tvm, output_tvm)
tvm.testing.assert_allclose(output_np, output_tvm.numpy(), **tol)
class TestDepthwiseConv2D(BaseDepthwiseConv2D):
"""Test variety of parameters, defined in BaseDepthwiseConv2D. Also
has llvm-specific tests for workload padding."""
@tvm.testing.parametrize_targets("llvm")
def test_workload_padding(
self,
out_dtype,
layout,
input_shape,
filter_shape,
target,
ref_data,
stride,
padding,
dilation,
):
input_np, filter_np, scale_np, shift_np, output_np = ref_data
if layout == "NCHW":
_, _, out_height, out_width = output_np.shape
elif layout == "NHWC":
_, out_height, out_width, _ = output_np.shape
elif layout == "NCHWc":
_, _, out_height, out_width, _ = output_np.shape
Input = te.placeholder(input_shape, name="Input")
Filter = te.placeholder(filter_shape, name="Filter")
wkl = _get_workload(Input, Filter, (stride, stride), padding, dilation, out_dtype, layout)
        # check if tile_ow candidates are the factors of the right output width.
with tvm.target.Target(target):
cfg = autotvm.get_config()
_fallback_schedule(cfg, wkl)
ow_tile = np.prod(cfg["tile_ow"].size)
tvm.testing.assert_allclose(ow_tile, out_width)
class TestDepthwiseConv2D_MobilenetWorkloads(BaseDepthwiseConv2D):
"""Extra tests to verify functionality for workloads used by mobilenet."""
layout = tvm.testing.parameter("NCHW")
batch = tvm.testing.parameter(1)
channel_multiplier = tvm.testing.parameter(1)
kernel = tvm.testing.parameter(3)
padding = tvm.testing.parameter("SAME")
dilation = tvm.testing.parameter(1)
in_channel, in_size, stride = tvm.testing.parameters(
(32, 112, 1),
(64, 112, 2),
(128, 56, 1),
(128, 56, 2),
(256, 28, 1),
(256, 28, 2),
(512, 14, 1),
(512, 14, 2),
(1024, 7, 1),
)
@tvm.testing.parametrize_targets("llvm")
class TestDepthwiseConv2D_NCHWc(BaseDepthwiseConv2D):
"""Tests specific to NCHWc layouts.
Once the implementation supports channel_multiplier>1 and GPU
devices, this class can be merged into TestDepthwiseConv2D.
"""
# depthwise_conv2d_NCHWc currently does not support channel multiplier > 1
layout = tvm.testing.parameter("NCHWc")
(batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
(1, 728, 32, 1, 3, 1),
)
@tvm.testing.parametrize_targets("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu")
class TestDepthwiseConv2DArmCompile(BaseDepthwiseConv2D):
"""Compile-only tests for cross-compiling to ARM."""
layout = tvm.testing.parameter("NHWC", "NCHW")
batch = tvm.testing.parameter(1)
dilation = tvm.testing.parameter(1)
in_dtype, out_dtype = tvm.testing.parameters(("int16", "int32"))
in_channel = tvm.testing.parameter(728)
in_size = tvm.testing.parameter(32)
kernel = tvm.testing.parameter(1)
channel_multiplier = tvm.testing.parameter(1, 3)
stride = tvm.testing.parameter(1)
padding = tvm.testing.parameter("SAME")
use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
run_after_compile = False
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
import tvm.topi.testing
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_input(
batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
in_w = in_h
filter_channel = in_channel
filter_w = filter_h
stride_w = stride_h
padding_w = padding_h
    out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
    out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
out_channel = in_channel * channel_multiplier
ishape = [batch, in_h, in_w, in_channel]
oshape = [batch, out_h, out_w, out_channel]
# placeholder
Out_grad = te.placeholder(oshape, name="Out_grad")
Filter = te.placeholder((filter_h, filter_w, filter_channel, channel_multiplier))
# declare
In_grad = topi.nn.depthwise_conv2d_backward_input_nhwc(
Filter,
Out_grad,
oshape,
ishape,
stride=[stride_h, stride_w],
padding=[padding_h, padding_w],
)
# schedule
schedule = schedule_depthwise_conv2d_backward_input_nhwc(In_grad)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
# build the kernel
f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)
# prepare pod type for test data closure
dtype = Out_grad.dtype
out_grad_shape = get_const_tuple(Out_grad.shape)
filter_shape = get_const_tuple(Filter.shape)
# use memoize to pickle the test data for next time use
@memoize("topi.tests.test_topi_depthwise_conv2d_backward_input.nhwc")
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
dilated_out_grad_np = tvm.topi.testing.dilate_python(
out_grad_np, [1, stride_h, stride_w, 1]
)
# padding params in forward propagation
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
[padding_h, padding_w], (filter_h, filter_w)
)
# padding params in backward propagation
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
bpad_left = filter_w - 1 - fpad_left
bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)
padded_out_grad = np.zeros(
(
batch,
dilated_out_grad_np.shape[1] + bpad_top + bpad_bottom,
dilated_out_grad_np.shape[2] + bpad_left + bpad_right,
out_channel,
)
)
padded_out_grad[
:,
bpad_top : dilated_out_grad_np.shape[1] + bpad_top,
bpad_left : dilated_out_grad_np.shape[2] + bpad_left,
:,
] = dilated_out_grad_np
in_grad_np = np.zeros((batch, in_h, in_w, in_channel))
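            # The input gradient is the "full" convolution of the padded, dilated
            # output gradient with the 180-degree-rotated filter; convolve2d flips
            # its kernel, so the filter can be passed in unrotated here.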
for b in range(batch):
for c in range(in_channel):
for m in range(channel_multiplier):
in_grad_np[b, :, :, c] += signal.convolve2d(
padded_out_grad[b, :, :, c * channel_multiplier + m],
filter_np[:, :, c, m],
mode="valid",
)[0:in_h, 0:in_w]
return (out_grad_np, filter_np, in_grad_np)
(out_grad_np, filter_np, in_grad_np) = get_ref_data()
out_grad_tvm = tvm.nd.array(out_grad_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
in_grad_tvm = tvm.nd.array(np.zeros(shape=ishape, dtype=dtype), dev)
# launch the kernel
timer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean
tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.numpy(), rtol=1e-5)
check_device("opencl")
check_device("cuda")
check_device("metal")
check_device("rocm")
check_device("vulkan")
check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_input_nhwc():
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 0)
if __name__ == "__main__":
test_topi_depthwise_conv2d_backward_input_nhwc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_weight(
batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
in_w = in_h
filter_channel = in_channel
filter_w = filter_h
stride_w = stride_h
padding_w = padding_h
out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
out_channel = in_channel * channel_multiplier
oshape = [batch, out_h, out_w, out_channel]
fshape = [filter_h, filter_w, in_channel, channel_multiplier]
# placeholder
Out_grad = te.placeholder(oshape, name="Out_grad")
    Input = te.placeholder((batch, in_h, in_w, in_channel), name="Input")
# declare
Weight_grad = topi.nn.depthwise_conv2d_backward_weight_nhwc(
Input, Out_grad, oshape, fshape, stride=[stride_h, stride_w], padding=[padding_h, padding_w]
)
# schedule
schedule = schedule_depthwise_conv2d_backward_weight_nhwc(Weight_grad)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
# build the kernel
f = tvm.build(schedule, [Input, Out_grad, Weight_grad], device)
# prepare pod type for test data closure
dtype = Out_grad.dtype
out_grad_shape = get_const_tuple(Out_grad.shape)
in_shape = get_const_tuple(Input.shape)
# use memoize to pickle the test data for next time use
@memoize("topi.tests.test_topi_depthwise_conv2d_backward_weight.nhwc")
def get_ref_data():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
input_np = np.random.uniform(size=in_shape).astype(dtype)
dilated_out_grad_np = tvm.topi.testing.dilate_python(
out_grad_np, [1, stride_h, stride_w, 1]
)
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
[padding_h, padding_w], (filter_h, filter_w)
)
padded_input_np = np.zeros(
(batch, in_h + pad_top + pad_bottom, in_w + pad_left + pad_right, in_channel)
)
padded_input_np[:, pad_top : in_h + pad_top, pad_left : in_w + pad_left, :] = input_np
weight_grad_np = np.zeros((filter_h, filter_w, in_channel, channel_multiplier))
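            # scipy's convolve2d flips its kernel, so rotating the dilated output
            # gradient by 180 degrees recovers the cross-correlation that the
            # depthwise weight gradient requires.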
for c in range(in_channel):
for m in range(channel_multiplier):
for b in range(batch):
weight_grad_np[:, :, c, m] += signal.convolve2d(
padded_input_np[b, :, :, c],
np.rot90(
dilated_out_grad_np[
b, :, :, c * channel_multiplier + m % channel_multiplier
],
2,
),
mode="valid",
)[0:filter_h, 0:filter_w]
return (out_grad_np, input_np, weight_grad_np)
(out_grad_np, input_np, weight_grad_np) = get_ref_data()
out_grad_tvm = tvm.nd.array(out_grad_np, dev)
input_tvm = tvm.nd.array(input_np, dev)
weight_grad_tvm = tvm.nd.array(np.zeros(shape=fshape, dtype=dtype), dev)
# launch the kernel
timer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = timer(input_tvm, out_grad_tvm, weight_grad_tvm).mean
tvm.testing.assert_allclose(weight_grad_np, weight_grad_tvm.numpy(), rtol=1e-4)
check_device("opencl")
check_device("cuda")
check_device("metal")
check_device("rocm")
check_device("vulkan")
check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_weight_nhwc():
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 1, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 1, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 1, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 1, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 2, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 2, 1)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 2, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 2, 2)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 5, 1, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 3, 2, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 2, 3, 2, 0)
verify_depthwise_conv2d_back_weight(16, 256, 56, 1, 5, 2, 0)
verify_depthwise_conv2d_back_weight(15, 256, 56, 2, 5, 2, 0)
if __name__ == "__main__":
test_topi_depthwise_conv2d_backward_weight_nhwc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_dilate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
def test_dilate():
target = "llvm"
dev = tvm.cpu(0)
def _test_dilate(input_size, strides, dilation_value=None):
Input = te.placeholder((input_size))
if dilation_value is None:
Output = topi.nn.dilate(Input, strides)
else:
Output = topi.nn.dilate(Input, strides, dilation_value)
schedule = te.create_schedule(Output.op)
input_np = np.random.uniform(size=input_size).astype(Input.dtype)
if dilation_value is None:
output_np = tvm.topi.testing.dilate_python(input_np, strides)
else:
output_np = tvm.topi.testing.dilate_python(input_np, strides, dilation_value)
input_tvm = tvm.nd.array(input_np, device=dev)
output_size = topi.utils.get_const_tuple(Output.shape)
output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), device=dev)
f = tvm.build(schedule, [Input, Output], target)
f(input_tvm, output_tvm)
tvm.testing.assert_allclose(output_tvm.numpy(), output_np, rtol=1e-5)
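    # topi.nn.dilate inserts (stride - 1) zeros (or dilation_value) between elements,
    # so e.g. strides (2, 2) map a (32, 32) input to a (63, 63) output.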
_test_dilate((32,), (2,))
_test_dilate((32, 32), (2, 2))
_test_dilate((1, 3, 32, 32), (1, 1, 1, 1))
_test_dilate((1, 3, 32, 32), (2, 2, 2, 2))
_test_dilate((1, 32, 32, 3, 3), (1, 1, 1, 1, 1))
_test_dilate((1, 32, 32, 3, 3), (2, 2, 2, 2, 2))
_test_dilate((1, 32, 32, 32, 3, 3), (1, 1, 1, 2, 2, 2))
_test_dilate((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1))
_test_dilate((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1), 1.0)
if __name__ == "__main__":
test_dilate()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, *args):
"""Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
dev = tvm.cpu(0)
pls = [] # placeholders
vals_nd = [] # initial values
for i, arg in enumerate(args):
pls.append(te.placeholder(arg.shape, name="pl" + str(i)))
vals_nd.append(tvm.nd.array(arg, dev))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
s = te.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd + [out_nd]))
return out_nd.numpy()
def verify_einsum(subscripts, shapes):
ops = []
for shape in shapes:
tmp = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(np.float32)
ops.append(tmp)
c1 = np.einsum(subscripts, *ops)
if len(ops) == 1:
c2 = with_tvm(lambda A: topi.einsum(subscripts, A), *ops)
elif len(ops) == 2:
c2 = with_tvm(lambda A, B: topi.einsum(subscripts, A, B), *ops)
elif len(ops) == 3:
c2 = with_tvm(lambda A, B, C: topi.einsum(subscripts, A, B, C), *ops)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize(
"equation,inputs",
[
("ii", [(5, 5)]),
("ii->i", [(5, 5)]),
("ij->i", [(5, 5)]),
("...j->...", [(5, 5)]),
("...j, j", [(5, 5), (5,)]),
("..., ...", [(), (2, 3)]),
("ijk, jil->kl", [(3, 4, 5), (4, 3, 2)]),
("ij, ij -> i", [(1, 4), (2, 4)]),
("...ij, ...jk -> ...ik", [(1, 4), (4, 2)]),
("...ij, ...ik -> ...jk", [(1, 1, 1, 4), (1, 1, 1, 3)]),
("...ik, ...jk, ...hk -> i...jh", [(3, 4, 4), (1, 5, 3, 8, 4), (2, 5, 3, 6, 4)]),
("ij,jk->ik", [(2, 3), (3, 4)]),
("ij,jk,km->im", [(2, 3), (3, 4), (4, 5)]),
],
)
def test_einsum(equation, inputs):
verify_einsum(equation, inputs)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
from common import Int8Fallback
import tvm.testing
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]o[x]i
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 1, 3))
return kernel
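# Illustrative sketch (not part of the original test): the layout helpers only
# reshape/transpose, so for hypothetical block sizes bn=ic_bn=oc_bn=4 the
# shapes change as below while the element count stays the same.
def _example_transform_shapes():
    data = np.zeros((1, 8, 2, 2), dtype="float32")  # NCHW
    assert _transform_data(data, bn=4).shape == (1, 2, 2, 2, 4)  # NCHW4c
    kernel = np.zeros((8, 8, 3, 3), dtype="float32")  # OIHW
    assert _transform_kernel(kernel, ic_bn=4, oc_bn=4).shape == (2, 2, 3, 3, 4, 4)  # OIHW4o4i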
_group_conv2d_nchw_implement = {
"generic": (topi.nn.group_conv2d_nchw, topi.generic.schedule_group_conv2d_nchw),
"gpu": (topi.cuda.group_conv2d_nchw, topi.cuda.schedule_group_conv2d_nchw),
}
_group_conv2d_nhwc_implement = {
"generic": (topi.nn.group_conv2d_nhwc, topi.generic.schedule_group_conv2d_nhwc),
}
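# Note (added for clarity): tvm.topi.testing.dispatch looks up the current
# target's keys in these tables and falls back to the "generic" entry, so the
# llvm target gets the generic compute/schedule pair while cuda-like targets
# pick up the "gpu" entry in the NCHW table.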
def verify_group_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["llvm", "cuda"]:
check_target(target)
oc_block_factor = 4
ic_block_factor = 4
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder(
(batch, in_channel // ic_block_factor, in_height, in_width, ic_block_factor),
name="A",
dtype="int8",
)
W = te.placeholder(
(
num_filter // oc_block_factor,
(in_channel // groups) // ic_block_factor,
kernel,
kernel,
oc_block_factor,
ic_block_factor,
),
name="W",
dtype="int8",
)
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.randint(
low=-128, high=127, size=(batch, in_channel, in_height, in_width)
).astype(dtype)
w_np = np.random.randint(
low=-128, high=128, size=(num_filter, in_channel // groups, kernel, kernel)
).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return (
_transform_data(a_np, ic_block_factor),
_transform_kernel(w_np, ic_block_factor, oc_block_factor),
b_np,
c_np,
)
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["cuda"]:
check_target(target)
def verify_group_conv2d_nchw_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="int8")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W", dtype="int8")
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw_int8")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["cuda"]:
check_target(target)
def verify_group_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
W = te.placeholder((kernel, kernel, in_channel // groups, num_filter), name="W")
bias = te.placeholder((1, 1, num_filter), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nhwc_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_nchw():
# ResNeXt-50 workload
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
# dilation
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_nchw(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_NCHWc_int8():
with Int8Fallback():
# ResNeXt-50 workload
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_NCHWc_int8(
1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True
)
# dilation
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_NCHWc_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_nchw_int8():
with Int8Fallback():
# ResNeXt-50 workload
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
# dilation
verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_nchw_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
def test_group_conv2d_nhwc():
# ResNeXt-50 workload
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nhwc(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
# dilation
verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_nhwc(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nhwc(9, 128, 56, 128, 3, 1, 1, 1, 32)
if __name__ == "__main__":
test_group_conv2d_nchw()
test_group_conv2d_NCHWc_int8()
test_group_conv2d_nchw_int8()
test_group_conv2d_nhwc()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import pytest
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
    # OIHW -> OIHW[x]i[x]o4i (the innermost 4-wide axis packs int8 values for the dot-product intrinsic)
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(
kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn // 4, kh, kw, 4)
)
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1, 6))
return kernel
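# Illustrative sketch (not part of the original test): with hypothetical
# ic_bn=8 and oc_bn=16 the 4-D OIHW kernel becomes a 7-D blocked layout whose
# innermost axis holds 4 int8 values for the dot-product intrinsic.
def _example_int8_kernel_layout():
    kernel = np.zeros((32, 16, 3, 3), dtype="int8")  # OIHW
    blocked = _transform_kernel(kernel, ic_bn=8, oc_bn=16)
    assert blocked.shape == (2, 2, 3, 3, 2, 16, 4)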
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
groups,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
add_bias=False,
add_relu=False,
dtype="int32",
):
assert dilation == 1, "conv2d_NCHWc does not support dilation for now."
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, groups, in_size, num_filter, kernel, stride, padding)
)
in_height = in_width = in_size
# for testing functionality,
# we choose arbitrary block size that can divide the channel,
# regardless of the performance.
oc_block = 1
for bn in range(16, 0, -1):
if num_filter % bn == 0:
oc_block = bn
break
ic_block = 8
autotvm.GLOBAL_SCOPE.silent = True
A = te.placeholder(
(batch, in_channel // ic_block, in_height, in_width, ic_block), name="A", dtype="uint8"
)
W = te.placeholder(
(
num_filter // oc_block,
in_channel // ic_block // groups,
kernel,
kernel,
ic_block // 4,
oc_block,
4,
),
name="W",
dtype="int8",
)
@memoize("topi.tests.test_topi_conv2d_NCHWc_int8.verify_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
w_np = np.random.uniform(size=(num_filter, in_channel // groups, kernel, kernel)).astype(
"int8"
)
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
return (
_transform_data(a_np, ic_block),
_transform_kernel(w_np, ic_block, oc_block),
_transform_data(c_np, oc_block),
)
a_np, w_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(dev):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.x86.conv2d_NCHWc(
A,
W,
(stride, stride),
(padding, padding),
(dilation, dilation),
"NCHW%dc" % ic_block,
"NCHW%dc" % oc_block,
dtype,
)
s = topi.x86.schedule_conv2d_NCHWc([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
# print(tvm.lower(s, [A, W, C], simple_mode=True))
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
# for device in ["llvm"]:
for device in ["llvm -mcpu=skylake-avx512"]:
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device)
autotvm.GLOBAL_SCOPE.silent = False
@tvm.testing.uses_gpu
@pytest.mark.skip
def test_conv2d_NCHWc():
# ResNet50 workloads
verify_group_conv2d_NCHWc_int8(1, 256, 32, 224, 64, 7, 2, 3)
if __name__ == "__main__":
# The test requires Skylake and newer Intel machines to generate the correct
# instruction. This test directly calls the topi operator, requiring correct
# kernel shape. For older generation of Intel machines, the kernel needs to
# be 6D. This test tests 7D kernel, that can only work on Skylake+ machines.
# So, disabling the test.
# test_conv2d_NCHWc()
pass
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_group_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group transpose convolution."""
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
_group_conv2d_nchw_implement = {
"generic": (
topi.nn.group_conv2d_transpose_nchw,
topi.generic.schedule_group_conv2d_transpose_nchw,
),
"cuda": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
}
def verify_group_conv2d_transpose_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
output_padding,
groups,
):
print(
"Workload: (%d, %d, %s, %d, %s, %s, %s, %s, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding, groups)
)
in_height, in_width = in_size
kernel_height, kernel_width = kernel
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((in_channel, num_filter // groups, kernel_height, kernel_width), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d_transpose.verify_group_conv2d_transpose_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np, w_np, stride, padding, output_padding, groups
).astype(dtype)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_target(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dtype, output_padding, groups)
s = fschedule([C])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
func = tvm.build(
s,
[A, W, C],
target,
name="group_conv2d_transpose_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size[0],
in_size[1],
num_filter,
kernel[0],
kernel[1],
stride[0],
stride[1],
padding[0],
padding[1],
padding[2],
padding[3],
output_padding[0],
output_padding[1],
groups,
),
)
func(a, w, c)
c = c.numpy()
for measurement, reference in zip(c, c_np):
tvm.testing.assert_allclose(measurement, reference, rtol=1e-5)
for target in ["llvm", "cuda"]:
check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_transpose_nchw():
verify_group_conv2d_transpose_nchw(1, 4, (32, 32), 4, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 2)
verify_group_conv2d_transpose_nchw(1, 9, (32, 32), 9, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 3)
verify_group_conv2d_transpose_nchw(1, 4, (32, 32), 16, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0), 4)
verify_group_conv2d_transpose_nchw(
1, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 2
)
verify_group_conv2d_transpose_nchw(
1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 16
)
verify_group_conv2d_transpose_nchw(
1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0), 16
)
verify_group_conv2d_transpose_nchw(
1, 64, (64, 64), 64, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 64
)
verify_group_conv2d_transpose_nchw(
1, 128, (32, 32), 128, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 128
)
verify_group_conv2d_transpose_nchw(
1, 256, (16, 16), 256, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 256
)
verify_group_conv2d_transpose_nchw(1, 1, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0), 1)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(
1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0), 1
)
verify_group_conv2d_transpose_nchw(1, 48, (64, 64), 12, (4, 4), (2, 2), (1, 1, 1, 1), (0, 0), 1)
if __name__ == "__main__":
test_group_conv2d_transpose_nchw()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for bilinear scale """
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
def verify_resize2d(
batch,
in_channel,
in_height,
in_width,
out_height,
out_width,
layout="NCHW",
coord_trans="align_corners",
method="linear",
):
if layout == "NCHW":
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="float32")
dtype = A.dtype
out_shape = (batch, in_channel, out_height, out_width)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif layout == "NHWC":
A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="float32")
dtype = A.dtype
out_shape = (batch, out_height, out_width, in_channel)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.image.resize2d(
A,
[0.0] * 4,
(out_height, out_width),
layout=layout,
coordinate_transformation_mode=coord_trans,
method=method,
)
scale_h = out_height / in_height
scale_w = out_width / in_width
b_np = tvm.topi.testing.resize2d_python(a_np, (scale_h, scale_w), layout, method, coord_trans)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize2d():
# Scale NCHW
verify_resize2d(4, 16, 32, 32, 50, 50, "NCHW")
# Scale NCHW + Align Corners
verify_resize2d(6, 32, 64, 64, 20, 20, "NCHW")
# Scale NHWC
verify_resize2d(4, 16, 32, 32, 50, 50, "NHWC")
# Scale NHWC + Align Corners
verify_resize2d(6, 32, 64, 64, 20, 20, "NHWC")
for layout in ["NCHW", "NHWC"]:
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 64, 50, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 96, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 96, 96, layout, "asymmetric", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "align_corners", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="nearest_neighbor")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="linear")
verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="linear")
def verify_resize3d(
batch,
in_channel,
in_depth,
in_height,
in_width,
out_depth,
out_height,
out_width,
layout="NCDHW",
coordinate_transformation_mode="asymmetric",
method="linear",
):
if layout == "NCDHW":
A = te.placeholder(
(batch, in_channel, in_depth, in_height, in_width), name="A", dtype="float32"
)
dtype = A.dtype
out_shape = (batch, in_channel, out_depth, out_height, out_width)
a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
dtype
)
elif layout == "NDHWC":
A = te.placeholder(
(batch, in_depth, in_height, in_width, in_channel), name="A", dtype="float32"
)
dtype = A.dtype
out_shape = (batch, out_depth, out_height, out_width, in_channel)
a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
dtype
)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.image.resize3d(
A,
[0.0] * 6,
(out_depth, out_height, out_width),
layout=layout,
coordinate_transformation_mode=coordinate_transformation_mode,
method=method,
)
scale_d = out_depth / in_depth
scale_h = out_height / in_height
scale_w = out_width / in_width
b_np = tvm.topi.testing.resize3d_python(
a_np, (scale_d, scale_h, scale_w), layout, method, coordinate_transformation_mode
)
def check_target(target, dev):
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize3d():
# Trilinear
for method in ["nearest_neighbor", "linear"]:
for coord_trans in ["asymmetric", "align_corners", "half_pixel"]:
for layout in ["NCDHW", "NDHWC"]:
verify_resize3d(3, 16, 32, 32, 32, 10, 10, 10, layout, coord_trans, method)
@tvm.testing.uses_gpu
def test_crop_and_resize():
def verify_crop_and_resize(
image_shape,
np_boxes,
np_box_indices,
np_crop_size,
layout="NHWC",
method="bilinear",
extrapolation_value=0.0,
):
images = te.placeholder(image_shape, name="images", dtype="float32")
np_images = np.random.uniform(size=image_shape).astype("float32")
boxes = te.placeholder(np_boxes.shape, name="boxes", dtype="float32")
box_ind = te.placeholder(np_box_indices.shape, name="box_ind", dtype="int32")
batch = len(np_box_indices)
target_height, target_width = np_crop_size[0], np_crop_size[1]
if layout == "NHWC":
channel = image_shape[3]
out_shape = (batch, target_height, target_width, channel)
elif layout == "NCHW":
channel = image_shape[1]
out_shape = (batch, channel, target_height, target_width)
else:
raise NotImplementedError("Layout {} is not supported.".format(layout))
out = topi.image.crop_and_resize(
images,
boxes,
box_ind,
np_crop_size,
layout=layout,
method=method,
extrapolation_value=extrapolation_value,
)
baseline_np = tvm.topi.testing.crop_and_resize_python(
np_images, np_boxes, np_box_indices, np_crop_size, layout, method, extrapolation_value
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_images = tvm.nd.array(np_images, dev)
tvm_boxes = tvm.nd.array(np_boxes, dev)
tvm_indices = tvm.nd.array(np_box_indices, dev)
tvm_out = tvm.nd.array(np.zeros(out_shape, dtype="float32"), dev)
f = tvm.build(s, [images, boxes, box_ind, out], target, name="crop_and_resize")
f(tvm_images, tvm_boxes, tvm_indices, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), baseline_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
boxes_1 = np.array([[0.2, 0.3, 0.7, 0.9]], dtype="float32")
boxes_2 = np.array([[0.2, 0.3, 0.7, 0.9], [0, 0.1, 0.8, 1]], dtype="float32")
indices_1 = np.array([0], dtype="int32")
indices_2 = np.array([1, 0], dtype="int32")
size_1 = (7, 11)
size_2 = (90, 60)
verify_crop_and_resize((1, 255, 255, 3), boxes_1, indices_1, size_1, layout="NHWC")
verify_crop_and_resize(
(10, 224, 224, 5), boxes_2, indices_2, size_2, extrapolation_value=0.3, layout="NHWC"
)
verify_crop_and_resize((1, 100, 100, 3), boxes_1, indices_1, size_1, method="nearest_neighbor")
verify_crop_and_resize((1, 3, 224, 224), boxes_1, indices_1, size_1, layout="NCHW")
@tvm.testing.uses_gpu
def test_affine_grid():
def verify_affine_grid(num_batch, target_shape):
dtype = "float32"
data_shape = (num_batch, 2, 3)
data = te.placeholder(data_shape, dtype=dtype)
out = topi.image.affine_grid(data, target_shape)
@memoize("topi.tests.test_affine_grid.verify_affine_grid")
def get_ref_data():
data_np = np.random.uniform(size=data_shape).astype(dtype)
out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
return data_np, out_np
data_np, out_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_data = tvm.nd.array(data_np, dev)
tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
verify_affine_grid(1, (16, 32))
verify_affine_grid(4, (16, 32))
@tvm.testing.uses_gpu
def test_grid_sample():
def verify_grid_sample(
data_shape,
grid_shape,
method="bilinear",
layout="NCHW",
padding_mode="zeros",
align_corners=True,
):
dtype = "float32"
data = te.placeholder(data_shape, dtype=dtype)
grid = te.placeholder(grid_shape, dtype=dtype)
out = topi.image.grid_sample(data, grid, method, layout, padding_mode, align_corners)
@memoize("topi.tests.test_grid_sample.verify_grid_sample")
def get_ref_data():
data_np = np.random.uniform(size=data_shape).astype(dtype)
# allow grid values to be out-of-bound
grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
out_np = tvm.topi.testing.grid_sample_python(
data_np, grid_np, method, layout, padding_mode, align_corners
)
return data_np, grid_np, out_np
data_np, grid_np, out_np = get_ref_data()
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
tvm_data = tvm.nd.array(data_np, dev)
tvm_grid = tvm.nd.array(grid_np, dev)
tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
f = tvm.build(s, [data, grid, out], target)
f(tvm_data, tvm_grid, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
methods = ["nearest", "bilinear", "bicubic"]
padding_modes = ["zeros", "border", "reflection"]
align_corners = [True, False]
data_2D_shape = (4, 4, 8, 8)
grid_2D_shape = (4, 2, 16, 16)
layout_2D = "NCHW"
# choosing smaller sizes to be testable on weaker GPUs
data_3D_shape = (4, 4, 4, 4, 4)
grid_3D_shape = (4, 3, 8, 8, 8)
layout_3D = "NCDHW"
for _method in methods:
for _padding in padding_modes:
for _align in align_corners:
verify_grid_sample(
data_2D_shape, grid_2D_shape, _method, layout_2D, _padding, _align
)
# 3D "bicubic"(tricubic) is not supported in pytorch
if _method != "bicubic":
verify_grid_sample(
data_3D_shape, grid_3D_shape, _method, layout_3D, _padding, _align
)
if __name__ == "__main__":
test_resize2d()
test_resize3d()
test_crop_and_resize()
test_affine_grid()
test_grid_sample()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_layer_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for layer_norm."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
_layer_norm_schedule = {
"generic": topi.generic.schedule_injective,
}
# only test on llvm because schedule is missing
@tvm.testing.parametrize_targets("llvm")
@pytest.mark.parametrize("shape,axis", [([4, 16], (1,)), ([4, 16, 16], (1, 2))])
def test_layer_norm(target, dev, shape, axis, epsilon=1e-5, dtype="float32", rtol=1e-5, atol=1e-5):
data = te.placeholder(shape, dtype=dtype, name="data")
scale_shape = [shape[dim] for dim in axis]
gamma = te.placeholder(scale_shape, dtype=dtype, name="gamma")
beta = te.placeholder(scale_shape, dtype=dtype, name="beta")
    B = topi.nn.layer_norm(data, gamma, beta, axis, epsilon)
data_np = np.random.uniform(size=shape).astype(dtype)
gamma_np = np.random.uniform(size=scale_shape).astype(dtype)
beta_np = np.random.uniform(size=scale_shape).astype(dtype)
    b_np = tvm.topi.testing.layer_norm_python(data_np, gamma_np, beta_np, axis, epsilon)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _layer_norm_schedule)
s = s_func([B])
data_tvm = tvm.nd.array(data_np, dev)
gamma_tvm = tvm.nd.array(gamma_np, dev)
beta_tvm = tvm.nd.array(beta_np, dev)
b_tvm = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [data, gamma, beta, B], target)
f(data_tvm, gamma_tvm, beta_tvm, b_tvm)
        tvm.testing.assert_allclose(b_tvm.numpy(), b_np, rtol=rtol, atol=atol)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_loss.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for loss operators."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import tvm.testing
prediction_shape, reduction, ignore_index, dtype = tvm.testing.parameters(
((10, 5), "mean", -100, "float32"),
((10, 5, 2, 2), "mean", -100, "float32"),
((10, 5), "sum", -100, "float32"),
((10, 5), "none", -100, "float32"),
((10, 5), "mean", 3, "float32"),
((10, 5), "mean", -100, "float64"),
)
def test_nll_loss(target, dev, prediction_shape, reduction, ignore_index, dtype):
C = prediction_shape[1]
target_shape = prediction_shape[:1] + prediction_shape[2:]
predictions = te.placeholder(shape=prediction_shape, name="predictions", dtype=dtype)
targets = te.placeholder(shape=target_shape, name="targets", dtype="int32")
weights = te.placeholder(shape=(C,), name="weights", dtype=dtype)
nll_loss_result = topi.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
with tvm.target.Target(target):
fschedule = tvm.topi.testing.get_reduce_schedule(target)
s = fschedule([nll_loss_result])
fn = tvm.build(s, [predictions, targets, weights, nll_loss_result], target, name="nll_loss")
predictions_npy = np.random.uniform(size=prediction_shape).astype(dtype)
targets_npy = np.random.randint(0, C, target_shape).astype("int32")
weights_npy = np.random.uniform(size=(C,)).astype(dtype)
out_npy = tvm.topi.testing.nll_loss(
predictions_npy, targets_npy, weights_npy, reduction, ignore_index
)
predictions_nd = tvm.nd.array(predictions_npy, dev)
targets_nd = tvm.nd.array(targets_npy, dev)
weights_nd = tvm.nd.array(weights_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(nll_loss_result.dtype), dev)
fn(predictions_nd, targets_nd, weights_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_lrn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for local response normalization"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
_lrn_schedule = {
"generic": topi.generic.schedule_lrn,
"gpu": topi.cuda.schedule_lrn,
"opencl": topi.cuda.schedule_lrn,
"metal": topi.cuda.schedule_lrn,
"rocm": topi.cuda.schedule_lrn,
"vulkan": topi.cuda.schedule_lrn,
"nvptx": topi.cuda.schedule_lrn,
}
def verify_lrn(shape, size, axis, bias, alpha, beta, dtype="float32", rtol=1e-5, atol=1e-5):
A = te.placeholder(shape, dtype=dtype, name="A")
B = topi.nn.lrn(A, size, axis, alpha, beta, bias)
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
def check_device(device):
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
s = s_func([B])
dev = tvm.device(device, 0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=rtol, atol=atol)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
check_device(device)
@tvm.testing.uses_gpu
def test_lrn():
verify_lrn((1, 3, 5, 5), 3, 1, 1.0, 1.0, 0.5)
verify_lrn((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5)
verify_lrn((1, 3, 20, 20), 3, 1, 2.0, 1.0, 0.75)
verify_lrn((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5, dtype="float16", rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test_lrn()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Test code for LSTM."""
import numpy as np
import tvm
from tvm import te, topi
import tvm.testing
import tvm.topi.testing
def verify_lstm(
target,
dev,
seq_len,
batch_size,
in_dim,
hidden_dim,
proj_dim=0,
bias=True,
zero_init=True,
peephole=False,
reverse=False,
weight_layout="IFGO",
):
out_dim = proj_dim if proj_dim > 0 else hidden_dim
def rand(*shape):
sqrt_k = np.sqrt(1 / hidden_dim)
return np.random.uniform(-sqrt_k, sqrt_k, size=shape).astype("float32")
def get_ref_data():
Xs = np.random.normal(size=(seq_len, batch_size, in_dim)).astype("float32")
Wi = rand(4 * hidden_dim, in_dim)
Wh = rand(4 * hidden_dim, out_dim)
Bi = None
Bh = None
h0 = None
c0 = None
proj = None
p_i = None
p_f = None
p_o = None
if bias:
Bi = rand(4 * hidden_dim)
Bh = rand(4 * hidden_dim)
if not zero_init:
h0 = np.random.normal(size=(batch_size, out_dim)).astype("float32")
c0 = np.random.normal(size=(batch_size, hidden_dim)).astype("float32")
if proj_dim > 0:
proj = rand(proj_dim, hidden_dim)
if peephole:
p_i, p_f, p_o = [rand(batch_size, hidden_dim) for _ in range(3)]
hs, cs = tvm.topi.testing.lstm_python(
Xs,
Wi,
Wh,
Bi=Bi,
Bh=Bh,
h_init=h0,
c_init=c0,
proj=proj,
p_i=p_i,
p_f=p_f,
p_o=p_o,
reverse=reverse,
weight_layout=weight_layout,
)
return [Xs, Wi, Wh, Bi, Bh, h0, c0, proj, p_i, p_f, p_o], [hs, cs]
args_np, (hs_np, cs_np) = get_ref_data()
args = [te.placeholder(a.shape, "float32") if a is not None else a for a in args_np]
real_args = [a for a in args if a is not None]
hs, cs = topi.nn.lstm(*args, reverse=reverse, weight_layout=weight_layout)
with tvm.target.Target(target):
sch = topi.generic.schedule_lstm([hs, cs])
func = tvm.build(sch, real_args + [hs, cs], target=target)
args_nd = [tvm.nd.array(a, dev) for a in args_np if a is not None]
hs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, out_dim), "float32"), dev)
cs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, hidden_dim), "float32"), dev)
func(*args_nd, hs_nd, cs_nd)
tvm.testing.assert_allclose(hs_nd.numpy(), hs_np, rtol=1e-4)
tvm.testing.assert_allclose(cs_nd.numpy(), cs_np, rtol=1e-4)
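# Note (added for clarity): the positional calls below follow the signature
# (target, dev, seq_len, batch_size, in_dim, hidden_dim, proj_dim, bias,
#  zero_init, peephole, reverse, weight_layout).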
def test_lstm():
verify_lstm(
"llvm",
tvm.cpu(0),
1,
1,
1,
1,
0,
True,
True,
False,
False,
"IFGO",
)
verify_lstm(
"llvm",
tvm.cpu(0),
8,
4,
8,
16,
0,
True,
False,
False,
False,
"IFGO",
)
def test_lstm_proj():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 8, True, True, False, False, "IFGO")
def test_lstm_peephole():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, True, False, "IFGO")
def test_lstm_reverse():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, True, "IFGO")
def test_lstm_weight_layout_iofg():
# IOFG is used by ONNX, while IFGO is used by PyTorch
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 0, True, True, False, False, "IOFG")
def test_lstm_assorted():
verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, 16, True, False, True, True, "OIGF")
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import scipy
from scipy import special
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi import utils
def test_util():
x = tvm.tir.const(100, "int32")
assert utils.get_const_int(x) == 100
assert utils.get_const_tuple((x, x)) == (100, 100)
ewise_operations = {
"floor": {"topi": topi.floor, "ref": np.floor, "input_range": (-100, 100)},
"ceil": {"topi": topi.ceil, "ref": np.ceil, "input_range": (-100, 100)},
"sign": {
"topi": topi.sign,
"ref": np.sign,
"input_range": (-100, 100),
"skip_name_check": True,
},
"trunc": {"topi": topi.trunc, "ref": np.trunc, "input_range": (-100, 100)},
"fabs": {"topi": topi.abs, "ref": np.fabs, "input_range": (-100, 100)},
"round": {"topi": topi.round, "ref": np.round, "input_range": (-100, 100), "check_round": True},
"exp": {"topi": topi.exp, "ref": np.exp, "input_range": (-1, 1)},
"tanh": {
"topi": topi.tanh,
"ref": np.tanh,
"input_range": (-10, 10),
"shape": (128, 128),
"dtype": ["float32", "float64"],
},
"sigmoid": {
"topi": topi.sigmoid,
"ref": lambda x: 1 / (1 + np.exp(-x)),
"input_range": (-1, 1),
},
"log": {"topi": topi.log, "ref": np.log, "input_range": (0, 100)},
"sqrt": {"topi": topi.sqrt, "ref": np.sqrt, "input_range": (0, 100)},
"rsqrt": {
"topi": topi.rsqrt,
"ref": lambda x: np.ones_like(x) / np.sqrt(x),
"input_range": (0, 100),
"skip_name_check": True,
},
"cos": {"topi": topi.cos, "ref": np.cos, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
"tan": {
"topi": topi.tan,
"ref": np.tan,
"input_range": (-2.0 * np.pi, 2.0 * np.pi),
"dtypes": ["float32", "float64"],
},
"sin": {"topi": topi.sin, "ref": np.sin, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
"erf": {"topi": topi.erf, "ref": scipy.special.erf, "input_range": (-0.1, 0.1)},
"isnan": {
"topi": topi.isnan,
"ref": np.isnan,
"input_range": (-1, 1),
"replace_with_nan": True,
},
"isfinite": {
"topi": topi.isfinite,
"ref": np.isfinite,
"input_range": (0, 1),
"shape": (8, 8),
"skip_name_check": True,
"replace_with_nan": True,
"replace_with_inf": True,
"dtypes": ["float32", "float64", "int32", "int16"],
},
"isinf": {
"topi": topi.isinf,
"ref": np.isinf,
"input_range": (0, 1),
"shape": (8, 8),
"skip_name_check": True,
"replace_with_nan": True,
"replace_with_inf": True,
"dtypes": ["float32", "float64", "int32", "int16"],
},
"fast_exp": {
"topi": topi.fast_exp,
"ref": np.exp,
"skip_name_check": True,
"input_range": (-88, 88),
"step": 0.01,
},
"fast_erf": {
"topi": topi.fast_erf,
"ref": scipy.special.erf,
"skip_name_check": True,
"input_range": (-10, 10),
"step": 0.01,
"dtypes": ["float32", "float16"],
"cast_output": True,
"tolerance": [1e-5, 1e-1],
},
"fast_tanh": {
"topi": topi.fast_tanh,
"ref": np.tanh,
"skip_name_check": True,
"input_range": (-10, 10),
"step": 0.01,
},
}
topi_name, dtype, tolerance = tvm.testing.parameters(
*[
(name, dtype, config.get("tolerance", [1e-5] * len(dtype))[i])
for name, config in ewise_operations.items()
for i, dtype in enumerate(config.get("dtypes", ["float32"]))
]
)
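# Note (added for clarity): the comprehension above emits one
# (name, dtype, tolerance) triple per dtype of each entry, e.g. "fast_erf"
# expands to ("fast_erf", "float32", 1e-5) and ("fast_erf", "float16", 1e-1),
# while entries without a "dtypes" key default to a single float32 case.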
@tvm.testing.fixture(cache_return_value=True)
def ewise_ref_data(topi_name, dtype):
config = ewise_operations[topi_name]
input_range = config["input_range"]
shape = config.get("shape", (20, 3))
a_np = np.random.uniform(*input_range, size=shape).astype(dtype)
if dtype.startswith("float"):
if config.get("replace_with_nan", False):
a_np.ravel()[np.random.choice(a_np.size, int(a_np.size * 0.5), replace=False)] = np.nan
if config.get("replace_with_inf", False):
a_np.ravel()[
np.random.choice(a_np.size, int(a_np.size * 0.5), replace=False)
            ] = np.inf
# avoid round check too close to boundary
if topi_name == "round":
a_np += ((np.abs(np.fmod(a_np, 1)) - 0.5) < 1e-6) * 1e-4
b_np = config["ref"](a_np)
if config.get("cast_output", False):
b_np = b_np.astype(dtype)
return a_np, b_np
def test_ewise(target, dev, topi_name, dtype, tolerance, ewise_ref_data):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and topi_name in ["tan", "erf", "isnan", "isfinite", "isinf"]:
pytest.xfail(f"Vulkan runtime doesn't support {topi_name} yet")
topi_op = ewise_operations[topi_name]["topi"]
skip_name_check = ewise_operations[topi_name].get("skip_name_check", False)
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), dtype=dtype, name="A")
B = topi_op(A)
assert tuple(B.shape) == tuple(A.shape)
if not skip_name_check:
assert B.op.body[0].op.name == "tir." + topi_name
a_np, b_np = ewise_ref_data
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=topi_name)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros_like(b_np), dev)
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=tolerance, atol=tolerance)
from_dtype, to_dtype = tvm.testing.parameters(
("int32", "float32"),
("int32", "float64"),
("int32", "bool"),
("float32", "int32"),
("float32", "float64"),
("float32", "bool"),
("bool", "float32"),
("bool", "int32"),
)
@tvm.testing.fixture(cache_return_value=True)
def cast_ref_data(from_dtype, to_dtype):
shape = (5, 4)
input_range = (-100, 100)
if from_dtype == "bool":
a_np = np.random.choice([True, False], size=shape)
else:
a_np = np.random.uniform(*input_range, size=shape).astype(from_dtype)
if to_dtype == "bool":
a_np = a_np - a_np[2, 3]
b_np = a_np.astype(to_dtype)
return a_np, b_np
def test_cast(target, dev, cast_ref_data, from_dtype, to_dtype):
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), dtype=from_dtype, name="A")
B = topi.cast(A, to_dtype)
a_np, b_np = cast_ref_data
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.empty(b_np.shape, dtype=to_dtype, device=dev)
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, *args):
"""Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
dev = tvm.cpu(0)
pls = [] # placeholders
vals_nd = [] # initial values
for i, arg in enumerate(args):
pls.append(te.placeholder(arg.shape, name="pl" + str(i)))
vals_nd.append(tvm.nd.array(arg, dev))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
s = te.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd + [out_nd]))
return out_nd.numpy()
def verify_nn_matmul(sa, sb, transp_a, transp_b):
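    """Compare topi.nn.matmul against numpy.matmul for the given shapes and transpose flags."""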
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a, np.transpose(b) if transp_b else b)
c2 = with_tvm(
lambda A, B: topi.nn.matmul(A, B, transpose_a=transp_a, transpose_b=transp_b),
a,
b,
)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_nn_matmul():
verify_nn_matmul((1, 1), (1, 1), False, False)
verify_nn_matmul((1, 1), (1, 1), True, True)
verify_nn_matmul((2, 2), (2, 2), False, False)
verify_nn_matmul((2, 2), (2, 2), True, True)
verify_nn_matmul((2, 3), (3, 5), False, False)
verify_nn_matmul((5, 3), (3, 2), False, False)
verify_nn_matmul((3, 5), (3, 2), True, False)
verify_nn_matmul((3, 5), (2, 3), True, True)
verify_nn_matmul((3, 5), (3, 2), True, False)
verify_nn_matmul((5, 3), (2, 3), False, True)
def verify_matmul(sa, sb, transp_a, transp_b):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a, np.transpose(b) if transp_b else b)
c2 = with_tvm(lambda A, B: topi.matmul(A, B, transp_a, transp_b), a, b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_matmul():
verify_matmul((1, 1), (1, 1), False, False)
verify_matmul((1, 1), (1, 1), True, True)
verify_matmul((2, 2), (2, 2), False, False)
verify_matmul((2, 2), (2, 2), True, True)
verify_matmul((2, 3), (3, 5), False, False)
verify_matmul((5, 3), (3, 2), False, False)
verify_matmul((3, 5), (3, 2), True, False)
verify_matmul((3, 5), (2, 3), True, True)
def verify_tensordot(sa, sb, axes):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.tensordot(a, b, axes)
c2 = with_tvm(lambda A, B: topi.tensordot(A, B, axes), a, b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_tensordot():
verify_tensordot((3), (3), 0)
verify_tensordot((2, 3), (3, 5), 1)
verify_tensordot((2, 2, 3), (2, 3, 5), 2)
verify_tensordot((2, 2, 3, 4), (2, 3, 4, 5), 3)
verify_tensordot((3, 2, 2), (2, 3, 5), (1, 0))
verify_tensordot((3, 2, 2), (2, 3, 5), ((1, 0), (0, 1)))
verify_tensordot((4, 3, 2, 2), (2, 4, 3, 5), ((1, 2, 0), (2, 0, 1)))
if __name__ == "__main__":
test_nn_matmul()
test_matmul()
test_tensordot()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Test code for pooling"""
import math
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
_pool_schedule = {
"generic": topi.generic.schedule_pool,
"cpu": topi.x86.schedule_pool,
"gpu": topi.cuda.schedule_pool,
"hls": topi.hls.schedule_pool,
}
_adaptive_pool_schedule = {
"generic": topi.generic.schedule_adaptive_pool,
"cpu": topi.x86.schedule_adaptive_pool,
"gpu": topi.cuda.schedule_adaptive_pool,
"hls": topi.hls.schedule_adaptive_pool,
}
_pool_grad_schedule = {
"generic": topi.generic.schedule_pool_grad,
"gpu": topi.cuda.schedule_pool_grad,
}
def verify_pool_grad(
n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, add_relu=False
):
"""verify function of pool_grad"""
iw = ih
kw = kh
sw = sh
pt, pl, pb, pr = padding
A = te.placeholder((n, ic, ih, iw), name="A")
B = topi.nn.pool2d(
A,
kernel=[kh, kw],
stride=[sh, sw],
dilation=[1, 1],
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout="NCHW",
count_include_pad=count_include_pad,
)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
OutGrad = te.placeholder(bshape, name="OutGrad")
PoolGrad = topi.nn.pool_grad(
OutGrad,
A,
kernel=[kh, kw],
stride=[sh, sw],
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout="NCHW",
count_include_pad=count_include_pad,
)
if add_relu:
PoolGrad = topi.nn.relu(PoolGrad)
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
pool_grad_np = tvm.topi.testing.pool_grad_nchw(
a_np,
out_grad_np,
pool_size=(kh, kw),
strides=(sh, sw),
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
if add_relu:
pool_grad_np = np.maximum(pool_grad_np, 0.0)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _pool_grad_schedule)
s = s_func(PoolGrad)
a = tvm.nd.array(a_np, dev)
out_grad = tvm.nd.array(out_grad_np, dev)
pool_grad = tvm.nd.array(np.zeros(get_const_tuple(PoolGrad.shape), dtype=dtype), dev)
f = tvm.build(s, [A, OutGrad, PoolGrad], target)
f(a, out_grad, pool_grad)
tvm.testing.assert_allclose(pool_grad.numpy(), pool_grad_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_pool_grad():
"""test cases of pool_grad"""
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "avg", False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "avg", False, True)
verify_pool_grad(1, 256, 31, 3, 3, [1, 2, 1, 2], "avg", False, True)
verify_pool_grad(1, 256, 32, 2, 2, [1, 2, 1, 2], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [2, 2, 2, 2], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", True)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 0, 3], "avg", False, True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 3, 2, 1], "avg", False, False)
verify_pool_grad(1, 256, 31, 3, 3, [1, 0, 3, 2], "max", False)
verify_pool_grad(1, 256, 31, 3, 3, [3, 2, 1, 0], "max", True)
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "max", False)
verify_pool_grad(1, 256, 32, 1, 2, [1, 1, 1, 1], "avg", False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False, add_relu=True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False, add_relu=True)
def verify_global_pool(dshape, pool_type, layout="NCHW"):
"""verify function of global_pool"""
assert layout in ["NCHW", "NHWC"]
A = te.placeholder(shape=dshape, name="A")
B = topi.nn.global_pool(A, pool_type=pool_type, layout=layout)
B = topi.nn.relu(B)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
axis = (layout.find("H"), layout.find("W"))
if pool_type == "avg":
b_np = np.mean(a_np, axis=axis, keepdims=True)
elif pool_type == "max":
b_np = np.max(a_np, axis=axis, keepdims=True)
b_np = np.maximum(b_np, 0.0)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
if target == "cuda":
s = s_func(B, layout)
else:
s = s_func(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_global_pool():
"""test cases of global_pool"""
verify_global_pool((1, 1024, 7, 7), "avg")
verify_global_pool((4, 1024, 7, 7), "avg")
verify_global_pool((1, 1024, 7, 7), "max")
verify_global_pool((4, 1024, 7, 7), "max")
verify_global_pool((1, 7, 7, 1024), "avg", "NHWC")
verify_global_pool((4, 7, 7, 1024), "avg", "NHWC")
verify_global_pool((1, 7, 7, 1024), "max", "NHWC")
verify_global_pool((4, 7, 7, 1024), "max", "NHWC")
def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
"""verify function of adaptive_pool"""
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
oshape = np_out.shape
data = te.placeholder(dshape, name="data", dtype=dtype)
if len(out_size) == 2:
out = topi.nn.adaptive_pool(data, out_size, pool_type, layout)
else:
assert len(out_size) == 3
out = topi.nn.adaptive_pool3d(data, out_size, pool_type, layout)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
if target == "cuda":
s = s_func(out, layout)
else:
s = s_func(out)
a = tvm.nd.array(np_data, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), dev)
f = tvm.build(s, [data, out], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np_out, rtol=4e-5, atol=1e-6)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_adaptive_pool():
"""test cases of adaptive_pool"""
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "max")
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "avg")
verify_adaptive_pool((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool((1, 5, 46, 97), (4, 96), "avg")
verify_adaptive_pool((1, 224, 224, 3), (1, 1), "max", layout="NHWC")
verify_adaptive_pool((1, 5, 46, 97), (4, 96), "avg", layout="NHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "max", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 2, 2), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 64, 32, 32), (7, 8, 9), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 64, 32, 32), (8, 16, 16), "avg", layout="NCDHW")
verify_adaptive_pool((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 2, 2), "max", layout="NDHWC")
verify_adaptive_pool((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC")
def verify_poolnd(
n,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout,
count_include_pad=True,
):
"""verify function of pool1d"""
A = te.placeholder(input_shape, name="A")
if n == 1:
B = topi.nn.pool1d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 2:
B = topi.nn.pool2d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 3:
B = topi.nn.pool3d(
A,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
else:
raise ValueError(f"PoolND only supports n=1, 2, 3 got n={n}")
B = topi.nn.relu(B)
dtype = A.dtype
output_shape = [int(i) for i in B.shape]
input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
padding_before = padding[:n]
padding_after = padding[n:]
ref_np = tvm.topi.testing.poolnd_python(
input_np,
kernel,
stride,
dilation,
padding_before,
padding_after,
pool_type,
count_include_pad,
ceil_mode,
layout=layout,
)
np.testing.assert_equal(tuple(output_shape), tuple(ref_np.shape))
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s_func = tvm.topi.testing.dispatch(target, _pool_schedule)
s = s_func(B, layout)
a = tvm.nd.array(input_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def verify_pool3d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCDHW",
):
verify_poolnd(
3,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool3d():
"""test cases of pool3d"""
verify_pool3d(
[1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, True
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", True
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [3, 2, 1, 0, 5, 4], "max", True
)
# Test non-1 dilation
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [3, 3, 3], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 1, 3], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 2, 3], [3, 2, 1, 0, 5, 4], "max", True
)
# Test channel last layouts
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
True,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
layout="NDHWC",
)
verify_pool3d(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 0, 5, 4, 3, 2],
"max",
False,
layout="NDHWC",
)
verify_pool3d(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[3, 2, 1, 0, 5, 4],
"max",
True,
layout="NDHWC",
)
# Test non-1 dilation
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [3, 3, 3], [2, 1, 0, 5, 4, 3], "avg", False, True
)
verify_pool3d(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 1, 3], [1, 0, 5, 4, 3, 2], "max", False
)
verify_pool3d(
[1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 2, 3], [3, 2, 1, 0, 5, 4], "max", True
)
def verify_pool2d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCHW",
):
verify_poolnd(
2,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool2d():
"""test cases of pool"""
verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True)
# Test non-1 dilations
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True)
verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False)
verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True)
# Test channel last
verify_pool2d(
[1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, layout="NHWC")
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, layout="NHWC")
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True, layout="NHWC"
)
verify_pool2d(
[1, 32, 32, 16], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False, layout="NHWC"
)
verify_pool2d(
[1, 31, 31, 16], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, layout="NHWC"
)
verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, layout="NHWC")
def verify_pool1d(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCW",
):
verify_poolnd(
1,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
@tvm.testing.uses_gpu
def test_pool1d():
"""test cases of pool1d"""
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 0], "avg", False, True)
verify_pool1d([1, 16, 31], [3], [3], [1], [1, 2], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [1], [1, 2], "avg", False, False)
verify_pool1d([1, 16, 31], [4], [4], [1], [3, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [4], [4], [1], [0, 0], "avg", False, False)
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 0], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 1], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 1], "max", True)
verify_pool1d([1, 16, 31], [3], [3], [1], [2, 5], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [1], [0, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [3], [3], [1], [1, 4], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [1], [3, 0], "max", True)
# Test non-1 dilations
verify_pool1d([1, 16, 31], [3], [3], [2], [2, 5], "avg", False, True)
verify_pool1d([1, 16, 32], [2], [2], [3], [0, 3], "avg", False, False)
verify_pool1d([1, 16, 31], [3], [3], [2], [1, 4], "max", False)
verify_pool1d([1, 16, 31], [3], [3], [3], [3, 0], "max", True)
# Test Channel last
verify_pool1d([1, 32, 16], [2], [2], [1], [0, 0], "avg", False, True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [1, 2], "avg", False, True, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [1], [1, 2], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [4], [4], [1], [3, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [4], [4], [1], [0, 0], "avg", False, False, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [1], [0, 0], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 1], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 1], "max", True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [2, 5], "avg", False, True, layout="NWC")
verify_pool1d([1, 31, 16], [2], [2], [1], [0, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [1, 4], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [1], [3, 0], "max", True, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [2], [2, 5], "avg", False, True, layout="NWC")
verify_pool1d([1, 32, 16], [2], [2], [3], [0, 3], "avg", False, False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [2], [1, 4], "max", False, layout="NWC")
verify_pool1d([1, 31, 16], [3], [3], [3], [3, 0], "max", True, layout="NWC")
if __name__ == "__main__":
test_pool1d()
test_pool2d()
test_pool3d()
test_pool_grad()
test_global_pool()
test_adaptive_pool()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_prng.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.relay
import tvm.testing
import tvm.topi
import numpy as np
import scipy.stats
def threefry_split(target, dev, gen):
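    """Build and run topi.random.threefry_split on ``gen``; return the two resulting generator states as numpy arrays."""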
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
left_placeholder, right_placeholder = tvm.topi.random.threefry_split(gen_placeholder)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(s, [gen_placeholder, left_placeholder, right_placeholder])
left = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
right = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
f(tvm.nd.array(gen), left, right)
return left.numpy(), right.numpy()
def threefry_generate(target, dev, gen, size):
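    """Build and run topi.random.threefry_generate; return the advanced generator state and an array of random uint64 values of shape ``size``."""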
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
left_placeholder, right_placeholder = tvm.topi.random.threefry_generate(gen_placeholder, size)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(s, [gen_placeholder, left_placeholder, right_placeholder])
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
rands = tvm.nd.array(np.zeros(size, dtype="uint64"))
f(tvm.nd.array(gen), out_gen, rands)
return out_gen.numpy(), rands.numpy()
def uniform(target, dev, gen, low, high, size, dtype):
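    """Build and run topi.random.uniform; return the new generator state and samples drawn uniformly between ``low`` and ``high``."""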
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
low_placeholder = tvm.te.placeholder(low.shape, name="low", dtype=dtype)
high_placeholder = tvm.te.placeholder(high.shape, name="high", dtype=dtype)
left_placeholder, right_placeholder = tvm.topi.random.uniform(
gen_placeholder, low_placeholder, high_placeholder, size, dtype
)
s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
f = tvm.build(
s,
[gen_placeholder, low_placeholder, high_placeholder, left_placeholder, right_placeholder],
target=target,
)
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
rands = tvm.nd.array(np.zeros(size, dtype=dtype), device=dev)
f(
tvm.nd.array(gen, device=dev),
tvm.nd.array(low, device=dev),
tvm.nd.array(high, device=dev),
out_gen,
rands,
)
    return out_gen.numpy(), rands.numpy()
def multinomial(target, dev, gen, probs, num_samples):
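    """Build and run topi.random.multinomial; return the new generator state and the sampled category indices."""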
gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
probs_placeholder = tvm.te.placeholder(probs.shape, name="probs", dtype="float32")
new_gen_placeholder, indices_placeholder = tvm.topi.random.multinomial(
gen_placeholder, probs_placeholder, num_samples
)
s = tvm.topi.generic.schedule_extern([new_gen_placeholder, indices_placeholder])
f = tvm.build(
s,
[gen_placeholder, probs_placeholder, new_gen_placeholder, indices_placeholder],
target=target,
)
out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
indices = tvm.nd.array(np.zeros((*probs.shape[:-1], num_samples), dtype="int32"), device=dev)
f(tvm.nd.array(gen), tvm.nd.array(probs), out_gen, indices)
    return out_gen.numpy(), indices.numpy()
@tvm.testing.parametrize_targets("llvm")
def test_threefry_split(target, dev):
    # test that results of split do not equal each other or the input
gen = tvm.relay.random.threefry_key(0).data.numpy()
a, b = threefry_split(target, dev, gen)
assert (a != b).any() and (
a != gen
).any(), "Splitting a gen should result in different output gens"
# unittest some split inputs
assert (a == np.array([0, 0, 0, 0, 0, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
assert (b == np.array([0, 0, 0, 0, 1 << 63, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
# test enough splits to go over path length
for i in range(129):
a, b = threefry_split(target, dev, b)
assert (a[0:4] == b[0:4]).all(), "State part of split should be the same"
assert (b[0:4] != np.zeros(4, dtype="uint64")).any()
# check that split then generate does not generate the same for both sides
a, a_rands = threefry_generate(target, dev, a, (100,))
b, b_rands = threefry_generate(target, dev, b, (100,))
assert (
a_rands != b_rands
).all(), "Numbers generated from different initial states should be different"
# check repeatability
_, rands1 = threefry_generate(target, dev, a, (100,))
_, rands2 = threefry_generate(target, dev, a, (100,))
assert (
rands1 == rands2
).all(), "Numbers generated from the same initial state should be the same"
a1, b1 = threefry_split(target, dev, a)
a2, b2 = threefry_split(target, dev, a)
assert (a1 == a2).all() and (
b1 == b2
).all(), "Split called on the same input should return the same result"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_generate(target, dev):
gen = tvm.relay.random.threefry_key(0).data.numpy()
# check that we can generate some data
a, rands = threefry_generate(target, dev, gen, (2048,))
assert (
rands.shape[0] == 2048 and len(rands.shape) == 1
), "Output shape should match requested shape"
# check that gen out does not equal input
assert (a != gen).any(), "Output generator should be different from input generator"
# check that we can generate data whose total number of elements is not a multiple of 4.
a, rands = threefry_generate(target, dev, gen, (7,))
assert (
rands.shape[0] == 7 and len(rands.shape) == 1
), "Output shape should match requested shape"
# test enough generates to go over generate limit
gen = np.array(
[0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 1 << 63, 0], dtype="uint64"
) # make counter large
a, rands = threefry_generate(target, dev, gen, (2048,))
assert gen[4] != a[4], "Overflow of counter should trigger path change"
assert a[7] == 2048, "Overflow of counter should still update counter"
# check generate with path at length limit
gen = np.array([0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 0, 0], dtype="uint64") # make counter large
a, rands = threefry_generate(target, dev, gen, (2048,))
assert (
gen[0:4] != a[0:4]
).any(), "Overflowing counter with no space left in path should change state"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_wrapping(target, dev):
assert tvm.topi.random.threefry_test_wrapping(
target, dev
), f"{target} does not suppport wrapping unsigned integer arithmetic"
@tvm.testing.parametrize_targets("llvm")
def test_uniform(target, dev):
gen = tvm.relay.random.threefry_key(0).data.numpy()
m = 1024
n = 1024
dtypes = ["float32", "float64"]
for dtype in dtypes:
low = np.array(5.0, dtype=dtype)
high = np.array(10.0, dtype=dtype)
new_gen, rands = uniform(target, dev, gen, low, high, (m, n), dtype)
assert (gen != new_gen).any()
assert abs(np.mean(rands) - 7.5) < 1e-1
assert np.min(rands) >= 5.0
assert np.max(rands) <= 10.0
@tvm.testing.parametrize_targets("llvm")
def test_multinomial(target, dev):
def _verify_multinomial(size, num_samples, test_statistics=False):
gen = tvm.relay.random.threefry_key(np.random.randint(0, 1e5)).data.numpy()
probs = np.random.randint(low=-50, high=1000, size=size).astype("float32")
new_gen, indices = multinomial(target, dev, gen, probs, num_samples)
assert (gen != new_gen).any()
assert np.min(indices) >= 0
assert np.max(indices) < probs.shape[-1]
# Note, only use test_statistics with sample size > 10,000.
if test_statistics:
# Clipped and normalized probabilities * number of samples
# represents expected frequency of each category.
# First upcast to float64 to remove numerical error.
probs = probs.astype("float64")
probs = np.reshape(probs, [-1, probs.shape[-1]])
probs = np.maximum(probs, 0)
probs = probs / np.expand_dims(np.sum(probs, axis=-1), axis=-1)
# Multiply by number of samples and add epsilon to get non-zero expected samples per index.
expected_frequency = probs * num_samples + np.finfo(float).eps
# Do a small adjustment to make sure each row of expected_frequencies sums to exactly num_samples.
expected_frequency = (
np.expand_dims((num_samples / np.sum(expected_frequency, axis=-1)), axis=-1)
* expected_frequency
)
# Reduce shape to a 2D matrix.
indices = np.reshape(indices, [-1, indices.shape[-1]])
            # Split independent rows of indices.
index_list = [np.squeeze(x, 0) for x in np.split(indices, indices.shape[0], axis=0)]
# Count frequency of selected indices in each row.
observed_freqs = [np.bincount(samples, minlength=size[-1]) for samples in index_list]
# Stack observed frequencies back into a matrix.
observed_freqs = np.stack(observed_freqs, axis=0)
# Test how closely observed samples match expectations.
_, p_value = scipy.stats.chisquare(observed_freqs, expected_frequency, axis=-1)
# If sampled correctly, p_value should be greater than 1e-6 almost all the time.
assert np.all(p_value > 1e-6)
# Test simple 1-D case.
_verify_multinomial([3], 2)
# Test 2-D case.
_verify_multinomial([2, 10], 1)
# Test 3-D case.
_verify_multinomial([2, 3, 10], 4)
# Test large sample size statistics.
_verify_multinomial([3, 10], 10000, test_statistics=True)
if __name__ == "__main__":
test_threefry_split(tvm.target.Target("llvm"), tvm.device("cpu"))
test_threefry_generate(tvm.target.Target("llvm"), tvm.device("cpu"))
test_threefry_wrapping(tvm.target.Target("llvm"), tvm.device("cpu"))
test_uniform(tvm.target.Target("llvm"), tvm.device("cpu"))
test_multinomial(tvm.target.Target("llvm"), tvm.device("cpu"))
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for QNN operators."""
import numpy as np
import tvm
from tvm import topi, relay, te
from tvm.contrib import graph_executor
import tvm.topi.testing
def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
# Create placeholder variables for all qnn inputs.
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)
# Create random numpy values to assign to inputs.
a_np = np.random.uniform(size=data_shape).astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
q_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
# Wrap the numpy arrays in nd arrays.
a = tvm.nd.array(a_np, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
q = tvm.nd.array(q_np, dev)
# Construct equivalent relay graph.
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype="float32")
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
# Get real qnn quantize output.
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_q_out = m.get_output(0)
# Compile the simulated quantize function.
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
func(a, d, s, z, q)
# Check correctness against the true qnn output.
mismatch = q.numpy() != real_q_out.numpy().astype("float32")
# Allow some rounding errors due to GPU fp32 arithmetic.
assert np.sum(mismatch) <= 3
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_quantize():
verify_simulated_quantize([1], "int8", [1], -1)
verify_simulated_quantize([2, 5], "int8", [5], 1)
verify_simulated_quantize([1, 32, 32, 32], "int8", [32], -1)
verify_simulated_quantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_quantize([2, 5], "int32", [5], 1)
def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
# Create placeholder variables for all qnn inputs.
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)
# Create random numpy values to assign to inputs.
a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
a_np_f = a_np.astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
dq_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
# Wrap the numpy arrays in nd arrays.
a = tvm.nd.array(a_np_f, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
dq = tvm.nd.array(dq_np, dev)
# Construct equivalent relay graph.
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
        # Get real qnn dequantize output.
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_dq_out = m.get_output(0)
        # Compile the simulated dequantize function.
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
func(a, d, s, z, dq)
# Check correctness against the true qnn output.
tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_dequantize():
verify_simulated_dequantize([1], "int8", [1], -1)
verify_simulated_dequantize([2, 5], "int8", [5], 1)
verify_simulated_dequantize([2, 5], "int8", [2], 0)
verify_simulated_dequantize([1, 32, 32, 32], "int8", [32], -1)
verify_simulated_dequantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_dequantize([2, 5], "int32", [5], 1)
if __name__ == "__main__":
test_simulated_quantize()
test_simulated_dequantize()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for reduce."""
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters(
((32,), 0, False, "argmax", "float32"),
((128, 24, 128, 24), (1, 2, 3), True, "sum", "float32"),
((2, 3), None, True, "all", "bool"),
((128, 24 * 128 * 24), (1,), False, "max", "float32"),
((32, 128, 24), None, True, "sum", "float32"),
((32, 128, 24), None, True, "all", "bool"),
((128, 24, 128, 24), (0, 2), False, "min", "float32"),
((32, 128), 1, True, "argmax", "float32"),
((32, 24, 32, 24), 2, False, "argmin", "float32"),
((31, 21, 15), None, True, "argmax", "float32"),
((31, 21, 15), None, False, "sum", "float32"),
((128, 24, 128, 24), (1, 2, 3), True, "sum", "float64"),
((2, 3), None, True, "any", "bool"),
((32, 128, 24), None, True, "any", "bool"),
((1, 4, 7), 1, True, "any", "bool"),
((128, 24, 128, 24), 2, False, "any", "bool"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(in_shape, axis, keepdims, reduce_type, dtype):
    # Build the reference input and expected output with numpy
if dtype == "bool":
in_npy_map = in_npy = np.random.choice([True, False], size=in_shape)
else:
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)
if reduce_type == "sum":
out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
elif reduce_type == "all" and dtype == "bool":
out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
elif reduce_type == "any" and dtype == "bool":
out_npy = in_npy_map.any(axis=axis, keepdims=keepdims)
elif reduce_type == "max":
out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
elif reduce_type == "min":
out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
elif reduce_type == "argmin":
out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
return in_npy, in_npy_map, out_npy
def _my_npy_argmax(arr, axis, keepdims):
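    """numpy argmax that optionally keeps the reduced axis as a length-1 dimension."""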
if not keepdims:
return arr.argmax(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmax(axis=axis).reshape(out_shape)
def _my_npy_argmin(arr, axis, keepdims):
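    """numpy argmin that optionally keeps the reduced axis as a length-1 dimension."""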
if not keepdims:
return arr.argmin(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmin(axis=axis).reshape(out_shape)
def test_reduce_map(target, dev, ref_data, in_shape, axis, keepdims, reduce_type, dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and reduce_type in ["sum", "any", "all"]:
pytest.xfail(f"Vulkan backend has known errors on {reduce_type}")
in_npy, in_npy_map, out_npy = ref_data
# Build the logic and compile the function
A = te.placeholder(shape=in_shape, name="A", dtype=dtype)
A1 = topi.sqrt(topi.exp(A))
out_dtype = dtype
if reduce_type == "sum":
B = topi.sum(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "all":
B = topi.all(A, axis=axis, keepdims=keepdims)
elif reduce_type == "any":
B = topi.any(A, axis=axis, keepdims=keepdims)
elif reduce_type == "max":
B = topi.max(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "min":
B = topi.min(A1, axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
B = topi.argmax(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
elif reduce_type == "argmin":
B = topi.argmin(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
else:
raise NotImplementedError
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name=reduce_type)
data_tvm = tvm.nd.array(in_npy, device=dev)
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype)
foo(data_tvm, out_tvm)
if reduce_type == "argmax" or reduce_type == "argmin":
out_tvm_indices = out_tvm.numpy()
if keepdims:
out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
if axis is None:
out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
else:
other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :]))
sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
out_tvm_val = in_npy_map[sel_indices]
if reduce_type == "argmax":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3)
elif reduce_type == "argmin":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3)
else:
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
def test_complex_reduce(target, dev):
in_shape = (2, 3)
dtype = "float32"
axis = 0
keepdims = False
A = te.placeholder(shape=in_shape, name="A", dtype=dtype)
B = topi.sum(A, axis=axis, keepdims=keepdims)
C = topi.add(B, B)
D = topi.multiply(B, B)
E = topi.add(C, D)
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(E)
foo = tvm.build(s, [A, E], target, name="sum")
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
sum_npy = in_npy.sum(axis=axis, keepdims=keepdims)
out_npy = sum_npy * 2 + sum_npy * sum_npy
data_tvm = tvm.nd.array(in_npy, device=dev)
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=dtype)
foo(data_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for relu activation"""
import sys
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.nvcc import have_fp16
import pytest
import tvm.testing
m, n, dtype = tvm.testing.parameters(
(10, 128, "float32"),
(128, 64, "float16"),
    # Commented out because this case was unexpectedly killed when run
# (1024 * 100, 512, "float32"),
)
def test_relu(target, dev, m, n, dtype):
A = te.placeholder((m, n), name="A", dtype=dtype)
B = topi.nn.relu(A)
a_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0)
if dtype == "float16" and target == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
pytest.skip("Skip because %s does not have fp16 support" % target)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_elemwise_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
# Building with the CSE pass disabled
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [A, B], target, name="relu")
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
size, alpha = tvm.testing.parameters((100, 0.1))
def test_leaky_relu(size, alpha):
A = te.placeholder((size,), name="A")
B = topi.nn.leaky_relu(A, alpha)
s = te.create_schedule([B.op])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
dev = tvm.cpu(0)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
# Building with the CSE pass disabled
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [A, B], "llvm", name="leaky_relu")
foo(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
x, w, axis, weight_reshape = tvm.testing.parameters(
((1, 3, 2, 2), (3,), 1, (3, 1, 1)),
((1, 3, 2, 2), (2,), 2, (2, 1)),
((1, 3), (3,), 1, (3,)),
)
def test_prelu(x, w, axis, weight_reshape):
X = te.placeholder((x), name="X")
W = te.placeholder((w), name="W")
x_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(X.shape)).astype(X.dtype)
w_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(W.shape)).astype(W.dtype)
def _prelu_numpy(x, W):
return (x < 0) * (x * W.reshape(weight_reshape)) + (x >= 0) * x
B = topi.nn.prelu(X, W, axis)
s = te.create_schedule([B.op])
dev = tvm.cpu(0)
x_tvm = tvm.nd.array(x_np, dev)
w_tvm = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(X.shape), dtype=B.dtype), dev)
# Building with the CSE pass disabled
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
foo = tvm.build(s, [X, W, B], "llvm", name="prelu")
foo(x_tvm, w_tvm, b)
out_np = _prelu_numpy(x_np, w_np)
tvm.testing.assert_allclose(b.numpy(), out_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_reorg.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do reorg."""
import numpy as np
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm
from tvm import te
import tvm.topi.testing
import tvm.testing
_reorg_schedule = {
"generic": topi.generic.schedule_reorg,
"gpu": topi.cuda.schedule_reorg,
}
def verify_reorg(batch, in_size, in_channel, stride):
"""Verify reorg operator by comparing outputs from tvm and numpy implementation"""
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
B = topi.vision.reorg(A, stride)
a_shape = get_const_tuple(A.shape)
dtype = A.dtype
def get_ref_data_reorg():
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_np = tvm.topi.testing.reorg_python(a_np, stride)
return a_np, b_np
a_np, b_np = get_ref_data_reorg()
def check_device(device):
"""Cheching devices is enabled or not"""
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s_func = tvm.topi.testing.dispatch(device, _reorg_schedule)
s = s_func([B])
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, B], device)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
@tvm.testing.uses_gpu
def test_reorg():
verify_reorg(1, 20, 8, 2)
if __name__ == "__main__":
test_reorg()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import topi
topi_funcs = {
"cumsum": {"generic": topi.cumsum, "cuda": topi.cuda.cumsum},
"cumprod": {"generic": topi.cumprod, "cuda": topi.cuda.cumprod},
}
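# Value used to fill the first output position of an exclusive scan (0 for cumsum, 1 for cumprod)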
identity_value = {"cumsum": 0, "cumprod": 1}
def get_implementations(name, axis, dtype, exclusive):
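    """Map each target kind to a (compute, schedule) pair for the requested scan operator."""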
topi_func_generic = topi_funcs[name]["generic"]
topi_func_cuda = topi_funcs[name]["cuda"]
return {
"generic": (
lambda x: topi_func_generic(x, axis, dtype, exclusive=exclusive),
topi.generic.schedule_extern,
),
"cuda": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"nvptx": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"vulkan": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
"metal": (
lambda x: topi_func_cuda(x, axis, dtype, exclusive=exclusive),
topi.cuda.schedule_scan,
),
}
def _run_tests(
dev,
target,
op_name: str = "cumsum",
    gt_func: Callable[..., np.ndarray] = np.cumsum,
):
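    """Run a battery of scan checks for ``op_name`` against the numpy ground-truth function ``gt_func``."""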
def check_scan(np_ref, data, axis=None, dtype=None, exclusive=False):
implementations = get_implementations(op_name, axis, dtype, exclusive)
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm.topi.testing.compare_numpy_tvm([data], np_ref, target, dev, fcompute, fschedule)
data = np.array([2, 3, 0])
check_scan(gt_func(data), data)
data = np.random.rand(10) > 0.5
data = data.astype(np.int32)
check_scan(gt_func(data, dtype=np.int32), data)
check_scan(gt_func(data), data, dtype="int64")
data = np.random.rand(10) > 0.5
check_scan(gt_func(data, dtype=np.int32), data, dtype="int32")
for in_dtype in ["float32", "float64"]:
if target == "metal" and in_dtype == "float64":
# float64 is not supported in metal
continue
data = np.random.randn(10, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
data = np.random.randn(10, 5, 10).astype(in_dtype)
check_scan(gt_func(data), data)
check_scan(gt_func(data, axis=0), data, axis=0)
check_scan(gt_func(data, axis=1), data, axis=1)
check_scan(gt_func(data, axis=-1), data, axis=-1)
for in_dtype in ["int32", "int64"]:
data = np.random.randint(-100, 100, size=(100, 100)).astype(in_dtype)
check_scan(gt_func(data, dtype=in_dtype), data)
check_scan(gt_func(data), data, dtype="int64")
check_scan(gt_func(data, axis=0, dtype=in_dtype), data, axis=0)
check_scan(gt_func(data, axis=1, dtype=in_dtype), data, axis=1)
data = np.random.randint(1 << 30, (1 << 31) - 1, size=(100)).astype(in_dtype)
check_scan(gt_func(data), data, dtype="int64")
data = np.random.randint(-100, 100, size=(100, 100)).astype("int64")
expected_result = np.roll(gt_func(data), 1)
expected_result[0] = identity_value[op_name]
check_scan(expected_result, data, dtype="int64", exclusive=True)
expected_result = np.roll(gt_func(data, axis=0, dtype=in_dtype), 1, axis=0)
expected_result[0, :] = identity_value[op_name]
check_scan(expected_result, data, axis=0, exclusive=True)
expected_result = np.roll(gt_func(data, axis=1, dtype=in_dtype), 1, axis=1)
expected_result[:, 0] = identity_value[op_name]
        check_scan(expected_result, data, axis=1, exclusive=True)
@tvm.testing.parametrize_targets
def test_cumsum(dev, target):
_run_tests(dev, target, op_name="cumsum", gt_func=np.cumsum)
@tvm.testing.parametrize_targets
def test_cumprod(dev, target):
_run_tests(dev, target, op_name="cumprod", gt_func=np.cumprod)
if __name__ == "__main__":
test_cumsum(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumsum(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumsum(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumsum(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumsum(tvm.device("metal"), tvm.target.Target("metal"))
test_cumprod(tvm.device("cpu"), tvm.target.Target("llvm"))
test_cumprod(tvm.device("cuda"), tvm.target.Target("cuda"))
test_cumprod(tvm.device("nvptx"), tvm.target.Target("nvptx"))
test_cumprod(tvm.device("vulkan"), tvm.target.Target("vulkan"))
test_cumprod(tvm.device("metal"), tvm.target.Target("metal"))
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
@tvm.testing.parametrize_targets
def test_scatter_nd(dev, target):
def check_scatter_nd(data, indices, updates, out, mode="add"):
implementations = {
"generic": (
lambda x, y, z: topi.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
"gpu": (
lambda x, y, z: topi.cuda.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
"cpu": (
lambda x, y, z: topi.x86.scatter_nd(x, y, z, mode),
topi.generic.schedule_extern,
),
}
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm.topi.testing.compare_numpy_tvm(
[data, indices, updates], out, target, dev, fcompute, fschedule
)
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]])
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
check_scatter_nd(data, indices, updates, out)
data = np.zeros((2, 2, 2, 2)).astype("int64")
indices = np.array([[0, 1], [1, 1]])
updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])
check_scatter_nd(data, indices, updates, out)
indices = np.array([[1, 0, 0]])
updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
shape = (2, 1560)
data = np.zeros(shape).astype("float32")
out = data.copy()
out[1, :] += updates[0, :]
out[0, :] += updates[1, :]
out[0, :] += updates[2, :]
check_scatter_nd(data, indices, updates, out)
for mode in ["add", "update"]:
updates = np.ones((5, 3)).astype("float64")
indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
"int64"
)
shape = (2, 7, 3)
data = np.random.random(shape).astype("float64")
out = data.copy()
for i in range(indices.shape[1]):
for j in range(updates.shape[1]):
if mode == "add":
out[indices[0, i], indices[1, i], j] += updates[i, j]
elif mode == "update":
out[indices[0, i], indices[1, i], j] = updates[i, j]
check_scatter_nd(data, indices, updates, out, mode)
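# Illustrative sketch (not part of the original test): the NumPy semantics that the checks
# above exercise.  Each column of `indices` names one location in the leading dimensions of
# `data`; the corresponding entry of `updates` is added to (mode="add") or written over
# (mode="update") that location.
def _scatter_nd_add_example():
    data = np.zeros((2, 2), dtype="int64")
    indices = np.array([[1, 1, 0], [0, 1, 0]])  # targets: (1, 0), (1, 1), (0, 0)
    updates = np.array([2, 3, 0])
    out = data.copy()
    for k in range(indices.shape[-1]):
        out[tuple(indices[:, k])] += updates[k]
    return out  # [[0, 0], [2, 3]], matching the first case above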
if __name__ == "__main__":
test_scatter_nd(tvm.device("cpu"), tvm.target.Target("llvm"))
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm.topi.testing import searchsorted_ref
from tvm import te, topi
topi_funcs = {"generic": topi.searchsorted, "cuda": topi.cuda.searchsorted}
def get_implementations():
topi_func_generic = topi_funcs["generic"]
topi_func_cuda = topi_funcs["cuda"]
return {
"generic": (
lambda x, y, side, out_dtype: topi_func_generic(x, y, side, out_dtype),
topi.generic.schedule_extern,
),
"cuda": (
lambda x, y, side, out_dtype: topi_func_cuda(x, y, side, out_dtype),
topi.cuda.schedule_extern,
),
"vulkan": (
lambda x, y, side, out_dtype: topi_func_cuda(x, y, side, out_dtype),
topi.cuda.schedule_extern,
),
}
@tvm.testing.parametrize_targets
def test_searchsorted(dev, target):
def verify_with_input(sorted_sequence_np, values_np, right):
sorted_sequence = te.placeholder(sorted_sequence_np.shape, dtype="float32")
values = te.placeholder(values_np.shape, dtype="float32")
out_dtype = "int32"
implementations = get_implementations()
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
with tvm.target.Target(target):
indices = fcompute(sorted_sequence, values, right, out_dtype)
s = fschedule([indices])
func = tvm.build(s, [sorted_sequence, values, indices], target=target)
dev = tvm.device(target, 0)
a = tvm.nd.array(sorted_sequence_np, dev)
b = tvm.nd.array(values_np, dev)
c = tvm.nd.array(np.zeros(values_np.shape, dtype=indices.dtype), dev)
func(a, b, c)
ref = searchsorted_ref(sorted_sequence_np, values_np, right, out_dtype)
np.testing.assert_equal(c.numpy(), ref)
def verify(sequence_len, num_search, outer_axes, right, sorted_sequence_1d=False):
if sorted_sequence_1d:
sorted_sequence_shape = (sequence_len,)
else:
sorted_sequence_shape = outer_axes + (sequence_len,)
values_shape = outer_axes + (num_search,)
verify_with_input(
np.sort(np.random.randn(*sorted_sequence_shape).astype("float32"), axis=-1),
np.random.randn(*values_shape).astype("float32"),
right,
)
verify(1024, 1000, (10, 5, 3), False)
verify(999, 2000, (10, 5, 3), True)
verify(1000, 1000, (), False)
verify(2001, 100, (500,), True)
verify(2001, 100, (500,), False, sorted_sequence_1d=True)
# Check edge cases
for right in [True, False]:
sorted_sequence = np.array([1, 2, 3, 4, 5], dtype="float32")
verify_with_input(sorted_sequence, np.array([6], dtype="float32"), right)
verify_with_input(sorted_sequence, np.array([0], dtype="float32"), right)
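# Illustrative sketch (assumption, not part of the original test): for a 1-D sorted sequence
# the kernel's behaviour is expected to match np.searchsorted, with `right` selecting the
# bisection side ("right" means insert after equal elements).
def _searchsorted_numpy_example():
    sorted_sequence = np.array([1.0, 2.0, 2.0, 3.0], dtype="float32")
    values = np.array([2.0], dtype="float32")
    left = np.searchsorted(sorted_sequence, values, side="left")    # [1]
    right = np.searchsorted(sorted_sequence, values, side="right")  # [3]
    return left, right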
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for softmax"""
import logging
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
_softmax_schedule = {
"generic": topi.generic.schedule_softmax,
"cpu": topi.x86.schedule_softmax,
"gpu": topi.cuda.schedule_softmax,
"hls": topi.hls.schedule_softmax,
}
dtype = tvm.testing.parameter("float32", "float64")
configs = {
"softmax": {
"topi": topi.nn.softmax,
"ref": tvm.topi.testing.softmax_python,
"dimensions": [1, 2, 4],
"axis": [0, 1, 2, 3],
},
"log_softmax": {
"topi": topi.nn.log_softmax,
"ref": tvm.topi.testing.log_softmax_python,
"dimensions": [2, 3],
"axis": [1],
},
}
shapes = [(32, 10), (3, 4), (1, 16, 256, 256), (32,)]
softmax_operation, shape, axis = tvm.testing.parameters(
*[
(name, shape, axis)
for name, config in configs.items()
for shape in shapes
if len(shape) in config["dimensions"]
for axis in range(len(shape))
if axis in config["axis"]
]
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(shape, dtype, softmax_operation, axis):
ref_func = configs[softmax_operation]["ref"]
a_np = np.random.uniform(size=shape).astype(dtype)
perm = list(range(a_np.ndim))
perm[-1], perm[axis] = perm[axis], perm[-1]
trans_shape = [a_np.shape[i] for i in perm]
a_np_2d = a_np.transpose(perm).reshape(-1, trans_shape[-1])
b_np_2d = ref_func(a_np_2d)
b_np = b_np_2d.reshape(*trans_shape).transpose(perm)
return a_np, b_np
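# Illustrative sketch (not part of the original test): the fixture above handles an arbitrary
# softmax axis by swapping it with the last dimension, applying the 2-D reference, and
# swapping back.  For the plain "softmax" configuration this is equivalent to the direct
# NumPy computation below.
def _softmax_axis_example(a_np, axis):
    e = np.exp(a_np - np.max(a_np, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)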
def test_softmax(target, dev, shape, dtype, ref_data, softmax_operation, axis):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and dtype == "float64":
# https://www.khronos.org/registry/SPIR-V/specs/1.0/GLSL.std.450.html
pytest.xfail("Vulkan GLSL.std.450 does not support 64-bit floats")
A = te.placeholder(shape, dtype=dtype, name="A")
topi_op = configs[softmax_operation]["topi"]
B = topi_op(A, axis=axis)
with tvm.target.Target(target):
fschedule = tvm.topi.testing.dispatch(target, _softmax_schedule)
s = fschedule(B)
a_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for vision package"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
_sort_implement = {
"generic": (topi.sort, topi.generic.schedule_sort),
"gpu": (topi.cuda.sort, topi.cuda.schedule_sort),
}
_argsort_implement = {
"generic": (topi.argsort, topi.generic.schedule_argsort),
"gpu": (topi.cuda.argsort, topi.cuda.schedule_argsort),
}
_topk_implement = {
"generic": (topi.topk, topi.generic.schedule_topk),
"gpu": (topi.cuda.topk, topi.cuda.schedule_topk),
}
axis = tvm.testing.parameter(0, -1, 1)
is_ascend = tvm.testing.parameter(True, False, ids=["is_ascend", "not_ascend"])
dtype = tvm.testing.parameter("int64", "float32")
topk = tvm.testing.parameter(0, 1, 5)
topk_ret_type = tvm.testing.parameter("values", "indices", "both")
def test_sort(target, dev, axis, is_ascend):
np.random.seed(0)
dshape = (20, 100)
data_dtype = "float32"
data = te.placeholder(dshape, name="data", dtype=data_dtype)
perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
np.random.shuffle(perm)
np_data = perm.reshape(dshape)
if is_ascend:
np_sort = np.sort(np_data, axis=axis)
else:
np_sort = -np.sort(-np_data, axis=axis)
if axis == 0:
np_sort = np_sort[: dshape[axis], :]
else:
np_sort = np_sort[:, : dshape[axis]]
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _sort_implement)
out = fcompute(data, axis=axis, is_ascend=is_ascend)
s = fschedule(out)
tvm_data = tvm.nd.array(np_data, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_sort, rtol=1e0)
def test_argsort(target, dev, axis, is_ascend):
dshape = (20, 100)
data_dtype = "float32"
data = te.placeholder(dshape, name="data", dtype=data_dtype)
perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
np.random.shuffle(perm)
np_data = perm.reshape(dshape)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
if axis == 0:
np_indices = np_indices[: dshape[axis], :]
else:
np_indices = np_indices[:, : dshape[axis]]
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _argsort_implement)
out = fcompute(data, axis=axis, is_ascend=is_ascend)
s = fschedule(out)
tvm_data = tvm.nd.array(np_data, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
f = tvm.build(s, [data, out], target)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_indices.astype(data_dtype), rtol=1e0)
def test_topk(target, dev, topk, axis, topk_ret_type, is_ascend, dtype):
np.random.seed(0)
shape = (20, 100)
data_dtype = "float32"
data = te.placeholder(shape, name="data", dtype=data_dtype)
np_data = np.random.uniform(size=shape).astype(data_dtype)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
kk = topk if topk >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _topk_implement)
outs = fcompute(data, topk, axis, topk_ret_type, is_ascend, dtype)
outs = outs if isinstance(outs, list) else [outs]
s = fschedule(outs)
tvm_data = tvm.nd.array(np_data, dev)
tvm_res = []
for t in outs:
tvm_res.append(tvm.nd.empty(t.shape, dtype=t.dtype, device=dev))
f = tvm.build(s, [data] + outs, target)
f(tvm_data, *tvm_res)
if topk_ret_type == "both":
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
tvm.testing.assert_allclose(tvm_res[1].numpy(), np_indices)
elif topk_ret_type == "values":
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
else:
tvm.testing.assert_allclose(tvm_res[0].numpy(), np_indices)
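# Illustrative sketch (not part of the original test): a compact NumPy top-k along the last
# axis, equivalent to the per-row reference constructed in test_topk for axis=1/-1 when
# topk >= 1.
def _numpy_topk_example(data, k, is_ascend=False):
    order = np.argsort(data if is_ascend else -data, axis=-1)[..., :k]
    values = np.take_along_axis(data, order, axis=-1)
    return values, order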
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_space_to_batch_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for space to batch"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after, pad_value=0):
out_shape = []
out_shape.append(int((input_shape[0] * np.prod(block_shape))))
for i in range(1, len(block_shape) + 1):
pad = pad_before[i - 1] + pad_after[i - 1]
out_shape.append(int((input_shape[i] + pad) // block_shape[i - 1]))
for i in range(len(block_shape) + 1, len(input_shape)):
out_shape.append(input_shape[i])
A = te.placeholder(input_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=input_shape).astype(dtype)
B = topi.nn.space_to_batch_nd(A, block_shape, pad_before, pad_after, pad_value)
b_np = tvm.topi.testing.space_to_batch_nd_python(
a_np, block_shape, pad_before, pad_after, pad_value
)
def check_target(target, dev):
print("Running on target: %s" % target)
        with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_space_to_batch():
# Without paddings
verify_space_to_batch_nd([3, 3, 2, 1], [3], [0], [0])
# With paddings
verify_space_to_batch_nd([3, 3, 2, 1], [3], [1], [2])
# Multiple spatial dims
verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2], [1, 0, 3], [2, 0, 0])
# No remaining dims
verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2, 2], [1, 4, 0, 0], [2, 0, 1, 0])
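# Illustrative sketch (not part of the original test): the output-shape rule used in
# verify_space_to_batch_nd, worked for the single padded spatial dimension case above.
def _space_to_batch_shape_example():
    input_shape, block_shape = [3, 3, 2, 1], [3]
    pad_before, pad_after = [1], [2]
    batch = input_shape[0] * int(np.prod(block_shape))                            # 3 * 3 = 9
    spatial = (input_shape[1] + pad_before[0] + pad_after[0]) // block_shape[0]   # 6 // 3 = 2
    return [batch, spatial] + input_shape[2:]                                     # [9, 2, 2, 1]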
if __name__ == "__main__":
test_space_to_batch()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_space_to_depth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for space to depth"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, layout="NCHW"):
out_channel = int(in_channel * (block_size * block_size))
out_height = int(in_height / block_size)
out_width = int(in_width / block_size)
if layout == "NCHW":
in_shape = [batch, in_channel, in_height, in_width]
out_shape = [batch, out_channel, out_height, out_width]
elif layout == "NHWC":
in_shape = [batch, in_height, in_width, in_channel]
out_shape = [batch, out_height, out_width, out_channel]
else:
raise NotImplementedError("Layout not supported {}".format(layout))
A = te.placeholder(in_shape, name="A", dtype="float32")
dtype = A.dtype
a_np = np.random.uniform(size=in_shape).astype(dtype)
B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
b_np = tvm.topi.testing.space_to_depth_python(a_np, block_size)
if layout == "NHWC":
a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
def check_device(device, dev):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
for device, dev in tvm.testing.enabled_targets():
check_device(device, dev)
@tvm.testing.uses_gpu
def test_space_to_depth():
for layout in ["NCHW", "NHWC"]:
# Simplest possible case
verify_space_to_depth(2, 1, 1, 2, 2, layout=layout)
# Average input size
verify_space_to_depth(2, 1, 32, 32, 32, layout=layout)
# Large block size
verify_space_to_depth(8, 1, 32, 64, 64, layout=layout)
# Large batch size
verify_space_to_depth(4, 8, 32, 32, 32, layout=layout)
# Large input size
verify_space_to_depth(4, 8, 32, 128, 128, layout=layout)
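# Illustrative sketch (assumption, not part of the original test): NCHW space_to_depth as a
# reshape/transpose, using the same shape rule as verify_space_to_depth.  The block ordering
# is assumed to match the reference in tvm.topi.testing.space_to_depth_python.
def _space_to_depth_nchw_example(x, block_size):
    n, c, h, w = x.shape
    x = x.reshape(n, c, h // block_size, block_size, w // block_size, block_size)
    x = x.transpose(0, 3, 5, 1, 2, 4)
    return x.reshape(n, c * block_size * block_size, h // block_size, w // block_size)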
if __name__ == "__main__":
test_space_to_depth()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for sparse operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
import scipy.sparse as sp
import tvm.testing
_sparse_dense_implement = {
"generic": (topi.nn.sparse_dense, topi.generic.schedule_sparse_dense),
"cuda": (topi.cuda.sparse_dense, topi.cuda.schedule_sparse_dense),
"x86": (topi.nn.sparse_dense, topi.x86.schedule_sparse_dense),
}
def verify_dynamic_csrmv(batch, in_dim, out_dim, dtype, use_bias=True):
nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
B = te.placeholder((in_dim, 1), dtype=dtype, name="B")
C = te.placeholder((nr,), dtype=dtype, name="C")
D = topi.sparse.csrmv(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
dtype = A.dtype
# get the test data
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
b_np = np.random.uniform(size=(in_dim, 1), high=100).astype(dtype)
c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0] - 1
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), dev)
assert a.data.dtype == A.data.dtype
assert a.indices.dtype == A.indices.dtype
assert a.indptr.dtype == A.indptr.dtype
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
for device in ["llvm"]:
check_device(device)
def verify_dynamic_csrmm(batch, in_dim, out_dim, dtype, use_bias=True):
nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
B = te.placeholder((in_dim, out_dim), dtype=dtype, name="B")
C = te.placeholder((nr,), dtype=dtype, name="C")
D = topi.sparse.csrmm(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
dtype = A.dtype
# get the test data
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
b_np = np.random.uniform(size=(in_dim, out_dim), high=100).astype(dtype)
c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
else:
d_np = np.dot(a_np, b_np)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
_nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
assert a.shape[0] == a.indptr.shape[0] - 1
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), dev)
f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
f(_nr, a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-2, atol=1e-2)
for device in ["llvm"]:
check_device(device)
def verify_dense_si(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
nonzeros = te.var("nonzeros")
A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nonzeros, dtype=dtype, name="A")
B = te.placeholder((out_dim, in_dim), dtype=dtype, name="B")
C = te.placeholder((out_dim,), dtype=dtype, name="C")
D = topi.sparse.dense(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
# get the test data
def get_ref_data():
mag = 10.0
a_np = np.maximum(
mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5), 0.0
).astype(dtype)
b_np = (mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5)).astype(
dtype
)
c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvmsp.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
f(a.data, a.indices, a.indptr, b, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
check_device("llvm")
def verify_dense_sw(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
nonzeros = te.var("nonzeros")
A = te.placeholder((batch, in_dim), dtype=dtype, name="A")
B = tvmsp.placeholder(shape=(out_dim, in_dim), nonzeros=nonzeros, dtype=dtype, name="B")
C = te.placeholder((out_dim,), dtype=dtype, name="C")
D = topi.sparse.dense(A, B, C if use_bias else None)
s = te.create_schedule(D.op)
# get the test data
def get_ref_data():
mag = 10.0
a_np = (mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5)).astype(
dtype
)
b_np = np.maximum(
mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5), 0.0
).astype(dtype)
c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
if use_bias:
d_np = np.dot(a_np, b_np.T) + c_np
else:
d_np = np.dot(a_np, b_np.T)
return (a_np, b_np, c_np, d_np)
a_np, b_np, c_np, d_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
a = tvm.nd.array(a_np, dev)
b = tvmsp.array(b_np, dev)
c = tvm.nd.array(c_np, dev)
d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
f(a, b.data, b.indices, b.indptr, c, d)
tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
check_device("llvm")
def test_csrmv():
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float32", use_bias=False)
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float64", use_bias=True)
verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="int32", use_bias=True)
def test_csrmm():
M, K, N = 5, 7, 2
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="int64", use_bias=False)
verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="float64", use_bias=True)
def test_dense_si():
M, K, N = 3, 5, 2
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="float32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="float32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int32")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int16")
verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int16")
def test_dense_sw():
M, K, N = 3, 5, 2
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="float32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="float32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int32")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=False, dtype="int16")
verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=True, dtype="int16")
def test_dense():
test_dense_si()
test_dense_sw()
def test_sparse_dense_csr():
M, N, K, density = 1, 17, 47, 0.2
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np.dot(W_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
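# Illustrative sketch (not part of the original test): the product checked above, Y = X @ W.T
# with W stored in CSR form, written directly over the (data, indices, indptr) arrays.
def _csr_sparse_dense_reference(x_np, w_data, w_indices, w_indptr):
    m, n = x_np.shape[0], w_indptr.shape[0] - 1
    y = np.zeros((m, n), dtype=x_np.dtype)
    for row in range(n):  # one output column per sparse row of W
        for p in range(w_indptr[row], w_indptr[row + 1]):
            y[:, row] += w_data[p] * x_np[:, w_indices[p]]
    return y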
def test_sparse_dense_csr_reverse():
M, N, K, density = 1, 17, 47, 0.2
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
W_np = W_sp_np.todense()
Y_np = W_np.dot(X_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_transpose_csr():
N, density = 1023, 0.3
X_sp = sp.random(N, N, density=density, format="csr", dtype="float32")
X_sp_T = X_sp.transpose()
X_np_T = X_sp_T.todense()
X_data = te.placeholder(shape=X_sp.data.shape, dtype=str(X_sp.data.dtype))
X_indices = te.placeholder(shape=X_sp.indices.shape, dtype=str(X_sp.indices.dtype))
X_indptr = te.placeholder(shape=X_sp.indptr.shape, dtype=str(X_sp.indptr.dtype))
X_T_data, X_T_indices, X_T_indptr = topi.nn.sparse_transpose(X_data, X_indices, X_indptr)
s = te.create_schedule([X_T_data.op, X_T_indices.op, X_T_indptr.op])
func = tvm.build(s, [X_data, X_indices, X_indptr, X_T_data, X_T_indices, X_T_indptr])
X_T_data_tvm = tvm.nd.array(np.zeros(X_sp_T.data.shape, dtype=X_sp_T.data.dtype))
X_T_indices_tvm = tvm.nd.array(np.zeros(X_sp_T.indices.shape, dtype=X_sp_T.indices.dtype))
X_T_indptr_tvm = tvm.nd.array(np.zeros(X_sp_T.indptr.shape, dtype=X_sp_T.indptr.dtype))
func(
tvm.nd.array(X_sp.data),
tvm.nd.array(X_sp.indices),
tvm.nd.array(X_sp.indptr),
X_T_data_tvm,
X_T_indices_tvm,
X_T_indptr_tvm,
)
X_T_out = sp.csr_matrix(
(X_T_data_tvm.numpy(), X_T_indices_tvm.numpy(), X_T_indptr_tvm.numpy()), shape=(N, N)
).todense()
tvm.testing.assert_allclose(X_np_T, X_T_out, atol=1e-4, rtol=1e-4)
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype):
import itertools
Y = np.zeros((M, N), dtype=dtype)
assert M % BS_R == 0
assert N % BS_C == 0
nnz = int(density * M * N)
num_blocks = int(nnz / (BS_R * BS_C)) + 1
candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
chosen_blocks = candidate_blocks[
np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
]
for i in range(len(chosen_blocks)):
r, c = chosen_blocks[i]
Y[r : r + BS_R, c : c + BS_C] = np.random.randn(BS_R, BS_C)
s = sp.bsr_matrix(Y, blocksize=(BS_R, BS_C))
assert s.data.shape == (num_blocks, BS_R, BS_C)
assert s.indices.shape == (num_blocks,)
assert s.indptr.shape == (M // BS_R + 1,)
return s
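# Illustrative sketch (not part of the original test): the three arrays a BSR matrix carries,
# matching the shape assertions in random_bsr_matrix.  `data` stores one dense block per
# non-zero block, `indices` its block-column, and `indptr` the per-block-row offsets.
def _bsr_layout_example():
    dense = np.array(
        [[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 6], [0, 0, 7, 8]], dtype="float32"
    )
    bsr = sp.bsr_matrix(dense, blocksize=(2, 2))
    return bsr.data.shape, bsr.indices, bsr.indptr  # (2, 2, 2), [0, 1], [0, 1, 2]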
def verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu, device, target):
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np @ W_np.T
if use_relu:
Y_np = np.maximum(Y_np, 0.0)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
fcompute, fschedule = tvm.topi.testing.dispatch(target, _sparse_dense_implement)
with tvm.target.Target(target):
Y = fcompute(X, W_data, W_indices, W_indptr)
if use_relu:
Y = topi.nn.relu(Y)
s = fschedule([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=device)
func(
tvm.nd.array(X_np, device=device),
tvm.nd.array(W_sp_np.data, device=device),
tvm.nd.array(W_sp_np.indices, device=device),
tvm.nd.array(W_sp_np.indptr, device=device),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_sparse_dense_bsr_relu(dev, target):
M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, True, dev, target)
verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, False, dev, target)
def test_sparse_dense_bsr_reverse():
M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = W_np.dot(X_np.T)
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
s = te.create_schedule(Y.op)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
func(
tvm.nd.array(X_np),
tvm.nd.array(W_sp_np.data),
tvm.nd.array(W_sp_np.indices),
tvm.nd.array(W_sp_np.indptr),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_sparse_dense_bsr_randomized():
for _ in range(20):
BS_R = np.random.randint(1, 16)
BS_C = np.random.randint(1, 16)
M = np.random.randint(1, 32)
N = int(np.random.randint(1, 16) * BS_R)
K = int(np.random.randint(1, 16) * BS_C)
density = np.clip(np.random.random(), 0.1, 0.9)
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = np.array(X_np.dot(W_np.T))
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
with tvm.target.Target(device):
Y = fcompute(X, W_data, W_indices, W_indptr)
s = fschedule([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
func(
tvm.nd.array(X_np, device=dev),
tvm.nd.array(W_sp_np.data, device=dev),
tvm.nd.array(W_sp_np.indices, device=dev),
tvm.nd.array(W_sp_np.indptr, device=dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_gpu(target, dev):
M = 128
N = 1280
K = 128
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, 1, 1, density=0.01, dtype="float32")
W_sp_np_padded = tvm.topi.cuda.pad_sparse_matrix(W_sp_np, 32)
W_np = W_sp_np.todense()
Y_np = X_np @ W_sp_np.T
W_data = te.placeholder(shape=W_sp_np_padded.data.shape, dtype=str(W_sp_np_padded.data.dtype))
W_indices = te.placeholder(
shape=W_sp_np_padded.indices.shape, dtype=str(W_sp_np_padded.indices.dtype)
)
W_indptr = te.placeholder(
shape=W_sp_np_padded.indptr.shape, dtype=str(W_sp_np_padded.indptr.dtype)
)
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
with tvm.target.Target(target):
Y = topi.cuda.sparse_dense_padded(X, W_data, W_indices, W_indptr)
s = topi.cuda.schedule_sparse_dense_padded([Y])
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
func(
tvm.nd.array(X_np, device=dev),
tvm.nd.array(W_sp_np_padded.data, device=dev),
tvm.nd.array(W_sp_np_padded.indices, device=dev),
tvm.nd.array(W_sp_np_padded.indptr, device=dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_alter_op(target, dev):
with tvm.target.Target(target):
M = 128
N = 16
K = 128
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, 2, 2, density=0.01, dtype="float32")
x = relay.var("x", relay.TensorType(X_np.shape, "float32"))
mult = relay.op.nn.sparse_dense(
x,
(
relay.Constant(tvm.nd.array(W_sp_np.data)),
relay.Constant(tvm.nd.array(W_sp_np.indices)),
relay.Constant(tvm.nd.array(W_sp_np.indptr)),
),
)
f = relay.Function([x], mult)
f_ = relay.transform.InferType()(tvm.IRModule.from_expr(f))
f_ = relay.transform.AlterOpLayout()(f_)
assert f_["main"].body.op.name == "nn.internal.sparse_dense_padded"
# build with cuda and AlterOpLayout to ensure that sparse_dense_padded is in action
with tvm.transform.PassContext(opt_level=3, required_pass="AlterOpLayout"):
x = relay.build(tvm.IRModule.from_expr(f), target=target)
def test_sparse_add_csr():
for indices_dtype in ["int32", "int64"]:
for data_dtype in ["float32", "float64"]:
M, K, density = 3, 49, 0.2
X_np = np.random.randn(M, K).astype(data_dtype)
Y_sp_np = sp.random(M, K, density=density, format="csr", dtype=data_dtype)
Y_np = Y_sp_np.todense()
Z_np = X_np + Y_np
Y_data = te.placeholder(shape=Y_sp_np.data.shape, dtype=data_dtype)
Y_indices = te.placeholder(shape=Y_sp_np.indices.shape, dtype=indices_dtype)
Y_indptr = te.placeholder(shape=Y_sp_np.indptr.shape, dtype=indices_dtype)
X = te.placeholder(shape=X_np.shape, dtype=data_dtype)
Z = topi.nn.sparse_add(X, Y_data, Y_indices, Y_indptr)
s = te.create_schedule(Z.op)
func = tvm.build(s, [X, Y_data, Y_indices, Y_indptr, Z])
Z_tvm = tvm.nd.array(np.zeros(Z_np.shape, dtype=Z_np.dtype))
func(
tvm.nd.array(X_np.astype(data_dtype)),
tvm.nd.array(Y_sp_np.data.astype(data_dtype)),
tvm.nd.array(Y_sp_np.indices.astype(indices_dtype)),
tvm.nd.array(Y_sp_np.indptr.astype(indices_dtype)),
Z_tvm,
)
tvm.testing.assert_allclose(Z_tvm.numpy(), Z_np, atol=1e-4, rtol=1e-4)
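# Illustrative sketch (not part of the original test): the dense-plus-CSR addition checked
# above, written directly over the (data, indices, indptr) arrays of the sparse operand.
def _csr_add_reference(x_np, y_data, y_indices, y_indptr):
    z = x_np.copy()
    for row in range(y_indptr.shape[0] - 1):
        for p in range(y_indptr[row], y_indptr[row + 1]):
            z[row, y_indices[p]] += y_data[p]
    return z  # equals x_np + Y.todense() for the CSR matrix Y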
def verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, layout):
if layout == "NHWC":
X_np = np.random.randn(M, H, W, K).astype("float32")
elif layout == "NCHW":
X_np = np.random.randn(M, K, H, W).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
W_np = W_sp_np.todense()
if layout == "NHWC":
Y_np = tvm.topi.testing.conv2d_nhwc_python(X_np, np.array(W_np).T.reshape(1, 1, K, N), 1, 0)
elif layout == "NCHW":
Y_np = tvm.topi.testing.conv2d_nchw_python(X_np, np.array(W_np).reshape(N, K, 1, 1), 1, 0)
if BS_C == 1:
W_data = te.placeholder(shape=W_sp_np.data.shape[:-1], dtype=str(W_sp_np.data.dtype))
W_sp_np_data = W_sp_np.data.reshape(W_sp_np.data.shape[0], BS_R)
else:
W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
W_sp_np_data = W_sp_np.data
W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
Y = topi.nn.sparse_conv2d(X, W_data, W_indices, W_indptr, layout)
s = te.create_schedule(Y.op)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype="float32"))
func(
tvm.nd.array(X_np, dev),
tvm.nd.array(W_sp_np_data, dev),
tvm.nd.array(W_sp_np.indices, dev),
tvm.nd.array(W_sp_np.indptr, dev),
Y_tvm,
)
tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4)
check_device("llvm")
def test_sparse_conv2d_bsr():
M, H, W, N, K, BS_R, BS_C, density = 1, 32, 32, 128, 64, 8, 16, 0.9
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, "NHWC")
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, "NCHW")
verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, 1, density, "NHWC")
if __name__ == "__main__":
# test_csrmv()
# test_csrmm()
# test_dense()
# test_sparse_dense_csr()
# test_sparse_dense_bsr_randomized()
# test_sparse_transpose_csr()
    # test_sparse_dense_padded_gpu()
# test_sparse_dense_padded_alter_op()
# test_sparse_dense_csr_reverse()
# test_sparse_dense_bsr_reverse()
# test_sparse_add_csr()
    test_sparse_conv2d_bsr()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for tensor operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def verify_elemwise_sum(num_args, dtype):
shape = (3, 5, 4)
tvm_placeholders = []
for i in range(num_args):
tvm_placeholders.append(te.placeholder(shape, name="data" + str(i), dtype=dtype))
esum = topi.elemwise_sum(tvm_placeholders)
s = te.create_schedule([esum.op])
@memoize("topi.tests.test_topi_elemwise_sum")
def get_ref_data():
np_nd = [np.random.uniform(0, 10, size=shape).astype(dtype) for i in range(num_args)]
return np_nd
np_nd = get_ref_data()
def check_target(target):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
dev = tvm.device(target, 0)
out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
f = tvm.build(s, tvm_placeholders + [esum], target, name="elemwise_sum")
tvm_nd = [tvm.nd.array(nd, dev) for nd in np_nd] + [out]
f(*tvm_nd)
np_out = np.sum(np.array(np_nd), axis=0)
tvm.testing.assert_allclose(out.numpy(), np_out, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
def verify_full(shape, dtype, fill_value):
A = te.placeholder(shape, dtype=dtype, name="A")
B = topi.full_like(A, fill_value=fill_value)
C = topi.full(shape=shape, dtype=dtype, fill_value=fill_value)
s1 = te.create_schedule([B.op])
s2 = te.create_schedule([C.op])
@memoize("topi.tests.test_topi_full")
def get_ref_data():
return np.full(shape, fill_value, dtype)
np_nd = get_ref_data()
def check_target(target):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
dev = tvm.device(target, 0)
out = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
f = tvm.build(s1, [A, B], target, name="full_like")
f(tvm.nd.array(np.zeros(shape, dtype), dev), out)
tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)
f = tvm.build(s2, [C], target, name="full")
f(out)
tvm.testing.assert_allclose(out.numpy(), np_nd, rtol=1e-5)
for target in ["llvm"]:
check_target(target)
def verify_vectorization(n, m, dtype):
def check_targeta(targeta):
if not tvm.testing.device_enabled(targeta):
print("Skip because %s is not enabled" % targeta)
return
if dtype == "float16" and targeta == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
with tvm.target.Target(targeta):
dev = tvm.device(targeta, 0)
A = te.placeholder((n, m), name="A", dtype=dtype)
B = te.compute((n, m), lambda i, j: A[i, j] + tvm.tir.const(1, A.dtype), name="B")
S = tvm.topi.testing.get_elemwise_schedule(targeta)(B)
fun = tvm.build(S, [A, B], targeta)
np_A = tvm.nd.empty((n, m), A.dtype, dev).copyfrom(np.random.uniform(size=(n, m)))
np_B = tvm.nd.empty((n, m), B.dtype, dev)
fun(np_A, np_B)
tvm.testing.assert_allclose(np_B.numpy(), np_A.numpy() + 1, rtol=1e-5)
for targeta in ["cuda"]:
check_targeta(targeta)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorization():
verify_vectorization(128, 64, "float16")
def test_elemwise_sum():
verify_elemwise_sum(1, "float32")
verify_elemwise_sum(5, "float32")
verify_elemwise_sum(4, "int32")
def test_full():
verify_full((3, 4, 5), "float32", 3.14)
verify_full((10,), "int32", 7)
if __name__ == "__main__":
test_elemwise_sum()
test_full()
test_vectorization()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for broadcasting operators."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.expand_dims(A, axis, num_newaxis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="expand_dims")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = data_npy.reshape(out_shape)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_reinterpret(in_shape, in_dtype, out_dtype, generator):
A = te.placeholder(shape=in_shape, name="A", dtype=in_dtype)
B = topi.reinterpret(A, out_dtype)
def check_device(target, dev):
if in_dtype == "float16" and target == "cuda" and not have_fp16(dev.compute_version):
print("Skip because %s does not have fp16 support" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_elemwise_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reinterpret")
data_npy = generator(in_shape).astype(in_dtype)
out_npy = data_npy.view(B.dtype)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(in_shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
np.testing.assert_equal(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_transpose(in_shape, axes):
A = te.placeholder(shape=in_shape, name="A")
B = topi.transpose(A, axes)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="transpose")
data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
out_npy = data_npy.transpose(axes)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_reshape(src_shape, dst_shape):
A = te.placeholder(shape=src_shape, name="A")
B = topi.reshape(A, dst_shape)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reshape")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.reshape(data_npy, newshape=dst_shape)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_squeeze(src_shape, axis):
A = te.placeholder(shape=src_shape, name="A")
B = topi.squeeze(A, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="squeeze")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.squeeze(data_npy, axis=axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nd_shape = out_npy.shape
out_nd = tvm.nd.empty(out_nd_shape, device=dev, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_concatenate(shapes, axis):
def get_concat_schedule(target):
schedule_map = {
"cpu": topi.x86.schedule_concatenate,
"arm_cpu": topi.arm_cpu.schedule_concatenate,
}
if isinstance(target, str):
target = tvm.target.Target(target)
for key in target.keys:
if key in schedule_map:
return schedule_map[key]
return tvm.topi.testing.get_injective_schedule(target)
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(te.placeholder(shape, name="A" + str(i)))
out_tensor = topi.concatenate(a_tuple=tensor_l, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = get_concat_schedule(target)(out_tensor)
foo = tvm.build(s, tensor_l + [out_tensor], target, name="concatenate")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.concatenate(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_stack(shapes, axis):
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(te.placeholder(shape, name="A" + str(i)))
out_tensor = topi.stack(tensor_l, axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(out_tensor)
foo = tvm.build(s, tensor_l + [out_tensor], target, name="stack")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.stack(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_split(src_shape, indices_or_sections, axis):
A = te.placeholder(shape=src_shape, name="A")
tensor_l = topi.split(A, indices_or_sections, axis=axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(tensor_l)
foo = tvm.build(s, [A] + list(tensor_l), target, name="split")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npys = np.split(data_npy, indices_or_sections, axis=axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nds = [
tvm.nd.empty(out_npy.shape, device=dev, dtype=tensor_l[0].dtype) for out_npy in out_npys
]
foo(*([data_nd] + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys):
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_expand_like(in_shape, out_shape, axis):
A = te.placeholder(shape=in_shape, name="A")
B = te.placeholder(shape=out_shape, name="B")
C = topi.expand_like(A, B, axis)
s = te.create_schedule([C.op])
def check_device(target):
print("Running on target: %s" % target)
dev = tvm.device(target, 0)
f = tvm.build(s, [A, B, C], target, name="expand_like")
input = np.random.uniform(size=in_shape).astype(A.dtype)
tvm_input = tvm.nd.array(input, dev)
odim = len(out_shape)
real_axis = [x if x >= 0 else x + odim for x in axis]
real_axis = sorted(real_axis)
for x in real_axis:
input = np.expand_dims(input, x).astype(A.dtype)
for x in real_axis:
input = np.concatenate([input] * out_shape[x], axis=x).astype(A.dtype)
assert input.shape == out_shape
tvm_shape_like = tvm.nd.array(np.zeros(out_shape).astype(B.dtype), dev)
out = tvm.nd.array(np.zeros(out_shape).astype(A.dtype), dev)
f(tvm_input, tvm_shape_like, out)
tvm.testing.assert_allclose(out.numpy(), input)
for target in ["llvm"]:
check_device(target)
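# Illustrative sketch (not part of the original test): expand_like lifts `input` to
# `out_shape` by inserting the axes listed in `axis` and repeating along them, as the
# reference loop in verify_expand_like does; broadcasting gives the same result.
def _expand_like_example():
    a = np.array([1.0, 2.0, 3.0])             # shape (3,)
    expanded = np.expand_dims(a, 0)            # shape (1, 3), axis = [0]
    return np.broadcast_to(expanded, (2, 3))   # each row is [1.0, 2.0, 3.0]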
def verify_flip(in_shape, axis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.flip(A, axis) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="reverse")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.flip(x_np, axis) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "cuda", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
@tvm.testing.uses_gpu
def test_reverse_sequence():
def verify_reverse_sequence(in_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths = np.array(seq_lengths).astype("int32")
A = te.placeholder(shape=in_data.shape, name="A", dtype=str(in_data.dtype))
B = te.placeholder(shape=seq_lengths.shape, name="B", dtype=str(seq_lengths.dtype))
C = topi.reverse_sequence(A, B, seq_axis, batch_axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
foo = tvm.build(s, [A, B, C], target, name="reverse_sequence")
data_nd = tvm.nd.array(in_data, dev)
seq_lengths_nd = tvm.nd.array(seq_lengths, dev)
out_nd = tvm.nd.empty(in_data.shape, device=dev, dtype=A.dtype)
foo(data_nd, seq_lengths_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), ref_res)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
],
[
[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]],
],
]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]],
],
[
[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]],
],
]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert (
"For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
)
def verify_take(src_shape, indices_src, axis=None, mode="clip", indices_dtype="int32"):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
if axis is None:
out_tensor = topi.take(a=A, indices=indices, mode=mode)
else:
out_tensor = topi.take(a=A, indices=indices, axis=axis, mode=mode)
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
foo = tvm.build(s, [A] + [indices] + [out_tensor], target, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
if axis is None:
np_mode = "raise" if mode == "fast" else mode
out_npys = np.take(data_npy, indices_src, mode=np_mode)
else:
np_mode = "raise" if mode == "fast" else mode
out_npys = np.take(data_npy, indices_src, axis=axis, mode=np_mode)
data_nd = tvm.nd.array(data_npy, dev)
indices_nd = tvm.nd.array(indices_src, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype)
foo(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_strided_slice(in_shape, begin, end, strides=None, axes=None):
A = te.placeholder(shape=in_shape, name="A")
strides = [1, 1, 1] if strides is None else strides
if axes:
strides = [strides[axis] for axis in axes]
B = topi.strided_slice(A, begin, end, strides, axes) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="stride_slice")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides, axes=axes) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_dynamic_strided_slice(in_shape, begin, end, strides=None):
A = te.placeholder(shape=in_shape, name="A")
Begin = te.placeholder(shape=[len(in_shape)], name="begin", dtype="int64")
End = te.placeholder(shape=[len(in_shape)], name="end", dtype="int64")
Strides = te.placeholder(shape=[len(in_shape)], name="strides", dtype="int64")
strides = [1, 1, 1] if strides is None else strides
B = topi.strided_slice(A, Begin, End, Strides) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
foo = tvm.build(s, [A, Begin, End, Strides, B], target, name="stride_slice")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
begin_nd = tvm.nd.array(np.array(begin).astype("int64"), dev)
end_nd = tvm.nd.array(np.array(end).astype("int64"), dev)
strides_nd = tvm.nd.array(np.array(strides).astype("int64"), dev)
foo(data_nd, begin_nd, end_nd, strides_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_strided_set(in_shape, v_shape, begin, end, strides=None):
A = te.placeholder(shape=in_shape, name="A")
V = te.placeholder(shape=v_shape, name="V")
b = te.placeholder(shape=(len(begin),), name="b", dtype="int32")
e = te.placeholder(shape=(len(end),), name="e", dtype="int32")
if strides is not None:
st = te.placeholder(shape=(len(strides),), name="st", dtype="int32")
B = topi.strided_set(A, V, b, e, st) + 1
else:
B = topi.strided_set(A, V, b, e) + 1
def check_device(target):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
if strides is not None:
foo = tvm.build(s, [A, V, b, e, st, B], target, name="stride_set")
s_np = np.asarray(strides).astype("int32")
s_nd = tvm.nd.array(s_np, dev)
else:
foo = tvm.build(s, [A, V, b, e, B], target, name="stride_set")
x_np = np.random.uniform(size=in_shape).astype(A.dtype)
v_np = np.random.uniform(size=v_shape).astype(V.dtype)
b_np = np.asarray(begin).astype("int32")
e_np = np.asarray(end).astype("int32")
out_npy = tvm.topi.testing.strided_set_python(x_np, v_np, begin, end, strides) + 1
data_nd = tvm.nd.array(x_np, dev)
v_nd = tvm.nd.array(v_np, dev)
b_nd = tvm.nd.array(b_np, dev)
e_nd = tvm.nd.array(e_np, dev)
out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
if strides is not None:
foo(data_nd, v_nd, b_nd, e_nd, s_nd, out_nd)
else:
foo(data_nd, v_nd, b_nd, e_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
check_device(target)
def verify_gather(data, axis, indices):
data = np.asarray(data)
indices = np.asarray(indices)
var_data = te.placeholder(shape=data.shape, dtype=data.dtype.name, name="data")
var_indices = te.placeholder(shape=indices.shape, dtype=indices.dtype.name, name="indices")
out_tensor = topi.gather(var_data, axis, var_indices)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
func = tvm.build(s, [var_data, var_indices, out_tensor], target, name="gather")
out_npys = tvm.topi.testing.gather_python(data, axis, indices)
data_nd = tvm.nd.array(data, dev)
indices_nd = tvm.nd.array(indices, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=data.dtype.name)
func(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_gather_nd(src_shape, indices_src, indices_dtype):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
out_tensor = topi.gather_nd(a=A, indices=indices)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
func = tvm.build(s, [A, indices, out_tensor], target, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
out_npys = tvm.topi.testing.gather_nd_python(data_npy, indices_src)
data_nd = tvm.nd.array(data_npy, dev)
indices_nd = tvm.nd.array(indices_src, dev)
out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype)
func(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_arange(start, stop, step):
if start is None and step is None:
A = topi.arange(stop)
a_np = np.arange(stop)
elif start is None:
A = topi.arange(stop, step=step)
a_np = np.arange(stop, step=step)
elif step is None:
A = topi.arange(start, stop)
a_np = np.arange(start, stop)
else:
A = topi.arange(start, stop, step)
a_np = np.arange(start, stop, step)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(A)
f = tvm.build(s, [A], target, name="arange")
a_nd = tvm.nd.empty(a_np.shape, dtype="float32", device=dev)
f(a_nd)
tvm.testing.assert_allclose(a_nd.numpy(), a_np)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_repeat(in_shape, repeats, axis):
A = te.placeholder(shape=in_shape, name="A")
B = topi.repeat(A, repeats, axis)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="repeat")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.repeat(data_npy, repeats, axis)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_tile(in_shape, reps):
A = te.placeholder(shape=in_shape, name="A")
B = topi.tile(A, reps)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(B)
foo = tvm.build(s, [A, B], target, name="tile")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = np.tile(data_npy, reps)
data_nd = tvm.nd.array(data_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_where(in_shape):
Cond = te.placeholder(shape=in_shape, name="cond")
dtype = Cond.dtype
A = te.placeholder(shape=in_shape, name="A")
B = te.placeholder(shape=in_shape, name="B")
C = topi.where(Cond, A, B)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_broadcast_schedule(target)(C)
f = tvm.build(s, [Cond, A, B, C], target, name="where")
cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
x_npy = np.random.uniform(size=in_shape).astype(dtype)
y_npy = np.random.uniform(size=in_shape).astype(dtype)
out_npy = np.where(cond_npy, x_npy, y_npy)
cond_nd = tvm.nd.array(cond_npy, dev)
x_nd = tvm.nd.array(x_npy, dev)
y_nd = tvm.nd.array(y_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
f(cond_nd, x_nd, y_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype):
indices = te.placeholder(shape=indices_shape, name="indices", dtype="int32")
on_value_const = tvm.tir.const(on_value, dtype)
off_value_const = tvm.tir.const(off_value, dtype)
one_hot_result = topi.transform.one_hot(
indices, on_value_const, off_value_const, depth, axis, dtype
)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(one_hot_result)
fn = tvm.build(s, [indices, one_hot_result], target, name="one_hot")
indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
indices_nd = tvm.nd.array(indices_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), dev)
fn(indices_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_unravel_index(indices, shape, dtype, indice_dtype="int64"):
x_data = np.array(indices).astype(indice_dtype)
y_data = np.array(shape).astype(dtype)
if len(x_data.shape) == 1:
dst_shape = [y_data.shape[0], x_data.shape[0]]
else:
dst_shape = [y_data.shape[0]]
X = te.placeholder(shape=x_data.shape, dtype=indice_dtype, name="X")
Y = te.placeholder(shape=y_data.shape, dtype=dtype, name="Y")
Z = topi.unravel_index(X, Y)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(Z)
foo = tvm.build(s, [X, Y, Z], target, name="unravel_index")
out_npy = np.unravel_index(x_data, y_data)
datax_nd = tvm.nd.array(x_data, dev)
datay_nd = tvm.nd.array(y_data, dev)
out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=Z.dtype)
foo(datax_nd, datay_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, expected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
output_shape_data = np.array(output_shape)
default_value_data = np.array(default_value)
A = te.placeholder(
shape=sparse_indices_data.shape, name="sparse_indices", dtype=str(sparse_indices_data.dtype)
)
B = te.placeholder(
shape=sparse_values_data.shape, name="sparse_values", dtype=str(sparse_values_data.dtype)
)
if default_value is None:
args = [A, B]
D = topi.sparse_to_dense(A, output_shape, B)
else:
C = te.placeholder(shape=(), name="default_value", dtype=str(default_value_data.dtype))
args = [A, B, C]
D = topi.sparse_to_dense(A, output_shape, B, C)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(D)
foo = tvm.build(s, args + [D], target, name="sparse_to_dense")
sparse_indices_nd = tvm.nd.array(sparse_indices_data, dev)
sparse_values_nd = tvm.nd.array(sparse_values_data, dev)
out_nd = tvm.nd.empty(output_shape_data, device=dev, dtype=B.dtype)
if default_value is None:
foo(sparse_indices_nd, sparse_values_nd, out_nd)
else:
default_value_nd = tvm.nd.array(default_value_data, dev)
foo(sparse_indices_nd, sparse_values_nd, default_value_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), np.array(expected))
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_matrix_set_diag(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
input = te.placeholder(shape=input_shape, name="input", dtype=dtype)
diagonal = te.placeholder(shape=diagonal_shape, name="diagonal", dtype=dtype)
matrix_set_diag_result = topi.transform.matrix_set_diag(input, diagonal, k, align)
def check_device(target, dev):
dev = tvm.device(target, 0)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(matrix_set_diag_result)
fn = tvm.build(s, [input, diagonal, matrix_set_diag_result], target, name="matrix_set_diag")
input_npy = np.random.randint(-100, 100, size=input_shape).astype(dtype)
diagonal_npy = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
out_npy = tvm.topi.testing.matrix_set_diag(input_npy, diagonal_npy, k, align)
input_nd = tvm.nd.array(input_npy, dev)
diagonal_nd = tvm.nd.array(diagonal_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(matrix_set_diag_result.dtype), dev)
fn(input_nd, diagonal_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_adv_index(data_shape, index_shapes, indice_dtype="int64"):
dtype = "float32"
data = te.placeholder(shape=data_shape, name="data", dtype=dtype)
indices = []
np_data = np.random.uniform(size=data_shape).astype(dtype)
np_indices = []
for i, index_shape in enumerate(index_shapes):
limit = data_shape[i]
np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype(indice_dtype))
indices.append(
te.placeholder(shape=index_shape, name="index_{}".format(i), dtype=indice_dtype)
)
np_out = np_data[tuple(np_indices)]
out = topi.adv_index(data, indices)
def check_device(target, dev):
dev = tvm.device(target, 0)
if not dev.exist:
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(out)
func = tvm.build(s, [data] + indices + [out], target, name="adv_index")
nd_list = [tvm.nd.array(np_data, dev)]
for np_index in np_indices:
nd_list.append(tvm.nd.array(np_index, dev))
nd_list.append(tvm.nd.empty(out.shape, device=dev, dtype=data.dtype))
func(*nd_list)
tvm.testing.assert_allclose(nd_list[-1].numpy(), np.array(np_out))
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
def verify_trilu(input_shape, upper, k=0):
x = te.placeholder(shape=input_shape, name="x", dtype="float32")
k_tir = tvm.tir.const(k, dtype="int32")
trilu_result = topi.transform.trilu(x, k_tir, upper)
def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(trilu_result)
fn = tvm.build(s, [x, trilu_result], target, name="trilu")
x_npy = np.random.normal(size=input_shape).astype(x.dtype)
if upper:
out_npy = np.triu(x_npy, k)
else:
out_npy = np.tril(x_npy, k)
x_nd = tvm.nd.array(x_npy, dev)
out_nd = tvm.nd.array(np.empty(x_npy.shape).astype(trilu_result.dtype), dev)
fn(x_nd, out_nd)
out_topi = out_nd.numpy()
tvm.testing.assert_allclose(out_topi, out_npy)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
@tvm.testing.uses_gpu
def test_strided_slice():
verify_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_strided_slice((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
verify_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
verify_strided_slice((3, 4, 3), [0, 0, 0], [None, None, None])
verify_strided_slice((3, 4, 3), [0], [2], None, axes=[1])
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
verify_dynamic_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_dynamic_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
verify_dynamic_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
@tvm.testing.uses_gpu
def test_strided_set():
verify_strided_set((3, 4, 3), (3, 2, 2), [0, 3, 0], [4, 1, 4], [1, -1, 2])
verify_strided_set((3, 4, 3), (3, 1, 2), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify_strided_set((3, 4, 3), (1, 3, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
verify_strided_set((3, 4, 3), (1, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify_strided_set((3, 4, 3), (1, 2, 2), [1, 0, 0], [2, 2, 3], [1, 1, 2])
verify_strided_set((3, 4, 3), (1, 2, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify_strided_set((3, 4, 3), (1, 2, 3), [1, 1, 0], [2, 3, 3], [1])
verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1, 0], [4, 4, 3])
verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1], [4, 4, 3])
@tvm.testing.uses_gpu
def test_expand_dims():
verify_expand_dims((3, 10), (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_reinterpret():
verify_reinterpret((1000,), "float32", "int32", lambda shape: np.random.randn(*shape) * 1000)
verify_reinterpret((1000,), "float16", "int16", lambda shape: np.random.randn(*shape) * 100)
verify_reinterpret(
(1000,), "int16", "uint16", lambda shape: np.random.randint(-1000, 1000, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)
@tvm.testing.uses_gpu
def test_transpose():
verify_transpose((3, 10, 2), (1, 0, 2))
verify_transpose((3, 10, 5), (2, 0, 1))
verify_transpose((3, 10), None)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_transpose_unfused_schedule(target, dev):
shape = (100, tvm.target.Target(target).thread_warp_size + 3)
x = relay.var("x", relay.TensorType(shape, "float32"))
f = relay.transpose(x)
r = np.random.rand(*shape)
func = relay.create_executor(
kind="graph", mod=tvm.IRModule.from_expr(relay.Function([x], f)), device=dev, target=target
).evaluate()
tvm.testing.assert_allclose(func(r).numpy(), np.transpose(r))
# We want to make sure the schedule does not fire here, but there is no way of
# inspecting which schedules were used.
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(shape, "float32"))
f = relay.transpose(x + y)
func = relay.create_executor(
kind="graph",
mod=tvm.IRModule.from_expr(relay.Function([x, y], f)),
device=dev,
target=target,
).evaluate()
tvm.testing.assert_allclose(func(r, r).numpy(), np.transpose(r + r))
@tvm.testing.uses_gpu
def test_reshape():
verify_reshape((1, 2, 3, 4), (2, 3, 4))
verify_reshape((4, 2, 3, 4), (2, 4, 12))
verify_reshape((4, 2, 3, 4), (2, 48))
verify_reshape((16,), (2, 2, 2, 2))
verify_reshape((4, 0), (2, 0, 2))
@tvm.testing.uses_gpu
def test_where():
verify_where(())
verify_where((1, 2, 3, 4))
@tvm.testing.uses_gpu
def test_squeeze():
verify_squeeze((1, 2, 3, 4), 0)
verify_squeeze((1, 2, 1, 4), None)
verify_squeeze((1, 1, 1, 4), (1, 2))
verify_squeeze((1, 1, 1, 1), None)
verify_squeeze((1, 1, 1, 1), ())
# a special case to trigger inline let expression
A = te.placeholder((2,), "float32", "A")
E = topi.squeeze(A)
C = te.compute((1,), lambda i: E[(2 * A[0] - 1).astype("int32")])
for target in ["llvm", "cuda", "opencl"]:
dev = tvm.device(target, 0)
if tvm.testing.device_enabled(target):
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
func = tvm.build(s, [A, C])
a = tvm.nd.array(np.array((1, 2)).astype("float32"), device=dev)
c = tvm.nd.empty((1,), dtype="float32", device=dev)
func(a, c)
assert c.numpy()[0] == 2
@tvm.testing.uses_gpu
def test_concatenate():
verify_concatenate([(2,), (2,), (2,)], -1)
verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
verify_concatenate([(5, 6, 7, 3), (16, 6, 7, 3), (12, 6, 7, 3), (8, 6, 7, 3), (2, 6, 7, 3)], 0)
verify_concatenate([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)
@tvm.testing.uses_gpu
def test_stack():
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
@tvm.testing.uses_gpu
def test_split():
verify_split((2, 12, 3), 3, 1)
verify_split((2, 12, 3), [2, 4], 1)
verify_split((10, 12, 24), [5, 7, 9], -1)
@tvm.testing.uses_gpu
def test_flip():
verify_flip((3, 4, 3), 1)
verify_flip((3, 4, 3), 0)
verify_flip((3, 4, 3), 2)
verify_flip((3, 4, 3), -1)
verify_flip((3, 4, 3), -3)
verify_flip((3, 4, 3), -2)
@tvm.testing.requires_llvm
def test_expand_like():
verify_expand_like((3,), (2, 3), [0])
verify_expand_like((2,), (2, 3), [1])
verify_expand_like((3, 4), (3, 5, 4), [1])
verify_expand_like((5, 7), (5, 6, 7, 8), [1, 3])
@tvm.testing.uses_gpu
def test_take():
verify_take((4,), [1])
verify_take((4,), [[0, 1, 2, 3]])
verify_take((3, 3, 3), [[11, 25]])
verify_take((4,), [[0, 1], [2, 3]])
verify_take((4,), [1], 0)
verify_take((2, 2), [[[1, 0], [0, 1]]], 0)
verify_take((2, 2), [[[1, 0], [0, 1]]], 1)
verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)
verify_take((3, 4), [-5, 20])
verify_take((3, 4), [-5, 20], mode="wrap")
verify_take((3, 4), [-1, 2], axis=0)
verify_take((3, 4), [-1, 2], axis=0, mode="wrap")
verify_take((3, 4), [-1, 2], axis=1)
verify_take((3, 4), [-1, 2], axis=1, mode="wrap")
verify_take((3, 3, 3), [[11, 25]], mode="fast")
verify_take((3, 4), [0, 2], axis=0, mode="fast")
verify_take((3, 4), [0, 2], axis=1, mode="fast")
verify_take((3, 4), [1, 2], axis=1, indices_dtype="uint32")
verify_take((3, 4), [1, 2], axis=1, mode="wrap", indices_dtype="uint16")
verify_take((3, 3, 3), [[11, 20]], mode="fast", indices_dtype="uint8")
@tvm.testing.uses_gpu
def test_gather():
verify_gather([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]])
verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(1, 7, 5)))
verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(4, 7, 5)))
verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 2)))
verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 10)))
@tvm.testing.uses_gpu
def test_gather_nd():
for indices_dtype in ["int32", "float32", "uint8"]:
verify_gather_nd((4,), [[1.8]], indices_dtype)
verify_gather_nd((4,), [[1, 3, 2]], indices_dtype)
verify_gather_nd((2, 3), [[1]], indices_dtype)
verify_gather_nd((2, 3), [[1], [0]], indices_dtype)
verify_gather_nd((2, 3), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4), [[1, 0], [0, 2], [3, 1]], indices_dtype)
verify_gather_nd(
(2, 3, 4), [[[1, 0], [0, 1]], [[0, 2], [1, 2]], [[3, 1], [0, 2]]], indices_dtype
)
verify_gather_nd((2, 3, 4, 5), [[1, 0], [0, 2]], indices_dtype)
verify_gather_nd((2, 3, 4, 5), [[1, 0], [2, 1], [3, 2], [4, 2]], indices_dtype)
@tvm.testing.uses_gpu
def test_arange():
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
verify_arange(20, 1, -1.5)
@tvm.testing.uses_gpu
def test_repeat():
verify_repeat((2,), 1, 0)
verify_repeat((3, 2), 2, 0)
verify_repeat((3, 2, 4), 3, 1)
verify_repeat((1, 3, 2, 4), 4, -1)
@tvm.testing.uses_gpu
def test_tile():
verify_tile((3, 2), (2, 3))
verify_tile((3, 2, 5), (2,))
verify_tile((3,), (2, 3, 3))
verify_tile((4, 0), (5,))
@tvm.testing.uses_gpu
def test_layout_transform():
in_shape = (1, 32, 8, 8)
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.layout_transform(A, "NCHW", "NCHW16c")
input = np.random.uniform(size=in_shape).astype(A.dtype)
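# Build the NCHW16c reference by hand: NCHW -> NHWC, split the 32 channels into
# (2, 16), then reorder the axes to (N, C_outer, H, W, c_inner).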
output = np.transpose(input, axes=(0, 2, 3, 1))
output = np.reshape(output, newshape=(1, 8, 8, 2, 16))
output = np.transpose(output, axes=(0, 3, 1, 2, 4))
def check_device(target, dev):
tvm_input = tvm.nd.array(input, dev)
tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=B.dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="layout_transform")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_shape():
in_shape = (8, 7, 13)
dtype = "int32"
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.shape(A, dtype)
input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.asarray(in_shape).astype(dtype)
def check_device(target, dev):
tvm_input = tvm.nd.array(input, dev)
tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="shape")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_sequence_mask():
for in_shape in (5, 10), (3, 4, 5, 4):
for axis in [0, 1]:
for mask_value in [0.0, 1.0]:
max_length = in_shape[axis]
batch_size = in_shape[1 - axis]
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = te.placeholder(shape=(batch_size,), dtype="int32", name="B")
C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
def check_device(target, dev):
tvm_A = tvm.nd.array(A_data, dev)
tvm_B = tvm.nd.array(B_data, dev)
tvm_C = tvm.nd.empty(in_shape, device=dev, dtype="float32")
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(C)
f = tvm.build(s, [A, B, C], target, name="SequenceMask")
f(tvm_A, tvm_B, tvm_C)
tvm.testing.assert_allclose(tvm_C.numpy(), C_gt_data)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_ndarray_size():
in_shape = (5, 11, 7)
dtype = "int32"
A = te.placeholder(shape=in_shape, dtype="float32", name="A")
B = topi.ndarray_size(A, dtype)
input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.asarray(np.size(input)).astype(dtype)
def check_device(target, dev):
tvm_input = tvm.nd.array(input, device=dev)
tvm_output = tvm.nd.empty((), device=dev, dtype=B.dtype)
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
f = tvm.build(s, [A, B], target, name="ndarray_size")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.numpy(), output)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_where_fusion():
"""integration test that where and zeros should be properly inlined"""
def check_device(target, dev):
with tvm.target.Target(target):
print("Running on target: %s" % target)
conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(target)
data = te.placeholder((2, 1, 2, 4), "int8", "data")
w = te.placeholder((3, 1, 2, 2), "int8", "w")
conv1 = conv2d_compute(data, w, 1, 0, 1, "int32")
zeros = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(0, dtype="int32"))
gt = topi.greater_equal(conv1, zeros)
one = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(1, dtype="int32"))
two = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(2, dtype="int32"))
where = topi.where(gt, one, two)
add = topi.add(conv1, where)
outs = [add]
s = conv2d_schedule(outs)
tvm.build(s, [data, w, add], target=target)
for backend, dev in tvm.testing.enabled_targets():
check_device(backend, dev)
@tvm.testing.uses_gpu
def test_one_hot():
verify_one_hot((3,), 3, 1, 0, -1, "int32")
verify_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
verify_one_hot((2, 2), 5, 2, -2, 0, "int32")
verify_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
verify_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
verify_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_unravel_index():
for dtype in ["int32", "int64"]:
for indice_dtype in ["int64", "uint8", "uint16", "uint32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype, indice_dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype, indice_dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype, indice_dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype, indice_dtype)
@tvm.testing.uses_gpu
def test_sparse_to_dense():
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
) # nXd
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
) # nXd
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
) # floats
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0]) # default value not specified
# negative test cases
# sparse indices should be ints
# verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_values should be 0d or 1d only
# verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_indices should not be > 2d tensor
# verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
@tvm.testing.uses_gpu
def test_matrix_set_diag():
for dtype in ["float32", "int32"]:
verify_matrix_set_diag((2, 2), (2,), dtype)
verify_matrix_set_diag((4, 3, 3), (4, 3), dtype)
verify_matrix_set_diag((2, 3, 4), (2, 3), dtype, 1)
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_RIGHT")
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_LEFT")
verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "RIGHT_RIGHT")
@tvm.testing.uses_gpu
def test_adv_index():
for indice_dtype in ["int32", "int64", "uint8", "uint16", "uint32"]:
verify_adv_index((3, 4, 5), [(2,), (2,), (1,)], indice_dtype=indice_dtype)
verify_adv_index((10, 15, 5), [(4, 1), (1, 7)], indice_dtype=indice_dtype)
verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)], indice_dtype=indice_dtype)
@tvm.testing.uses_gpu
def test_trilu():
# Test upper and lower triangle
verify_trilu((3, 3), True, 0)
verify_trilu((3, 3), False, 0)
# Test larger matrices with offset.
verify_trilu((6, 6), True, 1)
verify_trilu((6, 6), False, 2)
verify_trilu((6, 6), False, -2)
# Test batch size
verify_trilu((8, 6, 6), False, -2)
if __name__ == "__main__":
test_strided_slice()
test_concatenate()
test_stack()
test_transpose()
test_expand_dims()
test_reshape()
test_where()
test_squeeze()
test_split()
test_flip()
test_expand_like()
test_take()
test_gather_nd()
test_arange()
test_layout_transform()
test_repeat()
test_tile()
test_shape()
test_sequence_mask()
test_ndarray_size()
test_where_fusion()
test_one_hot()
test_unravel_index()
test_sparse_to_dense()
test_matrix_set_diag()
test_adv_index()
test_trilu()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
in_dtype = tvm.testing.parameter("int32", "int64")
is_sorted = tvm.testing.parameter(True, False, ids=["sorted", "unsorted"])
with_counts = tvm.testing.parameter(True, False, ids=["with_counts", "no_counts"])
arr_size, maxval = tvm.testing.parameters((1, 100), (10, 10), (10000, 100))
@tvm.testing.parametrize_targets
def test_unique(dev, target, in_dtype, is_sorted, with_counts, arr_size, maxval):
def calc_numpy_unique(data, is_sorted=False):
uniq, index, inverse, counts = np.unique(
data, return_index=True, return_inverse=True, return_counts=True
)
num_uniq = np.array([len(uniq)]).astype("int32")
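# np.unique sorts by value; for the unsorted case, re-order the reference
# outputs by first occurrence (argsort of the first-occurrence indices) and
# remap the inverse indices through reverse_order to match that layout.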
if not is_sorted:
order = np.argsort(index)
index = np.sort(index)
reverse_order = np.argsort(order)
uniq = uniq[order].astype(data.dtype)
inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
counts = counts[order].astype("int32")
return [
uniq.astype(data.dtype),
index.astype("int32"),
inverse.astype("int32"),
counts,
num_uniq,
]
data = np.random.randint(0, maxval, size=(arr_size)).astype(in_dtype)
# numpy reference
np_unique, np_indices, np_inverse_indices, np_counts, np_num_unique = calc_numpy_unique(
data, is_sorted
)
num_unique = np_num_unique[0]
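# Map each target kind to its (compute, schedule) pair; tvm.topi.testing.dispatch
# below selects the entry matching the current target.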
implementations = {
"generic": (
lambda x, return_counts: topi.unique(x, is_sorted, return_counts),
topi.generic.schedule_unique,
),
"gpu": (
lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
topi.cuda.schedule_scan,
),
"nvptx": (
lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
topi.cuda.schedule_scan,
),
}
fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
tvm_data = tvm.nd.array(data, device=dev)
tvm_unique = tvm.nd.array(np.zeros(data.shape).astype(data.dtype), device=dev)
tvm_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
tvm_inverse_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
tvm_num_unique = tvm.nd.array(np.zeros([1]).astype("int32"), device=dev)
with tvm.target.Target(target):
te_input = tvm.te.placeholder(shape=data.shape, dtype=str(data.dtype))
outs = fcompute(te_input, with_counts)
s = fschedule(outs)
func = tvm.build(s, [te_input, *outs])
if with_counts:
tvm_counts = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
func(
tvm_data,
tvm_unique,
tvm_indices,
tvm_inverse_indices,
tvm_num_unique,
tvm_counts,
)
else:
func(tvm_data, tvm_unique, tvm_indices, tvm_inverse_indices, tvm_num_unique)
assert tvm_num_unique.numpy()[0] == num_unique
np.testing.assert_allclose(tvm_unique.numpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(tvm_indices.numpy()[:num_unique], np_indices, atol=1e-5, rtol=1e-5)
np.testing.assert_allclose(
tvm_inverse_indices.numpy(), np_inverse_indices, atol=1e-5, rtol=1e-5
)
if with_counts:
np.testing.assert_allclose(tvm_counts.numpy()[:num_unique], np_counts, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_upsampling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for upsampling"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import math
from tvm.topi.utils import nchw_pack_layout
def verify_upsampling(
batch,
in_channel,
in_height,
in_width,
scale_h,
scale_w,
layout="NCHW",
method="nearest_neighbor",
in_batch_block=0,
in_channel_block=0,
):
if layout == "NCHW":
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif nchw_pack_layout(layout):
A = te.placeholder(
(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block), name="A"
)
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_batch_block,
in_channel_block,
)
a_np = np.random.uniform(
size=(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block)
).astype(dtype)
elif layout == "NHWC":
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.nn.upsampling(A, scale_h, scale_w, layout=layout, method=method, align_corners=False)
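# The numpy reference takes the base method name, so "bilinear" is passed as "linear".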
b_np = tvm.topi.testing.resize2d_python(
a_np,
(scale_h, scale_w),
layout,
method[2:] if method[0:2] == "bi" else method,
"asymmetric",
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_int_div_upsampling():
"""Test whether upsampling op is tilable when scale_h and scale_w is integer.
Compute_at cannot work correctly in the original floating-point multiplication.
After using integer division,compute_at can work correctly and reduce the
capacity of cache buffer.
In this test case, scale_h and scale_w are set to integers, the size
of cache buffer should be equal to (h_i/scale_h * w_i/scale_w * c_i).
"""
dtype = "int8"
scale_h = 2
scale_w = 2
x = te.placeholder([1, 32, 64, 64], dtype, "x")
y = topi.nn.upsampling(x, scale_h, scale_w)
func = te.create_prim_func([x, y])
s = tvm.tir.Schedule(func)
block = s.get_block("resize")
cache = s.cache_read(block, 0, "local")
n, c, h, w = s.get_loops(block)
s_factor = 8
c_o, c_i = s.split(c, factors=[None, s_factor])
h_o, h_i = s.split(h, factors=[None, s_factor])
w_o, w_i = s.split(w, factors=[None, s_factor])
s.reorder(n, c_o, h_o, w_o, h_i, w_i, c_i)
s.compute_at(cache, w_o)
wanted_rt = s_factor**3 / (scale_h * scale_w)
def analyze_upsampling_allocate(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
tvm.testing.assert_allclose(stmt.extents[0].value, wanted_rt)
lowered_irmodule = tvm.lower(s.mod["main"])
tvm.tir.stmt_functor.post_order_visit(
lowered_irmodule.functions.items()[0][1].body, analyze_upsampling_allocate
)
@tvm.testing.uses_gpu
def test_upsampling():
# nearest_neighbor - NCHW
verify_upsampling(8, 16, 32, 32, 2.0, 2.0)
verify_upsampling(2, 32, 64, 64, 3.0, 3.0)
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0)
## nearest_neighbor - NHWC
verify_upsampling(8, 16, 32, 32, 2.0, 2.0, layout="NHWC")
verify_upsampling(2, 32, 64, 64, 3.0, 3.0, layout="NHWC")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, layout="NHWC")
# bilinear - NCHW
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, method="bilinear")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, method="bilinear")
# nearest_neighbor - NCHWinic
verify_upsampling(2, 2, 32, 32, in_batch_block=4, in_channel_block=8, scale_h=2.0, scale_w=2.0)
verify_upsampling(2, 2, 64, 64, in_batch_block=1, in_channel_block=16, scale_h=3.0, scale_w=3.0)
verify_upsampling(
1, 4, 22, 32, in_batch_block=1, in_channel_block=16, scale_h=1.954545497894287, scale_w=2.0
)
# bilinear - NCHWinic
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=2.0,
scale_w=2.0,
method="bilinear",
)
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=3.0,
scale_w=3.0,
method="bilinear",
)
verify_upsampling(
2,
4,
22,
32,
in_batch_block=1,
in_channel_block=16,
scale_h=1.954545497894287,
scale_w=2.0,
layout="NCHW1n16c",
method="bilinear",
)
# bilinear - NHWC
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, layout="NHWC", method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
verify_upsampling(1, 64, 22, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
def verify_upsampling3d(
batch,
in_channel,
in_depth,
in_height,
in_width,
scale_d,
scale_h,
scale_w,
layout="NCDHW",
method="nearest_neighbor",
):
if layout == "NCDHW":
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
dtype
)
elif layout == "NDHWC":
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
dtype
)
else:
raise NotImplementedError("Layout not supported {} ".format(layout))
B = topi.nn.upsampling3d(
A,
scale_d,
scale_h,
scale_w,
layout=layout,
method=method,
coordinate_transformation_mode="asymmetric",
)
b_np = tvm.topi.testing.resize3d_python(
a_np,
(scale_d, scale_h, scale_w),
layout,
method[3:] if method[0:3] == "tri" else method,
"asymmetric",
)
def check_target(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
f = tvm.build(s, [A, B], target)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
@tvm.testing.uses_gpu
def test_upsampling3d():
# nearest_neighbor - NCDHW
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0)
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0)
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5)
## nearest_neighbor - NDHWC
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC")
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC")
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC")
# trilinear - NCDHW
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, method="trilinear")
verify_upsampling3d(1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, method="trilinear")
# trilinear - NDHWC
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(
1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC", method="trilinear"
)
if __name__ == "__main__":
test_upsampling()
test_upsampling3d()
test_int_div_upsampling()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for util"""
from tvm import topi
def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape):
dst_shape = topi.utils.get_shape(src_shape, src_layout, dst_layout)
assert dst_shape == expect_shape, "Shape mismatch: expecting %s but got %s" % (
expect_shape,
dst_shape,
)
def test_get_shape():
verify_get_shape((1, 3, 224, 224), "NCHW", "NCHW", (1, 3, 224, 224))
verify_get_shape((1, 3, 224, 224), "NCHW", "NHWC", (1, 224, 224, 3))
verify_get_shape((3, 2, 32, 48, 16), "NCHW16c", "NC16cWH", (3, 2, 16, 48, 32))
verify_get_shape((2, 3, 32, 32, 16, 8), "OIHW16i8o", "HWO8oI16i", (32, 32, 2, 8, 3, 16))
if __name__ == "__main__":
test_get_shape()
| https://github.com/zk-ml/tachikoma |
tests/python/topi/python/test_topi_vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for vision package"""
import math
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.vision import ssd, non_max_suppression, get_valid_counts
_get_valid_counts_implement = {
"generic": (topi.vision.get_valid_counts, topi.generic.schedule_get_valid_counts),
"gpu": (topi.cuda.get_valid_counts, topi.cuda.schedule_get_valid_counts),
}
_nms_implement = {
"generic": (topi.vision.non_max_suppression, topi.generic.schedule_nms),
"gpu": (topi.cuda.non_max_suppression, topi.cuda.schedule_nms),
}
_multibox_prior_implement = {
"generic": (topi.vision.ssd.multibox_prior, topi.generic.schedule_multibox_prior),
"gpu": (topi.cuda.multibox_prior, topi.cuda.schedule_multibox_prior),
}
_multibox_detection_implement = {
"generic": (topi.vision.ssd.multibox_detection, topi.generic.schedule_multibox_detection),
"gpu": (topi.cuda.multibox_detection, topi.cuda.schedule_multibox_detection),
}
_roi_align_implement = {
"generic": (topi.vision.roi_align_nchw, topi.generic.schedule_roi_align),
"cpu": (topi.x86.roi_align_nchw, topi.generic.schedule_roi_align),
"gpu": (topi.vision.roi_align_nchw, topi.cuda.schedule_roi_align),
}
_roi_pool_schedule = {
"generic": topi.generic.schedule_roi_pool,
"gpu": topi.cuda.schedule_roi_pool,
}
_proposal_implement = {
"generic": (topi.vision.rcnn.proposal, topi.generic.schedule_proposal),
"gpu": (topi.cuda.proposal, topi.cuda.schedule_proposal),
}
_all_class_nms_implement = {
"generic": (topi.vision.all_class_non_max_suppression, topi.generic.schedule_nms),
"gpu": (topi.cuda.all_class_non_max_suppression, topi.cuda.schedule_nms),
}
class TestValidCounts:
dshape, score_threshold, id_index, score_index = tvm.testing.parameters(
((1, 1000, 5), 0.5, -1, 0),
((1, 2500, 6), 0, 0, 1),
((1, 2500, 5), -1, -1, 0),
((3, 1000, 6), 0.55, 1, 0),
((16, 500, 5), 0.95, -1, 1),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, dtype, dshape, score_threshold, id_index, score_index):
batch_size, num_anchor, elem_length = dshape
np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
np_out1 = np.zeros(shape=(batch_size,))
np_out2 = np.zeros(shape=dshape).astype(dtype)
np_out3 = np.zeros(shape=(batch_size, num_anchor))
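# Reference: per batch, boxes whose score exceeds score_threshold (and whose id is
# non-negative when id_index >= 0) are compacted to the front; the remaining slots
# are filled with -1, mirroring get_valid_counts.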
for i in range(batch_size):
np_out1[i] = 0
inter_idx = 0
for j in range(num_anchor):
score = np_data[i, j, score_index]
if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
for k in range(elem_length):
np_out2[i, inter_idx, k] = np_data[i, j, k]
np_out1[i] += 1
np_out3[i, inter_idx] = j
inter_idx += 1
if j >= np_out1[i]:
for k in range(elem_length):
np_out2[i, j, k] = -1.0
np_out3[i, j] = -1
return np_data, np_out1, np_out2, np_out3
def test_get_valid_counts(
self, target, dev, ref_data, dtype, dshape, score_threshold, id_index, score_index
):
np_data, np_out1, np_out2, np_out3 = ref_data
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _get_valid_counts_implement)
data = te.placeholder(dshape, name="data", dtype=dtype)
outs = fcompute(data, score_threshold, id_index, score_index)
s = fschedule(outs)
tvm_input_data = tvm.nd.array(np_data, dev)
tvm_out1 = tvm.nd.array(np.zeros(np_out1.shape, dtype="int32"), dev)
tvm_out2 = tvm.nd.array(np.zeros(np_out2.shape, dtype=dtype), dev)
tvm_out3 = tvm.nd.array(np.zeros(np_out3.shape, dtype="int32"), dev)
f = tvm.build(s, [data, outs[0], outs[1], outs[2]], target)
f(tvm_input_data, tvm_out1, tvm_out2, tvm_out3)
tvm.testing.assert_allclose(tvm_out1.numpy(), np_out1, rtol=1e-3)
tvm.testing.assert_allclose(tvm_out2.numpy(), np_out2, rtol=1e-3)
tvm.testing.assert_allclose(tvm_out3.numpy(), np_out3, rtol=1e-3)
def verify_non_max_suppression(
target,
dev,
np_data,
np_valid_count,
np_indices,
np_result,
np_indices_result,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
):
dshape = np_data.shape
batch, num_anchors, _ = dshape
indices_dshape = (batch, num_anchors)
data = te.placeholder(dshape, name="data")
valid_count = te.placeholder((batch,), dtype="int32", name="valid_count")
indices = te.placeholder((batch, num_anchors), dtype="int32", name="indices")
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _nms_implement)
out = fcompute(
data,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start=coord_start,
score_index=score_index,
id_index=id_index,
return_indices=False,
)
indices_out = fcompute(
data,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start=coord_start,
score_index=score_index,
id_index=id_index,
return_indices=True,
)
s = fschedule(out)
indices_s = fschedule(indices_out)
tvm_data = tvm.nd.array(np_data, dev)
tvm_valid_count = tvm.nd.array(np_valid_count, dev)
tvm_indices = tvm.nd.array(np_indices, dev)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
f = tvm.build(s, [data, valid_count, indices, out], target)
f(tvm_data, tvm_valid_count, tvm_indices, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_result, rtol=1e-4)
tvm_indices_out = tvm.nd.array(np.zeros(indices_dshape, dtype="int32"), dev)
f = tvm.build(indices_s, [data, valid_count, indices, indices_out[0]], target)
f(tvm_data, tvm_valid_count, tvm_indices, tvm_indices_out)
tvm.testing.assert_allclose(tvm_indices_out.numpy(), np_indices_result, rtol=1e-4)
def test_non_max_suppression(target, dev):
np_data = np.array(
[
[
[0, 0.8, 1, 20, 25, 45],
[1, 0.7, 30, 60, 50, 80],
[0, 0.4, 4, 21, 19, 40],
[2, 0.9, 35, 61, 52, 79],
[1, 0.5, 100, 60, 70, 110],
]
]
).astype("float32")
np_valid_count = np.array([4]).astype("int32")
np_indices = np.array([[0, 1, 2, 3, 4]]).astype("int32")
max_output_size = -1
np_result = np.array(
[
[
[2, 0.9, 35, 61, 52, 79],
[0, 0.8, 1, 20, 25, 45],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1],
]
]
)
np_indices_result = np.array([[3, 0, -1, -1, -1]])
verify_non_max_suppression(
target,
dev,
np_data,
np_valid_count,
np_indices,
np_result,
np_indices_result,
max_output_size,
0.7,
True,
2,
2,
1,
0,
)
np_data = np.array(
[
[
[0.8, 1, 20, 25, 45],
[0.7, 30, 60, 50, 80],
[0.4, 4, 21, 19, 40],
[0.9, 35, 61, 52, 79],
[0.5, 100, 60, 70, 110],
]
]
).astype("float32")
np_valid_count = np.array([4]).astype("int32")
np_indices = np.array([[0, 1, 2, 3, 4]]).astype("int32")
max_output_size = 2
np_result = np.array(
[
[
[0.9, 35, 61, 52, 79],
[0.8, 1, 20, 25, 45],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
]
]
)
np_indices_result = np.array([[3, 0, -1, -1, -1]])
verify_non_max_suppression(
target,
dev,
np_data,
np_valid_count,
np_indices,
np_result,
np_indices_result,
max_output_size,
0.7,
False,
2,
1,
0,
-1,
)
class TestMultiboxPrior:
dshape, sizes, ratios, steps, offsets, clip = tvm.testing.parameters(
((1, 3, 50, 50), (1,), (1,), (-1, -1), (0.5, 0.5), False),
((1, 3, 224, 224), (0.5, 0.25, 0.1), (1, 2, 0.5), (-1, -1), (0.5, 0.5), False),
((1, 32, 32, 32), (0.5, 0.25), (1, 2), (2, 2), (0.5, 0.5), True),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, dtype, dshape, sizes, ratios, offsets, steps, clip):
in_height = dshape[2]
in_width = dshape[3]
num_sizes = len(sizes)
num_ratios = len(ratios)
size_ratio_concat = sizes + ratios
steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
offset_h = offsets[0]
offset_w = offsets[1]
out_shape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
np_in = np.random.uniform(size=dshape).astype(dtype)
np_out = np.zeros(out_shape).astype(dtype)
for i in range(in_height):
center_h = (i + offset_h) * steps_h
for j in range(in_width):
center_w = (j + offset_w) * steps_w
for k in range(num_sizes + num_ratios - 1):
w = (
size_ratio_concat[k] * in_height / in_width / 2.0
if k < num_sizes
else size_ratio_concat[0]
* in_height
/ in_width
* math.sqrt(size_ratio_concat[k + 1])
/ 2.0
)
h = (
size_ratio_concat[k] / 2.0
if k < num_sizes
else size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
)
count = (
i * in_width * (num_sizes + num_ratios - 1)
+ j * (num_sizes + num_ratios - 1)
+ k
)
np_out[0][count][0] = center_w - w
np_out[0][count][1] = center_h - h
np_out[0][count][2] = center_w + w
np_out[0][count][3] = center_h + h
if clip:
np_out = np.clip(np_out, 0, 1)
return np_in, np_out
def test_multibox_prior(
self, target, dev, dtype, dshape, ref_data, sizes, ratios, steps, offsets, clip
):
np_in, np_out = ref_data
data = te.placeholder(dshape, name="data", dtype=dtype)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _multibox_prior_implement)
with tvm.target.Target(target):
out = fcompute(data, sizes, ratios, steps, offsets, clip)
s = fschedule(out)
tvm_input_data = tvm.nd.array(np_in, dev)
tvm_out = tvm.nd.array(np.zeros(np_out.shape, dtype=dtype), dev)
f = tvm.build(s, [data, out], target)
f(tvm_input_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-3)
def test_multibox_detection(target, dev):
batch_size = 1
num_anchors = 3
num_classes = 3
cls_prob = te.placeholder((batch_size, num_anchors, num_classes), name="cls_prob")
loc_preds = te.placeholder((batch_size, num_anchors * 4), name="loc_preds")
anchors = te.placeholder((1, num_anchors, 4), name="anchors")
# Manually create test case
np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]])
np_loc_preds = np.array([[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]])
np_anchors = np.array([[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]])
expected_np_out = np.array(
[
[
[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
[0, 0.44999999, 1, 1, 1, 1],
[0, 0.30000001, 0, 0, 0.22903419, 0.20435292],
]
]
)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _multibox_detection_implement)
with tvm.target.Target(target):
out = fcompute(cls_prob, loc_preds, anchors)
s = fschedule(out)
tvm_cls_prob = tvm.nd.array(np_cls_prob.astype(cls_prob.dtype), dev)
tvm_loc_preds = tvm.nd.array(np_loc_preds.astype(loc_preds.dtype), dev)
tvm_anchors = tvm.nd.array(np_anchors.astype(anchors.dtype), dev)
tvm_out = tvm.nd.array(np.zeros((batch_size, num_anchors, 6)).astype(out.dtype), dev)
f = tvm.build(s, [cls_prob, loc_preds, anchors, out], target)
f(tvm_cls_prob, tvm_loc_preds, tvm_anchors, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), expected_np_out, rtol=1e-4)
class TestRoiAlign:
(
batch,
in_channel,
in_size,
num_roi,
pooled_size,
spatial_scale,
sample_ratio,
mode,
) = tvm.testing.parameters(
(1, 16, 32, 64, 7, 1.0, -1, 0),
(4, 16, 32, 64, 7, 0.5, 2, 0),
(1, 32, 32, 80, 8, 0.0625, 2, 0),
(1, 32, 500, 80, 8, 0.0625, 2, 0),
(1, 16, 32, 64, 7, 1.0, -1, 1),
(4, 16, 32, 64, 7, 0.5, 2, 1),
(1, 32, 32, 80, 8, 0.0625, 2, 1),
(1, 32, 500, 80, 8, 0.0625, 2, 1),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
self,
batch,
in_channel,
in_size,
num_roi,
pooled_size,
spatial_scale,
sample_ratio,
mode,
):
a_shape = (batch, in_channel, in_size, in_size)
rois_shape = (num_roi, 5)
a_np = np.random.uniform(-1, 1, size=a_shape).astype("float32")
rois_np = np.random.uniform(-1, 1, size=rois_shape).astype("float32") * in_size
rois_np[:, 0] = np.random.randint(low=0, high=batch, size=num_roi)
b_np = tvm.topi.testing.roi_align_nchw_python(
a_np,
rois_np,
pooled_size=pooled_size,
spatial_scale=spatial_scale,
sample_ratio=sample_ratio,
mode=mode,
)
return a_np, rois_np, b_np
def test_roi_align(
self,
target,
dev,
ref_data,
pooled_size,
spatial_scale,
sample_ratio,
mode,
):
# For mode, 0 = avg, 1 = max
a_np, rois_np, b_np = ref_data
a = te.placeholder(a_np.shape)
rois = te.placeholder(rois_np.shape)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _roi_align_implement)
b = fcompute(
a,
rois,
pooled_size=pooled_size,
spatial_scale=spatial_scale,
sample_ratio=sample_ratio,
mode=mode,
)
s = fschedule(b)
tvm_a = tvm.nd.array(a_np, dev)
tvm_rois = tvm.nd.array(rois_np, dev)
tvm_b = tvm.nd.array(np.zeros(b_np.shape, dtype=b.dtype), device=dev)
f = tvm.build(s, [a, rois, b], target)
f(tvm_a, tvm_rois, tvm_b)
tvm_val = tvm_b.numpy()
tvm.testing.assert_allclose(tvm_val, b_np, rtol=1e-3, atol=1e-4)
class TestRoiPool:
batch, in_channel, in_size, num_roi, pooled_size, spatial_scale = tvm.testing.parameters(
(1, 4, 16, 32, 7, 1.0),
(4, 4, 16, 32, 7, 0.5),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, batch, in_channel, in_size, num_roi, pooled_size, spatial_scale):
a_shape = (batch, in_channel, in_size, in_size)
rois_shape = (num_roi, 5)
a_np = np.random.uniform(size=a_shape).astype("float32")
rois_np = np.random.uniform(size=rois_shape).astype("float32") * in_size
rois_np[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype("float32")
b_np = tvm.topi.testing.roi_pool_nchw_python(
a_np, rois_np, pooled_size=pooled_size, spatial_scale=spatial_scale
)
return a_np, rois_np, b_np
def test_roi_pool(self, target, dev, ref_data, pooled_size, spatial_scale):
a_np, rois_np, b_np = ref_data
a = te.placeholder(a_np.shape)
rois = te.placeholder(rois_np.shape)
with tvm.target.Target(target):
b = topi.vision.rcnn.roi_pool_nchw(
a, rois, pooled_size=pooled_size, spatial_scale=spatial_scale
)
s_func = tvm.topi.testing.dispatch(target, _roi_pool_schedule)
s = s_func(b)
tvm_a = tvm.nd.array(a_np, dev)
tvm_rois = tvm.nd.array(rois_np, dev)
tvm_b = tvm.nd.array(np.zeros(b_np.shape, dtype=b.dtype), device=dev)
f = tvm.build(s, [a, rois, b], target)
f(tvm_a, tvm_rois, tvm_b)
tvm.testing.assert_allclose(tvm_b.numpy(), b_np, rtol=1e-4)
def verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
cls_prob = te.placeholder(np_cls_prob.shape)
bbox_pred = te.placeholder(np_bbox_pred.shape)
im_info = te.placeholder(np_im_info.shape)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _proposal_implement)
out = fcompute(cls_prob, bbox_pred, im_info, **attrs)
s = fschedule(out)
f = tvm.build(s, [cls_prob, bbox_pred, im_info, out], target)
tvm_cls_prob = tvm.nd.array(np_cls_prob, device=dev)
tvm_bbox_pred = tvm.nd.array(np_bbox_pred, device=dev)
tvm_im_info = tvm.nd.array(np_im_info, device=dev)
tvm_out = tvm.nd.empty(device=dev, shape=out.shape, dtype=out.dtype)
f(tvm_cls_prob, tvm_bbox_pred, tvm_im_info, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-4)
@tvm.testing.known_failing_targets("vulkan")
def test_proposal(target, dev):
attrs = {
"scales": (0.5,),
"ratios": (0.5,),
"feature_stride": 16,
"iou_loss": False,
"rpn_min_size": 16,
"threshold": 0.7,
"rpn_pre_nms_top_n": 200,
"rpn_post_nms_top_n": 4,
}
np_cls_prob = np.array(
[
[
[[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],
[[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]],
]
],
dtype="float32",
)
np_bbox_pred = np.array(
[
[
[[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],
[[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],
[[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],
[[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],
]
],
dtype="float32",
)
np_im_info = np.array([[48.0, 48.0, 1.0]], dtype="float32")
np_out = np.array(
[
[0.0, 0.0, 2.8451548, 28.38012, 18.154846],
[0.0, 0.0, 15.354933, 41.96971, 41.245064],
[0.0, 18.019852, 1.0538368, 51.98015, 25.946163],
[0.0, 27.320923, -1.266357, 55.0, 24.666357],
],
dtype="float32",
)
verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
np_out = np.array(
[
[0.0, -5.25, -2.5, 21.75, 19.0],
[0.0, 11.25, -2.0, 37.25, 18.5],
[0.0, 26.849998, -2.3000002, 53.45, 18.6],
[0.0, -4.95, 13.799999, 22.25, 35.5],
],
dtype="float32",
)
attrs["iou_loss"] = True
verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
def verify_all_class_non_max_suppression(
target,
dev,
boxes_np,
scores_np,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected_indices,
):
dshape = boxes_np.shape
batch, num_boxes, _ = dshape
_, num_class, _ = scores_np.shape
boxes = te.placeholder(dshape, name="boxes")
scores = te.placeholder(scores_np.shape, dtype="float32", name="scores")
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _all_class_nms_implement)
out = fcompute(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
s = fschedule(out)
tvm_boxes = tvm.nd.array(boxes_np, dev)
tvm_scores = tvm.nd.array(scores_np, dev)
selected_indices = tvm.nd.array(np.zeros((batch * num_class * num_boxes, 3), "int64"), dev)
num_detections = tvm.nd.array(np.zeros((1,), "int64"), dev)
f = tvm.build(s, [boxes, scores, out[0], out[1]], target)
f(tvm_boxes, tvm_scores, selected_indices, num_detections)
tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]]
np.testing.assert_equal(tvm_res, expected_indices)
def test_all_class_non_max_suppression(target, dev):
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.5, 0.5, 0.95, 0.95],
[0.5, 0.5, 0.96, 0.96],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = 2
iou_threshold = 0.8
score_threshold = 0.0
expected = np.array(
[[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 2], [1, 0, 4], [1, 0, 1], [1, 1, 4], [1, 1, 1]]
)
verify_all_class_non_max_suppression(
target,
dev,
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0],
]
]
).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = 3
iou_threshold = 0.5
score_threshold = 0.4
expected = np.array([[0, 0, 3], [0, 0, 0]])
verify_all_class_non_max_suppression(
target,
dev,
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_aot_legalize_packed_call.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
@tvm.script.ir_module
class Module:
@T.prim_func
def tvm_test_cpacked(
A: T.handle, B: T.handle, C: T.handle, device_context: T.handle
) -> T.handle:
A_0 = T.match_buffer(A, (1,), dtype="float32")
T.preflattened_buffer(A_0, (1,), dtype="float32")
B_0 = T.match_buffer(B, (1,), dtype="float32")
T.preflattened_buffer(B_0, (1,), dtype="float32")
C_0 = T.match_buffer(C, (1,), dtype="float32")
T.preflattened_buffer(C_0, (1,), dtype="float32")
T.evaluate(C)
@T.prim_func
def tir_packed_call() -> None:
A = T.var("handle")
B = T.var("handle")
C = T.var("handle")
device_context = T.var("handle")
# body
T.evaluate(
T.tvm_call_cpacked(
"tvm_test_cpacked",
A,
B,
C,
device_context,
dtype="int32",
)
)
@tvm.script.ir_module
class Expected:
@T.prim_func
def tvm_test_cpacked(
A: T.handle, B: T.handle, C: T.handle, device_context: T.handle
) -> T.handle:
A_0 = T.match_buffer(A, (1,), dtype="float32")
T.preflattened_buffer(A_0, (1,), dtype="float32")
B_0 = T.match_buffer(B, (1,), dtype="float32")
T.preflattened_buffer(B_0, (1,), dtype="float32")
C_0 = T.match_buffer(C, (1,), dtype="float32")
T.preflattened_buffer(C_0, (1,), dtype="float32")
T.evaluate(C)
@T.prim_func
def tir_packed_call() -> None:
A = T.var("handle")
B = T.var("handle")
C = T.var("handle")
device_context = T.var("handle")
# body
T.evaluate(
T.tvm_call_cpacked(
"tvm_test_cpacked",
T.tvm_stack_make_array(
A,
T.tvm_stack_make_shape(1, dtype="handle"),
T.reinterpret(T.uint64(0), dtype="handle"),
T.uint32(1),
T.Cast("float32", 0),
0,
dtype="handle",
),
T.tvm_stack_make_array(
B,
T.tvm_stack_make_shape(1, dtype="handle"),
T.reinterpret(T.uint64(0), dtype="handle"),
T.uint32(1),
T.Cast("float32", 0),
0,
dtype="handle",
),
T.tvm_stack_make_array(
C,
T.tvm_stack_make_shape(1, dtype="handle"),
T.reinterpret(T.uint64(0), dtype="handle"),
T.uint32(1),
T.Cast("float32", 0),
0,
dtype="handle",
),
device_context,
dtype="int32",
)
)
def test_aot_packed_call():
mod = Module
expected = Expected
out = tir.transform.LegalizePackedCalls()(mod)
tvm.ir.assert_structural_equal(expected, out, map_free_vars=True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_canonical_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
class CanonicalChecker:
def __init__(self):
self.analyzer = tvm.arith.Analyzer()
def verify(self, data, expected):
res = self.analyzer.canonical_simplify(data)
expected = tvm.runtime.convert(expected)
assert tvm.ir.structural_equal(res, expected), "\ndata={}\nres={}\nexpected={}".format(
data, res, expected
)
def test_mul_sum_simplify():
ck = CanonicalChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck.verify(2 + (3 * x + z + y + 1) * 4 + x, x * 13 + z * 4 + y * 4 + 6)
ck.verify(x * 3 - 4 * x + 1, 1 - x)
ck.verify(y + x * 3 - 5 * x + 1 + y, y * 2 + 1 - x * 2)
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# truncdiv
ck.verify(tdiv(x + y + x + y * 3, 2), y * 2 + x)
ck.verify(tmod(x + y + x + y * 3, 2), 0)
# floordiv
fld = tvm.te.floordiv
flm = tvm.te.floormod
ck.verify(flm(x + x + y * 3, 2), flm(y * 3, 2))
ck.verify(fld(x + y + x + y * 3, 2), y * 2 + x)
ck.verify(flm(x + y + x + y * 3, 2), 0)
ck.verify(fld(x + x + y * 3, 2), fld(y * 3, 2) + x)
def test_split_index_simplify():
ck = CanonicalChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
# truncdiv
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# split div const
ck.verify(tdiv(x, 3) * 3 + tmod(x, 3), x)
ck.verify(tdiv(x, 6) * 6 + tmod(tdiv(x, 3), 2) * 3 + tmod(x, 3), x)
ck.verify(tdiv(tdiv(tmod(x, 16), 2) * 2, 4), tdiv(tmod(x, 16), 4))
ck.verify(tdiv(tmod(x, 2), 8), 0)
ck.verify(tdiv(tmod(x, 2), 7), 0)
ck.verify(tdiv(tdiv(tmod(x, 16), 2) * 2, 6), tdiv(tmod(x, 16), 6))
# split mod const
ck.verify(tmod((x * 8), 16), tmod(x, 2) * 8)
ck.verify(tmod(x * 8, 2), 0)
# simplify then fold
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000))
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1000))
ck.verify(tdiv(x * 4 + y, 2) * 2 + tmod(x * 4 + y, 2), x * 4 + y)
# complex fold
ck.verify(tdiv(z * 9 + y, 2) * 2 + tmod(z * 9 + y, 2), z * 9 + y)
ck.analyzer.update(x, tvm.arith.ConstIntBound(-100, 1000), True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(-100, 1000), True)
ck.verify(tdiv(x * 4 + y, 2) * 2 + tmod(x * 4 + y, 2), x * 4 + y)
# floordiv
fld = tvm.te.floordiv
flm = tvm.te.floormod
ck.verify(fld(x * 5, 2), fld(x * 5, 2))
ck.verify(fld(x, 3) * 3 + flm(x, 3), x)
ck.verify(fld(x, 6) * 6 + flm(fld(x, 3), 2) * 3 + flm(x, 3), x)
ck.verify(fld(fld(flm(x, 16), 2) * 2, 4), fld(flm(x, 16), 4))
ck.verify(fld(flm(x, 2), 8), 0)
ck.verify(fld(flm(x, 2), 7), 0)
ck.verify(fld(fld(flm(x, 16), 2) * 2, 6), fld(flm(x, 16), 6))
# cannot simplify mixed case, unless we canonicalize into one mode.
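# (Illustrative note: truncation and floor division disagree on negative operands,
# e.g. truncdiv(-3, 2) == -1 while floordiv(-3, 2) == -2, so terms written in
# different division modes cannot be merged without extra sign information.)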
ck.verify(tdiv(x, 6) * 2 + tmod(fld(x, 3), 2), tdiv(x, 6) * 2 + tmod(fld(x, 3), 2))
ck.verify(tmod(-x, 2), tmod(x, -2) * -1)
def test_div_simplify():
ck = CanonicalChecker()
x = te.var("x")
tdiv = tvm.tir.truncdiv
# truncdiv
ck.verify(tdiv(16 + 48 * x, 16), x * 3 + 1)
# (17+48*x)/16 is not simplifiable for arbitrary x because when 17+48*x<0
# (17+48*x)/16 != 1+3*x
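# (Worked example: for x = -1, truncdiv(17 + 48*(-1), 16) = truncdiv(-31, 16) = -1,
# whereas 1 + 3*(-1) = -2, so the two forms differ for negative values.)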
ck.verify(tdiv(17 + 48 * x, 16), tdiv(x * 48 + 17, 16))
# However, when x >= 0, 17+48*x >= 0 and (17+48*x)/16 can be simplified to 3*x+1
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 10))
ck.verify(tdiv(17 + 48 * x, 16), x * 3 + 1)
# Trying expressions that are not simplifiable for any values of the variables
ck.verify(tdiv(17 + 47 * x, 16), tdiv(x * 47 + 17, 16))
# floordiv
fld = tvm.te.floordiv
ck.analyzer.update(x, tvm.arith.ConstIntBound(-1000, 10000), True)
ck.verify(fld(16 + 48 * x, 16), x * 3 + 1)
ck.verify(fld(17 + 48 * x, 16), x * 3 + 1)
ck.verify(fld(17 + 47 * x, 16), fld(x * 47 + 17, 16))
def test_floormod_simplify():
ck = CanonicalChecker()
flm = tvm.te.floormod
x, y = te.var("x"), te.var("y")
ck.verify(flm(flm((x * 4) + y - 466036, 24528) - 24512, 16), flm((x * 4) + y + 12, 16))
ck.verify(flm(flm((x * 4), 16), 8), flm(x, 2) * 4)
ck.verify(flm(-x, 2), flm(x, -2) * -1)
def test_canonical_mixed():
ck = CanonicalChecker()
x = te.var("x")
z = tvm.tir.const(3, "int32")
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
ck.verify(tdiv(x, (z * z)) - tdiv(x, (z * z)), 0)
ck.verify(tdiv(x, (z + z)) - tdiv(x, (z + z)), 0)
ck.verify(x - 2 < 3, x < 5)
ck.verify(tvm.te.max(x, 1) - tvm.te.max(x, 1), 0)
ck.verify(tvm.te.min(x, 1) - tvm.te.min(x, 1), 0)
ck.verify(x * x - x * x, 0)
ck.verify(tmod(tdiv(tmod(x, 20), 2) * 2, 4), tdiv(tmod(x, 4), 2) * 2)
fld = tvm.te.floordiv
ck.verify(fld(x, (z * z)) - fld(x, (z * z)), 0)
ck.verify(fld(x, (z + z)) - fld(x, (z + z)), 0)
def test_reduce_combiner_simplify():
ck = CanonicalChecker()
dummy = te.var("dummy")
comm_reducer = te.comm_reducer
prod = comm_reducer(lambda x, y: x * y, lambda t0: tvm.tir.const(1, t0))
sum_or_prod = comm_reducer(
lambda x, y: tvm.tir.Select(dummy < 0, x + y, x * y),
lambda t0: tvm.tir.Select(dummy < 0, tvm.tir.const(0, t0), tvm.tir.const(1, t0)),
)
sum_and_prod = comm_reducer(
lambda x, y: (x[0] + y[0], x[1] * y[1]),
lambda t0, t1: (tvm.tir.const(0, t0), tvm.tir.const(5, t1) - tvm.tir.const(4, t1)),
)
some_reducer1 = comm_reducer(
lambda x, y: (
x[0] + y[0],
x[0] + y[0] + x[1] + y[1],
x[0] * y[2] + y[0] * x[2],
x[1] + y[2],
4.0,
),
lambda t0, t1, t2, t3, t4: (
tvm.tir.const(0, t0),
tvm.tir.const(1, t1),
tvm.tir.const(2, t2),
tvm.tir.const(3, t3),
tvm.tir.const(4, t4),
),
)
k = te.reduce_axis((0, 10), name="k")
A = te.placeholder((10,), name="A")
# Test that SimplifyCombiner makes use of vranges
ck.analyzer.update(dummy, tvm.arith.ConstIntBound(-10, -4))
ck.verify(sum_or_prod(A[k], k), te.sum(A[k], k))
ck.verify(sum_or_prod(A[k], k, init=1), te.sum(A[k], k, init=1))
ck.analyzer.update(dummy, tvm.arith.ConstIntBound(5, 9), True)
ck.verify(sum_or_prod(A[k], k), prod(A[k], k))
ck.verify(sum_or_prod(A[k], k, init=1), prod(A[k], k, init=1))
ck.analyzer.update(dummy, tvm.arith.ConstIntBound(-10, 100), True)
ck.verify(sum_and_prod((A[k], A[10 - k]), k)[0], te.sum(A[k], k))
ck.verify(sum_and_prod((A[k], A[10 - k]), k)[1], prod(A[10 - k], k))
reference_simplified_sources = [
[A[0]],
[A[0], A[1]],
[A[0], A[2]],
[A[0], A[1], A[2], A[3]],
[A[4]],
]
for j in range(5):
# Here we use the j-th component of the result, so only it and the components it
# depends on are left.
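# (For example, component 0 is x[0] + y[0] and depends only on source 0, so only
# A[0] survives; component 1 also adds x[1] + y[1], so A[0] and A[1] remain.)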
simplified = ck.analyzer.canonical_simplify(
some_reducer1((A[0], A[1], A[2], A[3], A[4]), k)[j]
)
# Check that the remaining components are the expected ones.
for lhs, rhs in zip(simplified.source, reference_simplified_sources[j]):
assert tvm.ir.structural_equal(lhs, rhs)
# Test that components with side effects are not removed
dummy = tvm.ir.GlobalVar("dummy")
side_effect = lambda *xs: tvm.tir.Call("int32", dummy, xs)
ck.verify(
sum_and_prod((A[k], side_effect(A[10 - k])), k)[0],
sum_and_prod((A[k], side_effect(A[10 - k])), k)[0],
)
ck.verify(sum_and_prod((side_effect(A[k]), A[10 - k]), k)[0], te.sum(side_effect(A[k]), k))
def test_reduce_simplify():
ck = CanonicalChecker()
k = te.reduce_axis((0, 10), name="k")
j = te.reduce_axis((-5, 3), name="j")
A = te.placeholder((10,), name="A")
ck.verify(te.sum(tvm.tir.Select(k + j < 12, k + j, 0), [k, j]), te.sum(k + j, [k, j]))
ck.verify(te.sum(A[3], []), A[3])
ck.verify(te.sum(A[3], [], where=k > 12, init=1.0), tvm.tir.const(1.0, dtype="float32"))
# The rule below is not typical; it has been removed for now
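# (For k in [0, 10), div(k, 10) is always 0, so the reduction body folds to a
# constant 0 while the sum itself is kept.)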
ck.verify(te.sum(te.div(k, 10), k), te.sum(tvm.tir.const(0, "int32"), k))
def test_simplify_if_then_else():
ck = CanonicalChecker()
x = te.var("x")
y = te.var("y")
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# simplification that takes condition into account.
res = tvm.tir.if_then_else(
(x * 4 + y) >= 466036,
tvm.tir.if_then_else(
24512 <= tmod(((x * 4) + y) - 466036, 24528),
tmod(tmod(((x * 4) + y) - 466036, 24528) - 24512, 16),
x,
),
y,
)
res2 = tvm.tir.if_then_else(
(x * 4) >= 466036 - y,
tvm.tir.if_then_else(
24512 <= tmod(((x * 4) + y) - 466036, 24528),
tmod(tmod(((x * 4) + y) - 466036, 24528) - 24512, 16),
x,
),
y,
)
expected = tvm.tir.if_then_else(
tvm.tir.LE(466036, (x * 4 + y)),
tvm.tir.if_then_else(
tvm.tir.LE(24512, tmod(((x * 4) + y) - 4, 24528)), tmod(((x * 4) + y) - 4, 16), x
),
y,
)
ck.verify(res, expected)
ck.verify(res2, expected)
# simplification can only use the condition inside the branch it guards
res = tvm.tir.Select(tvm.tir.all(x >= -1, y >= 0), tmod(x + y + 100, 3), tmod(x + 100, 3))
expected = tvm.tir.Select(tvm.tir.all(x >= -1, y >= 0), tmod(x + y + 1, 3), tmod(x + 100, 3))
ck.verify(res, ck.analyzer.canonical_simplify(expected))
res = tvm.tir.Select(x >= 10, tvm.tir.if_then_else(tdiv(x, 3) > 2, x, 0), 0)
expected = tvm.tir.Select(x >= 10, x, 0)
ck.verify(res, ck.analyzer.canonical_simplify(expected))
res = tvm.tir.Select(x >= 10, tvm.tir.if_then_else(tdiv(x, 3) < 2, x, 0), 0)
ck.verify(res, 0)
def test_complex_cases():
ck = CanonicalChecker()
x = te.var("x")
y = te.var("y")
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
res2 = (
tdiv(tdiv(tmod(x * 128 + y, 1296), 36) * 2 + 1, 2) * 36
+ tdiv(tmod((x * 128) + y, 36) * 2 + 1, 2)
- tmod((x * 128) + y, 1296)
+ 1
)
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 5))
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 127))
ck.verify(res2, 1)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1024), True)
res3 = (
tdiv(x * 1024 + y, 65536)
+ tdiv(tmod(x * 1024 + y, 65536), 256)
+ tdiv(tmod(x * 1024 + y, 256), 16)
+ tmod(x * 1024 + y, 16)
- tdiv(y, 256)
- tdiv(tmod(y, 256), 16)
- tmod(y, 16)
- (x * 4)
)
ck.verify(res3, tdiv((x * 1024) + y, 256) - tdiv(y, 256) - (x * 4))
def test_simplify_cast():
ck = CanonicalChecker()
tcast = tvm.tir.Cast
fld = tvm.te.floordiv
flm = tvm.te.floormod
# cast(i64, i + j + 1) - cast(i64, i)
i = te.var("i", dtype="int32")
j = te.var("j", dtype="int32")
res = tcast("int64", i + j + 1) - tcast("int64", i)
ck.verify(res, tcast("int64", j) + tvm.tir.const(1, "int64"))
# cast(i32, i + j + 1) - cast(i32, i)
i = te.var("i", dtype="int64")
j = te.var("j", dtype="int64")
ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 10))
ck.analyzer.update(j, tvm.arith.ConstIntBound(0, 10))
res = tcast("int32", i + j + 1) - tcast("int32", i)
ck.verify(res, tcast("int32", j) + 1)
# cast(i32, i + j - 100)
i = te.var("i", dtype="int64")
j = te.var("j", dtype="int64")
ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 2**31 - 1))
ck.analyzer.update(j, tvm.arith.ConstIntBound(0, 10))
res = tcast("int32", i + j - 100)
ck.verify(res, res)
# cast(i32, flm(axis, 7i64) * 2i64 + 1i64) + 1i32
# - cast(i32, flm(axis, 7i64) * 2i64)
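# (With axis in [0, 42], flm(axis, 7) stays in [0, 6], so both casts are lossless:
# cast(E + 1) - cast(E) simplifies to 1, and the extra + 1i32 gives the expected 2.)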
axis = te.var("axis", dtype="int64")
ck.analyzer.update(axis, tvm.arith.ConstIntBound(0, 42))
res = (
tcast(
"int32",
flm(axis, tvm.tir.const(7, "int64")) * tvm.tir.const(2, "int64")
+ tvm.tir.const(1, "int64"),
)
+ tvm.tir.const(1, "int32")
- tcast("int32", flm(axis, tvm.tir.const(7, "int64")) * tvm.tir.const(2, "int64"))
)
ck.verify(res, 2)
if __name__ == "__main__":
test_floormod_simplify()
test_mul_sum_simplify()
test_simplify_if_then_else()
test_div_simplify()
test_reduce_simplify()
test_reduce_combiner_simplify()
test_split_index_simplify()
test_canonical_mixed()
test_complex_cases()
test_simplify_cast()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_const_int_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_dtype_bound():
analyzer = tvm.arith.Analyzer()
x = te.var("x", dtype="int64")
bd = analyzer.const_int_bound(x)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
x = te.var("x", dtype="int8")
bd = analyzer.const_int_bound(x)
assert bd.min_value == -128
assert bd.max_value == 127
x = te.var("x", dtype="uint8")
bd = analyzer.const_int_bound(x)
assert bd.min_value == 0
assert bd.max_value == 255
def test_cast_bound():
analyzer = tvm.arith.Analyzer()
x = te.var("x", dtype="int8")
tmod = tvm.tir.truncmod
bd = analyzer.const_int_bound(tmod(x, 3).astype("uint32"))
assert bd.min_value == 0
assert bd.max_value == 2
bd = analyzer.const_int_bound(tmod(x, 3).astype("float32").astype("int32"))
assert bd.min_value == -2
assert bd.max_value == 2
def test_add_sub_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x", "int64"), te.var("y", "int64")
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(0, 4))
analyzer.update(y, tvm.arith.ConstIntBound(1, 10))
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == 1
assert bd.max_value == 14
bd = analyzer.const_int_bound(x - y)
assert bd.min_value == -10
assert bd.max_value == 3
analyzer.update(x, tvm.arith.ConstIntBound(0, bd.POS_INF), override=True)
bd = analyzer.const_int_bound(x - y)
assert bd.min_value == -10
assert bd.max_value == bd.POS_INF
bd = analyzer.const_int_bound(1 - x)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == 1
## constants with negative or positive max(int64) occasionally show up
## in models; this is to ensure we can handle those cases
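# (e.g. NEG_INF + POS_INF is indeterminate, so the bound of x + y must widen to the
# full [NEG_INF, POS_INF] range rather than doing arithmetic on the sentinels)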
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.NEG_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(bd.POS_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
bd = analyzer.const_int_bound(x + y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
def test_mul_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-2, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(x * y + 20)
assert bd.min_value == 0
assert bd.max_value == 60
analyzer.update(x, tvm.arith.ConstIntBound(-3, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
bd = analyzer.const_int_bound(x * y)
assert bd.min_value == -32
assert bd.max_value == 24
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
bd = analyzer.const_int_bound(x * y)
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
def test_truncdiv_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
tdiv = tvm.tir.truncdiv
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(tdiv(x, y))
assert bd.min_value == -2
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 0), override=True)
bd = analyzer.const_int_bound(tdiv(x, y))
assert bd.min_value == -4
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 1), override=True)
bd = analyzer.const_int_bound(tdiv(x, y))
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-4, 12), override=True)
bd = analyzer.const_int_bound(tdiv(x, y))
assert bd.min_value == -9
assert bd.max_value == 9
def test_truncmod_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
tmod = tvm.tir.truncmod
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(tmod(x, y))
assert bd.min_value == -9
assert bd.max_value == 4
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tmod(x, y))
assert bd.min_value == -9
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tmod(x, y))
assert bd.min_value == 0
assert bd.max_value == 9
def test_floordiv_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
fld = tvm.te.floordiv
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(fld(x, y))
assert bd.min_value == -9 // 4
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 0), override=True)
bd = analyzer.const_int_bound(fld(x, y))
assert bd.min_value == -4
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-2, 1), override=True)
bd = analyzer.const_int_bound(fld(x, y))
assert bd.min_value == bd.NEG_INF
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(-4, 12), override=True)
bd = analyzer.const_int_bound(fld(x, y))
assert bd.min_value == -9
assert bd.max_value == 9
# Test that unsigned integers are handled correctly
x, y = te.var("x", dtype="uint32"), te.var("y", dtype="uint32")
analyzer.update(x, tvm.arith.ConstIntBound(1, 4), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(0, 12), override=True)
bd = analyzer.const_int_bound(fld(x, y))
assert bd.min_value == 0
assert bd.max_value == 4
def test_floormod_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
flm = tvm.te.floormod
analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(flm(x, y))
assert bd.min_value == 0
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(flm(x, y))
assert bd.min_value == 0
assert bd.max_value == 9
analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(flm(x, y))
assert bd.min_value == 0
assert bd.max_value == 9
def test_min_max_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(tvm.te.min(x, y))
assert bd.min_value == -9
assert bd.max_value == 10
analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tvm.te.min(x, y))
assert bd.min_value == bd.NEG_INF
assert bd.max_value == 10
bd = analyzer.const_int_bound(tvm.te.max(x, y))
assert bd.min_value == 4
assert bd.max_value == bd.POS_INF
analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
bd = analyzer.const_int_bound(tvm.te.max(x, y))
assert bd.min_value == 4
assert bd.max_value == bd.POS_INF
def test_select_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
bd = analyzer.const_int_bound(tvm.tir.Select(x > 1, (y < 0).astype("int32"), y + 1))
assert bd.min_value == 0
assert bd.max_value == 11
def test_shift_and_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
analyzer.update(y, tvm.arith.ConstIntBound(2, 10))
bd = analyzer.const_int_bound(x >> y)
assert bd.min_value == -3
assert bd.max_value == 2
bd = analyzer.const_int_bound(x & y)
assert bd.min_value == 0
assert bd.max_value == 10
analyzer.update(x, tvm.arith.ConstIntBound(10, 11), override=True)
bd = analyzer.const_int_bound(x & y)
assert bd.min_value == 0
assert bd.max_value == 10
def test_mix_index_bound():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
analyzer.update(x, tvm.arith.ConstIntBound(0, 24 - 1))
analyzer.update(y, tvm.arith.ConstIntBound(0, 3 - 1))
bd = analyzer.const_int_bound(tmod(x, 8) + tdiv(x, 8) * 8)
assert bd.min_value == 0
assert bd.max_value == 24 - 1
bd = analyzer.const_int_bound(y + x * 3)
assert bd.min_value == 0
assert bd.max_value == 24 * 3 - 1
bd = analyzer.const_int_bound(tmod(x, 7) + tdiv(x, 7) * 7)
assert bd.min_value == 0
assert bd.max_value == (23 // 7) * 7 + 6
def test_size_var_bound():
analyzer = tvm.arith.Analyzer()
x = te.size_var("x")
bd = analyzer.const_int_bound(x)
assert bd.min_value == 0
assert bd.max_value == bd.POS_INF
def test_let_bound():
analyzer = tvm.arith.Analyzer()
x = te.var("x")
bd = analyzer.const_int_bound(tvm.tir.Let(x, 1, x + 1))
assert bd.min_value == 2
assert bd.max_value == 2
def test_floormod_negative_divisor():
analyzer = tvm.arith.Analyzer()
flm, fld = tvm.te.floormod, tvm.te.floordiv
a, b = te.var("a"), te.var("b")
analyzer.update(a, tvm.arith.ConstIntBound(0, 6))
analyzer.update(b, tvm.arith.ConstIntBound(-5, 7))
bd = analyzer.const_int_bound(flm(a, b))
assert bd.min_value == -4
assert bd.max_value == 6
def test_multiple_condition():
analyzer = tvm.arith.Analyzer()
flm, fld = tvm.te.floormod, tvm.te.floordiv
a = te.var("a")
analyzer.update(a, tvm.arith.ConstIntBound(0, 128))
with analyzer.constraint_scope(tvm.tir.all(1 <= flm(a, 58), flm(a, 58) < 57)):
bound = analyzer.const_int_bound(flm(a, 58) - 1)
assert bound.min_value == 0
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_deduce_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.tir.buffer import decl_buffer
def test_deduce():
a = te.var("a")
b = te.var("b")
c = te.var("c")
d = te.var("d")
b_s = tvm.arith.IntervalSet(2, 3)
c_s = tvm.arith.IntervalSet(10, 15)
d_s = tvm.arith.IntervalSet(-3, -1)
zero = tvm.tir.const(0, "int32")
fdiv = tvm.te.floordiv
e0 = (-b) * a + c - d
res0 = tvm.arith.deduce_bound(a, e0 >= 0, {b: b_s, c: c_s, d: d_s}, {})
ans0 = fdiv(d - c, b * -1)
tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
# expression containing variable a is on rhs
res0 = tvm.arith.deduce_bound(a, zero <= e0, {b: b_s, c: c_s, d: d_s}, {})
tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
e0 = d * a + c - d
res0 = tvm.arith.deduce_bound(a, e0 >= 0, {b: b_s, c: c_s, d: d_s}, {})
ans0 = fdiv(d - c, d)
tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
# expression containing variable a is on rhs
res0 = tvm.arith.deduce_bound(a, zero <= e0, {b: b_s, c: c_s, d: d_s}, {})
tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
e1 = a * 4 + b < c
res1 = tvm.arith.deduce_bound(a, e1, {b: b_s, c: c_s, d: d_s}, {})
ans1 = fdiv(c - 1 - b, 4)
tvm.testing.assert_prim_expr_equal(res1.max_value, ans1)
# expression containing variable a is on rhs
e1 = c > a * 4 + b
res1 = tvm.arith.deduce_bound(a, e1, {b: b_s, c: c_s, d: d_s}, {})
tvm.testing.assert_prim_expr_equal(res1.max_value, ans1)
e2 = tvm.te.max(5, a * 4) < 0
res2 = tvm.arith.deduce_bound(a, e2, {b: b_s, c: c_s, d: d_s}, {})
assert str(res2.max_value) == "neg_inf: handle"
assert str(res2.min_value) == "pos_inf: handle"
# expression containing variable a is on rhs
e2 = zero < tvm.te.max(5, a * 4)
res2 = tvm.arith.deduce_bound(a, e2, {b: b_s, c: c_s, d: d_s}, {})
assert str(res2.max_value) == "neg_inf: handle"
assert str(res2.min_value) == "pos_inf: handle"
e3 = (-b) + a * c - d
res3 = tvm.arith.deduce_bound(a, e3 >= 0, {b: b_s, c: c_s, d: d_s}, {b: b_s, d: d_s})
ans3 = fdiv(2, c) + 1
tvm.testing.assert_prim_expr_equal(res3.min_value, ans3)
res3 = tvm.arith.deduce_bound(a, zero <= e3, {b: b_s, c: c_s, d: d_s}, {b: b_s, d: d_s})
tvm.testing.assert_prim_expr_equal(res3.min_value, ans3)
# tests for `EQ` op
res4 = tvm.arith.deduce_bound(a, a == b, {}, {})
tvm.testing.assert_prim_expr_equal(res4.max_value, b)
tvm.testing.assert_prim_expr_equal(res4.min_value, b)
# Unsatisfiable `EQ`, with a variable as one of the operands
res5 = tvm.arith.deduce_bound(a, (a == b), {b: b_s}, {b: b_s})
assert str(res5.max_value) == "neg_inf: handle"
assert str(res5.min_value) == "pos_inf: handle"
# variable `a` is on the RHS
res6 = tvm.arith.deduce_bound(a, 10 == a, {}, {})
tvm.testing.assert_prim_expr_equal(res6.max_value, 10)
tvm.testing.assert_prim_expr_equal(res6.min_value, 10)
# Add, Sub in `EQ`
e4 = (a - c) == (b + d)
ans4 = b + d + c
res7 = tvm.arith.deduce_bound(a, e4, {b: b_s, c: c_s, d: d_s}, {})
tvm.testing.assert_prim_expr_equal(res7.max_value, ans4)
tvm.testing.assert_prim_expr_equal(res7.min_value, ans4)
# Satisfiable Mul in `EQ` with negative sign
res8 = tvm.arith.deduce_bound(a, (5 * a == -10), {}, {})
tvm.testing.assert_prim_expr_equal(res8.max_value, -2)
tvm.testing.assert_prim_expr_equal(res8.min_value, -2)
# Unsatisfiable Mul in `EQ`
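# (b is bounded to [2, 3] while 4 * a is always a multiple of 4, so no integer a
# can satisfy the equation and the deduction yields no valid interval.)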
e5 = 4 * a == b
res9 = tvm.arith.deduce_bound(a, e5, {b: b_s}, {})
assert str(res9.max_value) == "neg_inf: handle"
assert str(res9.min_value) == "pos_inf: handle"
# Mul in `EQ` whose bound cannot be deduced
res10 = tvm.arith.deduce_bound(
a, (b * a == b), {b: b_s}, {}
) # simplifier is not able to prove that (b % b == 0)
assert str(res10.max_value) == "neg_inf: handle"
assert str(res10.min_value) == "pos_inf: handle"
def test_check():
a = te.var("a")
b = te.var("b")
c = te.var("c")
d = te.var("d")
b_s = tvm.arith.IntervalSet(2, 3)
c_s = tvm.arith.IntervalSet(5, 7)
d_s = tvm.arith.IntervalSet(-3, -1)
# no compare operator
res1 = tvm.arith.deduce_bound(a, a + b, {b: b_s}, {})
assert res1.is_nothing()
# multiple compare operators
res2 = tvm.arith.deduce_bound(a, (a + b > 3).astype(c.dtype) > c, {b: b_s, c: c_s}, {})
assert res2.is_nothing()
# target variable appears multiple times
res2 = tvm.arith.deduce_bound(a, a * 2 - a > b, {b: b_s}, {})
assert res2.is_nothing()
def test_deduce_basic():
def test_basic(a1, a2, coff):
a = te.var("a")
b = te.var("b")
b_s = tvm.arith.IntervalSet(a1, a2)
e0 = b + a * coff + 3
res1 = tvm.arith.deduce_bound(a, e0 < 17, {b: b_s}, {b: b_s})
[x, y] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) < 17, True)
# expression containing variable a is on rhs
res1 = tvm.arith.deduce_bound(a, tvm.tir.const(17, "int32") < e0, {b: b_s}, {b: b_s})
[x, y] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) > 17, True)
# expression containing variable a is on rhs
res1 = tvm.arith.deduce_bound(a, tvm.tir.const(17, "int32") >= e0, {b: b_s}, {b: b_s})
[x, y] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) <= 17, True)
res1 = tvm.arith.deduce_bound(a, e0 >= 17, {b: b_s}, {b: b_s})
[x, y] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) >= 17, True)
test_basic(0, 4, 4)
test_basic(1, 5, 4)
test_basic(2, 6, 4)
test_basic(0, 4, -4)
test_basic(1, 5, -4)
test_basic(2, 6, -4)
def test_deduce_complex():
def test_complex(a1, a2, coff):
a = te.var("a")
b = te.var("b")
b_s = tvm.arith.IntervalSet(a1, a2)
e0 = (b * 3 + a * coff) * 4
res1 = tvm.arith.deduce_bound(a, e0 < 63, {b: b_s}, {b: b_s})
[t, x] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) < 63, True)
# expression containing variable a is on rhs
res1 = tvm.arith.deduce_bound(a, tvm.tir.const(63, "int32") >= e0, {b: b_s}, {b: b_s})
[t, x] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) <= 63, True)
res1 = tvm.arith.deduce_bound(a, e0 > 63, {b: b_s}, {b: b_s})
[t, x] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) > 63, True)
# expression containing variable a is on rhs
res1 = tvm.arith.deduce_bound(a, tvm.tir.const(63, "int32") <= e0, {b: b_s}, {b: b_s})
[t, x] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) >= 63, True)
test_complex(0, 4, 4)
test_complex(0, 4, -4)
test_complex(2, 6, 4)
test_complex(0, 4, -4)
test_complex(1, 5, -4)
test_complex(2, 6, -4)
def test_deduce_non_support():
a = te.var("a")
def test_non_support(lhs):
res = tvm.arith.deduce_bound(a, lhs < 10, {}, {})
assert res.is_nothing()
test_non_support(tvm.tir.floordiv(a, 16))
test_non_support(tvm.tir.floormod(a, 16))
test_non_support(tvm.tir.Min(a, 16))
test_non_support(tvm.tir.Max(a, 16))
test_non_support(tvm.tir.LE(a, 16))
test_non_support(tvm.tir.LT(a, 16))
test_non_support(tvm.tir.GE(a, 16))
test_non_support(tvm.tir.GT(a, 16))
test_non_support(tvm.tir.EQ(a, 16))
test_non_support(tvm.tir.NE(a, 16))
test_non_support(tvm.tir.log(a))
test_non_support(tvm.tir.BufferLoad(decl_buffer([16], "int32"), [a]))
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_detect_clip_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_basic():
a = te.var("a")
b = te.var("b")
c = te.var("c")
m = tvm.arith.detect_clip_bound(tvm.tir.all(a * 1 < b * 6, a - 1 > 0), [a])
tvm.testing.assert_prim_expr_equal(m[1], b * 6 - 1)
assert m[0].value == 2
m = tvm.arith.detect_clip_bound(tvm.tir.all(a * 1 < b * 6, a - 1 > 0), [a, b])
assert len(m) == 0
m = tvm.arith.detect_clip_bound(tvm.tir.all(a + 10 * c <= 20, b - 1 > 0), [a, b])
tvm.testing.assert_prim_expr_equal(m[1], 20 - 10 * c)
tvm.testing.assert_prim_expr_equal(m[2], 2)
m = tvm.arith.detect_clip_bound(tvm.tir.all(tvm.tir.Not(a * 1 > b * 6), a - 1 > 0), [a])
tvm.testing.assert_prim_expr_equal(m[1], b * 6)
m = tvm.arith.detect_clip_bound(tvm.tir.all(tvm.tir.Min(a, b) > 3, a - 10 < 0), [a, b])
tvm.testing.assert_prim_expr_equal(m[0], 4)
tvm.testing.assert_prim_expr_equal(m[1], 9)
tvm.testing.assert_prim_expr_equal(m[2], 4)
if __name__ == "__main__":
test_basic()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_detect_linear_equation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_basic():
a = te.var("a")
b = te.var("b")
m = tvm.arith.detect_linear_equation(a * 4 + b * 6 + 7, [a])
assert m[0].value == 4
tvm.testing.assert_prim_expr_equal(m[1], b * 6 + 7)
m = tvm.arith.detect_linear_equation(a * 4 * (a + 1) + b * 6 + 7, [a])
assert len(m) == 0
m = tvm.arith.detect_linear_equation(a * 4 + (a + 1) + b * 6 + 7, [a])
assert m[0].value == 5
tvm.testing.assert_prim_expr_equal(m[1], b * 6 + 7 + 1)
m = tvm.arith.detect_linear_equation(a * b + 7, [a])
assert m[0] == b
m = tvm.arith.detect_linear_equation(b * 7, [a])
assert m[0].value == 0
m = tvm.arith.detect_linear_equation(b * 7, [])
assert len(m) == 1
tvm.testing.assert_prim_expr_equal(m[0], b * 7)
def test_multivariate():
v = [te.var("v%d" % i) for i in range(4)]
b = te.var("b")
m = tvm.arith.detect_linear_equation(v[0] * (b + 4) + v[0] + v[1] * 8, v)
tvm.testing.assert_prim_expr_equal(m[0], b + 5)
assert m[1].value == 8
m = tvm.arith.detect_linear_equation(v[0] * (b + 4) + v[0] + v[1] * 8 * v[2], v)
assert len(m) == 0
m = tvm.arith.detect_linear_equation(v[0] * (b + 4) + v[0] + v[1] * 8 * v[1] + v[3], v)
assert len(m) == 0
m = tvm.arith.detect_linear_equation(((v[0] * b + v[1]) * 8 + v[2] + 1) * 2, v)
assert m[1].value == 16
assert m[2].value == 2
assert m[len(m) - 1].value == 2
m = tvm.arith.detect_linear_equation((v[0] - v[1]), [v[2]])
assert m[0].value == 0
tvm.testing.assert_prim_expr_equal(m[1], v[0] - v[1])
m = tvm.arith.detect_linear_equation((v[0] - v[1]), [])
assert len(m) == 1
tvm.testing.assert_prim_expr_equal(m[0], v[0] - v[1])
if __name__ == "__main__":
test_basic()
test_multivariate()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_domain_touched.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
@T.prim_func
def scalar_func(a: T.handle, b: T.handle):
m = T.var("int32")
n = 100
A = T.match_buffer(a, (n, m))
B = T.match_buffer(b, (n, m))
for i, j in T.grid(n, m):
A[i, j] = B[i - 1, j + 1] + A[i - 1, j - 1]
@T.prim_func
def vector_func(a: T.handle, b: T.handle):
n = T.var("int32")
m = 128
A = T.match_buffer(a, (n, m))
B = T.match_buffer(b, (n, m))
for i in T.serial(n):
for j in T.vectorized(m):
A[i, j] = A[i, j] + B[i, j]
def test_domain_touched():
func = scalar_func
a, b = [func.buffer_map[var] for var in func.params]
ir = func.body
a_domain_r = tvm.arith._ffi_api.DomainTouched(ir, a, True, False)
assert a_domain_r[0].min.value == -1
assert a_domain_r[0].extent.value == 100
assert a_domain_r[1].min.value == -1
assert a_domain_r[1].extent.name == "m"
a_domain_w = tvm.arith._ffi_api.DomainTouched(ir, a, False, True)
assert a_domain_w[0].min.value == 0
assert a_domain_w[0].extent.value == 100
assert a_domain_w[1].min.value == 0
assert a_domain_w[1].extent.name == "m"
a_domain_rw = tvm.arith._ffi_api.DomainTouched(ir, a, True, True)
assert a_domain_rw[0].min.value == -1
assert a_domain_rw[0].extent.value == 101
assert a_domain_rw[1].min.value == -1
assert isinstance(a_domain_rw[1].extent, tvm.tir.Add)
assert a_domain_rw[1].extent.a.name == "m"
assert a_domain_rw[1].extent.b.value == 1
b_domain_r = tvm.arith._ffi_api.DomainTouched(ir, b, True, False)
assert b_domain_r
assert b_domain_r[0].min.value == -1
assert b_domain_r[0].extent.value == 100
assert b_domain_r[1].min.value == 1
assert b_domain_r[1].extent.name == "m"
b_domain_w = tvm.arith._ffi_api.DomainTouched(ir, b, False, True)
assert isinstance(b_domain_w, tvm.container.Array)
assert len(b_domain_w) == 0
def test_domain_touched_vector():
func = tvm.lower(vector_func)["main"]
a, b = [func.buffer_map[var] for var in func.params]
assert tvm.arith._ffi_api.DomainTouched(func.body, a, True, False)[0].extent.value == 128
assert tvm.arith._ffi_api.DomainTouched(func.body, a, True, False)[0].extent.value == 128
assert tvm.arith._ffi_api.DomainTouched(func.body, a, True, True)[0].extent.value == 128
assert tvm.arith._ffi_api.DomainTouched(func.body, b, True, False)[0].extent.value == 128
assert tvm.arith._ffi_api.DomainTouched(func.body, b, True, False)[0].extent.value == 128
if __name__ == "__main__":
test_domain_touched()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_intset.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm import tir
from tvm.arith.analyzer import Analyzer
class IntSetChecker:
def __init__(self):
self.analyzer = tvm.arith.Analyzer()
def verify(self, data, dmap, expected):
res = self.analyzer.int_set(data, dmap)
def err_msg():
return "\ndata={}\ndmap={}\nres={}\nexpected={}".format(data, dmap, res, expected)
assert self.analyzer.can_prove_equal(res.min_value, expected[0]), err_msg()
assert self.analyzer.can_prove_equal(res.max_value, expected[1]), err_msg()
def test_basic():
s = tvm.arith.IntervalSet(2, 3)
assert s.min_value.value == 2
assert s.max_value.value == 3
s = tvm.arith.IntSet.single_point(2)
assert s.min_value.value == 2
assert s.max_value.value == 2
def test_vector():
base = 10
stride = 3
lanes = 2
s = tvm.arith.IntSet.vector(tvm.tir.Ramp(base, stride, lanes))
assert s.min_value.value == base
assert s.max_value.value == base + stride * (lanes - 1)
def test_add_sub():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(x + y, {x: tvm.arith.IntervalSet(0, 10)}, (y, 10 + y))
ck.verify(x + y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (1, 21))
ck.verify(x - y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (-11, 9))
def test_mul_div():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
tdiv = tvm.tir.truncdiv
ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
ck.verify(x * y, {x: tvm.arith.IntervalSet(0, 10)}, (0, 10 * y))
ck.verify(x * 2, {x: tvm.arith.IntervalSet(1, 10)}, (2, 20))
ck.verify(x * -2, {x: tvm.arith.IntervalSet(1, 10)}, (-20, -2))
ck.verify(tdiv(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, tdiv(10, y)))
ck.verify(tdiv(x, 2), {x: tvm.arith.IntervalSet(1, 10)}, (0, 5))
fld = tvm.te.floordiv
ck.verify(fld(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, fld(10, y)))
ck.verify(fld(x, 2), {x: tvm.arith.IntervalSet(-1, 10)}, (-1, 5))
def test_mod():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
tmod = tvm.tir.truncmod
ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
ck.verify(tmod(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, y - 1))
ck.verify(tmod(x, 10), {x: tvm.arith.IntervalSet(1, 10)}, (0, 9))
flm = tvm.te.floormod
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(-10, 10)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 5)}, (3, 5))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(13, 15)}, (3, 5))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 15)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 11)}, (0, 9))
ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(1, 21)}, (0, 9))
fld = tvm.te.floordiv
z = te.var("z")
ck.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 3))
ck.verify(
flm(y, 8),
{y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)},
(
z * 8 + x * 4 - 8 * fld(z * 8 + x * 4, 8),
z * 8 + x * 4 + 3 - 8 * fld(z * 8 + x * 4, 8),
),
)
ck1 = IntSetChecker()
ck1.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 2))
ck1.verify(
flm(y, 8), {y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)}, (x * 4, x * 4 + 3)
)
def test_max_min():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(tvm.te.max(x, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (1, 11))
ck.verify(tvm.te.min(x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 9))
ck.verify(tvm.te.min(x, y), {}, (tvm.te.min(x, y), tvm.te.min(x, y)))
ck.verify(tvm.te.max(x, y), {}, (tvm.te.max(x, y), tvm.te.max(x, y)))
def test_select():
ck = IntSetChecker()
x, y = te.var("x"), te.var("y")
ck.verify(tvm.tir.Select(x > 0, x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 11))
def check_region_bound(expect_region, var_dom, mode, predicate=None):
"""Helper to check region bound estimation.
Parameters
----------
expect_region: dict
The keys are of the form (begin, end), or a single PrimExpr treated as a one-point
range. The values are the expected estimated region, or a dict mapping free-var
bindings to the expected region under each binding.
var_dom: dict
Map var to iteration domain range.
mode: str
Specify "lowerbound", "upperbound" or else use strict bound estimation.
predicate: PrimExpr
Extra predicate, defaults to True.
"""
if predicate is None:
predicate = tvm.tir.IntImm("bool", 1)
region = []
expect = []
for k, v in expect_region.items():
if not isinstance(k, (tuple, list)):
k = (k, k + 1)
region.append(tvm.ir.Range.from_min_extent(k[0], Analyzer().simplify(k[1] - k[0])))
expect.append(v)
if mode == "lowerbound":
result = tvm.arith.estimate_region_lower_bound(
region=region, var_dom=var_dom, predicate=predicate
)
elif mode == "upperbound":
result = tvm.arith.estimate_region_upper_bound(
region=region, var_dom=var_dom, predicate=predicate
)
else:
result = tvm.arith.estimate_region_strict_bound(
region=region, var_dom=var_dom, predicate=predicate
)
if result is None:
assert all([_ is None for _ in expect])
return
assert len(result) == len(expect)
for intset, expect_desc in zip(result, expect):
if isinstance(expect_desc, dict):
# check range on different free var bindings
for binding in expect_desc:
analyzer = Analyzer()
for k, v in binding:
analyzer.bind(k, v)
expect_begin, expect_end = expect_desc[binding]
result_begin = analyzer.simplify(intset.min_value, 3)
result_end = analyzer.simplify(intset.max_value + 1, 3)
assert analyzer.can_prove_equal(
result_begin - expect_begin, 0
), f"{result_begin} vs {expect_begin}"
assert analyzer.can_prove_equal(
result_end - expect_end, 0
), f"{result_end} vs {expect_end}"
else:
# check range
expect_begin, expect_end = expect_desc
analyzer = Analyzer()
assert analyzer.can_prove_equal(
intset.min_value - expect_begin, 0
), f"{intset.min_value} vs {expect_begin}"
assert analyzer.can_prove_equal(
intset.max_value - expect_end + 1, 0
), f"{intset.max_value} vs {expect_end - 1}"
def test_region_bound_not_independent():
# (i, i+2) and (i+2, i+4) are dependent, thus the lowerbound is not available
i = tvm.tir.Var("i", "int32")
var_dom = {
i: tvm.ir.Range(begin=0, end=64),
}
check_region_bound({(i, i + 2): None, (i + 2, i + 4): None}, var_dom, mode="lowerbound")
check_region_bound({(i, i + 2): (0, 65), (i + 2, i + 4): (2, 67)}, var_dom, mode="upperbound")
# when only a subset of access indices are affine
i, j, k = tvm.tir.Var("i", "int32"), tvm.tir.Var("j", "int32"), tvm.tir.Var("k", "int32")
var_dom = {
i: tvm.ir.Range(begin=0, end=16),
j: tvm.ir.Range(begin=0, end=16),
k: tvm.ir.Range(begin=0, end=16),
}
check_region_bound(
{i // 4: None, j * 4 + i % 4: None, tir.truncdiv(k, 2): None},
var_dom,
predicate=j * 4 + i % 4 > 3,
mode="lowerbound",
)
check_region_bound(
{i // 4: (0, 4), j * 4 + i % 4: (4, 64), tir.truncdiv(k, 2): (0, 8)},
var_dom,
predicate=j * 4 + i % 4 > 3,
mode="upperbound",
)
def test_region_bound_stride_too_wide():
i = tvm.tir.Var("i", "int32")
var_dom = {i: tvm.ir.Range(begin=0, end=64)}
check_region_bound({(i * 4, i * 4 + 2): None}, var_dom, mode="lowerbound")
check_region_bound({(i * 4, i * 4 + 2): (0, 254)}, var_dom, mode="upperbound")
def test_region_bound_small_stride():
i = tvm.tir.Var("i", "int32")
var_dom = {
i: tvm.ir.Range(begin=0, end=64),
}
check_region_bound({(i * 4, i * 4 + 8): (0, 260)}, var_dom, mode="lowerbound")
def test_region_lower_bound_split_predicate():
x_o = tvm.tir.Var("xo", "int32")
x_i = tvm.tir.Var("xi", "int32")
x = x_o * 4 + x_i
var_dom = {
x_o: tvm.ir.Range(begin=0, end=16),
x_i: tvm.ir.Range(begin=0, end=4),
}
check_region_bound({(x * 4, x * 4 + 8): (0, 256)}, var_dom, predicate=x < 63, mode="lowerbound")
check_region_bound(
{(x * 4, x * 4 + 8): (0, 256), (x * 3, x * 3 + 5): (0, 191)},
var_dom,
predicate=x < 63,
mode="upperbound",
)
def test_region_lower_bound_multiple_variables():
div = tvm.tir.floordiv
mod = tvm.tir.floormod
x = tvm.tir.Var("x", "int32")
wid = tvm.tir.Var("wid", "int32")
i = div(x, 16)
j = div(mod(x, 16), 4) * 8 + mod(x, 4) + div(wid, 32) * 4
k = wid % 32
var_dom = {
x: tvm.ir.Range(begin=0, end=32),
wid: tvm.ir.Range(begin=0, end=64),
}
check_region_bound({i: (0, 2), j: (0, 32), k: (0, 32)}, var_dom, mode="lowerbound")
def test_region_lower_bound_negative_scale():
i = tvm.tir.Var("i", "int32")
j = tvm.tir.Var("j", "int32")
var_dom = {
i: tvm.ir.Range(begin=0, end=4),
j: tvm.ir.Range(begin=0, end=4),
}
check_region_bound(
{(1 - i, 5 - i): (-2, 5), (20 - j * 4, 36 - j * 4): (8, 36)}, var_dom, mode="lowerbound"
)
def test_region_lower_bound_for_non_perfect_tile():
h1 = tvm.tir.Var("h1", "int32")
h2 = tvm.tir.Var("h2", "int32")
h3 = tvm.tir.Var("h3", "int32")
# non-uniform tiling, single inner variable
var_dom = {
h2: tvm.ir.Range(begin=0, end=10),
}
check_region_bound(
{
h3 * 8
+ h2: {
(): (
tvm.tir.max(h3 * 8, 1),
tvm.tir.max(h3 * 8, 1)
- tvm.tir.max(h3 * 8, 214)
- tvm.tir.max(1 - h3 * 8, 0)
+ 224,
),
((h3, 0),): (1, 10), # h3 == 0: region is [1, 10)
((h3, 10),): (h3 * 8, h3 * 8 + 10), # 0 < h3 <= 26: region is [h3 * 8, h3 * 8 + 10)
((h3, 27),): (h3 * 8, 224), # h3 > 26: region is [h3 * 8, 224)
}
},
var_dom,
predicate=tvm.tir.all(1 <= h3 * 8 + h2, h3 * 8 + h2 < 224),
mode="lowerbound",
)
# non-uniform tiling, two inner variables
var_dom = {
h1: tvm.ir.Range(begin=0, end=5),
h2: tvm.ir.Range(begin=0, end=2),
}
check_region_bound(
{
h3 * 8
+ h2 * 5
+ h1: {
(): (
tvm.tir.max(h3 * 8, 1),
tvm.tir.max(h3 * 8, 1)
- tvm.tir.max(h3 * 8, 214)
- tvm.tir.max(1 - h3 * 8, 0)
+ 224,
),
((h3, 0),): (1, 10),
((h3, 10),): (h3 * 8, h3 * 8 + 10),
((h3, 27),): (h3 * 8, 224),
}
},
var_dom,
predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h2 * 5 + h1 < 224),
mode="lowerbound",
)
# lowerbound should fail on incompatible predicates
check_region_bound(
{h3 * 8 + h2 * 5 + h1: None},
var_dom,
predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h1 * 2 + h2 < 224),
mode="lowerbound",
)
check_region_bound(
{h3 * 8 + h2 * 5 + h1: (h3 * 8, h3 * 8 + 10)},
var_dom,
predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h1 * 2 + h2 < 224),
mode="upperbound",
)
def test_region_lower_bound_unfusable():
var_dom = {
tvm.tir.Var("i", "int32"): tvm.ir.Range(8),
tvm.tir.Var("j", "int32"): tvm.ir.Range(4),
}
i, j = var_dom
check_region_bound({(i + j) // 2: (0, 6)}, var_dom, mode="lowerbound")
def test_union_lower_bound():
neg_inf = tvm.arith.int_set.neg_inf()
pos_inf = tvm.arith.int_set.pos_inf()
set_0 = tvm.arith.IntervalSet(min_value=neg_inf, max_value=0)
set_1 = tvm.arith.IntervalSet(min_value=1, max_value=pos_inf)
result = tvm.arith.int_set.union_lower_bound([set_0, set_1])
assert result.min_value.same_as(neg_inf)
assert result.max_value.same_as(pos_inf)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_iter_affine_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.tir import floormod, floordiv
def ifuse(inputs, pred_extent=None):
"""Fuse iterators"""
value, extent = 0, 1
for i, ext in inputs:
value = value * ext + i
extent = extent * ext
return value, extent if pred_extent is None else pred_extent
def isplit(axis, factor):
"""Split iterators"""
fld = tvm.tir.floordiv
flm = tvm.tir.floormod
return [
(fld(axis[0], factor), fld(axis[1] + (factor - 1), factor)),
(flm(axis[0], factor), factor),
]
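# Rough illustration of the helpers above (a sketch, not executed by the tests):
#   ifuse([(x, 3), (y, 4)])  ->  (x * 4 + y, 12)
#   isplit((x, 10), 5)       ->  [(x // 5, 2), (x % 5, 5)]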
def var_dom(iters):
"""Get domains of iterators"""
return {var: tvm.ir.Range(0, ext) for var, ext in iters}
def convert_iter_expr(expr):
return tvm.arith.normalize_iter_map_to_expr(expr)
def assert_iter_sum_pattern(
expect_dict, dom_map, predicate=True, check_level="surjective", simplify_trivial_iterators=True
):
keys = list(expect_dict.keys())
res = tvm.arith.detect_iter_map(
keys,
dom_map,
predicate=predicate,
check_level=check_level,
simplify_trivial_iterators=simplify_trivial_iterators,
)
indices = res.indices
assert len(indices) == len(keys), res.errors
for i, input_iter in enumerate(keys):
spec = expect_dict[input_iter]
(
extent,
base,
) = spec[0:2]
scale = spec[2] if len(spec) > 2 else 1
expect_iter = spec[3] if len(spec) > 3 else None
sum_expr = indices[i]
assert isinstance(sum_expr, tvm.arith.IterSumExpr)
if extent == 1:
assert len(sum_expr.args) == 0
else:
assert len(sum_expr.args) == 1
tvm.testing.assert_prim_expr_equal(sum_expr.args[0].extent, extent)
tvm.testing.assert_prim_expr_equal(sum_expr.args[0].scale, scale)
tvm.testing.assert_prim_expr_equal(sum_expr.base, base)
if expect_iter is not None:
if not isinstance(expect_iter, tvm.arith.IterMapExpr):
sum_expr = convert_iter_expr(sum_expr)
tvm.ir.assert_structural_equal(sum_expr, expect_iter)
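# The spec tuples above follow (extent, base[, scale[, expected_expr]]); for example
# (a sketch of the convention used by the tests below), {x * 10 + y: (128, 0)} asserts
# that the fused iterator spans extent 128 starting at base 0 with the default scale 1.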
def assert_iter_sum_failure(iters, dom_map, predicate=True, check_level="surjective"):
res = tvm.arith.detect_iter_map(
list(iters), dom_map, predicate=predicate, check_level=check_level
).indices
assert len(res) == 0
def test_trivial():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.Var("z", "int32")
dom_map = var_dom([(x, 3), (y, 4), (z, 1)])
assert_iter_sum_pattern({x: (3, 0), y: (4, 0), 3: (1, 3)}, dom_map)
assert_iter_sum_pattern({x: (3, 0), 3: (1, 3)}, dom_map)
# not independent
assert_iter_sum_failure([x, x, 3], dom_map)
assert_iter_sum_pattern(
{x: (3, 0), y: (4, 0)}, dom_map, check_level="bijective", simplify_trivial_iterators=True
)
assert_iter_sum_pattern(
{x: (3, 0), y: (4, 0)}, dom_map, check_level="bijective", simplify_trivial_iterators=False
)
assert_iter_sum_failure([x, z], dom_map, check_level="bijective")
def test_fuse():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
c = tvm.tir.SizeVar("c", "int32")
c0 = tvm.tir.SizeVar("c0", "int32")
assert_iter_sum_pattern({y * 3 + 1 + c + x: (12, 1 + c)}, var_dom([(x, 3), (y, 4)]))
assert_iter_sum_pattern({ifuse([(x, 3), (y, 4)])[0]: (12, 0)}, var_dom([(x, 3), (y, 4)]))
# fuse with symbolic factor
assert_iter_sum_pattern({(y + 1) * c + x: (4 * c, c)}, var_dom([(x, c), (y, 4)]))
# duplication
assert_iter_sum_failure([y * 3 + x, y], var_dom([(x, 3), (y, 4)]))
assert_iter_sum_failure([y, x + 1, y], var_dom([(x, 3), (y, 4)]))
# factor mismatch
assert_iter_sum_failure([y * 4 + x], var_dom([(x, 3), (y, 4)]))
# simple stride pattern
assert_iter_sum_pattern({x * 4 + y * 2: (6, 0, 2, (x * 2 + y) * 2)}, var_dom([(x, 3), (y, 2)]))
# simple stride pattern with symbolic
assert_iter_sum_pattern(
{x * 2 * c0 + y * 2: (3 * c0, 0, 2, (x * c0 + y) * 2)}, var_dom([(x, 3), (y, c0)])
)
def test_split():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
c0 = tvm.tir.SizeVar("c0", "int32")
c1 = tvm.tir.SizeVar("c1", "int32")
fld = tvm.tir.floordiv
flm = tvm.tir.floormod
assert_iter_sum_pattern({fld(x, 3): (8, 0), flm(x, 3) * 2 + c1: (3, c1, 2)}, var_dom([(x, 24)]))
assert_iter_sum_pattern(
{fld(x, 6): (4, 0), fld(flm(x, 6), 2): (3, 0), flm(x, 2): (2, 0)}, var_dom([(x, 24)])
)
# simple symbolic bound
# TODO(tvm-team) improve symbolic divisible check to enable
# more complicated symbolic bound
assert_iter_sum_pattern({fld(x, c0): (c1, 0), flm(x, c0): (c0, 0)}, var_dom([(x, c1 * c0)]))
assert_iter_sum_pattern({fld(x * 2, 4): (4, 0, 1), flm(x * 2, 4): (2, 0, 2)}, var_dom([(x, 8)]))
assert_iter_sum_pattern(
{
fld(x * 2, 4) * 4 + flm(x * 2, 4): (8, 0, 2),
},
var_dom([(x, 8)]),
)
assert_iter_sum_failure([fld(x, flm(flm(y, 8), 6))], var_dom([(x, 24), (y, 8)]))
# domain of x is undefined
assert_iter_sum_pattern(
{fld(flm(x, 49) + y, 49): (1, fld(flm(x, 49) + y, 49))}, var_dom([(y, 1)])
)
def test_compound():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
xo, xi = isplit((x, 10), 5)
yo, yi = isplit((y, 9), 3)
z = ifuse([yo, xo, yi])
# reconstruct the pattern manually
mx = tvm.arith.IterMark(x, 10)
my = tvm.arith.IterMark(y, 9)
xoscale = 3
yoscale = 6
yiscale = 1
mxo = tvm.arith.IterSplitExpr(mx, 5, 2, xoscale)
myo = tvm.arith.IterSplitExpr(my, 3, 3, yoscale)
myi = tvm.arith.IterSplitExpr(my, 1, 3, yiscale)
mz = tvm.arith.IterMark(tvm.arith.IterSumExpr([myo, mxo, myi], 0), 18)
sz = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(mz, 1, 18, 1)], 0)
assert_iter_sum_pattern({z[0]: (18, 0, 1, sz), xi[0]: (5, 0)}, var_dom([(x, 10), (y, 9)]))
def test_predicate():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
# available constraints
# upper bound only
assert_iter_sum_pattern(
{x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y < 128
)
assert_iter_sum_pattern(
{x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y <= 127
)
# lower bound only
assert_iter_sum_pattern(
{x * 10 + y: (124, 6)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y > 5
)
assert_iter_sum_pattern(
{x * 10 + y: (124, 6)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y >= 6
)
# lower bound + upper bound
assert_iter_sum_pattern(
{x * 10 + y: (122, 6)},
var_dom([(x, 13), (y, 10)]),
predicate=tvm.tir.And(x * 10 + y > 5, x * 10 + y < 128),
)
assert_iter_sum_pattern(
{x * 10 + y: (122, 6)},
var_dom([(x, 13), (y, 10)]),
predicate=tvm.tir.And(x * 10 + y >= 6, x * 10 + y <= 127),
)
# constraint on one fused iter
i = tvm.tir.Var("i", "int32")
j = tvm.tir.Var("j", "int32")
k = tvm.tir.Var("k", "int32")
assert_iter_sum_pattern(
{i * 8 + j * 2 + k: (88, 1)},
var_dom([(i, 11), (j, 5), (k, 2)]),
predicate=tvm.tir.all(1 <= j * 2 + k, j * 2 + k < 9),
)
# constraint on single var
assert_iter_sum_pattern({i: (10, 0)}, var_dom([(i, 48)]), predicate=i < 10)
# iterations are subparts of constraint, invalid case 1
assert_iter_sum_failure(
[i, j, k],
var_dom([(i, 128), (j, 128), (k, 128)]),
predicate=tvm.tir.all(i * 16384 + j * 128 + k < 100),
)
# iterations are subparts of constraint, invalid case 2
assert_iter_sum_failure(
[i * 128 + j, k],
var_dom([(i, 128), (j, 128), (k, 128)]),
predicate=i * 16384 + j * 128 + k < 100,
)
# irrelevant predicate
assert_iter_sum_pattern({i + j: (1, j)}, var_dom([(i, 1)]), predicate=j <= 24)
# constraint on nested fused iters
assert_iter_sum_pattern(
{i * 8 + j * 2 + k: (22, 3)},
var_dom([(i, 11), (j, 5), (k, 2)]),
predicate=tvm.tir.all(
1 <= j * 2 + k, j * 2 + k < 9, 3 <= i * 8 + j * 2 + k, i * 8 + j * 2 + k < 25
),
)
# duplicate constraint on one fused iter
assert_iter_sum_pattern(
{i * 6 + j * 2 + k: (66, 2)},
var_dom([(i, 11), (j, 5), (k, 2)]),
predicate=tvm.tir.all(1 <= j * 2 + k, 2 <= j * 2 + k, j * 2 + k < 8, j * 2 + k < 9),
)
# duplicate constraint on nested fused iters
assert_iter_sum_pattern(
{i * 6 + j * 2 + k: (15, 3)},
var_dom([(i, 11), (j, 5), (k, 2)]),
predicate=tvm.tir.all(
1 <= j * 2 + k,
2 <= j * 2 + k,
j * 2 + k < 8,
j * 2 + k < 9,
3 <= i * 6 + j * 2 + k,
i * 6 + j * 2 + k < 25,
1 <= i * 6 + j * 2 + k,
i * 6 + j * 2 + k < 18,
),
)
# constraint on non-disjoint fused iters should fail
assert_iter_sum_failure(
[i * 8 + j * 2 + k],
var_dom([(i, 11), (j, 5), (k, 2)]),
predicate=tvm.tir.all(2 <= j * 2 + k, 0 <= i * 4 + j),
)
# constraint on many disjoint fused iters, case 1
# i4 * 6 + i5 in [3, 9), extent=6 (= scale of i2)
# i2 * 30 + i3 * 15 in [30, 90), extent=60 (= scale of i1)
# i1 * 60 in [60, 240), extent=180 (= scale of i0)
i0 = tvm.tir.Var("i0", "int32")
i1 = tvm.tir.Var("i1", "int32")
i2 = tvm.tir.Var("i2", "int32")
i3 = tvm.tir.Var("i3", "int32")
i4 = tvm.tir.Var("i4", "int32")
i5 = tvm.tir.Var("i5", "int32")
assert_iter_sum_pattern(
{i0 * 180 + i1 * 60 + i2 * 30 + i3 * 15 + i4 * 6 + i5: (540, 93)},
var_dom([(i0, 3), (i1, 4), (i2, 3), (i3, 2), (i4, 3), (i5, 6)]),
predicate=tvm.tir.all(1 <= i1, 2 <= i2 * 2 + i3, 3 <= i4 * 6 + i5),
)
# constraint on many disjoint fused iters, case 2
assert_iter_sum_pattern(
{i0 * 45 + i1 * 45 + i2 * 9 + i3 * 4 + i4: (135, 28)},
var_dom([(i0, 3), (i1, 2), (i2, 5), (i3, 3), (i4, 4)]),
predicate=tvm.tir.all(
3 <= i1 * 5 + i2, i1 * 5 + i2 < 8, 1 <= i3 * 4 + i4, i3 * 4 + i4 < 10
),
)
# constraint on split iters
assert_iter_sum_pattern(
{i % 16: (7, 3), i // 16: (8, 4)},
var_dom([(i, 1024)]),
predicate=tvm.tir.all(3 <= i % 16, i % 16 < 10, 4 <= i // 16, i // 16 < 12),
check_level="bijective",
)
# constraint on split iters, nested case 1
assert_iter_sum_pattern(
{(i * 32 + j) % 16: (7, 3)},
var_dom([(i, 5), (j, 32)]),
predicate=tvm.tir.all(3 <= (i * 32 + j) % 16, (i * 32 + j) % 16 < 10),
)
# constraint on split iters, nested case 2
assert_iter_sum_failure(
[
(i * 32 + j) % 16,
],
var_dom([(i, 5), (j, 32)]),
predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 32),
check_level="bijective",
)
assert_iter_sum_pattern(
{(i * 32 + j) % 16: (16, 0)},
var_dom([(i, 5), (j, 32)]),
predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 32),
)
assert_iter_sum_pattern(
{(i * 32 + j - 1) % 16: (16, 0), (i * 32 + j - 1) // 16: (4, 0)},
var_dom([(i, 5), (j, 32)]),
predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 64),
)
# non-standard form of predicate
assert_iter_sum_pattern(
{x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 < 128 - y
)
# duplicate constraint
assert_iter_sum_pattern(
{x * 10 + y: (64, 0)},
var_dom([(x, 13), (y, 10)]),
predicate=tvm.tir.all(x * 10 + y < 128, x * 10 + y < 64),
)
# useless constraint
assert_iter_sum_pattern(
{x * 10 + y: (130, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y < 140
)
i1 = tvm.tir.Var("i1", "int32")
i2 = tvm.tir.Var("i2", "int32")
i3 = tvm.tir.Var("i3", "int32")
i4 = tvm.tir.Var("i4", "int32")
assert_iter_sum_pattern(
{i1 * 20 + i2 * 10 + i3 * 3 + i4: (128, 0)},
var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
predicate=(
tvm.tir.all(
i1 * 2 + i2 < 13,
i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
i3 * 3 + i4 < 10,
)
),
)
# wrong constraint
assert_iter_sum_failure(
[i1 * 20 + i2 * 10 + i3 * 3 + i4],
var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
predicate=(
tvm.tir.all(
i1 * 2 + i2 < 13,
i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
i3 * 3 + i4 < 7,
)
),
)
# incompatible constraint
assert_iter_sum_failure(
[i1 * 20 + i2 * 10 + i3 * 3 + i4],
var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
predicate=(
tvm.tir.all(
i1 * 2 + i2 < 13,
i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
i3 * 3 + i4 < 10,
i1 * 4 + i3 < 20,
)
),
)
assert_iter_sum_failure(
[i1 * 20 + i2 * 10 + i3 * 3 + i4],
var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
predicate=(
tvm.tir.all(
i1 * 2 + i2 < 13,
i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
i1 * 4 + i3 < 20,
)
),
)
# zero iter
xo = tvm.tir.Var("xo", "int32")
xi = tvm.tir.Var("xi", "int32")
y = tvm.tir.Var("y", "int32")
assert_iter_sum_pattern(
{xo * 129 + xi: (128, 0), y: (128, 0)},
var_dom([(xo, 1), (xi, 129), (y, 128)]),
predicate=xo * 129 + xi < 128,
)
# strided iteration predicate
assert_iter_sum_pattern(
{xo * 16 + xi * 4: (10, 0, 4)},
var_dom([(xo, 3), (xi, 4)]),
predicate=xo * 4 + xi < 10,
)
def convert_division(divisions):
if divisions is None or len(divisions) == 0:
return []
res = []
for division in divisions[:-1]:
res.append(
[
tvm.arith.normalize_iter_map_to_expr(division[0].source),
tvm.arith.normalize_iter_map_to_expr(division[1].source),
]
)
res.append([divisions[-1][0].extent, divisions[-1][1].extent])
return res
def create_iter(name, extent):
return tvm.tir.Var(name, "int32"), extent
def test_subspace_division():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.Var("z", "int32")
c = tvm.tir.SizeVar("c", "int32")
# simple 1.1
res = tvm.arith.subspace_divide(
[z * 12 + y * 3 + x + c], var_dom([(x, 3), (y, 4), (z, 5)]), [x]
)
res = convert_division(res)
assert len(res) == 2
tvm.ir.assert_structural_equal(res[0][0], z * 4 + y)
tvm.ir.assert_structural_equal(res[0][1], x + c)
# simple 1.2
res = tvm.arith.subspace_divide(
[z * 12 + y * 3 + x + c], var_dom([(x, 3), (y, 4), (z, 5)]), [x], z * 4 + y < 18
)
res = convert_division(res)
assert len(res) == 2
tvm.ir.assert_structural_equal(res[0][0], z * 4 + y)
tvm.ir.assert_structural_equal(res[0][1], x + c)
tvm.ir.assert_structural_equal(res[1][0], z * 4 + y < 18)
tvm.ir.assert_structural_equal(res[1][1], True)
# compound 1
i0 = create_iter("i0", 4)
j0 = create_iter("j0", 8)
i3 = create_iter("i3", 2)
i1, i2 = isplit(j0, 4)
k0 = ifuse([i0, i1])
k1 = ifuse([i2, i3])
# compound 1.1
res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]])
res = convert_division(res)
assert len(res) == 3
tvm.ir.assert_structural_equal(res[0][0], (i0[0] * 2) + floordiv(j0[0], 4))
tvm.ir.assert_structural_equal(res[0][1], 0)
tvm.ir.assert_structural_equal(res[1][0], floormod(j0[0], 4))
tvm.ir.assert_structural_equal(res[1][1], i3[0])
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([i3])).indices
assert len(res1) == 2
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0, j0])).indices
assert len(res2) == 2
# compound 1.2
res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [j0[0], i3[0]])
res = convert_division(res)
assert len(res) == 3
tvm.ir.assert_structural_equal(res[0][0], i0[0])
tvm.ir.assert_structural_equal(res[0][1], floordiv(j0[0], 4))
tvm.ir.assert_structural_equal(res[1][0], 0)
tvm.ir.assert_structural_equal(res[1][1], (floormod(j0[0], 4) * 2) + i3[0])
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([j0, i3])).indices
assert len(res1) == 2
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0])).indices
assert len(res2) == 2
# compound 1.3
res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i0[0], i3[0]])
res = convert_division(res)
assert len(res) == 0
# compound 1.4
res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]], k0[0] < 7)
res = convert_division(res)
assert len(res) == 3
tvm.ir.assert_structural_equal(res[0][0], (i0[0] * 2) + floordiv(j0[0], 4))
tvm.ir.assert_structural_equal(res[0][1], 0)
tvm.ir.assert_structural_equal(res[1][0], floormod(j0[0], 4))
tvm.ir.assert_structural_equal(res[1][1], i3[0])
tvm.ir.assert_structural_equal(res[2][0], (i0[0] * 2) + floordiv(j0[0], 4) < 7)
tvm.ir.assert_structural_equal(res[2][1], True)
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([i3])).indices
assert len(res1) == 2
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0, j0])).indices
assert len(res2) == 2
# compound 1.5
res = tvm.arith.subspace_divide(
[k0[0], k1[0]], var_dom([i0, j0, i3]), [j0[0], i3[0]], k1[0] < 7
)
res = convert_division(res)
assert len(res) == 3
tvm.ir.assert_structural_equal(res[0][0], i0[0])
tvm.ir.assert_structural_equal(res[0][1], floordiv(j0[0], 4))
tvm.ir.assert_structural_equal(res[1][0], 0)
tvm.ir.assert_structural_equal(res[1][1], (floormod(j0[0], 4) * 2) + i3[0])
tvm.ir.assert_structural_equal(res[2][0], True)
tvm.ir.assert_structural_equal(res[2][1], (floormod(j0[0], 4) * 2) + i3[0] < 7)
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([j0, i3])).indices
assert len(res1) == 2
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0])).indices
assert len(res2) == 2
# compound 1.6
res = tvm.arith.subspace_divide(
[k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]], tvm.tir.all(k0[0] < 7, k1[0] < 7)
)
res = convert_division(res)
assert len(res) == 0
# compound 2
j0 = create_iter("j0", 4)
l0 = create_iter("l0", 2)
l1 = create_iter("l1", 6)
j3 = create_iter("j3", 3)
k0 = ifuse([l0, l1])
i1, j2 = isplit(k0, 3)
j1, i1 = isplit(i1, 2)
i0 = ifuse([j0, j1])
i2 = ifuse([j2, j3])
# compound 2.1
res = tvm.arith.subspace_divide(
[i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l1[0], j3[0]]
)
res = convert_division(res)
assert len(res) == 4
tvm.ir.assert_structural_equal(res[0][0], (j0[0] * 2) + l0[0])
tvm.ir.assert_structural_equal(res[0][1], 0)
tvm.ir.assert_structural_equal(res[1][0], 0)
tvm.ir.assert_structural_equal(res[1][1], floordiv(l1[0], 3))
tvm.ir.assert_structural_equal(res[2][0], 0)
tvm.ir.assert_structural_equal(res[2][1], (floormod(l1[0], 3) * 3) + j3[0])
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1], res[2][1]], var_dom([l1, j3])).indices
assert len(res1) == 3
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0, l0])).indices
assert len(res2) == 3
# compound 2.2
res = tvm.arith.subspace_divide(
[i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l0[0], l1[0], j3[0]]
)
res = convert_division(res)
assert len(res) == 4
tvm.ir.assert_structural_equal(res[0][0], j0[0])
tvm.ir.assert_structural_equal(res[0][1], floordiv(l0[0] * 6 + l1[0], 6))
tvm.ir.assert_structural_equal(res[1][0], 0)
tvm.ir.assert_structural_equal(res[1][1], floordiv(floormod(l0[0] * 6 + l1[0], 6), 3))
tvm.ir.assert_structural_equal(res[2][0], 0)
tvm.ir.assert_structural_equal(res[2][1], (floormod(l0[0] * 6 + l1[0], 3) * 3) + j3[0])
res1 = tvm.arith.detect_iter_map(
[res[0][1], res[1][1], res[2][1]], var_dom([l0, l1, j3])
).indices
assert len(res1) == 3
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0])).indices
assert len(res2) == 3
# compound 2.3
res = tvm.arith.subspace_divide(
[i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l0[0], j3[0]]
)
res = convert_division(res)
assert len(res) == 0
# compound 2.4
res = tvm.arith.subspace_divide(
[i0[0], i1[0], i2[0]],
var_dom([j0, l0, l1, j3]),
[l1[0], j3[0]],
tvm.tir.all(i0[0] < 7, i2[0] < 8),
)
res = convert_division(res)
assert len(res) == 4
tvm.ir.assert_structural_equal(res[0][0], (j0[0] * 2) + l0[0])
tvm.ir.assert_structural_equal(res[0][1], 0)
tvm.ir.assert_structural_equal(res[1][0], 0)
tvm.ir.assert_structural_equal(res[1][1], floordiv(l1[0], 3))
tvm.ir.assert_structural_equal(res[2][0], 0)
tvm.ir.assert_structural_equal(res[2][1], (floormod(l1[0], 3) * 3) + j3[0])
tvm.ir.assert_structural_equal(res[3][0], (j0[0] * 2) + l0[0] < 7)
tvm.ir.assert_structural_equal(res[3][1], (floormod(l1[0], 3) * 3) + j3[0] < 8)
res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1], res[2][1]], var_dom([l1, j3])).indices
assert len(res1) == 3
res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0, l0])).indices
assert len(res2) == 3
# compound 2.5
res = tvm.arith.subspace_divide(
[i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [j3[0]], i2[0] < 8
)
res = convert_division(res)
assert len(res) == 0
def test_complex():
n0 = create_iter("n0", 2)
n1 = create_iter("n1", 4)
m0 = ifuse([n0, n1], 6)
m1 = create_iter("m1", 3)
l0 = create_iter("l0", 4)
l1 = create_iter("l1", 8)
l2 = ifuse([m0, m1], 16)
l3 = create_iter("l3", 32)
k0, k4 = isplit(l0, 2)
k1, k5 = isplit(l1, 2)
k2, k6 = isplit(l2, 4)
k3, k7 = isplit(l3, 4)
j0 = ifuse([k0, k1], 7)
j1 = ifuse([k2, k3])
j2 = ifuse([k4, k5])
j3 = ifuse([k6, k7], 15)
i0 = ifuse([j0, j1], 200)
i1 = ifuse([j2, j3], 50)
n0_mark = tvm.arith.IterMark(n0[0], n0[1])
n1_mark = tvm.arith.IterMark(n1[0], n1[1])
l0_mark = tvm.arith.IterMark(l0[0], l0[1])
l1_mark = tvm.arith.IterMark(l1[0], l1[1])
m1_mark = tvm.arith.IterMark(m1[0], m1[1])
l3_mark = tvm.arith.IterMark(l3[0], l3[1])
m0_expr = tvm.arith.IterSumExpr(
[
tvm.arith.IterSplitExpr(n0_mark, 1, n0[1], 4),
tvm.arith.IterSplitExpr(n1_mark, 1, n1[1], 1),
],
0,
)
m0_mark = tvm.arith.IterMark(m0_expr, 6)
l2_expr = tvm.arith.IterSumExpr(
[tvm.arith.IterSplitExpr(m0_mark, 1, 6, 3), tvm.arith.IterSplitExpr(m1_mark, 1, m1[1], 1)],
0,
)
l2_mark = tvm.arith.IterMark(l2_expr, 16)
k0_expr = tvm.arith.IterSplitExpr(l0_mark, 2, 2, 4)
k1_expr = tvm.arith.IterSplitExpr(l1_mark, 2, 4, 1)
k2_expr = tvm.arith.IterSplitExpr(l2_mark, 4, 4, 8)
k3_expr = tvm.arith.IterSplitExpr(l3_mark, 4, 8, 1)
k4_expr = tvm.arith.IterSplitExpr(l0_mark, 1, 2, 30)
k5_expr = tvm.arith.IterSplitExpr(l1_mark, 1, 2, 15)
k6_expr = tvm.arith.IterSplitExpr(l2_mark, 1, 4, 4)
k7_expr = tvm.arith.IterSplitExpr(l3_mark, 1, 4, 1)
j0_expr = tvm.arith.IterSumExpr([k0_expr, k1_expr], 0)
j0_mark = tvm.arith.IterMark(j0_expr, 7)
i0_expr = tvm.arith.IterSumExpr(
[tvm.arith.IterSplitExpr(j0_mark, 1, 7, 32), k2_expr, k3_expr], 0
)
j3_expr = tvm.arith.IterSumExpr([k6_expr, k7_expr], 0)
j3_mark = tvm.arith.IterMark(j3_expr, 15)
i1_expr = tvm.arith.IterSumExpr(
[k4_expr, k5_expr, tvm.arith.IterSplitExpr(j3_mark, 1, 15, 1)], 0
)
i0_mark = tvm.arith.IterMark(i0_expr, i0[1])
i1_mark = tvm.arith.IterMark(i1_expr, i1[1])
i0_final = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(i0_mark, 1, i0[1], 1)], 0)
i1_final = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(i1_mark, 1, i1[1], 1)], 0)
assert_iter_sum_pattern(
{i0[0]: (200, 0, 1, i0_final), i1[0]: (50, 0, 1, i1_final)},
var_dom([l0, l1, n0, n1, m1, l3]),
predicate=tvm.tir.all(
i0[0] < 200, i1[0] < 50, m0[0] < 6, l2[0] < 16, j0[0] < 7, j3[0] < 15
),
)
# wrong constraint
assert_iter_sum_failure(
[i0[0], i1[0]],
var_dom([l0, l1, n0, n1, m1, l3]),
tvm.tir.all(i0[0] < 200, i1[0] < 50, m0[0] < 9, l2[0] < 16, j0[0] < 7, j3[0] < 14),
)
# subspace_division
res = tvm.arith.subspace_divide(
[i0[0], i1[0]],
var_dom([l0, l1, n0, n1, m1, l3]),
[n0[0], n1[0], m1[0], l3[0]],
tvm.tir.all(m0[0] < 6, l2[0] < 16, j0[0] < 7, j3[0] < 15),
)
res = convert_division(res)
assert len(res) == 3
tvm.ir.assert_structural_equal(res[0][0], floordiv(l0[0], 2) * 4 + floordiv(l1[0], 2))
tvm.ir.assert_structural_equal(
res[0][1], (floordiv((n0[0] * 4 + n1[0]) * 3 + m1[0], 4) * 8) + floordiv(l3[0], 4)
)
tvm.ir.assert_structural_equal(res[1][0], ((floormod(l0[0], 2) * 2) + floormod(l1[0], 2)))
tvm.ir.assert_structural_equal(
res[1][1], ((floormod(((n0[0] * 4 + n1[0]) * 3 + m1[0]), 4) * 4) + floormod(l3[0], 4))
)
tvm.ir.assert_structural_equal(res[2][0], (floordiv(l0[0], 2) * 4) + floordiv(l1[0], 2) < 7)
tvm.ir.assert_structural_equal(
res[2][1],
tvm.tir.all(
n0[0] * 4 + n1[0] < 6,
(n0[0] * 4 + n1[0]) * 3 + m1[0] < 16,
floormod(((n0[0] * 4 + n1[0]) * 3 + m1[0]), 4) * 4 + floormod(l3[0], 4) < 15,
),
)
assert_iter_sum_pattern(
{res[0][1]: (32, 0), res[1][1]: (15, 0)}, var_dom([n0, n1, m1, l3]), res[2][1]
)
assert_iter_sum_pattern({res[0][0]: (8, 0), res[1][0]: (4, 0)}, var_dom([l0, l1]))
def test_normalize_iter_map_to_expr():
fld = tvm.tir.floordiv
flm = tvm.tir.floormod
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
xo, xi = isplit((x, 10), 5)
yo, yi = isplit((y, 9), 3)
z = ifuse([yo, xo, yi])
res = tvm.arith.detect_iter_map([z[0], xi[0]], var_dom([(x, 10), (y, 9)]))
tvm.ir.assert_structural_equal(
tvm.arith.normalize_iter_map_to_expr(res.indices[0]),
fld(y, 3) * 6 + fld(x, 5) * 3 + flm(y, 3),
)
tvm.ir.assert_structural_equal(tvm.arith.normalize_iter_map_to_expr(res.indices[1]), flm(x, 5))
# iter mark wraps a complex expr
split = tvm.arith.IterSplitExpr(tvm.arith.IterMark(x * y + 1, 1024), 1, 1024, 1)
tvm.ir.assert_structural_equal(tvm.arith.normalize_iter_map_to_expr(split), x * y + 1)
def test_inverse_affine_iter_map():
analyzer = tvm.arith.Analyzer()
l0 = create_iter("l0", 64)
l1 = create_iter("l1", 64)
l2 = create_iter("l2", 64)
# simple case
l0_0, l0_1 = isplit(l0, 16)
l1_0, l1_1 = isplit(l1, 4)
l0_1_l1_1_fused = ifuse([l0_1, l1_1])
iter_map = tvm.arith.detect_iter_map(
[l0_1_l1_1_fused[0], l0_0[0], l1_0[0]], var_dom([l0, l1])
).indices
outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
assert len(res) == 2
l0_inverse = floordiv(outputs[0], 4) + outputs[1] * 16
l1_inverse = floormod(outputs[0], 4) + outputs[2] * 4
assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
assert analyzer.can_prove_equal(res[l1[0]], l1_inverse)
# compound case
l0_0, l0_1 = isplit(l0, 16)
l1_0, l1_1 = isplit(l1, 4)
l2_1, l2_2 = isplit(l2, 4)
l2_0, l2_1 = isplit(l2_1, 4)
l0_1_l2_1_l1_1_l2_0_fused = ifuse([l0_1, l2_1, l1_1, l2_0])
iter_map = tvm.arith.detect_iter_map(
[l0_1_l2_1_l1_1_l2_0_fused[0], l0_0[0], l2_2[0], l1_0[0]], var_dom([l0, l1, l2])
).indices
outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
assert len(res) == 3
l0_inverse = floordiv(outputs[0], 64) + outputs[1] * 16
l1_inverse = floormod(floordiv(outputs[0], 4), 4) + outputs[3] * 4
l2_inverse = (
floormod(outputs[0], 4) * 16 + floormod(floordiv(outputs[0], 16), 4) * 4 + outputs[2]
)
assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
assert analyzer.can_prove_equal(res[l1[0]], l1_inverse)
assert analyzer.can_prove_equal(res[l2[0]], l2_inverse)
# diamond-shape DAG
l0_0, l0_1 = isplit(l0, 16)
l1 = ifuse([l0_1, l0_0])
l1_0, l1_1 = isplit(l1, 8)
l2 = ifuse([l1_1, l1_0])
iter_map = tvm.arith.detect_iter_map([l2[0]], var_dom([l0])).indices
outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
assert len(res) == 1
l1_inverse = floormod(outputs[0], 8) * 8 + floordiv(outputs[0], 8)
l0_inverse = floormod(l1_inverse, 4) * 16 + floordiv(l1_inverse, 4)
assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
def test_inverse_affine_map_trivial_iter():
analyzer = tvm.arith.Analyzer()
l0 = create_iter("l0", 64)
l1 = create_iter("l1", 64)
iter_map = tvm.arith.detect_iter_map([0, l0[0], l1[0]], var_dom([l0, l1])).indices
outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
# output_0 is expected to be constant and it is not included in the inverse map
assert len(res) == 2
assert analyzer.can_prove_equal(res[l0[0]], outputs[1])
assert analyzer.can_prove_equal(res[l1[0]], outputs[2])
def test_free_variables():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.Var("z", "int32")
# illegal iter if z is within dom
assert_iter_sum_failure([z * 19 + y * 3 + x], var_dom([(x, 3), (y, 3), (z, 3)]))
# iter is valid if z is free, even if there are linear forms of z
assert_iter_sum_pattern(
{z * 19 + y * 3 + x: (9, z * 19)},
var_dom(
[
(x, 3),
(y, 3),
]
),
)
assert_iter_sum_pattern(
{z * z + y * 3 + x: (9, z * z)},
var_dom(
[
(x, 3),
(y, 3),
]
),
)
def test_padding():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
fld = tvm.tir.floordiv
flm = tvm.tir.floormod
# left padding only, offset divisible
sum = 64 + y
dom_map = var_dom([(y, 192)])
assert_iter_sum_pattern(
{fld(sum, 32): (6, 2, 1), flm(sum, 32): (32, 0, 1)},
dom_map,
check_level="bijective",
)
# left padding only, offset non-divisible
sum = 80 + y
dom_map = var_dom([(y, 176)])
assert_iter_sum_pattern(
{fld(sum, 32): (6, 2, 1)},
dom_map,
)
assert_iter_sum_pattern(
{flm(fld(sum, 2), 16): (16, 0, 1), flm(sum, 2): (2, 0, 1)},
dom_map,
)
assert_iter_sum_failure({fld(sum, 32), flm(sum, 32)}, dom_map)
assert_iter_sum_failure({fld(sum, 32), fld(sum, 4)}, dom_map)
# right padding only, offset divisible
sum = x * 32 + y * 8
dom_map = var_dom([(x, 5), (y, 4)])
assert_iter_sum_pattern(
{fld(sum, 16): (10, 0, 1), flm(sum, 16): (2, 0, 8)},
dom_map,
)
assert_iter_sum_failure({fld(sum, 5)}, dom_map)
# right padding only, offset non-divisible
dom_map = var_dom([(x, 26)])
assert_iter_sum_pattern(
{fld(x, 15): (2, 0, 1)},
dom_map,
)
assert_iter_sum_pattern(
{flm(fld(x, 3), 5): (5, 0, 1), flm(x, 3): (3, 0, 1)},
dom_map,
)
# padding constants on both side
sum = x + 71
dom_map = var_dom([(x, 45)])
assert_iter_sum_pattern({fld(sum, 32): (2, 2, 1)}, dom_map)
assert_iter_sum_pattern(
{flm(fld(x, 4), 8): (8, 0, 1), flm(x, 4): (4, 0, 1)},
dom_map,
)
# padding for free iteration part
sum = x * 360 + y
dom_map = var_dom([(y, 360)])
assert_iter_sum_pattern({fld(sum, 16): (23, fld(x * 360 - flm(x, 2) * 8, 16), 1)}, dom_map)
assert_iter_sum_pattern({flm(x * 360 + y, 16): (16, 0, 1)}, dom_map)
# multiple split with same mark offset, could
# be surjective on missing (padded // LCM)
assert_iter_sum_pattern(
{
flm(x + 10, 3): (3, 0),
flm(fld(x + 10, 3), 4): (4, 0),
flm(fld(fld(x + 10, 3), 4), 5): (5, 0),
},
var_dom([(x, 240)]),
)
assert_iter_sum_failure(
{
flm(x + 10, 3),
flm(fld(x + 10, 3), 4),
flm(fld(fld(x + 10, 3), 4), 5),
fld(fld(fld(x + 10, 3), 4), 5),
},
var_dom([(x, 240)]),
)
# different offsets on splits
assert_iter_sum_pattern(
{
flm(x + 1, 3): (3, 0),
flm(fld(x + 10, 3) + 2, 4): (4, 0),
flm(fld(fld(x + 10, 3), 4) + 3, 5): (5, 0),
},
var_dom([(x, 240)]),
)
# original extent is smaller than the divisor
# it is not surjective with respect to the region [0, 16)
assert_iter_sum_failure({flm(x, 16)}, var_dom([(x, 3)]))
def test_overlapped_fuse():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.Var("z", "int32")
a = tvm.tir.Var("x", "int32")
b = tvm.tir.Var("y", "int32")
# non-bijective fuse of two
assert_iter_sum_pattern(
{
x * 7 + y: (22, 0, 1),
},
var_dom([(x, 3), (y, 8)]),
check_level="surjective",
)
assert_iter_sum_failure([x * 7 + y], var_dom([(x, 3), (y, 8)]), check_level="bijective")
# non-bijective fuse of three
assert_iter_sum_pattern(
{
x * 18 + y * 7 + z: (40, 0, 1),
},
var_dom([(x, 2), (y, 3), (z, 8)]),
check_level="surjective",
)
assert_iter_sum_failure([x * 18 + y * 7 + z], var_dom([(x, 2), (y, 3), (z, 8)]), check_level="bijective")
# negative scale fusion is not allowed
assert_iter_sum_failure([x * -7 + y], var_dom([(x, 3), (y, 8)]), check_level="surjective")
assert_iter_sum_failure([x * 7 - y], var_dom([(x, 3), (y, 8)]), check_level="surjective")
# with predicate
assert_iter_sum_pattern(
{
a * 40 + b * 20 + x * 18 + y * 3 + z: (125, 6, 1),
},
var_dom([(a, 3), (b, 2), (x, 2), (y, 6), (z, 8)]),
predicate=tvm.tir.all(z < 4, 1 < x * 6 + y, x * 6 + y < 10),
check_level="surjective",
)
# stride=1 kernel
assert_iter_sum_pattern(
{x + a: (230, 0, 1)}, var_dom([(x, 224), (a, 7)]), check_level="surjective"
)
# do not allow both strided and overlapped
assert_iter_sum_failure([5 * x + 2 * y], var_dom([(x, 4), (y, 3)]), check_level="surjective")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_modular_set.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
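# These tests exercise Analyzer.modular_set, which infers a (coeff, base) pattern
# such that the expression is always congruent to base modulo coeff. For example
# (a sketch of the cases below), x * 6 + y * 4 yields coeff == 2 and base == 0
# because gcd(6, 4) == 2.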
def test_cast():
analyzer = tvm.arith.Analyzer()
x = te.var("x", dtype="int8")
m = analyzer.modular_set((x * 3).astype("uint32"))
assert m.coeff == 3
assert m.base == 0
m = analyzer.modular_set((x * 3 + 1).astype("float32").astype("int32"))
assert m.coeff == 3
assert m.base == 1
def test_add_sub():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x", "int64"), te.var("y", "int64")
m = analyzer.modular_set(x * 6 + y * 4)
assert m.coeff == 2
assert m.base == 0
analyzer.bind(y, x * 4 + 1)
m = analyzer.modular_set(1 - y)
assert m.coeff == 4
assert m.base == 0
def test_mul():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
m = analyzer.modular_set((x * 4 + 2) * (y * 6 + 1))
assert m.coeff == 4
assert m.base == 2
def test_floormod():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
m = analyzer.modular_set(tvm.tir.floormod(x * 128 + y * 4, 256))
assert m.coeff == 4
assert m.base == 0
def test_div_shift():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
# not sure if x is non-negative
tdiv = tvm.tir.truncdiv
m = analyzer.modular_set(tdiv(x * 4 + 2, 2))
assert m.coeff == 1
assert m.base == 0
# right shift always rounds down so it is fine
m = analyzer.modular_set((x * 4 + 2) >> 1)
assert m.coeff == 2
assert m.base == 1
fld = tvm.te.floordiv
m = analyzer.modular_set(fld(x * 4 + 2, 2))
assert m.coeff == 2
assert m.base == 1
# x is non-negative
analyzer.update(x, tvm.arith.ConstIntBound(0, 100))
m = analyzer.modular_set(tdiv(x * 4 + 2, 2))
assert m.coeff == 2
assert m.base == 1
def test_mod():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
tmod = tvm.tir.truncmod
fmod = tvm.tir.floormod
# not sure if x is non-negative
m = analyzer.modular_set(tmod(x * 4 + 1, 4))
assert m.coeff == 1
assert m.base == 0
# no need to be positive if base == 0
m = analyzer.modular_set(tmod(x * 4, 4))
assert m.coeff == 4
assert m.base == 0
# floor mod tests
m = analyzer.modular_set(fmod(x * 4 + 3, 2))
assert m.coeff == 2
assert m.base == 1
m = analyzer.modular_set(fmod(x * 4 + 3, 8))
assert m.coeff == 4
assert m.base == 3
# x is non-negative
analyzer.update(x, tvm.arith.ConstIntBound(0, 100))
m = analyzer.modular_set(tmod(x * 4 + 3, 2))
assert m.coeff == 2
assert m.base == 1
def test_min_max_select():
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
m = analyzer.modular_set(tvm.te.min(x * 3, y * 9))
assert m.coeff == 3
assert m.base == 0
m = analyzer.modular_set(tvm.te.max(x * 3 + 1, y * 9 + 4))
assert m.coeff == 3
assert m.base == 1
m = analyzer.modular_set(tvm.tir.Select(x > 0, x * 3 + 1, y * 9 + 2))
assert m.coeff == 1
assert m.base == 0
def test_mix_index():
a = te.var("a")
b = te.var("b")
analyzer = tvm.arith.Analyzer()
tdiv = tvm.tir.truncdiv
m = analyzer.modular_set(a * 4 + b * 6 + 7)
assert m.coeff == 2
assert m.base == 1
m = analyzer.modular_set((a * 4 + 1) * (b * 8 + 3))
assert m.coeff == 4
assert m.base == 3
m = analyzer.modular_set(tdiv(a * 4 + 1, b * 8 + 3))
assert m.coeff == 1
assert m.base == 0
m = analyzer.modular_set((a * 4 + 1) * tdiv(b * 8, 4))
assert m.coeff == 2
assert m.base == 0
m = analyzer.modular_set((a * 12 + 1) - (b * 3 * 7 + 2))
assert m.coeff == 3
assert m.base == 2
m = analyzer.modular_set(a * 12 + tvm.te.min(b * 3 * 7, 2))
assert m.coeff == 1
assert m.base == 0
def test_constraint_scope():
a = te.var("a")
b = te.var("b")
analyzer = tvm.arith.Analyzer()
tmod = tvm.tir.truncmod
with analyzer.constraint_scope(tmod(b, 4) == 2):
m = analyzer.modular_set(b + 1)
assert m.coeff == 4
assert m.base == 3
with analyzer.constraint_scope(tmod(a, 2) == 1):
m = analyzer.modular_set(b + a * 2)
assert m.coeff == 4
assert m.base == 0
m = analyzer.modular_set(b + a * 2)
assert m.coeff == 2
assert m.base == 0
m = analyzer.modular_set(b + 1)
assert m.coeff == 1
assert m.base == 0
def test_intersect():
a = te.var("a")
analyzer = tvm.arith.Analyzer()
tmod = tvm.tir.truncmod
with analyzer.constraint_scope(tmod(a, 4) == 1):
with analyzer.constraint_scope(tmod(a, 3) == 1):
m = analyzer.modular_set(a)
assert m.coeff == 12
assert m.base == 1
with analyzer.constraint_scope(tmod(a, 3) == 2):
with analyzer.constraint_scope(tmod(a, 5) == 3):
with analyzer.constraint_scope(tmod(a, 7) == 2):
m = analyzer.modular_set(a)
assert m.coeff == 105
assert m.base == 23
def test_let():
analyzer = tvm.arith.Analyzer()
x = te.var("x")
y = te.var("y")
m = analyzer.modular_set(tvm.tir.Let(x, y * 10, x + 1))
assert m.coeff == 10
assert m.base == 1
def test_bitwise_and():
analyzer = tvm.arith.Analyzer()
x = te.var("x")
y = te.var("y")
# RHS of bitwise_and is 2^p - 1
m = analyzer.modular_set((x * 16 + y * 4) & 31)
assert m.coeff == 4
assert m.base == 0
# arbitrary RHS
m = analyzer.modular_set((x * 16 + y * 4) & 17)
assert m.coeff == 1
assert m.base == 0
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_narrow_predicate_expression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import tir
from tvm.runtime import convert
i = tir.Var("i", "int32")
j = tir.Var("j", "int32")
n = tir.Var("n", "int32")
m = tir.Var("m", "int32")
b = tir.Var("b", "bool")
buf = tir.decl_buffer(16, "int32", "buf")
tir_false = tir.IntImm("bool", False)
tir_true = tir.IntImm("bool", True)
before, expected = tvm.testing.parameters(
# General arithmetic
[tir_true, tir_true],
[tir_false, tir_false],
[b, b],
[i > 5, i > 5],
[i > n, i > 7],
[i < n, i < 0],
[i <= n, i <= 0],
[i >= n, i >= 7],
[n > i, convert(0) > i],
[n < i, convert(7) < i],
[n <= i, convert(7) <= i],
[n >= i, convert(0) >= i],
[i == n, tir.all(i <= 0, convert(7) <= i)],
[n == i, tir.all(convert(7) <= i, i <= 0)],
[i != n, tir.any(i < 0, convert(7) < i)],
[n != i, tir.any(convert(7) < i, i < 0)],
[i // 4 > n, i // 4 > 7],
[n < i // 4, convert(7) < i // 4],
[(i + n) // 4 > 0, tir.Add(i, 0) // 4 > 0],
[(i + n) // 4 == 0, tir.all(tir.Add(i, 7) // 4 <= 0, convert(0) <= tir.Add(i, 0) // 4)],
[i + n < 10, i + 7 < 10],
[i - n < 10, tir.Sub(i, 0) < 10],
[tir.Not(i < n), tir.Not(i < 7)],
# Use of FloorMod should make the narrowing strategy bail out, as
# it is non-monotonic.
[i % 8 == n, tir_false],
# Ensure that dividing by a free parameter doesn't generate a
# divide-by-zero to be triggered later.
[i // n == 0, tir_false],
### Buffer handling
[buf.vload(0) > 0, tir_false],
[buf.vload(0) > i, tir_false],
[buf.vload(i) > 0, tir_false],
[tir.And(buf.vload(i) > 0, i <= 0), tir.And(tir_false, i <= 0)],
[tir.Or(buf.vload(i) > 0, i <= n), tir.Or(tir_false, i <= 0)],
[tir.Or(tir.Not(buf.vload(i) > 0), i <= n), tir.Or(tir_false, i <= 0)],
)
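# Reading the (before, expected) pairs above (a sketch of the convention): with n
# known to lie in Range(0, 8), the predicate i > n narrows to i > 7, i.e. the
# condition that implies the original one for every value of n in that range.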
def test_narrow_expression(before, expected):
ranges = {n: tvm.ir.Range(0, 8)}
after = tvm.arith._ffi_api.NarrowPredicateExpression(before, ranges)
if expected is None:
assert after is None
else:
tvm.ir.assert_structural_equal(after, expected)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_rewrite_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
class RewriteChecker:
def __init__(self):
self.analyzer = tvm.arith.Analyzer()
def verify(self, data, expected):
res = self.analyzer.rewrite_simplify(data)
assert tvm.ir.structural_equal(res, expected), "data={}, res={}, expected={}".format(
data, res, expected
)
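# Example usage (sketch): ck.verify(x + (y - x), y) asserts that rewrite_simplify
# reduces x + (y - x) to y, as exercised in test_add_index_simplify below.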
def test_vector_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
# Add rules
ck.verify(tvm.tir.Ramp(x, 1, 4) + tvm.tir.Ramp(y, 2, 4), tvm.tir.Ramp(x + y, 3, 4))
ck.verify(tvm.tir.Ramp(x, 1, 2) + y, tvm.tir.Ramp(x + y, 1, 2))
ck.verify(y + tvm.tir.Ramp(x, 1, 2), tvm.tir.Ramp(y + x, 1, 2))
ck.verify(y.astype("int32x2") + x.astype("int32x2"), (y + x).astype("int32x2"))
ck.verify(tvm.tir.Broadcast(0, 4) + y, tvm.tir.Broadcast(y, 4))
ck.verify(
tvm.tir.Ramp(x, 1, 4).astype("float32x4") + tvm.tir.Broadcast(0.0, 4),
tvm.tir.Ramp(x, 1, 4).astype("float32x4"),
)
# Sub rules
ck.verify(tvm.tir.Ramp(x, 4, 4) - tvm.tir.Ramp(y, 2, 4), tvm.tir.Ramp(x - y, 2, 4))
ck.verify(tvm.tir.Ramp(x, 1, 2) - y, tvm.tir.Ramp(x - y, 1, 2))
ck.verify(y - tvm.tir.Ramp(x, 1, 2), tvm.tir.Ramp(y - x, -1, 2))
ck.verify(y.astype("int32x2") - x.astype("int32x2"), (y - x).astype("int32x2"))
# Mul rules
ck.verify(y.astype("int32x2") * x.astype("int32x2"), (y * x).astype("int32x2"))
ck.verify(tvm.tir.Ramp(x, 4, 4) * 2, tvm.tir.Ramp(x * 2, 8, 4))
ck.verify(2 * tvm.tir.Ramp(x, 4, 4), tvm.tir.Ramp(x * 2, 8, 4))
ck.verify(tvm.tir.Broadcast(0, 4) * x, tvm.tir.Broadcast(0, 4))
ck.verify(tvm.tir.Broadcast(0.0, 4) * x, tvm.tir.Broadcast(0.0, 4))
## DivMod rules
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# trunc div
ck.verify(tdiv(y.astype("int32x2"), x.astype("int32x2")), tdiv(y, x).astype("int32x2"))
ck.verify(tdiv(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Ramp(tdiv(x, 2), 2, 4))
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.verify(tdiv(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), (x).astype("int32x4"))
ck.verify(tdiv(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8), tdiv(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8))
# trunc mod
ck.verify(tmod(y.astype("int32x2"), x.astype("int32x2")), tmod(y, x).astype("int32x2"))
ck.verify(tmod(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Broadcast(tmod(x, 2), 4))
ck.verify(tmod(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), tvm.tir.Ramp(1, 1, 4))
ck.verify(tmod(tvm.tir.Ramp(x * 8 + 1, 15, 4), 8), tmod(tvm.tir.Ramp(1, 15, 4), 8))
# floor div
fld = tvm.te.floordiv
flm = tvm.te.floormod
ck.analyzer.update(x, tvm.arith.ConstIntBound(-10, 1000), override=True)
ck.verify(fld(y.astype("int32x2"), x.astype("int32x2")), fld(y, x).astype("int32x2"))
ck.verify(fld(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Ramp(fld(x, 2), 2, 4))
ck.verify(fld(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), (x).astype("int32x4"))
ck.verify(fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8), fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8))
ck.verify(fld(tvm.tir.Ramp(x, 8, 5), tvm.tir.Broadcast(4, 5)), tvm.tir.Ramp(fld(x, 4), 2, 5))
ck.verify(
fld(tvm.tir.Ramp(flm(x * 4, 256), 1, 4), tvm.tir.Broadcast(8, 4)),
tvm.tir.Broadcast(fld(flm(x * 4, 256), 8), 4),
)
ck.verify(
fld(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
fld(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
)
ck.verify(fld(tvm.tir.Ramp(x * 8, 1, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Broadcast(x * 2, 4))
ck.verify(
fld(tvm.tir.Ramp(x * 8, 3, 4), tvm.tir.Broadcast(4, 4)),
fld(tvm.tir.Ramp(x * 8, 3, 4), tvm.tir.Broadcast(4, 4)),
)
ck.verify(
fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), tvm.tir.Broadcast(4, 4)),
fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), tvm.tir.Broadcast(4, 4)),
)
ck.verify(
fld(tvm.tir.Ramp(x * 4, 1, 4), tvm.tir.Broadcast(64, 4)), tvm.tir.Broadcast(fld(x, 16), 4)
)
ck.verify(
fld(tvm.tir.Ramp(x * 8, 2, 4), tvm.tir.Broadcast(64, 4)), tvm.tir.Broadcast(fld(x, 8), 4)
)
ck.verify(
fld(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
fld(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
) # Example negative case: x = 15; [60, 61, 62, 63, 64] / 64 = [0, 0, 0, 0, 1]
ck.verify(
fld(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
fld(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
) # Example negative case: x = 15; [63, 64, 65, 66] % 64 = [0, 1, 1, 1]
ck.verify(
fld(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
fld(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
) # Example negative case: x = 9; [63, 70, 77, 84] % 64 = [0, 1, 1, 1]
# floor mod
ck.verify(flm(y.astype("int32x2"), x.astype("int32x2")), flm(y, x).astype("int32x2"))
ck.verify(flm(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Broadcast(flm(x, 2), 4))
ck.verify(flm(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), tvm.tir.Ramp(1, 1, 4))
ck.verify(flm(tvm.tir.Ramp(x * 8 + 1, 15, 4), 8), flm(tvm.tir.Ramp(1, 15, 4), 8))
ck.verify(flm(tvm.tir.Ramp(x, 8, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Broadcast(flm(x, 4), 4))
ck.verify(
flm(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
flm(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
)
ck.verify(flm(tvm.tir.Ramp(x * 8, 1, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Ramp(0, 1, 4))
ck.verify(
flm(tvm.tir.Ramp(x * 8, 1, 5), tvm.tir.Broadcast(4, 5)),
flm(tvm.tir.Ramp(0, 1, 5), tvm.tir.Broadcast(4, 5)),
)
ck.verify(
flm(tvm.tir.Ramp(x * 8 + 7, 1, 4), tvm.tir.Broadcast(4, 4)),
flm(tvm.tir.Ramp(3, 1, 4), tvm.tir.Broadcast(4, 4)),
)
ck.verify(
flm(tvm.tir.Ramp(x * 4, 1, 4), tvm.tir.Broadcast(64, 4)), tvm.tir.Ramp(flm(x * 4, 64), 1, 4)
)
ck.verify(
flm(tvm.tir.Ramp(x * 8, 2, 4), tvm.tir.Broadcast(64, 4)), tvm.tir.Ramp(flm(x * 8, 64), 2, 4)
)
ck.verify(
flm(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
flm(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
) # Example negative case: x = 15; [60, 61, 62, 63, 64] % 64 = [60, 61, 62, 63, 0]
ck.verify(
flm(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
flm(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
) # Example negative case: x = 15; [63, 64, 65, 66] % 64 = [63, 0, 1, 2]
ck.verify(
flm(tvm.tir.Ramp(x * 2, 1, 8), tvm.tir.Broadcast(20, 8)),
flm(tvm.tir.Ramp(x * 2, 1, 8), tvm.tir.Broadcast(20, 8)),
) # Example negative case: x = 9; [18, 19, 20, ..., 25] % 20 = [18, 19, 0, 1, ..., 5]
ck.verify(
flm(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
flm(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
) # Example negative case: x = 9; [63, 70, 77, 84] % 64 = [63, 6, 13, 20]
# Min/Max rules
vx = te.var("vx", dtype="int32x2")
vc = te.var("vc", dtype="uint1")
ck.verify(
tvm.te.min(y.astype("int32x2"), x.astype("int32x2")), tvm.te.min(y, x).astype("int32x2")
)
ck.verify(
tvm.te.min(tvm.te.min(vx, y.astype("int32x2")), x.astype("int32x2")),
tvm.te.min(vx, tvm.te.min(y, x).astype("int32x2")),
)
ck.verify(
tvm.te.max(y.astype("int32x2"), x.astype("int32x2")), tvm.te.max(y, x).astype("int32x2")
)
ck.verify(
tvm.te.max(tvm.te.max(vx, y.astype("int32x2")), x.astype("int32x2")),
tvm.te.max(vx, tvm.te.max(y, x).astype("int32x2")),
)
## Logical rules
ck.verify(y.astype("int32x2").equal(x.astype("int32x2")), (y.equal(x)).astype("uint1x2"))
ck.verify(
tvm.tir.NE(y.astype("int32x2"), (x.astype("int32x2"))), (tvm.tir.NE(y, x)).astype("uint1x2")
)
ck.verify(y.astype("int32x2") > x.astype("int32x2"), (x < y).astype("uint1x2"))
ck.verify(y.astype("int32x2") >= x.astype("int32x2"), (x <= y).astype("uint1x2"))
ck.verify(y.astype("int32x2") < x.astype("int32x2"), (y < x).astype("uint1x2"))
ck.verify(y.astype("int32x2") <= x.astype("int32x2"), (y <= x).astype("uint1x2"))
ck.verify(
tvm.tir.And(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
(tvm.tir.And(y <= x, vc)).astype("uint1x2"),
)
ck.verify(
tvm.tir.Or(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
(tvm.tir.Or(y <= x, vc)).astype("uint1x2"),
)
def test_select_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
# Add rules
ck.verify(
tvm.tir.Select(x < 0, y, 0) + tvm.tir.Select(x < 0, 1, z), tvm.tir.Select(x < 0, y + 1, z)
)
ck.verify(
tvm.tir.Select(x < 0, y, 1) - tvm.tir.Select(x < 0, 1, z),
tvm.tir.Select(x < 0, y + (-1), 1 - z),
)
ck.verify(tvm.tir.Select(x < 0, y, z) - y, tvm.tir.Select(x < 0, 0, z - y))
ck.verify(tvm.tir.Select(x < 0, y, z) - z, tvm.tir.Select(x < 0, y - z, 0))
ck.verify(
tvm.te.min(tvm.tir.Select(x < 0, y, 0), tvm.tir.Select(x < 0, 1, z)),
tvm.tir.Select(x < 0, tvm.te.min(y, 1), tvm.te.min(0, z)),
)
ck.verify(
tvm.te.max(tvm.tir.Select(x < 0, y, 0), tvm.tir.Select(x < 0, 1, z)),
tvm.tir.Select(x < 0, tvm.te.max(y, 1), tvm.te.max(0, z)),
)
ck.verify(tvm.tir.Select(x * 3 + 1 != 0, y, z), y)
ck.verify(tvm.tir.Select(x * 3 + 1 == 0, y, z), z)
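    # x * 3 + 1 is congruent to 1 modulo 3, so it can never be 0 for an integer x;
    # both conditions above are therefore resolved at simplification time.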
ck.verify(tvm.tir.Select(x > 0, y + 1, y + 1), y + 1)
def test_add_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck.verify(x + (y - x), y)
ck.verify(x - (y + 1) + (y + 1), x)
ck.verify((x - 10) + (10 - z), x - z)
ck.verify((x - y) + (z - x), z - y)
ck.verify(tvm.te.min(x, y - z) + z, tvm.te.min(x + z, y))
ck.verify(tvm.te.min(x - z, y) + z, tvm.te.min(x, y + z))
ck.verify(tvm.te.max(x, y - 10) + 10, tvm.te.max(x + 10, y))
ck.verify(tvm.te.max(x - 11, y) + 11, tvm.te.max(x, y + 11))
ck.verify(tvm.te.max(x, y * 2) + tvm.te.min(x, y * 2), x + y * 2)
ck.verify(tvm.te.min(x, y * 2) + tvm.te.max(x, y * 2), x + y * 2)
ck.verify(tvm.te.max(x, y + 2) + (-2), tvm.te.max(x + (-2), y))
ck.verify(tvm.te.min(x, y + 2) + (-2), tvm.te.min(x + (-2), y))
ck.verify(tvm.te.min(x + 2, y + 3) + (-2), tvm.te.min(x, y + 1))
ck.verify(tvm.te.max(0, 1 - x * 4) + x * 4, tvm.te.max(x * 4, 1))
ck.verify(tvm.te.max(2 - x * 4, 0) + x * 4, tvm.te.max(x * 4, 2))
ck.verify(tvm.te.min(0, 1 - x * 4) + x * 4, tvm.te.min(x * 4, 1))
ck.verify(tvm.te.min(2 - x * 4, 0) + x * 4, tvm.te.min(x * 4, 2))
ck.verify(x * y + x * 10, x * (y + 10))
ck.verify(y * x + x * 10, x * (y + 10))
ck.verify(y * x + 10 * x, x * (y + 10))
ck.verify(x * y + 10 * x, x * (y + 10))
ck.verify((2 * z) + tvm.te.min(x, y - (2 * z)), tvm.te.min(x + (z * 2), y))
ck.verify(y * x + x, x * (y + 1))
ck.verify(x * y + x, x * (y + 1))
ck.verify((x + 10) + 13, x + 23)
ck.verify((x + 10) + (13 + z), x + z + 23)
ck.verify(x * y + 10 * x, x * (y + 10))
ck.verify(y * x + x * 3, x * (y + 3))
ck.verify(x + 3 + y, x + y + 3)
ck.verify((3 - y) + x, x - y + 3)
# canonicalization
ck.verify(x + 2 + 3 + 4 + x, x * 2 + 9)
ck.verify(x + 2 + 3 + 4 + x * 3, x * 4 + 9)
# DivMod rules
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
    # trunc div
ck.verify(y * tmod(x, 8) + 10 * tmod(x, 8), tmod(x, 8) * (y + 10))
ck.analyzer.update(x, tvm.arith.ConstIntBound(-1, 1000), override=True)
ck.verify(tdiv(x, 8) * 8 + tmod(x, 8), x)
# floor div
fld = tvm.te.floordiv
flm = tvm.te.floormod
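    # Note: floordiv/floormod round toward negative infinity (e.g. fld(-1, 8) == -1,
    # flm(-1, 8) == 7), whereas truncdiv/truncmod above round toward zero.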
ck.verify(y * flm(x, 8) + 10 * flm(x, 8), flm(x, 8) * (y + 10))
ck.verify(fld(x, 8) * 8 + flm(x, 8), x)
ck.verify(fld(flm(x, 2) + 7, 2) + fld(x, 2), fld(x + 7, 2))
def test_sub_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
a, b = tvm.tir.Any(), tvm.tir.Any()
ck.verify(x + y - y, x)
ck.verify(x + y - x, y)
ck.verify(x - (y + x), 0 - y)
ck.verify(x - (x + y), 0 - y)
ck.verify(tvm.te.min(x, y) - x, tvm.te.min(0, y - x))
ck.verify(tvm.te.min(x, y) - y, tvm.te.min(x - y, 0))
ck.verify(tvm.te.max(x, y) - x, tvm.te.max(0, y - x))
ck.verify(tvm.te.max(x, y) - y, tvm.te.max(x - y, 0))
ck.verify(x - tvm.te.min(x, y), tvm.te.max(0, x - y))
ck.verify(y - tvm.te.min(x, y), tvm.te.max(y - x, 0))
ck.verify(x - tvm.te.max(x, y), tvm.te.min(0, x - y))
ck.verify(y - tvm.te.max(x, y), tvm.te.min(y - x, 0))
    # mul coefficient folding
ck.verify(x - x, 0)
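    # a and b are tir.Any() placeholders (unknown extents): the same node cancels with
    # itself below, while a - b cannot be simplified further.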
ck.verify(a - a, 0)
ck.verify(a - b, a - b)
ck.verify(x * y - x, x * (y + (-1)))
ck.verify(x * y - 10 * x, x * (y + (-10)))
ck.verify(y * x - x * z, x * (y - z))
ck.verify(y * x - z * x, x * (y - z))
ck.verify(x + 10 - 20, x + (-10))
# 4-operands pattern
ck.verify((x + y) - (x + z), y - z)
ck.verify((y + x) - (x + z), y - z)
ck.verify((x + y) - (z + x), y - z)
ck.verify((y + x) - (z + x), y - z)
ck.verify(tvm.te.min(x + y, z) - x, tvm.te.min(y, z - x))
ck.verify(tvm.te.min(y + x, z) - x, tvm.te.min(y, z - x))
ck.verify(tvm.te.min(z, x + y) - x, tvm.te.min(z - x, y))
ck.verify(tvm.te.min(z, y + x) - x, tvm.te.min(z - x, y))
ck.verify(tvm.te.max(x + y, z) - x, tvm.te.max(y, z - x))
ck.verify(tvm.te.max(y + x, z) - x, tvm.te.max(y, z - x))
ck.verify(tvm.te.max(z, x + y) - x, tvm.te.max(z - x, y))
ck.verify(tvm.te.max(z, y + x) - x, tvm.te.max(z - x, y))
ck.verify(x - tvm.te.min(x + y, z), tvm.te.max(0 - y, x - z))
ck.verify(x - tvm.te.min(y + x, z), tvm.te.max(0 - y, x - z))
ck.verify(x - tvm.te.min(z, x + y), tvm.te.max(x - z, 0 - y))
ck.verify(x - tvm.te.min(z, y + x), tvm.te.max(x - z, 0 - y))
ck.verify(tvm.te.min(x, y) - tvm.te.min(y, x), 0)
ck.verify(tvm.te.max(x, y) - tvm.te.max(y, x), 0)
ck.verify(tvm.te.min(x, y) - tvm.te.min(x + 10, y + 10), -10)
ck.verify(tvm.te.min(x + 10, y + 1) - tvm.te.min(x, y - 9), 10)
# DivMod patterns
    # trunc div
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.verify(x - tdiv(x, 3) * 3, tmod(x, 3))
ck.verify(tdiv(x + 5, 3) - tdiv(x, 3), tdiv(tmod(x, 3) + 5, 3))
ck.verify(tdiv(x + 5, 3) - tdiv(x + 1, 3), tdiv(tmod(x + 1, 3) + 4, 3))
ck.verify(y - tdiv(y, (-5)) * (-5), tmod(y, 5))
ck.verify(tdiv(y, 3) * 3 - y, 0 - tmod(y, 3))
ck.verify(y - tdiv(y - 6, 5) * 5, tmod(y + (-6), 5) + 6)
ck.verify(tdiv(y - 6, 5) * 5 - y, (-6) - tmod(y + (-6), 5))
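    # The two rules above follow from y - 6 == tdiv(y - 6, 5) * 5 + tmod(y - 6, 5):
    # rearranging gives y - tdiv(y - 6, 5) * 5 == tmod(y - 6, 5) + 6, and negating both
    # sides yields the second form.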
ck.verify(y - tdiv(y + z, 5) * 5, tmod(y + z, 5) - z)
ck.verify(tdiv(y + z, 5) * 5 - y, z - tmod(y + z, 5))
ck.verify(y - tdiv(y - z, 5) * 5, tmod(y - z, 5) + z)
ck.verify(tdiv(y - z, 5) * 5 - y, 0 - tmod(y - z, 5) - z)
ck.verify(y * 3 - tdiv(y, 2) * 6, tmod(y, 2) * 3)
ck.verify(tdiv(y, 3) * 6 - y * 2, tmod(y, 3) * (-2))
ck.verify(y * 5 - tdiv(y + z, 2) * 10, (tmod(y + z, 2) - z) * 5)
ck.verify(y * 5 - tdiv(y - z, 2) * 10, (tmod(y - z, 2) + z) * 5)
ck.verify(tdiv(y + z, 3) * 6 - y * 2, (z - tmod(y + z, 3)) * 2)
ck.verify(tdiv(y - z, 3) * 6 - y * 2, (0 - tmod(y - z, 3) - z) * 2)
ck.verify(5 * y - tdiv(y + z, 2) * 10, (tmod(y + z, 2) - z) * 5)
ck.verify(5 * y - 10 * tdiv(y - z, 2), (tmod(y - z, 2) + z) * 5)
ck.verify(6 * tdiv(y + z, 3) - y * 2, (z - tmod(y + z, 3)) * 2)
ck.verify(tdiv(y - z, 3) * 6 - 2 * y, (0 - tmod(y - z, 3) - z) * 2)
# floor div
fld = tvm.te.floordiv
flm = tvm.te.floormod
ck.analyzer.update(x, tvm.arith.ConstIntBound(-1000, 1000), override=True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(-1000, 1000), override=True)
ck.verify(x - fld(x, 3) * 3, flm(x, 3))
ck.verify(fld(x + 5, 3) - fld(x, 3), fld(flm(x, 3) + 5, 3))
ck.verify(fld(x + 5, 3) - fld(x + 2, 3), fld(flm(x + 2, 3), 3) + 1)
ck.verify(fld(y, 3) * 3 - y, 0 - flm(y, 3))
ck.verify(y - fld(y - 6, 5) * 5, flm(y + (-6), 5) + 6)
ck.verify(fld(y - 6, 5) * 5 - y, (-6) - flm(y + (-6), 5))
ck.verify(y - fld(y + z, 5) * 5, flm(y + z, 5) - z)
ck.verify(fld(y + z, 5) * 5 - y, z - flm(y + z, 5))
ck.verify(y - fld(y - z, 5) * 5, flm(y - z, 5) + z)
ck.verify(fld(y - z, 5) * 5 - y, 0 - flm(y - z, 5) - z)
ck.verify(y * 3 - fld(y, 2) * 6, flm(y, 2) * 3)
ck.verify(fld(y, 3) * 6 - y * 2, flm(y, 3) * (-2))
ck.verify(y * 5 - fld(y + z, 2) * 10, (flm(y + z, 2) - z) * 5)
ck.verify(y * 5 - fld(y - z, 2) * 10, (flm(y - z, 2) + z) * 5)
ck.verify(fld(y + z, 3) * 6 - y * 2, (z - flm(y + z, 3)) * 2)
ck.verify(fld(y - z, 3) * 6 - y * 2, (0 - flm(y - z, 3) - z) * 2)
ck.verify(5 * y - fld(y + z, 2) * 10, (flm(y + z, 2) - z) * 5)
ck.verify(5 * y - 10 * fld(y - z, 2), (flm(y - z, 2) + z) * 5)
ck.verify(6 * fld(y + z, 3) - y * 2, (z - flm(y + z, 3)) * 2)
ck.verify(fld(y - z, 3) * 6 - 2 * y, (0 - flm(y - z, 3) - z) * 2)
def test_mul_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck.verify((x + 2) * 3, x * 3 + 6)
ck.verify((x * 2) * 3, x * 6)
ck.verify(tvm.te.min(x, y) * tvm.te.max(x, y), x * y)
ck.verify(tvm.te.max(x, y) * tvm.te.min(x, y), x * y)
ck.verify((x - y) * (-2), (y - x) * 2)
def test_div_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
ck.verify(tdiv(x, x), 1)
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.analyzer.update(z, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.verify(tdiv(tdiv(x, 2), 3), tdiv(x, 6))
ck.verify(tdiv(tdiv(x, 2) + 1, 3), tdiv(x + 2, 6))
ck.verify(tdiv(x * 2, 4), tdiv(x, 2))
ck.verify(tdiv(x * 4, 2), x * 2)
ck.verify(tdiv(x * 4 + y, 2), x * 2 + tdiv(y, 2))
ck.verify(tdiv(tvm.te.min(x * 6, y), 2), tvm.te.min(x * 3, tdiv(y, 2)))
ck.verify(tdiv(tvm.te.max(x * 6, y), 2), tvm.te.max(x * 3, tdiv(y, 2)))
ck.verify(tdiv(y + x * 4, 2), tdiv(y, 2) + x * 2)
ck.verify(tdiv(tvm.te.min(y, x * 6), 2), tvm.te.min(tdiv(y, 2), x * 3))
ck.verify(tdiv(tvm.te.max(y, x * 6), 2), tvm.te.max(tdiv(y, 2), x * 3))
# 3-operands
ck.verify(tdiv(x * 6 + y + z, 2), x * 3 + tdiv(y + z, 2))
ck.verify(tdiv(x * 6 - y + (y + 3), 2), x * 3 + 1)
ck.verify(tdiv(x * 6 + (y + 3) - y, 2), x * 3 + 1)
ck.verify(tdiv(y + x * 6 + z, 2), x * 3 + tdiv(y + z, 2))
ck.verify(tdiv(x + 4, 2), tdiv(x, 2) + 2)
ck.verify(tdiv(x + y, x), tdiv(y, x) + 1)
ck.verify(tdiv(y + x, x), tdiv(y, x) + 1)
ck.verify(tdiv((x + y) + z, x), tdiv(y + z, x) + 1)
ck.verify(tdiv((y + x) + z, x), tdiv(y + z, x) + 1)
ck.verify(tdiv(y + (x + z), x), tdiv(y + z, x) + 1)
ck.verify(tdiv(y + (z + x), x), tdiv(y + z, x) + 1)
ck.verify(tdiv(x * y, y), x)
ck.verify(tdiv(y * x, y), x)
ck.verify(tdiv(x * z + y, z), x + tdiv(y, z))
ck.verify(tdiv(z * x + y, z), x + tdiv(y, z))
ck.verify(tdiv(y + x * z, z), tdiv(y, z) + x)
ck.verify(tdiv(y + z * x, z), tdiv(y, z) + x)
def test_floordiv_index_simplify():
# short name for floordiv
fld = tvm.te.floordiv
flm = tvm.te.floormod
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck.verify(fld(fld(x, 2), 3), fld(x, 6))
ck.verify(fld(fld(x, 2) + 1, 3), fld(x + 2, 6))
ck.verify(fld(x - flm(x, 21), 21), fld(x, 21))
ck.verify(fld(x * 2, 4), fld(x, 2))
ck.verify(fld(x * 4, 2), x * 2)
ck.verify(fld(x * 8 + 7, 16), fld(x, 2))
ck.verify(fld(x * 8 + 39, 16), fld(x, 2) + 2)
ck.verify(fld(x * 8 - 1, 16), fld(x * 8 + -1, 16))
ck.verify(fld(x * 8 - 9, 16), fld(x, 2) + -1)
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1), override=True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 7), override=True)
ck.verify(fld(x * 360 + y, 16), x * 22)
ck.verify(fld(x * 360 + y, 25), x * 14)
ck.verify(fld(x * 360 - 8, 25), fld(x * 360 + -8, 25))
ck.verify(fld(x * 4 + y, 2), x * 2 + fld(y, 2))
ck.verify(fld(tvm.te.min(x * 6, y), 2), tvm.te.min(x * 3, fld(y, 2)))
ck.verify(fld(tvm.te.max(x * 6, y), 2), tvm.te.max(x * 3, fld(y, 2)))
ck.verify(fld(y + x * 4, 2), x * 2 + fld(y, 2))
ck.verify(fld(tvm.te.min(y, x * 6), 2), tvm.te.min(fld(y, 2), x * 3))
ck.verify(fld(tvm.te.max(y, x * 6), 2), tvm.te.max(fld(y, 2), x * 3))
# 3-operands
ck.verify(fld(x * 6 + y + z, 2), x * 3 + fld(y + z, 2))
ck.verify(fld(x * 6 - y + (y + 3), 2), x * 3 + 1)
ck.verify(fld(x * 6 + (y + 3) - y, 2), x * 3 + 1)
ck.verify(fld(y + x * 6 + z, 2), x * 3 + fld(y + z, 2))
ck.verify(fld(x + 4, 2), fld(x, 2) + 2)
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.verify(fld(x + y, x), fld(y, x) + 1)
ck.verify(fld(y + x, x), fld(y, x) + 1)
ck.verify(fld((x + y) + z, x), fld(y + z, x) + 1)
ck.verify(fld((y + x) + z, x), fld(y + z, x) + 1)
ck.verify(fld(y + (x + z), x), fld(y + z, x) + 1)
ck.verify(fld(y + (z + x), x), fld(y + z, x) + 1)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.analyzer.update(z, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.verify(fld(x * y, y), x)
ck.verify(fld(y * x, y), x)
ck.verify(fld(x * z + y, z), x + fld(y, z))
ck.verify(fld(z * x + y, z), x + fld(y, z))
ck.verify(fld(y + x * z, z), fld(y, z) + x)
ck.verify(fld(y + z * x, z), fld(y, z) + x)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 31), override=True)
ck.analyzer.update(z, tvm.arith.ConstIntBound(0, 3), override=True)
ck.verify(fld(x * 32 + y, 64), fld(x, 2))
ck.verify(fld(x * 128 + y * 4 + z, 512), fld(x, 4))
def test_mod_index_simplify():
ck = RewriteChecker()
x, y, nx, ny, z = te.var("x"), te.var("y"), te.var("nx"), te.var("ny"), te.var("z")
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1000), override=True)
ck.analyzer.update(nx, tvm.arith.ConstIntBound(-1000, 0), override=True)
ck.analyzer.update(ny, tvm.arith.ConstIntBound(-1000, 0), override=True)
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
ck.verify(tmod(x * 10, 2), 0)
ck.verify(tmod(x * 10 + y, 2), tmod(y, 2))
ck.verify(tmod(x + 10, 2), tmod(x, 2))
ck.verify(tmod(x + y * 10, 2), tmod(x, 2))
ck.verify(tmod(x * 10 + 1 + y * 2 + 2, 2), 1)
ck.verify(tmod(x * 10, -2), 0)
ck.verify(tmod(x * 10 + y, -2), tmod(y, 2))
ck.verify(tmod(x + 10, -2), tmod(x, 2))
ck.verify(tmod(x + y * 10, -2), tmod(x, 2))
ck.verify(tmod(x * 10 + 1 + y * 2 + 2, -2), 1)
ck.verify(tmod(x * (-10), 2), 0)
ck.verify(tmod(x * (-10) + y, 2), tmod(x * (-10) + y, 2))
ck.verify(tmod(x + (-10), 2), tmod(x + (-10), 2))
ck.verify(tmod(x + y * (-10), 2), tmod(x + y * (-10), 2))
ck.verify(tmod(x * (-10), -2), 0)
ck.verify(tmod(nx * 10, 2), 0)
ck.verify(tmod(nx * (-10) + y, 2), tmod(y, 2))
ck.verify(tmod(x + ny * (-10), 2), tmod(x, 2))
ck.verify(tmod(nx * (-10) + 1 + ny * (-2) + 2, 2), 1)
ck.verify(tmod(nx * 10, -2), 0)
ck.verify(tmod(nx * (-10) + y, -2), tmod(y, 2))
ck.verify(tmod(x + ny * (-10), -2), tmod(x, 2))
def test_floormod_index_simplify():
    # short name for floormod
flm = tvm.te.floormod
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck = RewriteChecker()
x, y, nx, ny, z = te.var("x"), te.var("y"), te.var("nx"), te.var("ny"), te.var("z")
ck.verify(flm(x * 10, 2), 0)
ck.verify(flm(x * 9600, 6400), flm(x * 3200, 6400))
ck.verify(flm(x * 10 + y, 2), flm(y, 2))
ck.verify(flm(x * 360 + y, 16), flm(x * 8 + y, 16))
ck.verify(flm(x + 10, 2), flm(x, 2))
ck.verify(flm(x + y * 10, 2), flm(x, 2))
ck.verify(flm(x + y * 360, 16), flm(x + y * 8, 16))
ck.verify(flm(x * 10 + 1 + y * 2 + 2, 2), 1)
ck.verify(flm(x * (-10), 2), 0)
ck.verify(flm(x * (-10) + y, 2), flm(y, 2))
ck.verify(flm(x + (-10), 2), flm(x, 2))
ck.verify(flm(x + y * (-10), 2), flm(x, 2))
ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 31), override=True)
ck.verify(flm(x * 32 + y, 64), flm(x, 2) * 32 + y)
ck.verify(flm(x * 32 - y, 64), flm(x * 32 - y, 64))
def test_min_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
fld = tvm.te.floordiv
flm = tvm.te.floormod
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# const int bound
ck.verify(tvm.te.min(tmod(x, 2), tmod(y, 2) + 10), tmod(x, 2))
ck.verify(tvm.te.min(flm(x, 2), flm(y, 2) + 10), flm(x, 2))
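    # tmod(x, 2) lies in [-1, 1] and flm(x, 2) in [0, 1], while the right-hand operands are
    # at least 9, so both mins above are decided from const-int bounds alone.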
ck.verify(tvm.te.min(x + 1, x + 10), x + 1)
ck.verify(tvm.te.min(x + 111, x + 10), x + 10)
ck.verify(tvm.te.min(x + 1, x), x)
ck.verify(tvm.te.min(x, x + 2), x)
ck.verify(tvm.te.min(1 - x, 2 - x), 1 - x)
ck.verify(tvm.te.min(3 - x, 2 - x), 2 - x)
ck.verify(tvm.te.min(tvm.te.max(x, y), tvm.te.min(x, y)), tvm.te.min(x, y))
ck.verify(tvm.te.min(tvm.te.max(x, y), tvm.te.min(y, x)), tvm.te.min(x, y))
ck.verify(tvm.te.min(tvm.te.max(x, y), x), x)
ck.verify(tvm.te.min(tvm.te.max(y, x), x), x)
ck.verify(tvm.te.min(tvm.te.min(x, y), x), tvm.te.min(x, y))
ck.verify(tvm.te.min(tvm.te.min(x, y), y), tvm.te.min(x, y))
ck.verify(tvm.te.min(x, tvm.te.max(x, y)), x)
ck.verify(tvm.te.min(x, tvm.te.max(y, x)), x)
ck.verify(tvm.te.min(x, tvm.te.min(x, y)), tvm.te.min(x, y))
ck.verify(tvm.te.min(y, tvm.te.min(x, y)), tvm.te.min(x, y))
ck.verify(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), y), tvm.te.min(tvm.te.min(x, y), z))
ck.verify(
tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), y),
tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2),
)
ck.verify(
tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), z * 2), y),
tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), z * 2),
)
ck.verify(tvm.te.min(tvm.te.max(x, y), tvm.te.max(x, z)), tvm.te.max(tvm.te.min(y, z), x))
ck.verify(tvm.te.min(tvm.te.max(x, y), tvm.te.max(z, x)), tvm.te.max(tvm.te.min(y, z), x))
ck.verify(tvm.te.min(tvm.te.max(y, x), tvm.te.max(x, z)), tvm.te.max(tvm.te.min(y, z), x))
ck.verify(tvm.te.min(tvm.te.max(y, x), tvm.te.max(z, x)), tvm.te.max(tvm.te.min(y, z), x))
ck.verify(tvm.te.min(y + x, z + x), tvm.te.min(y, z) + x)
ck.verify(tvm.te.min(y + x, x + z), tvm.te.min(y, z) + x)
ck.verify(tvm.te.min(x + y, z + x), tvm.te.min(y, z) + x)
ck.verify(tvm.te.min(x + y, x + z), tvm.te.min(y, z) + x)
ck.verify(tvm.te.min(x - y, x - z), x - tvm.te.max(y, z))
ck.verify(tvm.te.min(y - x, z - x), tvm.te.min(y, z) - x)
ck.verify(tvm.te.min(tvm.te.min(x, 1), 10), tvm.te.min(x, 1))
ck.verify(tvm.te.min(tvm.te.min(x, 11), 10), tvm.te.min(x, 10))
ck.verify(tvm.te.min(x * 3, 9), tvm.te.min(x, 3) * 3)
ck.verify(tvm.te.min(x * 2, 0), tvm.te.min(x, 0) * 2)
ck.verify(tvm.te.min(0 - x * 2, 0), tvm.te.max(x, 0) * -2)
ck.verify(tvm.te.min(3 - x, 2), 3 - tvm.te.max(x, 1))
ck.verify(tvm.te.min(x * (-2), -4), tvm.te.max(x, 2) * -2)
ck.verify(tvm.te.min(x * (-2), 4), tvm.te.max(x, -2) * -2)
ck.verify(tvm.te.min(x * (0), 4), 0)
ck.verify(tvm.te.min(x * (0), -4), -4)
# DivMod rules
    # trunc div
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000))
ck.verify(tvm.te.min(tdiv(x + 3, 4) * 4, x), x)
ck.verify(tvm.te.min(tdiv(x + 3, 4) * 4, tvm.te.max(x, 4)), tvm.te.max(x, 4))
ck.verify(tvm.te.min(x, tdiv(x + 3, 4) * 4), x)
ck.verify(tvm.te.min(tvm.te.max(x, 4), tdiv(x + 3, 4) * 4), tvm.te.max(x, 4))
ck.analyzer.update(x, tvm.arith.ConstIntBound(-1000, 1000), True)
ck.verify(tvm.te.min(tdiv(x, 10), tdiv(y, 10)), tdiv(tvm.te.min(x, y), 10))
ck.verify(tvm.te.min(tdiv(x, (-10)), tdiv(y, (-10))), tdiv(tvm.te.max(x, y), (-10)))
# floor div
ck.analyzer.update(x, tvm.arith.ConstIntBound(-1000, 1000), True)
ck.verify(tvm.te.min(fld(x + 3, 4) * 4, x), x)
ck.verify(tvm.te.min(fld(x + 3, 4) * 4, tvm.te.max(x, 4)), tvm.te.max(x, 4))
ck.verify(tvm.te.min(x, fld(x + 3, 4) * 4), x)
ck.verify(tvm.te.min(x, fld(x, 4) * 4), fld(x, 4) * 4)
ck.verify(tvm.te.min(tvm.te.max(x, 4), fld(x + 3, 4) * 4), tvm.te.max(x, 4))
ck.verify(tvm.te.min(fld(x, 10), fld(y, 10)), fld(tvm.te.min(x, y), 10))
ck.verify(tvm.te.min(fld(x, (-10)), fld(y, (-10))), fld(tvm.te.max(x, y), (-10)))
def test_max_index_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
flm = tvm.te.floormod
fld = tvm.te.floordiv
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# const int bound
ck.verify(tvm.te.max(tmod(x, 2), tmod(y, 2) + 10), tmod(y, 2) + 10)
ck.verify(tvm.te.max(flm(x, 2), flm(y, 2) + 10), flm(y, 2) + 10)
ck.verify(tvm.te.max(x + 1, x + 10), x + 10)
ck.verify(tvm.te.max(x + 111, x + 10), x + 111)
ck.verify(tvm.te.max(x + 1, x), x + 1)
ck.verify(tvm.te.max(x, x + 2), x + 2)
ck.verify(tvm.te.max(1 - x, 2 - x), 2 - x)
ck.verify(tvm.te.max(3 - x, 2 - x), 3 - x)
ck.verify(tvm.te.max(tvm.te.min(x, y), tvm.te.max(x, y)), tvm.te.max(x, y))
ck.verify(tvm.te.max(tvm.te.min(x, y), tvm.te.max(y, x)), tvm.te.max(x, y))
ck.verify(tvm.te.max(tvm.te.min(x, y), x), x)
ck.verify(tvm.te.max(tvm.te.min(y, x), x), x)
ck.verify(tvm.te.max(tvm.te.max(x, y), x), tvm.te.max(x, y))
ck.verify(tvm.te.max(tvm.te.max(x, y), y), tvm.te.max(x, y))
ck.verify(tvm.te.max(x, tvm.te.min(x, y)), x)
ck.verify(tvm.te.max(x, tvm.te.min(y, x)), x)
ck.verify(tvm.te.max(x, tvm.te.max(x, y)), tvm.te.max(x, y))
ck.verify(tvm.te.max(y, tvm.te.max(x, y)), tvm.te.max(x, y))
ck.verify(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), y), tvm.te.max(tvm.te.max(x, y), z))
ck.verify(
tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), y),
tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2),
)
ck.verify(
tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), z * 2), y),
tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), z * 2),
)
ck.verify(tvm.te.max(tvm.te.min(x, y), tvm.te.min(x, z)), tvm.te.min(tvm.te.max(y, z), x))
ck.verify(tvm.te.max(tvm.te.min(x, y), tvm.te.min(z, x)), tvm.te.min(tvm.te.max(y, z), x))
ck.verify(tvm.te.max(tvm.te.min(y, x), tvm.te.min(x, z)), tvm.te.min(tvm.te.max(y, z), x))
ck.verify(tvm.te.max(tvm.te.min(y, x), tvm.te.min(z, x)), tvm.te.min(tvm.te.max(y, z), x))
ck.verify(tvm.te.max(y + x, z + x), tvm.te.max(y, z) + x)
ck.verify(tvm.te.max(y + x, x + z), tvm.te.max(y, z) + x)
ck.verify(tvm.te.max(x + y, z + x), tvm.te.max(y, z) + x)
ck.verify(tvm.te.max(x + y, x + z), tvm.te.max(y, z) + x)
ck.verify(tvm.te.max(x - y, x - z), x - tvm.te.min(y, z))
ck.verify(tvm.te.max(y - x, z - x), tvm.te.max(y, z) - x)
ck.verify(tvm.te.max(tvm.te.max(x, 1), 10), tvm.te.max(x, 10))
ck.verify(tvm.te.max(tvm.te.max(x, 11), 10), tvm.te.max(x, 11))
ck.verify(tvm.te.max(x * 3, 9), tvm.te.max(x, 3) * 3)
ck.verify(tvm.te.max(3 - x, 1), 3 - tvm.te.min(x, 2))
ck.verify(tvm.te.max(x * 2, 0), tvm.te.max(x, 0) * 2)
ck.verify(tvm.te.max(0 - x * 2, 0), tvm.te.min(x, 0) * -2)
ck.verify(tvm.te.max(x * (-2), -4), tvm.te.min(x, 2) * -2)
ck.verify(tvm.te.max(x * (-2), 4), tvm.te.min(x, -2) * -2)
ck.verify(tvm.te.max(x * (0), 4), 4)
ck.verify(tvm.te.max(x * (0), -4), 0)
# DivMod rules
    # trunc div
ck.verify(tvm.te.max(tdiv(x, 10), tdiv(y, 10)), tdiv(tvm.te.max(x, y), 10))
ck.verify(tvm.te.max(tdiv(x, (-10)), tdiv(y, (-10))), tdiv(tvm.te.min(x, y), (-10)))
ck.verify(tvm.te.max(tdiv(x + 3, 4) * 4, x), tdiv(x + 3, 4) * 4)
# floordiv
ck.verify(tvm.te.max(fld(x, 10), fld(y, 10)), fld(tvm.te.max(x, y), 10))
ck.verify(tvm.te.max(fld(x, (-10)), fld(y, (-10))), fld(tvm.te.min(x, y), (-10)))
ck.verify(tvm.te.max(fld(x + 3, 4) * 4, x), fld(x + 3, 4) * 4)
ck.verify(tvm.te.max(fld(x, 4) * 4, x), x)
ck.verify(tvm.te.max(x, fld(x, 4) * 4), x)
def test_cmp_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
flm = tvm.te.floormod
fld = tvm.te.floordiv
tdiv = tvm.tir.truncdiv
tmod = tvm.tir.truncmod
# const int bound
ck.verify((tmod(x, 2) + 10).equal(0), tvm.tir.const(0, "bool"))
ck.verify(tvm.tir.NE(tmod(x, 2) + 10, 0), tvm.tir.const(1, "bool"))
ck.verify(tmod(x, 2) + 10 > 1, tvm.tir.const(1, "bool"))
ck.verify(tmod(x, 2) + 10 <= 1, tvm.tir.const(0, "bool"))
ck.verify(flm(x, 2) + 2 > 1, tvm.tir.const(1, "bool"))
ck.verify(flm(x, 2) + 10 <= 1, tvm.tir.const(0, "bool"))
ck.verify(x * 3 + 10 == 0, tvm.tir.const(0, "bool"))
ck.verify(x * 3 + 10 != 0, tvm.tir.const(1, "bool"))
# canonicalization
ck.verify((x - 10).equal(0), x.equal(10))
ck.verify((10 - x).equal(0), x.equal(10))
ck.verify((x * y).equal(0), tvm.tir.Or(x.equal(0), y.equal(0)))
# cmp bound
ck.verify(x + y < x + z, y < z)
ck.verify(x + y < z + x, y < z)
ck.verify(y + x < x + z, y < z)
ck.verify(y + x < z + x, y < z)
ck.verify(y - x < z - x, y < z)
ck.verify(x - y < x - z, z < y)
ck.verify(x < z + x, tvm.tir.LT(0, z))
ck.verify(x < x + z, tvm.tir.LT(0, z))
ck.verify(100 < x + 1, tvm.tir.LT(99, x))
ck.verify(1 < 100 - x, tvm.tir.LT(x, 99))
ck.verify(x * 3 < y * 3, x < y)
ck.verify(x * (-3) < y * (-3), y < x)
ck.verify(x * 3 >= y * 3, y <= x)
ck.verify(x * 4 >= 2, tvm.tir.LE(1, x))
ck.verify(x * 2 >= 50, tvm.tir.LE(25, x))
ck.verify(x * 4 <= 2, x <= 0)
ck.verify((0 - x * 3) <= 0, tvm.tir.LE(0, x))
ck.verify((0 - x * 3) >= 0, tvm.tir.LE(x, 0))
ck.verify(2 * x <= 0, x <= 0)
ck.verify(x * 2 >= 3, tvm.tir.LE(2, x))
ck.verify(x * 2 >= 2, tvm.tir.LE(1, x))
ck.verify(x * 2 >= 1, tvm.tir.LE(1, x))
ck.verify(x * 2 >= 0, tvm.tir.LE(0, x))
ck.verify(x * 2 >= -1, tvm.tir.LE(0, x))
ck.verify(x * 2 >= -2, tvm.tir.LE(-1, x))
ck.verify(x * 2 >= -3, tvm.tir.LE(-1, x))
ck.verify(x * 2 <= 3, tvm.tir.LE(x, 1))
ck.verify(x * 2 <= 2, tvm.tir.LE(x, 1))
ck.verify(x * 2 <= 1, tvm.tir.LE(x, 0))
ck.verify(x * 2 <= 0, tvm.tir.LE(x, 0))
ck.verify(x * 2 <= -1, tvm.tir.LE(x, -1))
ck.verify(x * 2 <= -2, tvm.tir.LE(x, -1))
ck.verify(x * 2 <= -3, tvm.tir.LE(x, -2))
ck.verify(x * (-2) >= 3, tvm.tir.LE(x, -2))
ck.verify(x * (-2) >= 2, tvm.tir.LE(x, -1))
ck.verify(x * (-2) >= 1, tvm.tir.LE(x, -1))
ck.verify(x * (-2) >= 0, tvm.tir.LE(x, 0))
ck.verify(x * (-2) >= -1, tvm.tir.LE(x, 0))
ck.verify(x * (-2) >= -2, tvm.tir.LE(x, 1))
ck.verify(x * (-2) >= -3, tvm.tir.LE(x, 1))
ck.verify(x * (-2) <= 3, tvm.tir.LE(-1, x))
ck.verify(x * (-2) <= 2, tvm.tir.LE(-1, x))
ck.verify(x * (-2) <= 1, tvm.tir.LE(0, x))
ck.verify(x * (-2) <= 0, tvm.tir.LE(0, x))
ck.verify(x * (-2) <= -1, tvm.tir.LE(1, x))
ck.verify(x * (-2) <= -2, tvm.tir.LE(1, x))
ck.verify(x * (-2) <= -3, tvm.tir.LE(2, x))
# DivMod rules
    # trunc div
ck.verify(tdiv(x, 2) < 3, x < 6)
ck.verify(3 < tdiv(x, 2), tvm.tir.LT(7, x))
ck.verify(tdiv(x, 3) >= 0, tvm.tir.LE(-2, x))
ck.verify(tdiv(x, 2) >= 1, tvm.tir.LE(2, x))
ck.verify(tdiv(x, 2) >= 0, tvm.tir.LE(-1, x))
ck.verify(tdiv(x, 2) >= -1, tvm.tir.LE(-3, x))
ck.verify(tdiv(x, 2) <= 1, tvm.tir.LE(x, 3))
ck.verify(tdiv(x, 2) <= 0, tvm.tir.LE(x, 1))
ck.verify(tdiv(x, 2) <= -1, tvm.tir.LE(x, -2))
ck.verify(tdiv(x, 4) * 4 < x, tvm.tir.LT(0, tmod(x, 4)))
ck.verify(tdiv(x, 4) * 4 >= x, tvm.tir.LE(tmod(x, 4), 0))
ck.verify(tdiv(x, 4) * 4 < x + y, tvm.tir.LT(0, tmod(x, 4) + y))
ck.verify(tdiv(x, 4) * 4 < x - y, tvm.tir.LT(y, tmod(x, 4)))
ck.verify(tdiv(x + 2, 4) * 4 >= x, tvm.tir.LE(tmod(x + 2, 4), 2))
ck.verify(tdiv(x + 2, 4) * 4 >= x + y, tvm.tir.LE(tmod(x + 2, 4) + y, 2))
ck.verify(tdiv(x + 2, 4) * 4 >= x - y, tvm.tir.LE(tmod(x + 2, 4) + (-2), y))
# floor div
ck.verify(fld(x, 2) < 3, x < 6)
ck.verify(3 < fld(x, 2), tvm.tir.LT(7, x))
ck.verify(-3 < fld(x, 2), tvm.tir.LT(-5, x))
ck.verify(fld(x, 3) >= 0, tvm.tir.LE(0, x))
ck.verify(fld(x, 2) >= 1, tvm.tir.LE(2, x))
ck.verify(fld(x, 2) >= 0, tvm.tir.LE(0, x))
ck.verify(fld(x, 2) >= -1, tvm.tir.LE(-2, x))
ck.verify(fld(x, 2) <= 1, tvm.tir.LE(x, 3))
ck.verify(fld(x, 2) <= 0, tvm.tir.LE(x, 1))
ck.verify(fld(x, 2) <= -1, tvm.tir.LE(x, -1))
ck.verify(fld(x, 4) * 4 < x, tvm.tir.LT(0, flm(x, 4)))
ck.verify(fld(x, 4) * 4 >= x, tvm.tir.EQ(flm(x, 4), 0))
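    # For floor division, fld(x, 4) * 4 == x - flm(x, 4) <= x always holds, so the rule above
    # can only be true when flm(x, 4) == 0.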
ck.verify(fld(x, 4) * 4 < x + y, tvm.tir.LT(0, flm(x, 4) + y))
ck.verify(fld(x, 4) * 4 < x - y, tvm.tir.LT(y, flm(x, 4)))
ck.verify(fld(x + 2, 4) * 4 >= x, tvm.tir.LE(flm(x + 2, 4), 2))
ck.verify(fld(x + 2, 4) * 4 >= x + y, tvm.tir.LE(flm(x + 2, 4) + y, 2))
ck.verify(fld(x + 2, 4) * 4 >= x - y, tvm.tir.LE(flm(x + 2, 4) + (-2), y))
# End DivMod Rules
ck.verify(tvm.te.min(x, 11) < 10, x < 10)
ck.verify(tvm.te.min(x, 8) < 10, tvm.tir.const(1, "bool"))
ck.verify(tvm.te.max(8, x) > 10, tvm.tir.LT(10, x))
ck.verify(x + 1 < tvm.te.max(8, x), x < 7)
ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 10), override=True)
ck.analyzer.update(y, tvm.arith.ConstIntBound(-10, 0), override=True)
ck.analyzer.update(z, tvm.arith.ConstIntBound(-5, 5), override=True)
ck.verify(x < 11, tvm.tir.const(1, "bool"))
ck.verify(x <= 10, tvm.tir.const(1, "bool"))
ck.verify(z <= 5, tvm.tir.const(1, "bool"))
ck.verify(x + y <= 10, tvm.tir.const(1, "bool"))
ck.verify(x + y >= -10, tvm.tir.const(1, "bool"))
ck.verify(z - 5 <= y + 10, tvm.tir.const(1, "bool"))
ck.verify(tvm.tir.all(x > -1, z <= x + 5), tvm.tir.const(1, "bool"))
ck.verify(x * y <= 0, tvm.tir.const(1, "bool"))
ck.verify((x + 1) * (y - 1) < 0, tvm.tir.const(1, "bool"))
ck.verify(y * y >= 0, tvm.tir.const(1, "bool"))
ck.verify(x * 6 <= -3, tvm.tir.const(0, "bool"))
ck.verify(tmod(y - 1, 3) == 0, tmod(y + (-1), 3) == 0)
def test_logical_simplify():
ck = RewriteChecker()
x, y, z = te.var("x"), te.var("y"), te.var("z")
ck.verify(tvm.tir.And(tvm.tir.EQ(x, y), tvm.tir.NE(x, y)), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(tvm.tir.NE(x, y), tvm.tir.EQ(x, y)), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x > 1, tvm.tir.Not(x > 1)), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x <= y, y < x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(y < x, x <= y), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x < 1, 0 < x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x < 0, 1 < x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x < 1, 1 <= x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x <= 1, 1 < x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(1 <= x, x < 1), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(1 < x, x <= 1), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x <= 1, 2 <= x), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(2 <= x, x <= 1), tvm.tir.const(False, "bool"))
ck.verify(tvm.tir.And(x == 1, x != 2), x == 1)
ck.verify(tvm.tir.Or(tvm.tir.EQ(x, y), tvm.tir.NE(x, y)), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(tvm.tir.NE(x, y), tvm.tir.EQ(x, y)), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x > y, tvm.tir.Not(x > y)), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x <= y, y < x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(y < x, y >= x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x < 1, 0 < x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(0 < x, x < 1), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x < 1, 1 <= x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x <= 1, 1 < x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(1 <= x, x < 1), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(1 < x, x <= 1), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x <= 1, 2 <= x), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(2 <= x, x <= 1), tvm.tir.const(True, "bool"))
ck.verify(tvm.tir.Or(x != 1, x == 2), x != 1)
def test_let_simplify():
ck = RewriteChecker()
x, y = te.var("x"), te.var("y")
z = tvm.tir.Let(x, 1, x + 1)
ck.verify(z + z, 4)
def test_cast_simplify():
ck = RewriteChecker()
x = te.var("x")
dtypes = ["float32", "float16", "int32", "int8", "bool"]
for dtype1 in dtypes:
ck.verify(tvm.tir.Cast(dtype1, x - x), tvm.tir.const(0, dtype1))
ck.verify(tvm.tir.Cast(dtype1, x == x), tvm.tir.const(1, dtype1))
for dtype2 in dtypes:
for i in [0, 1, 2, 3]:
if i > 1 and (dtype1 == "bool" or dtype2 == "bool"):
continue
ck.verify(tvm.tir.Cast(dtype1, tvm.tir.const(i, dtype2)), tvm.tir.const(i, dtype1))
def test_shift_left_simplify():
ck = RewriteChecker()
z = tvm.tir.op.call_intrin("int32", "tir.shift_left", 1, 10)
ck.verify(z, tvm.tir.const(1 << 10, "int32"))
def test_div_zero_simplify():
ck = RewriteChecker()
ramp = tvm.tir.Ramp(1, 1, 2)
broadcast = tvm.tir.Broadcast(0, 2)
with pytest.raises(tvm.error.TVMError) as cm:
ck.analyzer.rewrite_simplify(tvm.tir.Div(ramp, broadcast))
assert "division by zero" in str(cm.execption)
with pytest.raises(tvm.error.TVMError) as cm:
ck.analyzer.rewrite_simplify(tvm.tir.Mod(ramp, broadcast))
assert "division by zero" in str(cm.execption)
with pytest.raises(tvm.error.TVMError) as cm:
ck.analyzer.rewrite_simplify(tvm.tir.FloorDiv(ramp, broadcast))
assert "division by zero" in str(cm.execption)
with pytest.raises(tvm.error.TVMError) as cm:
ck.analyzer.rewrite_simplify(tvm.tir.FloorMod(ramp, broadcast))
assert "division by zero" in str(cm.execption)
def test_sub_bufferload():
ck = RewriteChecker()
buf = tvm.tir.decl_buffer([1], dtype="float32")
load = tvm.tir.BufferLoad(buf, [0])
expr = load - load
ck.verify(expr, 0.0)
def test_if_then_else_simplify():
ck = RewriteChecker()
x = te.var("x", "int32")
z = tvm.tir.if_then_else(x < 5, tvm.tir.if_then_else(x > 1, 1, 0), 0)
ck.verify(z, tvm.tir.if_then_else(tvm.tir.And(tvm.tir.LT(x, 5), tvm.tir.LT(1, x)), 1, 0))
z = tvm.tir.if_then_else(x > 2, tvm.tir.if_then_else(x > 1, 1, 0), 0)
ck.verify(z, tvm.tir.if_then_else(tvm.tir.LT(2, x), 1, 0))
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_solve_linear_equations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import sys
import pytest
import tvm
from tvm import te, arith, ir, tir, testing
def test_solution_consistency():
seed = random.randrange(sys.maxsize)
print(
"\nThis test is intentionally non-deterministic, "
"if it fails please report it in github issue together with this seed {}\n".format(seed)
)
random.seed(seed)
def _check(num_vars, num_formulas, coef=(-5, 5), bounds=(-20, 20)):
variables = [te.var("x" + str(i)) for i in range(num_vars)]
relations = []
for i in range(num_formulas):
s1 = sum([v * random.randint(coef[0], coef[1]) for v in variables])
s1 += random.randint(coef[0], coef[1])
s2 = sum([v * random.randint(coef[0], coef[1]) for v in variables])
s2 += random.randint(coef[0], coef[1])
if random.random() < 0.7:
op = tvm.tir.EQ
else:
# we also make sure it can correctly handle inequalities
op = random.choice([tvm.tir.LE, tvm.tir.LT, tvm.tir.GE, tvm.tir.GT])
relations.append(op(s1, s2))
vranges = {v: tvm.ir.expr.Range(bounds[0], bounds[1] + 1) for v in variables}
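        # tvm.ir.Range(begin, end) covers the half-open interval [begin, end), so the +1 makes
        # bounds[1] an inclusive upper bound for each variable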
solution = arith.solve_linear_equations(relations, variables, vranges)
testing.check_int_constraints_trans_consistency(solution)
# leaving some variables as parameters should also be ok
for k in [1, 2]:
if len(variables) > k:
solution = arith.solve_linear_equations(relations, variables[:-k], vranges)
param_ranges = {v: vranges[v] for v in variables[-k:]}
testing.check_int_constraints_trans_consistency(solution, param_ranges)
for i in range(2):
_check(num_vars=1, num_formulas=1)
for i in range(2):
_check(num_vars=1, num_formulas=2)
for i in range(2):
_check(num_vars=2, num_formulas=1)
for i in range(2):
_check(num_vars=2, num_formulas=2)
for i in range(2):
_check(num_vars=2, num_formulas=3)
for i in range(3):
_check(num_vars=3, num_formulas=3, coef=(-2, 2))
for i in range(3):
_check(num_vars=3, num_formulas=4, coef=(-2, 2))
for i in range(3):
_check(num_vars=4, num_formulas=3, coef=(-1, 1))
for i in range(3):
_check(num_vars=10, num_formulas=2, coef=(-1, 1), bounds=(0, 4))
for i in range(3):
_check(num_vars=10, num_formulas=3, coef=(0, 1), bounds=(0, 4))
def test_empty_var_to_solve():
x, y = te.var("x"), te.var("y")
equations = [
tvm.tir.EQ(x + y, 20),
tvm.tir.EQ(x - y, 10),
]
solution = arith.solve_linear_equations(equations)
assert len(solution.src_to_dst) == 0
assert len(solution.dst_to_src) == 0
assert len(solution.src.variables) == 0
assert len(solution.src.ranges) == 0
assert ir.structural_equal(solution.src.relations, equations)
assert ir.structural_equal(solution.src, solution.dst)
def test_unique_solution():
x, y = te.var("x"), te.var("y")
solution = arith.solve_linear_equations(
[
tvm.tir.EQ(x + y, 20),
tvm.tir.EQ(x - y, 10),
],
[x, y],
)
assert list(solution.dst.variables) == []
assert ir.structural_equal(solution.src_to_dst[x], 15)
assert ir.structural_equal(solution.src_to_dst[y], 5)
def test_low_rank():
x, y, z = te.var("x"), te.var("y"), te.var("z")
ranges = {}
solution = arith.solve_linear_equations(
[
tvm.tir.EQ(x + y + z, 15),
tvm.tir.EQ(x + y, 10),
],
[x, y, z],
ranges,
)
[n0] = solution.dst.variables
assert ir.structural_equal(solution.src_to_dst[x], n0 + 10)
assert ir.structural_equal(solution.src_to_dst[y], -n0)
assert ir.structural_equal(solution.src_to_dst[z], 5)
def test_infer_range():
x, y = te.var("x"), te.var("y")
ranges = {
x: tvm.ir.Range.from_min_extent(-5, 10),
y: tvm.ir.Range.from_min_extent(0, 10),
}
solution = arith.solve_linear_equations(
[
tvm.tir.EQ(x + y, 0),
],
[x, y],
ranges,
)
[n0] = solution.dst.variables
assert ir.structural_equal(solution.src_to_dst[x], n0)
assert ir.structural_equal(solution.src_to_dst[y], -n0)
# inferred from y's range
assert ir.structural_equal(solution.dst.ranges[n0].min, -9)
assert ir.structural_equal(solution.dst.ranges[n0].extent, 10)
# additional inequality is added into the system for x
[ineq] = solution.dst.relations
assert isinstance(ineq, tvm.tir.LE)
assert ir.structural_equal(ineq.a, -5)
assert ir.structural_equal(ineq.b, n0)
def test_ill_formed():
x, y = te.var("x"), te.var("y")
solution = arith.solve_linear_equations(
[
tvm.tir.EQ(x + y, 0),
tvm.tir.EQ(x - y, 0),
tvm.tir.EQ(x, 5),
],
[x, y],
{},
)
assert list(solution.dst.variables) == []
[rel] = solution.dst.relations
assert ir.structural_equal(rel, False)
assert len(solution.src_to_dst) == 0
assert len(solution.dst_to_src) == 0
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_arith_solve_linear_inequality.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import sys
import pytest
import tvm
from tvm import te, arith, ir, tir, testing
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11458")
def test_solution_consistency():
seed = random.randrange(sys.maxsize)
print(
"\nThis test is intentionally non-deterministic, "
"if it fails please report it in github issue together with this seed {}\n".format(seed)
)
random.seed(seed)
def _check(variables, formulas, coef=(-5, 5), bounds=(-20, 20)):
vs = [te.var("x" + str(i)) for i in range(variables)]
fs = []
for i in range(formulas):
s1 = sum([v * random.randint(coef[0], coef[1]) for v in vs])
s1 += random.randint(coef[0], coef[1])
s2 = sum([v * random.randint(coef[0], coef[1]) for v in vs])
s2 += random.randint(coef[0], coef[1])
op = random.choice([tir.expr.EQ, tir.expr.LE, tir.expr.LT, tir.expr.GE, tir.expr.GT])
fs.append(op(s1, s2))
vranges = {v: tvm.ir.expr.Range(bounds[0], bounds[1] + 1) for v in vs}
before = te.all(tir.const(1, "bool"), *fs)
after = arith._ffi_api.SolveInequalitiesAsCondition(vs, vranges, fs)
after = te.all(tir.const(1, "bool"), *after)
testing.check_bool_expr_is_true(before == after, vranges)
solution = arith.solve_linear_inequalities(fs, vs, vranges, deskew_range=True)
testing.check_int_constraints_trans_consistency(solution)
for i in range(3):
_check(1, 1)
for i in range(3):
_check(1, 2)
for i in range(3):
_check(2, 1)
for i in range(3):
_check(2, 2)
for i in range(3):
_check(2, 3)
# Somewhere here coefficients in the results become too large, leading to overflow,
# so we use smaller initial coefficients
for i in range(5):
_check(3, 3, coef=(-2, 2))
for i in range(5):
_check(3, 4, coef=(-2, 2))
for i in range(5):
_check(4, 3, coef=(-1, 1))
for i in range(5):
_check(10, 2, coef=(-1, 1), bounds=(0, 4))
for i in range(5):
_check(10, 3, coef=(0, 1), bounds=(0, 4))
def test_dual_variable():
x, y = te.var("x"), te.var("y")
variables = [x, y]
ranges = {
x: tvm.ir.Range(-100, 100),
y: tvm.ir.Range(0, 10),
}
problem = [
tvm.tir.LE(x + y, 20),
tvm.tir.GE(x - y, 10),
]
# solution as conditions
solution = arith._ffi_api.SolveInequalitiesAsCondition(variables, ranges, problem)
assert ir.structural_equal(solution[0], x >= (y + 10))
assert ir.structural_equal(solution[1], x <= (20 - y))
assert ir.structural_equal(solution[2], y >= 0)
assert ir.structural_equal(solution[3], y <= 5)
# solve and get the ranges
solution = arith.solve_linear_inequalities(problem, variables, ranges)
# 0 <= y <=5
assert solution.ranges[y].min == 0
assert solution.ranges[y].extent == 6
# y + 10 <= x <= 20 - y
assert ir.structural_equal(solution.ranges[x].min, y + 10)
    assert solution.ranges[x].extent == 11  # extent of [y + 10, 20 - y] is 11 - 2*y, maximal (11) at y == 0
# deskew the solved ranges to be starting from zero
solution = arith.solve_linear_inequalities(problem, variables, ranges, deskew_range=True)
[x_new, y_new] = solution.dst.variables
[rel] = solution.dst.relations
assert ir.structural_equal(rel, (y_new * 2) + x_new <= 10)
assert ir.structural_equal(solution.dst.ranges[x_new].min, 0)
assert ir.structural_equal(solution.dst.ranges[x_new].extent, 11)
assert ir.structural_equal(solution.dst.ranges[y_new].min, 0)
assert ir.structural_equal(solution.dst.ranges[y_new].extent, 6)
assert ir.structural_equal(solution.src_to_dst[x], x_new + (y_new + 10))
assert ir.structural_equal(solution.src_to_dst[y], y_new)
assert ir.structural_equal(solution.dst_to_src[x_new], x - y - 10)
assert ir.structural_equal(solution.dst_to_src[y_new], y)
def test_equal():
x, y = te.var("x"), te.var("y")
problem = [
tvm.tir.GE(x + y, 10),
tvm.tir.GE(x - y, 2),
tvm.tir.LE(x, 6),
]
solution = arith.solve_linear_inequalities(problem, [x, y])
assert solution.ranges[x].min == 6
assert solution.ranges[x].extent == 1
assert solution.ranges[y].min == 4
assert solution.ranges[y].extent == 1
solution = arith.solve_linear_inequalities(problem, [x, y], deskew_range=True)
assert len(solution.dst.variables) == 0
assert len(solution.dst.ranges) == 0
assert len(solution.dst.relations) == 0
assert solution.src_to_dst[x] == 6
assert solution.src_to_dst[y] == 4
def test_multi_equal():
x, y, z = te.var("x"), te.var("y"), te.var("z")
problem = [
tvm.tir.LE(x, 6),
tvm.tir.GE(x, 6),
tvm.tir.GE(x - z * y, 0),
tvm.tir.LE(x - z * y, 0),
]
solution = arith.solve_linear_inequalities(problem, [x, y, z])
assert solution.ranges[x].min == 6
assert solution.ranges[x].extent == 1
assert len(solution.relations) == 3
assert ir.structural_equal(solution.relations[0], x == z * y)
assert isinstance(solution.relations[1], tvm.tir.LE)
assert solution.relations[1].b == 0
assert isinstance(solution.relations[2], tvm.tir.LE)
assert solution.relations[2].b == 0
# (z*y - 6) <= 0 && (6 - z*y) <= 0
ana = tvm.arith.Analyzer()
assert ana.simplify(solution.relations[1].a + solution.relations[2].a) == 0
assert ir.structural_equal(solution.relations[1].a, (z * y - 6)) or ir.structural_equal(
solution.relations[2].a, (z * y - 6)
)
solution = arith.solve_linear_inequalities(problem, [x, y, z], deskew_range=True)
assert solution.src_to_dst[y] == y
assert solution.src_to_dst[z] == z
assert solution.src_to_dst[x] == 6
def test_no_solution():
x = te.var("x0")
vranges = {x: tvm.ir.Range.from_min_extent(-20, 41)}
problem = [-x - 4 <= -5 * x + 2, x * 4 + 5 <= x * 5]
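    # The first inequality simplifies to x <= 1 and the second to 5 <= x, so the system has
    # no integer solution.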
solution = arith.solve_linear_inequalities(problem, [x], vranges, deskew_range=True)
assert list(solution.dst.variables) == []
[rel] = solution.dst.relations
assert ir.structural_equal(rel, False)
assert len(solution.src_to_dst) == 0
assert len(solution.dst_to_src) == 0
solution = arith.solve_linear_inequalities(problem, [x], vranges)
assert len(solution.variables) == 0
assert len(solution.ranges) == 0
[rel] = solution.relations
assert not rel
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_compute_dag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test ComputeDAG (replay, infer bound)"""
import json
import pickle
import tvm
from tvm import topi
from tvm import auto_scheduler, te
from tvm.testing.auto_scheduler import (
get_tiled_matmul,
invalid_compute_definition,
matmul_auto_scheduler_test,
parallel_matmul_auto_scheduler_test,
)
def test_apply_steps():
dag, s = get_tiled_matmul()
dag.print_python_code_from_state(s)
sch, tensors = dag.apply_steps_from_state(s)
tvm.lower(sch, tensors, simple_mode=True)
def test_infer_bound():
dag, s = get_tiled_matmul()
s = dag.infer_bound_from_state(s)
def test_estimate_flop():
N = 512
A, B, C = matmul_auto_scheduler_test(N, N, N)
dag = auto_scheduler.ComputeDAG([A, B, C])
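    # an N x N x N matmul performs N**3 multiply-add pairs, i.e. 2 * N**3 FLOPs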
assert abs(dag.flop_ct - 2 * N**3) < 0.5
D = topi.nn.relu(C)
dag = auto_scheduler.ComputeDAG([A, B, D])
assert abs(dag.flop_ct - (2 * N**3 + N * N)) < 0.5
# should not count the comparison operations in padding
E = topi.nn.pad(C, [1, 1])
dag = auto_scheduler.ComputeDAG([A, B, E])
assert abs(dag.flop_ct - 2 * N**3) < 0.5
F = te.compute((N, N), lambda i, j: E[i, j], name="F", attrs={"FLOP": 1234})
dag = auto_scheduler.ComputeDAG([A, B, F])
assert abs(dag.flop_ct - (2 * N**3 + 1234)) < 0.5
A = te.placeholder((N, N), dtype="float32", name="A")
F = te.compute((N, N), lambda i, j: te.if_then_else(A[i, j] > 0, A[i, j], 0))
dag = auto_scheduler.ComputeDAG([A, F])
assert abs(dag.flop_ct - N**2) < 0.5
def test_stage_order():
"""Test if the stage order is preserved when recovering a DAG."""
N = 512
A, B, C, D, E = parallel_matmul_auto_scheduler_test(N)
sch = te.create_schedule([D.op, E.op])
(D_local,) = sch.cache_write([D], "local")
(E_local,) = sch.cache_write([E], "local")
sch.cache_read(A, "shared", [D_local])
sch.cache_read(B, "shared", [D_local])
sch.cache_read(A, "shared", [E_local])
sch.cache_read(C, "shared", [E_local])
dag = auto_scheduler.ComputeDAG(sch)
stage_ops_1 = dag.get_init_state().stage_ops
    # 3 placeholders, 4 x.shared, 2 {D,E}.local, 2 {D,E} compute
assert len(stage_ops_1) == 11
# Cache read stage should follow the source stage
for idx, op in enumerate(stage_ops_1):
if op.name == "A":
assert (
stage_ops_1[idx + 1].name == "A.d.shared"
and stage_ops_1[idx + 2].name == "A.shared"
)
elif op.name in ["B", "C"]:
assert stage_ops_1[idx + 1].name == "%s.shared" % op.name
# Apply the same schedule to Ansor state and it should have the same stage order
dag = auto_scheduler.ComputeDAG([A, B, C, D, E])
state = dag.get_init_state()
D_local = state.cache_write(D, "local")
E_local = state.cache_write(E, "local")
state.cache_read(A, "shared", [D_local])
state.cache_read(B, "shared", [D_local])
state.cache_read(A, "shared", [E_local])
state.cache_read(C, "shared", [E_local])
stage_ops_2 = state.stage_ops
assert len(stage_ops_1) == len(stage_ops_2)
# Cache read stage should follow the source stage
for op1, op2 in zip(stage_ops_1, stage_ops_2):
assert op1.name == op2.name
# Serialize and deserialize the ComputeDAG constructed by a list of tensor ops.
loaded_dag = pickle.loads(pickle.dumps(dag))
assert str(loaded_dag.get_init_state()) == str(dag.get_init_state())
assert len(loaded_dag.get_init_state().stage_ops) == len(dag.get_init_state().stage_ops)
# Serialize and deserialize the search task. Note that we intentionally skip hardware_params
# to test if the default one is serialized along with other attributes as well.
task = auto_scheduler.SearchTask(
compute_dag=dag, workload_key=json.dumps(("test-key",)), target=tvm.target.Target("llvm")
)
task2 = pickle.loads(pickle.dumps(task))
assert '["test-key"]' in auto_scheduler.workload_registry.WORKLOAD_FUNC_REGISTRY
assert str(task.compute_dag.get_init_state()) == str(task2.compute_dag.get_init_state())
assert len(task.compute_dag.get_init_state().stage_ops) == len(
task2.compute_dag.get_init_state().stage_ops
)
assert task.workload_key == task2.workload_key
assert str(task.target) == str(task2.target)
assert task.hardware_params.num_cores == task2.hardware_params.num_cores
assert task.hardware_params.vector_unit_bytes == task2.hardware_params.vector_unit_bytes
assert task.hardware_params.cache_line_bytes == task2.hardware_params.cache_line_bytes
def test_invalid_compute_dag():
failed = False
try:
A, B = invalid_compute_definition()
auto_scheduler.ComputeDAG([A, B])
except tvm.TVMError:
failed = True
assert failed
if __name__ == "__main__":
test_apply_steps()
test_infer_bound()
test_estimate_flop()
test_stage_order()
test_invalid_compute_dag()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test cost models"""
import tempfile
import numpy as np
import tvm
from tvm import auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
def get_sample_records(number):
"""Generate a list of random MeasureInput and MeasureResult pairs"""
N = 128
task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target="llvm")
policy = auto_scheduler.SketchPolicy(task, verbose=0)
states = policy.sample_initial_population()[:number]
inputs = [auto_scheduler.MeasureInput(task, s) for s in states]
results = [
auto_scheduler.MeasureResult([np.random.uniform(0.5, 1.0)], 0, "", 0.1, 0)
for _ in range(len(inputs))
]
return task, inputs, results
def test_random_model():
task, inputs, results = get_sample_records(50)
model = auto_scheduler.RandomModel()
model.update(inputs, results)
scores = model.predict(task, [x.state for x in inputs])
assert len(scores) == len(inputs)
def test_xgb_model():
task, inputs, results = get_sample_records(50)
model = auto_scheduler.XGBModel(num_warmup_sample=-1)
model.update(inputs, results)
preds = model.predict(task, [x.state for x in inputs])
assert len(preds) == len(inputs)
costs = [np.mean([x.value for x in res.costs]) for res in results]
throughputs = np.min(costs) / costs
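    # normalize costs so the fastest measurement maps to throughput 1.0 and slower ones to < 1.0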
# test regression quality
rmse = np.sqrt(np.mean([np.square(pred - label) for pred, label in zip(preds, throughputs)]))
assert rmse <= 0.3
# test loading a record file
tmpdir = tvm.contrib.utils.tempdir()
tmpfile = tmpdir.relpath("test1")
auto_scheduler.save_records(tmpfile, inputs, results)
model.update_from_file(tmpfile)
# test model serialization
tmpfile = tmpdir.relpath("test2")
model.save(tmpfile)
model.load(tmpfile)
if __name__ == "__main__":
test_random_model()
test_xgb_model()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_evolutionary_search.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test evolutionary search. """
import tvm
import pytest
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
from tvm import auto_scheduler, te
from tvm.auto_scheduler.cost_model.cost_model import PythonBasedModel
def test_mutate_tile_size():
"""
    The test case initializes evo search with a batch of "bad" states and checks whether
the search algorithm can find "good" states by mutating the "bad" states.
This unit test has been tested with 1,000 runs with no failures, meaning that
the failure rate is less than 0.1%.
"""
class MockCostModel(PythonBasedModel):
"""A mock cost model that rates 1 only for the states with tile_k=2."""
@staticmethod
def is_good_state(state):
for line in str(state).split("\n"):
if line.find("k.1") != -1 and line.find("(0,2)") != -1:
return True
return False
def predict(self, task, states):
scores = []
for state in states:
scores.append(1 if self.is_good_state(state) else 0)
return scores
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(10, 10, 4), target=tvm.target.Target("llvm")
)
policy = auto_scheduler.SketchPolicy(task, program_cost_model=MockCostModel(), verbose=0)
states = policy.sample_initial_population()[:50]
bad_states = []
for state in states:
if not MockCostModel.is_good_state(state):
bad_states.append(state)
new_states = policy.evolutionary_search(bad_states, 50)
found = False
for state in new_states:
if MockCostModel.is_good_state(state):
found = True
break
assert found
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11440")
def test_mutate_parallel():
"""
    The test case initializes evo search with a batch of "bad" states and checks whether
the search algorithm can find "good" states by mutating the "bad" states.
"""
class MockCostModel(PythonBasedModel):
@staticmethod
def is_good_state(state):
for line in str(state).split("\n"):
if (
line.find("parallel i.0@ (0") != -1
or line.find("parallel [email protected]@ (0") != -1
or line.find("parallel [email protected]@i.1@ (0") != -1
):
return True
return False
def predict(self, task, states):
scores = []
for state in states:
scores.append(1 if self.is_good_state(state) else 0)
return scores
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(1024, 1024, 1024), target="llvm"
)
policy = auto_scheduler.SketchPolicy(task, program_cost_model=MockCostModel(), verbose=0)
found = False
retry_ct = 0
while retry_ct < 10 and not found:
states = policy.sample_initial_population()[:100]
bad_states = []
for state in states:
if not MockCostModel.is_good_state(state):
bad_states.append(state)
new_states = policy.evolutionary_search(bad_states, 50)
for state in new_states:
if MockCostModel.is_good_state(state):
found = True
break
retry_ct += 1
assert found
if __name__ == "__main__":
test_mutate_tile_size()
test_mutate_parallel()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test feature extraction"""
import math
import tempfile
import tvm
from tvm import te, auto_scheduler, relay
from tvm.script import tir as T
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
def fequal(a, b):
return math.fabs(a - b) < 1e-6
def test_cpu_matmul():
dag = auto_scheduler.ComputeDAG(matmul_auto_scheduler_test(512, 512, 512))
s = dag.get_init_state()
C = s.stage_ops[2]
i, j, k = s[C].iters
io, ii = s.split(C, i, [16])
jo, ji = s.split(C, j, [8])
s.reorder(C, [io, jo, k, ji, ii])
s.vectorize(C, ji)
s.parallel(C, io)
s.parallel(C, jo)
s.unroll(C, k)
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
names = auto_scheduler.feature.get_per_store_feature_names()
fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
stage_0 = fea[0]
assert len(stage_0) == len(names), "%d vs %d" % (len(stage_0), len(names))
fea_dict = {}
for name, value in zip(names, stage_0):
fea_dict[name] = value
for name in ["B0", "B1", "B2"]:
if fequal(fea_dict[name + ".acc_type.kReadWrite"], 1.0):
c_name = name
if fequal(fea_dict[name + ".acc_type.kRead"], 1.0):
if fequal(fea_dict[name + ".stride"], 0.0):
b_name = name
else:
a_name = name
"""
lowered IR:
Placeholder: A, B
parallel i.0 (0,32)
parallel j.0 (0,64)
unroll k (0,512)
vectorize j.1 (0,8)
for i.1 (0,16)
              C[...] = A[...] * B[...]
"""
# check touched memory in bytes, touched unique memory in bytes, reuse distance, etc.
assert fequal(fea_dict[c_name + ".bytes"], math.log2(512**3 * 4 + 1))
assert fequal(fea_dict[b_name + ".unique_bytes"], math.log2(512**2 * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_iter"], math.log2(8 * 16 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_bytes"], math.log2((8 * 16 + 8 + 16) * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_ct"], math.log2(512 + 1))
# check annotations
assert fequal(fea_dict["unroll_num"], math.log2(1 + 1))
# assert fequal(fea_dict["unroll_type.kPosInnerReduce"], 1.0)
assert fequal(fea_dict["vec_num"], math.log2(1 + 1))
assert fequal(fea_dict["parallel_num"], math.log2(2 + 1))
assert fequal(fea_dict["parallel_prod"], math.log2((512 * 512 / 16 / 8) + 1))
def test_cpu_fusion():
def fusion_test(N, M):
A = te.placeholder((N, M), name="A")
B = te.compute((N, M), lambda i, j: A[i][j], name="B")
C = te.compute((N, M), lambda i, j: B[i][j], name="C")
return [A, B, C]
dag = auto_scheduler.ComputeDAG(fusion_test(64, 32))
s = dag.get_init_state()
s.compute_at(1, 2, s.stages[2].iters[1])
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
names = auto_scheduler.feature.get_per_store_feature_names()
fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
"""
lowered IR:
Placeholder: A
for i (0,64)
for j (0,32)
for ii (1)
for jj (1)
B[...] = A[...]
C[...] = B[...]
"""
# check reuse distance and reuse type after fusion
found = False
for stage_fea in fea:
for i, (name, value) in enumerate(zip(names, stage_fea)):
if "reuse_type.kSerialMultipleReadWrite" in name and value > 0.5:
# reuse distance in #iter
assert fequal(stage_fea[i + 2], 1.0)
# reuse distance in bytes
assert fequal(stage_fea[i + 3], math.log2(16 + 1))
found = True
assert found
def test_gpu_feature():
# Use records to build a complicated GPU program
json_records = "\n".join(
(
"""{"i": [["[\\"matmul_auto_scheduler_test\\", 512, 512, 512]", "cuda"], [[], [["CHW", 2, "local"], ["SP", 2, 0, 512, [1, 16, 32, 1], 1], ["SP", 2, 5, 512, [4, 1, 1, 16], 1], ["SP", 2, 10, 512, [1, 2], 1], ["RE", 2, [0, 5, 1, 6, 2, 7, 10, 11, 3, 8, 12, 4, 9]], ["FSP", 3, 0, 1, 3], ["FSP", 3, 4, 2, 3], ["RE", 3, [0, 4, 1, 5, 2, 6, 3, 7]], ["FU", 2, [0, 1]], ["FU", 3, [0, 1]], ["FU", 2, [1, 2]], ["FU", 3, [1, 2]], ["FU", 2, [2, 3]], ["FU", 3, [2, 3]], ["CA", 2, 3, 2], ["CHR", 1, "shared", [2]], ["CA", 2, 3, 3], ["FU", 2, [0, 1]], ["FFSP", 2, 0, [1, 2], 1, 1], ["AN", 2, 1, 6], ["CHR", 0, "shared", [3]], ["CA", 1, 4, 3], ["FU", 1, [0, 1]], ["FFSP", 1, 0, [1, 2], 1, 1], ["AN", 1, 1, 6], ["AN", 5, 0, 5], ["AN", 5, 1, 4], ["AN", 5, 2, 6], ["PR", 4, 0, "auto_unroll_max_step$1024"]]]], "r": [[0.00536798], 0, 2.49277, 1585564852], "v": "v0.1"}""",
)
)
# load states
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(json_records)
f.flush()
inputs, _ = auto_scheduler.RecordReader(f.name).read_lines()
inp = inputs[0]
task = auto_scheduler.SearchTask(
workload_key=inp.task.workload_key,
target=inp.task.target,
hardware_params=auto_scheduler.HardwareParams(
100000, 16, 64, 1 << 30, 1 << 30, 1 << 30, 1 << 30, 1 << 30
),
)
state = task.compute_dag.infer_bound_from_state(inputs[0].state)
fea = auto_scheduler.feature.get_per_store_features_from_states([state], task)[0]
names = auto_scheduler.feature.get_per_store_feature_names()
# build feature dict
fea_dicts = []
for i in range(len(fea)):
tmp_dict = {}
for j in range(len(names)):
tmp_dict[names[j]] = fea[i][j]
fea_dicts.append(tmp_dict)
"""
lowered IR:
Placeholder: A, B
blockIdx.x [email protected]@ (0,8)
vthread [email protected]@ (0,4)
threadIdx.x [email protected]@ (0,16)
C.local auto_unroll: 1024
for k.0 (0,256)
for ax0@[email protected] (0,8)
threadIdx.x ax0@[email protected] (0,16)
B.shared = ...
for ax0@[email protected] (0,64)
threadIdx.x ax0@[email protected] (0,16)
A.shared = ...
for i_c.3 (0,32)
for k.2 (0,2)
for j_c.4 (0,16)
C.local = ...
for i.3 (0,32)
for j.3 (0,16)
C = ...
"""
# check gpu-related features
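    # (Each of these features stores log2(extent + 1); e.g. blockIdx_x_len of log2(8 + 1)
    #  corresponds to the blockIdx.x extent of 8 in the lowered IR sketched above.)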
assert fequal(fea_dicts[0]["blockIdx_x_len"], math.log2(8 + 1))
assert fequal(fea_dicts[0]["vthread_len"], math.log2(4 + 1))
assert fequal(fea_dicts[1]["threadIdx_x_len"], math.log2(16 + 1))
assert fequal(fea_dicts[0]["threadIdx_y_len"], math.log2(1 + 1))
assert fequal(fea_dicts[2]["blockIdx_z_len"], math.log2(1 + 1))
assert fequal(fea_dicts[0]["is_gpu"], 1.0)
@T.prim_func
def tir_matmul(
A: T.Buffer[(16384,), "float32"],
B: T.Buffer[(16384,), "float32"],
C: T.Buffer[(16384,), "float32"],
) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(A, [128, 128], dtype="float32", data=A.data)
T.preflattened_buffer(B, [128, 128], dtype="float32", data=B.data)
T.preflattened_buffer(C, [128, 128], dtype="float32", data=C.data)
# body
for x, y in T.grid(128, 128):
C[x * 128 + y] = T.float32(0)
for k in T.serial(128):
C[x * 128 + y] = C[x * 128 + y] + A[x * 128 + k] * B[y * 128 + k]
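# A minimal NumPy reference for the tir_matmul kernel above, added only as an informal
# sketch (it is not used by the tests): both A and B are indexed with k along their last
# dimension, so when the flat buffers are viewed as 128x128 matrices the kernel computes
# C = A @ B.T. The function name is our own and is not part of any TVM API.
def _tir_matmul_numpy_reference(a_flat, b_flat):
    import numpy as np  # local import to keep this sketch self-contained

    a = np.asarray(a_flat, dtype="float32").reshape(128, 128)
    b = np.asarray(b_flat, dtype="float32").reshape(128, 128)
    return (a @ b.T).reshape(-1)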
def test_primfunc_without_lowering():
features = auto_scheduler.feature.named_features_from_primfunc(tir_matmul)
assert features["float_mad"].shape == (1,)
    # featurization does not detect fused multiply-add right now, so the multiply-adds are
    # counted as separate adds and multiplies
assert abs(features["float_addsub"][0] - 128 * 128 * 128) < 10
assert abs(features["float_mul"][0] - 128 * 128 * 128) < 10
for i in range(0, 3):
assert abs(features[f"B{i}.unique_bytes"][0] - 128 * 128 * 4) < 10 # 4 bytes per float32
def test_primfunc_lowered():
# Lower tir function so all passes get applied
f = tvm.lower(tir_matmul)
features = auto_scheduler.feature.named_features_from_primfunc(f["main"])
assert features["float_mad"].shape == (1,)
    # featurization does not detect fused multiply-add right now, so the multiply-adds are
    # counted as separate adds and multiplies
assert abs(features["float_addsub"][0] - 128 * 128 * 128) < 10
assert abs(features["float_mul"][0] - 128 * 128 * 128) < 10
for i in range(0, 3):
assert abs(features[f"B{i}.unique_bytes"][0] - 128 * 128 * 4) < 10 # 4 bytes per float32
def test_dense_lowered():
a = relay.var("a", relay.TensorType((128, 128), "float32"))
b = relay.var("b", relay.TensorType((128, 128), "float32"))
c = relay.nn.dense(a, b)
mod = tvm.IRModule.from_expr(relay.Function([a, b], c))
target = "llvm"
comp = relay.vm.VMCompiler()
mod, params = comp.optimize(mod, params={}, target=target)
for name, func in mod.functions.items():
if name.name_hint != "main":
break
features = auto_scheduler.feature.named_features_from_primfunc(func)
    # featurization does not detect fused multiply-add right now, so the multiply-adds are
    # counted as separate adds and multiplies
assert features["float_addsub"].sum() >= 128 * 128 * 128
assert features["float_mul"].sum() >= 128 * 128 * 128
total_bytes_loaded = 0
for i in range(0, 4):
total_bytes_loaded += features[f"B{i}.unique_bytes"].sum()
assert total_bytes_loaded > 2 * 128 * 128 * 4 # 4 bytes per float32
@T.prim_func
def negative_extent(A: T.Buffer[(1,), "float32"]):
for j in range(0, -1):
A[j] = A[j] + 1.0
def test_negative_extent():
features = auto_scheduler.feature.named_features_from_primfunc(negative_extent)
assert features["B0.unique_bytes"] == 0
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_fusion()
test_gpu_feature()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_layout_rewrite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test AutoScheduler Layout Rewrite"""
import tempfile
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import topi
from tvm import auto_scheduler, te
from tvm.testing.auto_scheduler import get_tiled_matmul, matmul_auto_scheduler_test
def test_apply_steps_with_layout_rewrite():
dag, s = get_tiled_matmul()
_, bufs = dag.apply_steps_from_state(s)
assert bufs[1].shape[0] == 512
assert bufs[1].shape[1] == 512
_, bufs = dag.apply_steps_from_state(
s, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED
)
assert bufs[1].shape[0] == 4
assert bufs[1].shape[1] == 8
assert bufs[1].shape[2] == 4
assert bufs[1].shape[3] == 4
assert bufs[1].shape[4] == 512
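    # Note (informal): the rewritten weight keeps the same number of elements as the
    # original 512x512 buffer, since 4 * 8 * 4 * 4 * 512 == 512 * 512.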
_, bufs = dag.apply_steps_from_state(
s, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE
)
assert bufs[1].shape[0] == 512
assert bufs[1].shape[1] == 512
def test_apply_steps_with_layout_rewrite_corner_case():
A, B, C = matmul_auto_scheduler_test(1, 1, 1)
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
s.compute_root(C)
i_j_fused = s.fuse(C, [s[C].iters[0], s[C].iters[1]])
s.parallel(C, i_j_fused)
_, bufs = dag.apply_steps_from_state(
s, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED
)
@tvm.testing.requires_llvm
def test_correctness_layout_rewrite_rewrite_for_preTransformed():
N = 16
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target)
dag = task.compute_dag
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
search_policy = auto_scheduler.SketchPolicy(task)
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tuning_options = auto_scheduler.TuningOptions(
num_measure_trials=100,
runner=measure_ctx.runner,
verbose=2,
early_stopping=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task.tune(tuning_options, search_policy=search_policy)
inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target)
s, bufs = dag.apply_steps_from_state(
inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED
)
s_ref, bufs_ref = dag.apply_steps_from_state(inp.state)
np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs]
np_args_ref = [np.array(x) for x in np_args]
weight = np_args_ref[1]
# infer shape for the rewritten layout
if len(weight.shape) >= 6:
# For cpu tile structure SSRSRS
base = len(weight.shape) - 6
red_dim = weight.shape[2 + base] * weight.shape[4 + base]
out_dim = weight.shape[3 + base] * weight.shape[5 + base]
for i in range(base + 2):
out_dim *= weight.shape[i]
new_order = (
[
2 + base,
4 + base,
]
+ list(range(base + 2))
+ [
3 + base,
5 + base,
]
)
np_args_ref[1] = np_args_ref[1].transpose(new_order)
np_args_ref[1] = np_args_ref[1].reshape((red_dim, out_dim))
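            # Informal summary of the reshape above (not required by the test): in the
            # rewritten layout, axes (2 + base, 4 + base) together form the reduction
            # dimension and the remaining axes form the output dimension, so transposing
            # and reshaping recovers a plain (red_dim, out_dim) reference weight that the
            # un-rewritten schedule can consume.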
func = tvm.build(s, bufs, target=target)
func_ref = tvm.build(s_ref, bufs_ref, target=target)
dev = tvm.device(str(target))
dev_ref = tvm.cpu()
args = [tvm.nd.array(x, device=dev) for x in np_args]
args_ref = [tvm.nd.array(x, device=dev_ref) for x in np_args_ref]
dev.sync()
func(*args)
func_ref(*args_ref)
dev.sync()
tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3)
tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3)
del measure_ctx
@tvm.testing.requires_llvm
def test_correctness_layout_rewrite_insert_transform_stage():
N = 128
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target)
dag = task.compute_dag
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
search_policy = auto_scheduler.SketchPolicy(task)
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tuning_options = auto_scheduler.TuningOptions(
num_measure_trials=2,
runner=measure_ctx.runner,
verbose=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task.tune(tuning_options, search_policy=search_policy)
inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target)
s, bufs = dag.apply_steps_from_state(
inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE
)
s_ref, bufs_ref = dag.apply_steps_from_state(inp.state)
np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs]
func = tvm.build(s, bufs, target=target)
func_ref = tvm.build(s_ref, bufs_ref, target=target)
dev = tvm.device(str(target))
dev_ref = tvm.cpu()
args = [tvm.nd.array(x, device=dev) for x in np_args]
args_ref = [tvm.nd.array(x, device=dev_ref) for x in np_args]
dev.sync()
func(*args)
func_ref(*args_ref)
dev.sync()
tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3)
tvm.testing.assert_allclose(args[1].numpy(), args_ref[1].numpy(), atol=1e-3, rtol=1e-3)
tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3)
del measure_ctx
if __name__ == "__main__":
test_apply_steps_with_layout_rewrite()
test_apply_steps_with_layout_rewrite_corner_case()
test_correctness_layout_rewrite_rewrite_for_preTransformed()
test_correctness_layout_rewrite_insert_transform_stage()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_loop_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test loop state and schedule primitives"""
import numpy as np
import tvm
from tvm import auto_scheduler, te
from tvm import topi
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
)
def test_split_fuse_reorder_annotation():
A, B, C = matmul_auto_scheduler_test(N=512, M=512, K=512)
dag = auto_scheduler.ComputeDAG([A, B, C])
s0 = dag.get_init_state()
i, j, k = s0[C].iters
assert i.range.extent == 512
io, ii = s0.split(C, i, [16])
assert s0[C].iters[0] == io
assert s0[C].iters[1] == ii
assert io.range.extent == 32
assert ii.range.extent == 16
jo, ji = s0.split(C, j, [8])
assert jo.range.extent == 64
assert ji.range.extent == 8
s0.reorder(C, [io, jo, k, ji, ii])
assert s0[C].iters[2].range.extent == 512
fused_it = s0.fuse(C, [io, jo])
assert fused_it.range.extent == 2048
s1 = dag.get_init_state()
i, j, _ = s1[C].iters
i1, i2, i3 = s1.split(C, i, [8, 2])
j1, j2, j3 = s1.split(C, j, [32, 8], False)
assert s1[C].iters[0].range.extent == 32
assert s1[C].iters[1].range.extent == 8
assert s1[C].iters[2].range.extent == 2
assert s1[C].iters[3].range.extent == 32
assert s1[C].iters[4].range.extent == 8
assert s1[C].iters[5].range.extent == 2
res = s1.bind(C, i1, "blockIdx.x")
assert res == s1[C].iters[0]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["blockIdx.x"]
res = s1.bind(C, i2, "vthread")
assert res == s1[C].iters[1]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vthread"]
res = s1.bind(C, i3, "threadIdx.y")
assert res == s1[C].iters[2]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["threadIdx.y"]
res = s1.parallel(C, j1)
assert res == s1[C].iters[3]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["parallel"]
res = s1.unroll(C, j2)
assert res == s1[C].iters[4]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["unroll"]
res = s1.vectorize(C, j3)
assert res == s1[C].iters[5]
assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vectorize"]
def test_compute_at_root_inline():
dag = auto_scheduler.ComputeDAG(
conv2d_nchw_bn_relu_auto_scheduler_test(
N=1, H=224, W=224, CI=3, CO=64, kernel_size=7, strides=2, padding=3
)
)
s0 = dag.get_init_state()
# data, padding, kernel = 0, 1, 2
conv = s0.stage_ops[3]
# bias = 4
bias_add = s0.stage_ops[5]
# bn_scale = 6
bn_mul = s0.stage_ops[7]
# bn_offset = 8
bn_add = s0.stage_ops[9]
relu = s0.stage_ops[10]
s0.compute_inline(bn_add)
assert s0[bn_add].compute_at == 1
s0.compute_inline(bn_mul)
assert s0[bn_mul].compute_at == 1
s0.compute_inline(bias_add)
assert s0[bias_add].compute_at == 1
assert s0[conv].iters[0].range.extent == 1
assert s0[conv].iters[1].range.extent == 64
assert s0[conv].iters[2].range.extent == 112
assert s0[conv].iters[3].range.extent == 112
assert s0[conv].iters[4].range.extent == 3
assert s0[conv].iters[5].range.extent == 7
assert s0[conv].iters[6].range.extent == 7
s0.compute_at(conv, relu, s0[relu].iters[2])
assert s0[conv].compute_at == 2
s0 = dag.infer_bound_from_state(s0)
assert s0[conv].iters[0].range.extent == 1
assert s0[conv].iters[1].range.extent == 1
assert s0[conv].iters[2].range.extent == 1
assert s0[conv].iters[3].range.extent == 112
assert s0[conv].iters[4].range.extent == 3
assert s0[conv].iters[5].range.extent == 7
assert s0[conv].iters[6].range.extent == 7
s0.compute_root(bn_mul)
assert s0[bn_mul].compute_at == 0
s0.compute_root(conv)
assert s0[conv].compute_at == 0
s0 = dag.infer_bound_from_state(s0)
assert s0[conv].iters[0].range.extent == 1
assert s0[conv].iters[1].range.extent == 64
assert s0[conv].iters[2].range.extent == 112
assert s0[conv].iters[3].range.extent == 112
assert s0[conv].iters[4].range.extent == 3
assert s0[conv].iters[5].range.extent == 7
assert s0[conv].iters[6].range.extent == 7
def test_cache_read_write():
N, H, W, CO, CI, KH, KW, strides, padding = 4, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
data = te.placeholder((N, CI, H, W), name="Data")
kernel_data = te.placeholder((CO, CI, KH, KW), name="Kernel_data")
k0, k1 = te.compute(
kernel_data.shape,
lambda *i: (kernel_data(*i) + 1, kernel_data(*i) / 2),
name="Kernel_split",
)
kernel = te.compute(kernel_data.shape, lambda *i: k0(*i) + k1(*i), name="Kernel")
conv = topi.nn.conv2d_nchw(data, kernel, strides, padding, dilation=1)
relu = topi.nn.relu(conv)
add = topi.add(data, relu)
dag = auto_scheduler.ComputeDAG([data, kernel_data, add])
s0 = dag.get_init_state()
pad_temp = s0.stage_ops[1]
kernel_split = s0.stage_ops[3]
# 0: init state
ori_its = s0[add].iters
its = s0.split(add, s0[add].iters[0], [2])
s0.reorder(add, [its[0], ori_its[1], its[1], ori_its[2], ori_its[3]])
s0.compute_inline(relu)
# 1: simple cache_write with compute_at
conv_global = s0.cache_write(conv, "global")
s0.compute_at(conv_global, conv, s0[conv].iters[3])
# 2: simple cache_read with compute_at
kernel_global = s0.cache_read(kernel, "global", [conv_global])
s0.compute_at(kernel_global, conv_global, s0[conv_global].iters[4])
"""
Placeholder: Data, Kernel_data
for i0 (0,4)
for i1 (0,512)
for i2 (0,9)
for i3 (0,9)
pad_temp = ...
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel_split = ...
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel = ...
for nn (0,4)
for ff (0,512)
for yy (0,7)
for xx (0,7)
for nn_c (None)
for ff_c (None)
for yy_c (None)
for xx_c (None)
for rc (None)
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
Kernel.global = ...
for ry (None)
for rx (None)
compute.global = ...
compute = ...
for ax0.0 (0,2)
for ax1 (0,512)
for ax0.1 (0,2)
for ax2 (0,7)
for ax3 (0,7)
T_add = ...
"""
s1 = dag.infer_bound_from_state(s0)
assert s1[conv].iters[0].range.extent == 4
assert s1[conv].iters[1].range.extent == 512
assert s1[conv].iters[2].range.extent == 7
assert s1[conv].iters[3].range.extent == 7
assert s1[kernel_global].iters[0].range.extent == 1
assert s1[kernel_global].iters[1].range.extent == 1
assert s1[kernel_global].iters[2].range.extent == 3
assert s1[kernel_global].iters[3].range.extent == 3
assert s1[conv_global].iters[0].range.extent == 1
assert s1[conv_global].iters[1].range.extent == 1
assert s1[conv_global].iters[2].range.extent == 1
assert s1[conv_global].iters[3].range.extent == 1
assert s1[conv_global].iters[4].range.extent == 512
assert s1[conv_global].iters[5].range.extent == 3
assert s1[conv_global].iters[6].range.extent == 3
# 3: two level cache_read with compute_at
# preparing for GPU's shared memory & local memory
pad_temp_global = s0.cache_read(pad_temp, "global", [conv_global])
pad_temp_shared = s0.cache_read(pad_temp_global, "shared", [conv_global])
s0.compute_at(pad_temp_global, conv_global, s0[conv_global].iters[2])
s0.compute_at(pad_temp_shared, conv_global, s0[conv_global].iters[4])
# 4: cache_read with multi readers
# This stage cannot be compute at to its consumer
s0.cache_read(data, "global", [pad_temp, add])
"""
Placeholder: Data, Kernel_data
for ax0 (0,4)
for ax1 (0,512)
for ax2 (0,7)
for ax3 (0,7)
Data.global = ...
for i0 (0,4)
for i1 (0,512)
for i2 (0,9)
for i3 (0,9)
pad_temp = ...
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel_split = ...
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel = ...
for nn (0,4)
for ff (0,512)
for yy (0,7)
for xx (0,7)
for nn_c (None)
for ff_c (None)
for yy_c (None)
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
pad_temp.global = ...
for xx_c (None)
for rc (None)
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
Kernel.global = ...
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
pad_temp.global.shared = ...
for ry (None)
for rx (None)
compute.global = ...
compute = ...
for ax0.0 (0,2)
for ax1 (0,512)
for ax0.1 (0,2)
for ax2 (0,7)
for ax3 (0,7)
T_add = ...
"""
s1 = dag.infer_bound_from_state(s0)
assert s1[conv].iters[0].range.extent == 4
assert s1[conv].iters[1].range.extent == 512
assert s1[conv].iters[2].range.extent == 7
assert s1[conv].iters[3].range.extent == 7
assert s1[kernel_global].iters[0].range.extent == 1
assert s1[kernel_global].iters[1].range.extent == 1
assert s1[kernel_global].iters[2].range.extent == 3
assert s1[kernel_global].iters[3].range.extent == 3
assert s1[conv_global].iters[0].range.extent == 1
assert s1[conv_global].iters[1].range.extent == 1
assert s1[conv_global].iters[2].range.extent == 1
assert s1[conv_global].iters[3].range.extent == 1
assert s1[conv_global].iters[4].range.extent == 512
assert s1[conv_global].iters[5].range.extent == 3
assert s1[conv_global].iters[6].range.extent == 3
assert s1[pad_temp_global].iters[0].range.extent == 1
assert s1[pad_temp_global].iters[1].range.extent == 512
assert s1[pad_temp_global].iters[2].range.extent == 3
assert s1[pad_temp_global].iters[3].range.extent == 3
assert s1[pad_temp_shared].iters[0].range.extent == 1
assert s1[pad_temp_shared].iters[1].range.extent == 1
assert s1[pad_temp_shared].iters[2].range.extent == 3
assert s1[pad_temp_shared].iters[3].range.extent == 3
# 5: cache_write with multi outputs
# TVM's cache_write actually has a bug with this case:
#
# After schedule.cache_write, TVM generate one new stage:
# From: kernel_data -> kernel_split -> kernel
# To: kernel_data -> kernel_split_global -> kernel_split -> kernel
#
    # But with topo-sort analysis, we get:
# // kernel_data -> kernel_split_global -> kernel_split -> kernel
# \ /
# ----------------> kernel_split ---------------->
#
    # TODO(jcf94): There seems to be a bug with the input/output tensors. Such multi-output
    # cases should be unusual, so we apply a workaround in DoCacheWrite. This should be
    # fixed later.
kernel_split_global = s0.cache_write(kernel_split, "global")
"""
Placeholder: Data, Kernel_data
for ax0 (0,4)
for ax1 (0,512)
for ax2 (0,7)
for ax3 (0,7)
Data.global = ...
for i0 (0,4)
for i1 (0,512)
for i2 (0,9)
for i3 (0,9)
pad_temp = ...
for i0_c (0,512)
for i1_c (0,512)
for i2_c (0,3)
for i3_c (0,3)
Kernel_split.global = ...
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel_split = ...
    (******* Bug here: there should not be two Kernel_split stages *******)
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel_split = ...
    (******* Bug here: there should not be two Kernel_split stages *******)
for i0 (0,512)
for i1 (0,512)
for i2 (0,3)
for i3 (0,3)
Kernel = ...
for nn (0,4)
for ff (0,512)
for yy (0,7)
for xx (0,7)
for nn_c (None)
for ff_c (None)
for yy_c (None)
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
pad_temp.global = ...
for xx_c (None)
for rc (None)
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
Kernel.global = ...
for ax0 (None)
for ax1 (None)
for ax2 (None)
for ax3 (None)
pad_temp.global.shared = ...
for ry (None)
for rx (None)
compute.global = ...
compute = ...
for ax0.0 (0,2)
for ax1 (0,512)
for ax0.1 (0,2)
for ax2 (0,7)
for ax3 (0,7)
T_add = ...
"""
assert len(s0[kernel_split].iters) == len(s0[kernel_split_global].iters)
for it0, it1 in zip(s0[kernel_split].iters, s0[kernel_split_global].iters):
assert it0.range == it1.range
def test_follow_split_follow_fused_split():
A, B, C = matmul_auto_scheduler_test(512, 512, 512)
dag = auto_scheduler.ComputeDAG([A, B, C])
s0 = dag.get_init_state()
C_global = s0.cache_write(C, "global")
its0 = s0.split(C, s0[C].iters[0], [4, 2, 8, 4], True)
split_step0 = len(s0.transform_steps) - 1
for level in range(1, 6):
tmp = s0.copy()
tmp.follow_split(C_global, tmp[C_global].iters[0], split_step0, level)
for i in range(0, level):
assert tmp[C].iters[i].range.extent == tmp[C_global].iters[i].range.extent
its1 = s0.split(C, s0[C].iters[5], [2, 2, 4, 8])
split_step1 = len(s0.transform_steps) - 1
its = []
for i0, i1 in zip(its0, its1):
its.append(i0)
its.append(i1)
s0.reorder(C, its)
for i in range(0, 5):
s0.fuse(C, [s0[C].iters[i], s0[C].iters[i + 1]])
for level in range(0, 4):
tmp = s0.copy()
tmp.follow_fused_split(
C_global, tmp[C_global].iters[0], [split_step0, split_step1], level, False
)
assert tmp[C].iters[level + 1].range.extent == tmp[C_global].iters[0].range.extent
for level in range(0, 4):
tmp = s0.copy()
tmp.follow_fused_split(
C_global, tmp[C_global].iters[0], [split_step0, split_step1], level, True
)
assert tmp[C].iters[level + 1].range.extent == tmp[C_global].iters[1].range.extent
def test_rfactor():
A, B, C = matmul_auto_scheduler_test(8, 8, 512)
dag = auto_scheduler.ComputeDAG([A, B, C])
s0 = dag.get_init_state()
ko, ki = s0.split(C, s0[C].iters[2], [16])
s1 = s0.copy()
C_r = s1.rfactor(C, ko, 2)
"""
Placeholder: A, B
for i (0,8)
for j (0,8)
for k_o (0,32)
for k_i (0,16)
C.rf = ...
for ax0 (0,8)
for ax1 (0,8)
for k_o_v (0,32)
C.repl = ...
"""
assert s1[C_r].iters[0].range.extent == 8
assert s1[C_r].iters[1].range.extent == 8
assert s1[C_r].iters[2].range.extent == 32
assert s1[C_r].iters[3].range.extent == 16
assert s1[C].iters[0].range.extent == 8
assert s1[C].iters[1].range.extent == 8
assert s1[C].iters[2].range.extent == 32
s2 = s0.copy()
C_r = s2.rfactor(C, ki, 2)
"""
Placeholder: A, B
for i (0,8)
for j (0,8)
for k_i (0,16)
for k_o (0,32)
C.rf = ...
for ax0 (0,8)
for ax1 (0,8)
for k_i_v (0,16)
C.repl = ...
"""
assert s2[C_r].iters[0].range.extent == 8
assert s2[C_r].iters[1].range.extent == 8
assert s2[C_r].iters[2].range.extent == 16
assert s2[C_r].iters[3].range.extent == 32
assert s2[C].iters[0].range.extent == 8
assert s2[C].iters[1].range.extent == 8
assert s2[C].iters[2].range.extent == 16
if __name__ == "__main__":
test_split_fuse_reorder_annotation()
test_compute_at_root_inline()
test_cache_read_write()
test_follow_split_follow_fused_split()
test_rfactor()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test measurement and log serialization. """
import json
import multiprocessing
import numpy as np
import tvm
from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm.testing
import pickle
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
from tvm.auto_scheduler import workload_registry
def record_common(dag, s):
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
inp = auto_scheduler.measure.MeasureInput(task, s)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
# Test in-memory record processing.
record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
    # Only check the workload_key for simplicity.
assert inp.task.workload_key == r_inp.task.workload_key
assert str(res) == str(r_res)
# Test file-based record processing.
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
s1 = dag.infer_bound_from_state(s)
s2 = dag.infer_bound_from_state(inputs[0].state)
assert s1 == s2
assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Split
its0 = s.split(C, s[C].iters[0], [4, 8, 8])
its1 = s.split(C, s[C].iters[4], [8, 4, 4])
# Reorder
s.reorder(
C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
)
# Fuse
s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
# Parallel
s.parallel(C, s[C].iters[0])
    # Thread bind (blockIdx & threadIdx are GPU concepts; they are used here just for record testing)
s.bind(C, s[C].iters[1], "blockIdx.x")
s.bind(C, s[C].iters[2], "threadIdx.z")
s.bind(C, s[C].iters[3], "vthread")
# Unroll
s.unroll(C, s[C].iters[4])
# Vectorize
s.vectorize(C, s[C].iters[6])
record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
AA = topi.nn.relu(A)
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Cache Write
C_shared = s.cache_write(C, "shared")
# Compute At
s.compute_at(C_shared, C, s[C].iters[0])
# Cache Read
B_global = s.cache_read(B, "global", [C_shared])
s.compute_at(B_global, C_shared, s[C_shared].iters[2])
# Compute Inline
s.compute_inline(AA)
# Compute Root
s.compute_root(C_shared)
record_common(dag, s)
def test_record_follow_split_follow_fused_split():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
dag = auto_scheduler.ComputeDAG([A, B, E])
s = dag.get_init_state()
# Follow Split
s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
split_step0 = len(s.transform_steps) - 1
s.follow_split(C, s[C].iters[5], split_step0, 4)
# Follow Fused Split
its0 = s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
split_step1 = len(s.transform_steps) - 1
its1 = s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
split_step2 = len(s.transform_steps) - 1
its = []
for i0, i1 in zip(its0, its1):
its.append(i0)
its.append(i1)
for i in range(0, 5):
s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)
record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Rfactor
ko, _ = s.split(C, s[C].iters[2], [16])
s.rfactor(C, ko, 2)
# Pragma
s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
# StorageAlign
s.storage_align(C, s[C].iters[-1], 8, 4)
record_common(dag, s)
def test_recover_measure_input():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
assert str(correct_inp.state) == str(inp.state)
def test_workload_dis_factor():
calc = auto_scheduler.utils.calc_workload_dis_factor
decode = auto_scheduler.utils.decode_workload_key
# Identical
target_wkl_key = json.dumps(
["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"]
)
assert calc(decode(target_wkl_key), decode(target_wkl_key)) == 1
# Compatible with a factor
wkl_key = json.dumps(["func1", [1, 3, 112, 112], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == 8 * 2 * 2
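    # The factor above comes from the per-dimension ratios between the two workloads:
    # batch 8 / 1 = 8 and spatial 224 / 112 = 2 for both height and width, i.e. 8 * 2 * 2.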
# Incompatible argument with zeros
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [1, 1], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [0, 0], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
    # Incompatible non-integer argument
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "int8"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
# Incompatible function
wkl_key = json.dumps(["func2", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
# Incompatible due to non-dividable factor
wkl_key = json.dumps(["func1", [8, 3, 223, 223], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
def test_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_dag_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
tensors = [A, B, E]
dag = auto_scheduler.ComputeDAG(tensors)
key = workload_registry.register_workload_tensors(dag.workload_key(), tensors)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key=key, target=target)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_workload_serialization():
key = tvm.auto_scheduler.utils.get_func_name(matmul_auto_scheduler_test)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
def test_measure_local_builder_rpc_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
del measure_ctx
def measure_local_builder_rpc_runner_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_measure_local_builder_rpc_runner()
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_measure_target_host():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target=tvm.target.Target("llvm", "llvm -mtriple=aarch64-linux-gnu"),
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(recovered_inp.task.target.host) == str(inp.task.target.host)
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_local_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
    # This workload cannot use random input for the `Index` input, because Index is used
    # to address into X and an arbitrary random value could be out of bounds
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(timeout=10)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_rpc_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
    # This workload cannot use random input for the `Index` input, because Index is used
    # to address into X and an arbitrary random value could be out of bounds
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_search_policy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import random
import multiprocessing
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
class CustomMeasureCallback(auto_scheduler.measure.PythonBasedMeasureCallback):
"""A simple Python-based callback for testing."""
def callback(self, policy, inputs, results):
assert isinstance(policy, auto_scheduler.search_policy.SearchPolicy)
for inp, res in zip(inputs, results):
assert isinstance(inp, auto_scheduler.MeasureInput)
assert isinstance(res, auto_scheduler.MeasureResult)
def search_common(
task=None,
target="llvm",
search_policy="sketch",
runner="local",
num_measure_trials=100,
cost_model=auto_scheduler.RandomModel(),
init_search_callbacks=None,
):
if task is None:
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(64, 64, 64), target=target
)
target = task.target
print("Test search policy '%s' for '%s'" % (search_policy, target))
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
init_search_callbacks = init_search_callbacks or []
init_search_callbacks.append(auto_scheduler.PreloadMeasuredStates(log_file))
if search_policy == "empty":
search_policy = auto_scheduler.EmptyPolicy(task)
elif search_policy == "sketch":
search_policy = auto_scheduler.SketchPolicy(
task, program_cost_model=cost_model, init_search_callbacks=init_search_callbacks
)
else:
raise ValueError("Invalid policy: " + search_policy)
# Tune
tuning_options = auto_scheduler.TuningOptions(
num_measure_trials=num_measure_trials,
num_measures_per_round=2,
early_stopping=1,
runner=runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file), CustomMeasureCallback()],
)
task.tune(tuning_options=tuning_options, search_policy=search_policy)
# Compile with the best schedule
sch, args = task.apply_best(log_file)
mod = tvm.build(sch, args, target)
# Compile with naive schedule for correctness check
sch, args = task.compute_dag.apply_steps_from_state(task.compute_dag.init_state)
mod_ref = tvm.build(sch, args, "llvm")
ctx = tvm.device(str(target), 0)
np_arrays = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype) for x in args]
tvm_arrays = [tvm.nd.array(x, ctx) for x in np_arrays]
mod(*tvm_arrays)
actual = [x.numpy() for x in tvm_arrays]
tvm_arrays = [tvm.nd.array(x) for x in np_arrays]
mod_ref(*tvm_arrays)
expected = [x.numpy() for x in tvm_arrays]
for x, y in zip(actual, expected):
tvm.testing.assert_allclose(x, y, rtol=1e-5)
@tvm.testing.requires_llvm
def test_workload_registry_empty_policy():
search_common(search_policy="empty", num_measure_trials=2)
N = 64
target = "llvm"
search_common(
task=auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test", args=(N, N, N), target=target
),
num_measure_trials=2,
search_policy="empty",
)
search_common(
task=auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test_rename_1", args=(N, N, N), target=target
),
num_measure_trials=2,
search_policy="empty",
)
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic():
search_common()
def sketch_search_policy_basic_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_sketch_search_policy_basic()
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=sketch_search_policy_basic_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_sketch_search_policy_xgbmodel():
search_common(cost_model=auto_scheduler.XGBModel())
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_rpc_runner():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
search_common(target="cuda", runner=measure_ctx.runner)
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_xgbmodel_rpc_runner():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
search_common(target="cuda", runner=measure_ctx.runner, cost_model=auto_scheduler.XGBModel())
@tvm.testing.requires_llvm
@tvm.testing.requires_cuda
def test_sketch_search_policy_zero_rank():
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
for target in ["llvm", "cuda"]:
task = auto_scheduler.SearchTask(
func=zero_rank_compute_auto_scheduler_test, args=(10,), target=target
)
search_common(task, runner=measure_ctx.runner)
task = auto_scheduler.SearchTask(
func=zero_rank_reduce_auto_scheduler_test, args=(10,), target=target
)
search_common(task, runner=measure_ctx.runner)
@tvm.testing.requires_llvm
def test_sketch_search_policy_custom_sketch():
def meet_condition_func(search_policy, state, stage_id):
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
def apply_func(search_policy, state, stage_id):
ret = []
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
C = state.stage_ops[2]
ret.append([state.state_object, -1])
s1 = state.copy()
i, _, _ = s1[C].iters
s1.split(C, i, [8])
ret.append([s1.state_object, -1])
return ret
search_common(
cost_model=auto_scheduler.XGBModel(),
init_search_callbacks=[
auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
],
)
if __name__ == "__main__":
test_workload_registry_empty_policy()
test_sketch_search_policy_basic()
test_sketch_search_policy_basic_spawn()
test_sketch_search_policy_xgbmodel()
test_sketch_search_policy_cuda_rpc_runner()
test_sketch_search_policy_cuda_xgbmodel_rpc_runner()
test_sketch_search_policy_zero_rank()
test_sketch_search_policy_custom_sketch()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_search_task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def test_search_task_add_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
test_input_1 = tvm.runtime.ndarray.empty((10, 20))
test_input_2 = tvm.runtime.ndarray.empty((30, 40, 50))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
"test_input_2": test_input_2,
},
task_inputs_overwrite=True,
)
assert len(task.task_input_names) == 3
assert task.task_input_names[0] == "test_input_0"
assert task.task_input_names[1] == "test_input_1"
assert task.task_input_names[2] == "test_input_2"
def test_search_task_record():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
N = 64
target = "llvm"
# Log with no task input
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test", args=(N, N, N), target=target
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
# TODO(jcf94): Check the compute dag & hardware parameter
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
# Log with 1 task input
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={"test_input_0": test_input_0},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
# Log with multiple task inputs
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func="matmul_auto_scheduler_test",
args=(N, N, N),
target=target,
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
# Log with version 0.5
v5_log = """["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""
new_task = auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
def test_recover_measure_input_with_task_input():
auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
    # Since this file tests search_task, we only check the search task fields here
# Log with no task input
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
# Log with 1 task input
test_input_0 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 1
assert new_task.task_input_names[0] == "test_input_0"
# Log with multiple task inputs
test_input_1 = tvm.runtime.ndarray.empty((64, 64))
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target="llvm",
task_inputs={
"test_input_0": test_input_0,
"test_input_1": test_input_1,
},
task_inputs_overwrite=True,
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 2
assert new_task.task_input_names[0] == "test_input_0"
assert new_task.task_input_names[1] == "test_input_1"
# Log with version 0.5
v5_log = """{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""
measure_log = auto_scheduler.measure_record.load_record_from_string(v5_log)
new_task = measure_log[0].task
assert task.workload_key == new_task.workload_key
assert str(task.target) == str(new_task.target)
assert str(task.target.host) == str(new_task.target.host)
assert task.layout_rewrite_option == new_task.layout_rewrite_option
assert len(new_task.task_input_names) == 0
if __name__ == "__main__":
test_search_task_add_task_input()
test_search_task_record()
test_recover_measure_input_with_task_input()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_sketch_generation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test sketch generation. """
import sys
import tvm
import tvm.testing
import pytest
from tvm import te, auto_scheduler
from tvm.auto_scheduler import _ffi_api
from tvm.auto_scheduler.loop_state import Stage
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
double_matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
max_pool2d_auto_scheduler_test,
min_nm_auto_scheduler_test,
softmax_nm_auto_scheduler_test,
softmax_abcd_auto_scheduler_test,
conv2d_winograd_nhwc_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def generate_sketches(
workload_func, args, target, print_for_debug=False, init_search_callbacks=None
):
# NOTE: test_cpu_matmul_sketch and test_cpu_max_pool2d_sketch assume 4 cores to trigger all
# possible sketch generations.
task = auto_scheduler.SearchTask(
func=workload_func,
args=args,
target=target,
hardware_params=auto_scheduler.HardwareParams(num_cores=4, target=target),
)
policy = auto_scheduler.SketchPolicy(
task, verbose=0, init_search_callbacks=init_search_callbacks
)
return policy.generate_sketches(print_for_debug)
def assert_compute_at_condition(stage, condition):
assert stage.compute_at == Stage.COMPUTE_AT_TRANS_TABLE[condition]
def assert_is_tiled(stage):
assert _ffi_api.SearchPolicyUtilsIsTiled(stage)
def assert_is_not_tiled(stage):
assert not _ffi_api.SearchPolicyUtilsIsTiled(stage)
def assert_has_cache_write(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCacheWriteStage(state, stage_id)
def assert_has_cache_read(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCacheReadStage(state, stage_id)
def assert_has_rfactor(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasRfactorStage(state, stage_id)
def assert_has_cross_thread_reduction(state, stage_id):
assert _ffi_api.SearchPolicyUtilsHasCrossThreadReduction(state, stage_id)
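# Note (added for readability): the assert_* helpers above wrap auto_scheduler FFI
# utilities and loop_state properties; they check whether a stage of a generated sketch
# is multi-level tiled, has cache_read/cache_write or rfactor stages, uses cross-thread
# reduction, and where it is computed at ("root", "iter", or "inlined").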
def test_cpu_matmul_sketch():
sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "llvm")
""" 3 multi-level tiling sketches
No.0 : Multi-level tiling
No.1 : Multi-level tiling with cache write on position 0
No.2 : Multi-level tiling with cache write on position 1
"""
assert len(sketches) == 3
# Sketch 0
assert_is_tiled(sketches[0].stages[2])
# Sketch 1
assert_is_tiled(sketches[1].stages[2])
assert_has_cache_write(sketches[1], 2)
assert_compute_at_condition(sketches[1].stages[2], "iter")
# Sketch 2
assert_is_tiled(sketches[2].stages[2])
assert_has_cache_write(sketches[2], 2)
assert_compute_at_condition(sketches[2].stages[2], "iter")
assert sketches[1] != sketches[2]
sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 512), "llvm")
""" 2 rfactor sketches + 3 multi-level tiling sketches
No.0 : Rfactor with factor position 0
No.1 : Rfactor with factor position 1
No.2 : Multi-level tiling
No.3 : Multi-level tiling with cache write on position 0
No.4 : Multi-level tiling with cache write on position 1
"""
assert len(sketches) == 5
# Sketch 0
assert_has_rfactor(sketches[0], 2)
# Sketch 1
assert_has_rfactor(sketches[1], 2)
assert sketches[0] != sketches[1]
# Sketch 2
assert_is_tiled(sketches[2].stages[2])
# Sketch 3
assert_is_tiled(sketches[3].stages[2])
assert_has_cache_write(sketches[3], 2)
assert_compute_at_condition(sketches[3].stages[2], "iter")
# Sketch 4
assert_is_tiled(sketches[4].stages[2])
assert_has_cache_write(sketches[4], 2)
assert_compute_at_condition(sketches[4].stages[2], "iter")
assert sketches[3] != sketches[4]
sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "llvm")
""" 3 multi-level tiling sketches for one matmul, so 3 * 3 = 9 sketches in total """
assert len(sketches) == 9
assert_is_tiled(sketches[8].stages[5])
def test_cpu_conv2d_bn_relu_sketch():
sketches = generate_sketches(
conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "llvm"
)
""" 3 multi-level tiling sketches
No.0 : Conv2d multi-level tiling with fusion on position 0
No.1 : Conv2d multi-level tiling with fusion on position 1
No.2 : Conv2d multi-level tiling without fusion
"""
assert len(sketches) == 3
# Sketch 0
assert_is_not_tiled(sketches[0].stages[1])
assert_is_tiled(sketches[0].stages[3])
assert_compute_at_condition(sketches[0].stages[3], "iter")
assert_compute_at_condition(sketches[0].stages[5], "inlined")
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_compute_at_condition(sketches[0].stages[9], "inlined")
assert_is_tiled(sketches[0].stages[10])
# Sketch 1
assert_is_not_tiled(sketches[1].stages[1])
assert_is_tiled(sketches[1].stages[3])
assert_compute_at_condition(sketches[1].stages[3], "iter")
assert_compute_at_condition(sketches[1].stages[5], "inlined")
assert_compute_at_condition(sketches[1].stages[7], "inlined")
assert_compute_at_condition(sketches[1].stages[9], "inlined")
assert_is_tiled(sketches[1].stages[10])
# Sketch 2
assert_is_not_tiled(sketches[2].stages[1])
assert_is_tiled(sketches[2].stages[3])
assert_compute_at_condition(sketches[2].stages[3], "root")
assert_compute_at_condition(sketches[2].stages[5], "inlined")
assert_compute_at_condition(sketches[2].stages[7], "inlined")
assert_compute_at_condition(sketches[2].stages[9], "inlined")
assert_is_not_tiled(sketches[2].stages[10])
def test_cpu_max_pool2d_sketch():
sketches = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 1), "llvm")
""" 1 default sketch """
assert len(sketches) == 1
# Sketch 0
assert len(sketches[0].transform_steps) == 0
def test_cpu_min_sketch():
sketches = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "llvm")
""" 2 rfactor sketches + 1 default sketch
No.0 : Rfactor with factor position 0
No.1 : Rfactor with factor position 1
No.2 : Default sketch
"""
assert len(sketches) == 3
# Sketch 0
assert_has_rfactor(sketches[0], 1)
# Sketch 1
assert_has_rfactor(sketches[1], 1)
assert sketches[0] != sketches[1]
# Sketch 2
assert len(sketches[2].transform_steps) == 0
def test_cpu_softmax_sketch():
sketches = generate_sketches(softmax_nm_auto_scheduler_test, (1, 1024), "llvm")
""" (2 rfactor sketches + 1 default sketch) * (2 rfactor sketches + 1 default sketch) """
assert len(sketches) == (3 * 3)
for i in range(0, 3):
for j in range(0, 3):
sketch = sketches[i * 3 + j]
if j in [0, 1]:
assert_has_rfactor(sketch, 1)
if i in [0, 1]:
assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
assert len(sketches[8].transform_steps) == 0
sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "llvm")
""" (2 rfactor sketches + 1 default sketch) * (2 rfactor sketches + 1 default sketch) """
assert len(sketches) == (3 * 3)
for i in range(0, 3):
for j in range(0, 3):
sketch = sketches[i * 3 + j]
if j in [0, 1]:
assert_has_rfactor(sketch, 1)
if i in [0, 1]:
assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
assert len(sketches[8].transform_steps) == 0
def test_cpu_conv2d_winograd_sketch():
sketches = generate_sketches(
conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "llvm"
)
""" 3 multi-level tiling sketches
No.0 : Bgemm multi-level tiling
No.1 : Bgemm multi-level tiling with cache write on position 0
No.2 : Bgemm multi-level tiling with cache write on position 1
"""
assert len(sketches) == 3
# Sketch 0
assert_is_not_tiled(sketches[0].stages[1])
assert_is_not_tiled(sketches[0].stages[2])
assert_compute_at_condition(sketches[0].stages[3], "inlined")
assert_is_tiled(sketches[0].stages[4])
assert_is_tiled(sketches[0].stages[6])
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_is_tiled(sketches[0].stages[8])
assert_is_not_tiled(sketches[0].stages[9])
# Sketch 1
assert_is_not_tiled(sketches[1].stages[1])
assert_is_not_tiled(sketches[1].stages[2])
assert_compute_at_condition(sketches[1].stages[3], "inlined")
assert_is_tiled(sketches[1].stages[4])
assert_is_tiled(sketches[1].stages[6])
assert_has_cache_write(sketches[1], 6)
assert_compute_at_condition(sketches[1].stages[6], "iter")
assert_compute_at_condition(sketches[1].stages[8], "inlined")
assert_is_tiled(sketches[1].stages[9])
assert_is_not_tiled(sketches[1].stages[10])
# Sketch 2
assert_is_not_tiled(sketches[2].stages[1])
assert_is_not_tiled(sketches[2].stages[2])
assert_compute_at_condition(sketches[2].stages[3], "inlined")
assert_is_tiled(sketches[2].stages[4])
assert_is_tiled(sketches[2].stages[6])
assert_has_cache_write(sketches[2], 6)
assert_compute_at_condition(sketches[2].stages[6], "iter")
assert_compute_at_condition(sketches[2].stages[8], "inlined")
assert_is_tiled(sketches[2].stages[9])
assert_is_not_tiled(sketches[2].stages[10])
assert sketches[1] != sketches[2]
def test_cpu_zero_rank_sketch():
sketches = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "llvm")
""" 2 rfactor sketches + 1 multi-level tiling sketches """
assert len(sketches) == 3
def test_cpu_custom_sketch():
def meet_condition_func(search_policy, state, stage_id):
return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
def apply_func(search_policy, state, stage_id):
ret = []
state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
C = state.stage_ops[2]
ret.append([state.state_object, -1])
s1 = state.copy()
i, _, _ = s1[C].iters
s1.split(C, i, [8, 2])
ret.append([s1.state_object, -1])
return ret
sketches = generate_sketches(
matmul_auto_scheduler_test,
(512, 512, 512),
"llvm",
init_search_callbacks=[
auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
],
)
assert len(sketches) == 2
assert sketches[0].stages[2].iters[0].range.extent == 512
assert sketches[0].stages[2].iters[1].range.extent == 512
assert sketches[0].stages[2].iters[2].range.extent == 512
assert sketches[1].stages[2].iters[0].range.extent == 32
assert sketches[1].stages[2].iters[1].range.extent == 8
assert sketches[1].stages[2].iters[2].range.extent == 2
assert sketches[1].stages[2].iters[3].range.extent == 512
assert sketches[1].stages[2].iters[4].range.extent == 512
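    # Sanity of the custom split (added note): splitting the 512-extent i axis by
    # factors [8, 2] yields outer/middle/inner extents of 512 / (8 * 2) = 32, 8 and 2,
    # while the remaining j and k iterators keep their original extent of 512.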
@tvm.testing.requires_cuda
def test_cuda_matmul_sketch():
sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "cuda")
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_has_cache_read(sketches[0], 0)
assert_compute_at_condition(sketches[0].stages[1], "iter")
assert_has_cache_read(sketches[0], 2)
assert_compute_at_condition(sketches[0].stages[3], "iter")
assert_has_cache_write(sketches[0], 4)
assert_is_tiled(sketches[0].stages[4])
assert_compute_at_condition(sketches[0].stages[4], "iter")
assert_is_tiled(sketches[0].stages[5])
sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 1024), "cuda")
""" 1 cross thread reuction sketch + 1 multi-level tiling sketch """
assert len(sketches) == 2
# Sketch 0
assert_has_cross_thread_reduction(sketches[0], 2)
# Sketch 1
assert_has_cache_read(sketches[1], 0)
assert_compute_at_condition(sketches[1].stages[1], "iter")
assert_has_cache_read(sketches[1], 2)
assert_compute_at_condition(sketches[1].stages[3], "iter")
assert_has_cache_write(sketches[1], 4)
assert_is_tiled(sketches[1].stages[4])
assert_compute_at_condition(sketches[1].stages[4], "iter")
assert_is_tiled(sketches[1].stages[5])
sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "cuda")
""" 1 multi-level tiling sketch for one matmul, so 1 x 1 = 1 sketch in total """
assert len(sketches) == 1
assert_compute_at_condition(sketches[0].stages[5], "root")
assert_compute_at_condition(sketches[0].stages[6], "iter")
@tvm.testing.requires_cuda
def test_cuda_conv2d_bn_relu_sketch():
sketches = generate_sketches(
conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "cuda"
)
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_has_cache_read(sketches[0], 1)
assert_compute_at_condition(sketches[0].stages[1], "inlined")
assert_compute_at_condition(sketches[0].stages[2], "iter")
assert_has_cache_read(sketches[0], 3)
assert_compute_at_condition(sketches[0].stages[4], "iter")
assert_is_tiled(sketches[0].stages[5])
assert_compute_at_condition(sketches[0].stages[5], "iter")
assert_compute_at_condition(sketches[0].stages[7], "inlined")
assert_compute_at_condition(sketches[0].stages[9], "inlined")
assert_compute_at_condition(sketches[0].stages[11], "inlined")
assert_is_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_max_pool2d_sketch():
sketches = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 0), "cuda")
""" 1 default sketch """
assert len(sketches) == 1
assert len(sketches[0].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_min_sketch():
sketches = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "cuda")
""" 1 cross thread reuction sketch + 1 default sketch """
assert len(sketches) == 2
# Sketch 0
assert_has_cross_thread_reduction(sketches[0], 1)
# Sketch 1
assert len(sketches[1].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_softmax_sketch():
sketches = generate_sketches(softmax_nm_auto_scheduler_test, (2, 1024), "cuda")
""" (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
assert len(sketches) == (2 * 2)
# Sketch 0
assert_has_cross_thread_reduction(sketches[0], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[0], 3)
# Sketch 1
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[1], 3)
# Sketch 2
assert_has_cross_thread_reduction(sketches[2], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
# Sketch 3
assert_compute_at_condition(sketches[3].stages[2], "inlined")
sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "cuda")
""" (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
assert len(sketches) == (2 * 2)
# Sketch 0
assert_has_cross_thread_reduction(sketches[0], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[0], 3)
# Sketch 1
assert_compute_at_condition(sketches[3].stages[2], "inlined")
assert_has_cross_thread_reduction(sketches[1], 3)
# Sketch 2
assert_has_cross_thread_reduction(sketches[2], 1)
assert_compute_at_condition(sketches[3].stages[2], "inlined")
# Sketch 3
assert_compute_at_condition(sketches[3].stages[2], "inlined")
@tvm.testing.requires_cuda
def test_cuda_conv2d_winograd_sketch():
sketches = generate_sketches(
conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "cuda"
)
""" 1 multi-level tiling sketch """
assert len(sketches) == 1
assert_compute_at_condition(sketches[0].stages[1], "inlined")
assert_compute_at_condition(sketches[0].stages[2], "iter")
assert_compute_at_condition(sketches[0].stages[3], "inlined")
assert_is_tiled(sketches[0].stages[4])
assert_has_cache_read(sketches[0], 4)
assert_compute_at_condition(sketches[0].stages[5], "iter")
assert_has_cache_read(sketches[0], 6)
assert_compute_at_condition(sketches[0].stages[7], "iter")
assert_is_tiled(sketches[0].stages[8])
assert_compute_at_condition(sketches[0].stages[8], "iter")
assert_has_cache_write(sketches[0], 8)
assert_compute_at_condition(sketches[0].stages[9], "root")
assert_is_tiled(sketches[0].stages[11])
assert_is_not_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_zero_rank_sketch():
sketches = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "cuda")
""" 1 cross thread reuction sketch + 1 multi-level tiling sketch """
assert len(sketches) == 2
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_auto_scheduler_task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test task scheduler """
import tempfile
import multiprocessing
import numpy as np
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin():
tasks = []
for n in [2, 4, 8]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
num_trials_per_task = 2
# Tune all tasks
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=num_trials_per_task * len(tasks),
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(tasks, strategy="round-robin", callbacks=[])
task_scheduler.tune(tune_option, search_policy="sketch.random")
# Check the result of round robin
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
for task in tasks:
assert counters[task.workload_key] == num_trials_per_task
# test continuous tuning (restoring the status)
task_scheduler = auto_scheduler.TaskScheduler(
tasks, strategy="round-robin", load_log_file=log_file, callbacks=[]
)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=len(tasks),
num_measures_per_round=1,
)
task_scheduler.tune(tune_option, search_policy="sketch.random")
del measure_ctx
@tvm.testing.requires_llvm
def task_scheduler_round_robin_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_task_scheduler_round_robin()
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=task_scheduler_round_robin_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_task_scheduler_gradient():
tasks = []
for n in [2, 4]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
def objective_func(costs):
return 1e5 * costs[0]
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
n_trials = 5
# Tune all tasks
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=n_trials,
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(
tasks, objective_func=objective_func, callbacks=[]
)
        # Forcibly rewrite the initial values.
        # This makes the test more stable on slow CI machines.
task_scheduler.best_costs = np.array([1e2, 1e-8])
task_scheduler.tune(tune_option, search_policy="sketch.random")
# Check the allocation results
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
assert counters[tasks[0].workload_key] == n_trials - 1
assert counters[tasks[1].workload_key] == 1
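        # Why this allocation (added note): the objective depends only on costs[0], and the
        # forced best_costs make task 0 look far from its best while task 1 is already near
        # optimal, so the gradient strategy gives task 1 only its single warm-up trial and
        # spends the remaining n_trials - 1 trials on task 0.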
del measure_ctx
if __name__ == "__main__":
test_task_scheduler_round_robin()
test_task_scheduler_round_robin_spawn()
test_task_scheduler_gradient()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_autotvm_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test database"""
import copy
import logging
from tvm.autotvm import database
from tvm.autotvm.record import encode, MeasureResult
from tvm.testing.autotvm import get_sample_records
def test_save_load():
logging.info("test basic db load/save ...")
records = get_sample_records(3)
inp1, res1 = records[0]
inp2, res2 = records[1]
inp3, _ = records[2]
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1)
_db.save(inp2, res2)
load1 = _db.load(inp1)
load2 = _db.load(inp2)
load3 = _db.load(inp3)
assert load1 == res1
assert load2 == res2
assert load3 is None
assert load1 != load2
TRIAL_LIMIT = 2
def test_db_hash():
logging.info("test db hash check ...")
inp1, res1 = get_sample_records(1)[0]
inp2 = copy.deepcopy(inp1)
inp1.config.code_hash = "cafecafe"
inp2.config.code_hash = "dbffdbff"
res2l = list(tuple(res1))
# set timestamp
res2l[-1] = -1
res2 = MeasureResult(*res2l)
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1, extend=True)
_db.save(inp2, res2, extend=True)
load1 = _db.load(inp1)
load2 = _db.load(inp2)
assert load1 != load2
assert load1.timestamp != -1
assert load2.timestamp == -1
def test_db_latest_all():
logging.info("test db load w/ multiple results ...")
inp1, res1 = get_sample_records(1)[0]
lis1 = list(tuple(res1))
lis2 = list(tuple(res1))
lis3 = list(tuple(res1))
# set timestamp
lis1[-1] = 0.0
lis2[-1] = 1.1
lis3[-1] = 9999.9999
res1 = MeasureResult(*lis1)
res2 = MeasureResult(*lis2)
res3 = MeasureResult(*lis3)
_db = database.DummyDatabase()
_db.flush()
_db.save(inp1, res1, extend=True)
load1 = _db.load(inp1)
assert load1.timestamp == 0.0
_db.save(inp1, res2, extend=True)
load2 = _db.load(inp1)
assert load2.timestamp == 1.1
_db.save(inp1, res3, extend=True)
load3 = _db.load(inp1)
assert load3.timestamp == 9999.9999
load4 = _db.load(inp1, get_all=True)
assert encode(inp1, load4[0]) == encode(inp1, res1)
assert encode(inp1, load4[1]) == encode(inp1, res2)
assert encode(inp1, load4[2]) == encode(inp1, res3)
def test_db_filter():
logging.info("test db filter ...")
records = get_sample_records(5)
_db = database.DummyDatabase()
_db.flush()
for inp, result in records:
_db.save(inp, result)
records = _db.filter(lambda inp, ress: any(r.costs[0] <= 2 for r in ress))
assert len(records) == 2
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
test_save_load()
test_db_hash()
test_db_latest_all()
test_db_filter()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_autotvm_dispatch_context.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test dispatcher.
The dispatcher can choose which template to use according
to the parameters of workload"""
from tvm import autotvm
@autotvm.template("testing/dispatch_fallback")
def simple_template(a, b):
cfg = autotvm.get_config()
assert cfg.is_fallback
def test_fallback():
simple_template(2, 3)
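# A minimal dispatch sketch (added note, not part of the original test; "tuning.log" is a
# hypothetical record file): wrapping a call in a tuned context makes get_config() return
# the best measured configuration instead of a fallback, e.g.
#
#     with autotvm.apply_history_best("tuning.log"):
#         simple_template(2, 3)  # would trip the assert above, since cfg.is_fallback is False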
if __name__ == "__main__":
test_fallback()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_autotvm_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test feature extraction"""
import numpy as np
import tvm
from tvm import te
from tvm.autotvm import feature
def test_iter_feature_gemm():
N = 128
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
feas = feature.get_itervar_feature(s, [A, B, C], take_log=False)
expected = [
{
"_attr_": [128, 1, 128, 2097152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [128, -1, 16384, 128, 0, 0],
"B_0": [0, -1, 16384, 128, 0, 0],
"C_0": [128, -1, 16384, 128, 0, 0],
"C_1": [128, -1, 16384, 128, 0, 0],
},
{
"_attr_": [128, 2, 16384, 16384, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [0, -1, 128, 128, 0, 0],
"B_0": [1, -1, 16384, 1, 0, 0],
"C_0": [1, -1, 128, 128, 0, 0],
"C_1": [1, -1, 128, 128, 0, 0],
},
{
"_attr_": [128, 3, 2097152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
"A_0": [1, -1, 128, 1, 0, 0],
"B_0": [128, -1, 128, 1, 0, 0],
"C_1": [0, -1, 1, 128, 0, 0],
"C_2": [0, -1, 1, 128, 0, 0],
},
]
for ans, row in zip(expected, feas):
for pair in row:
if pair[0] not in ans:
continue
assert ans[pair[0]] == pair[1:], "%s: %s vs %s" % (pair[0], ans[pair[0]], pair[1:])
def test_curve_feature_gemm():
N = 128
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
feas = feature.get_buffer_curve_sample_flatten(s, [A, B, C], sample_n=30)
# sample_n * #buffers * #curves * 2 numbers per curve
assert len(feas) == 30 * 3 * 4 * 2
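    # i.e. 30 samples x 3 buffers (A, B, C) x 4 curve types x 2 numbers each = 720 values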
def test_feature_shape():
"""test the dimensions of flatten feature are the same"""
N = 1024
n_sample = 100
def get_gemm_feature(target):
k = te.reduce_axis((0, N), "k")
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="B")
C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
s = te.create_schedule(C.op)
y, x = s[C].op.axis
axes = list(s[C].tile(y, x, 8, 8)) + [k]
perm = np.random.permutation(5)
axes = [axes[x] for x in perm]
s[C].reorder(*axes)
if "gpu" in target.keys:
pick = []
# filter out reduction axis
for i in range(len(perm)):
if perm[i] != 4:
pick.append(axes[i])
s[C].bind(pick[0], te.thread_axis("blockIdx.x"))
s[C].bind(pick[1], te.thread_axis("vthread"))
s[C].bind(pick[2], te.thread_axis("threadIdx.y"))
with target:
feas = feature.get_itervar_feature(s, [A, B, C])
feas = feature.flatten_itervar_feature(feas)
return feas
targets = [
tvm.target.cuda(),
tvm.target.mali(),
tvm.target.arm_cpu(),
]
for target in targets:
dim = len(get_gemm_feature(target))
for i in range(n_sample):
assert dim == len(get_gemm_feature(target)), (
"dimensions of feature do not match" " for different configurations"
)
if __name__ == "__main__":
test_iter_feature_gemm()
test_curve_feature_gemm()
test_feature_shape()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_autotvm_flop_calculator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test flop calculation"""
import tvm
from tvm import te
import numpy as np
from tvm.autotvm.task.task import compute_flop
def random_dtypes():
"""Return pair of (input, accumulator) dtypes"""
candidates = [("float32", "float32"), ("float16", "float32"), ("int8", "int32")]
return candidates[np.random.choice(len(candidates))]
def test_conv():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
K = te.placeholder((CO, CI, KH, KW), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
ci = te.reduce_axis((0, CI))
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
D[n][ci][h][w].astype(acc_dtype) * K[co][ci][h][w].astype(acc_dtype),
axis=[ci, kh, kw],
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * CI * KH * KW
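        # Each of the N * CO * OH * OW outputs accumulates CI * KH * KW products, and every
        # multiply-accumulate pair counts as 2 FLOPs, hence the factor of 2.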
def test_pack_gemm():
for i in range(5):
N, L, M = [np.random.randint(10, 128) * 4 for _ in range(3)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N, L), dtype=input_dtype)
B = te.placeholder((M, L), dtype=input_dtype)
k = te.reduce_axis((0, L))
bn = 4
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
A_pack = te.compute((N // bn, L, bn), lambda i, j, k: A[i * bn + k][j])
B_pack = te.compute((M // bn, L, bn), lambda i, j, k: B[i * bn + k][j])
C_pack = te.compute(
(N // bn, M // bn, bn, bn),
lambda i, j, ii, jj: te.sum(
A_pack[i, k, ii].astype(acc_dtype) * B_pack[j, k, jj].astype(acc_dtype), axis=[k]
),
)
C = te.compute(
(N, M), lambda i, j: C_pack[idxd(i, bn)][idxd(j, bn)][idxm(i, bn)][idxm(j, bn)]
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * L * M
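        # The packing (A_pack, B_pack) and unpacking (C) stages are pure data movement;
        # only the multiply-accumulate in C_pack contributes FLOPs: 2 * N * L * M.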
def test_outer_dot():
for i in range(5):
N, M = [np.random.randint(10, 128) * 4 for _ in range(2)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N,), dtype=input_dtype)
B = te.placeholder((M,), dtype=input_dtype)
C = te.compute((N, M), lambda i, j: A[i].astype(acc_dtype) * B[j].astype(acc_dtype))
s = te.create_schedule([C.op])
assert compute_flop(s) == N * M
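        # An outer product performs a single multiply per output element, so N * M FLOPs.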
def test_max_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, _) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW), lambda n, co, h, w: tvm.te.max(D[n][co][h + kh][w + kw], axis=[kh, kw])
)
s = te.create_schedule([C.op])
assert compute_flop(s) == N * CO * OH * OW * KH * KW
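        # Max pooling has no multiply-add pairs; each of the KH * KW comparisons per output
        # element is counted as one op, giving N * CO * OH * OW * KH * KW in total.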
def test_average_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
te.div(D[n][co][h + kh][w + kw].astype(acc_dtype), (KW * KH)), axis=[kh, kw]
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * KH * KW
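        # Each reduction step does a division and an accumulation (2 ops per element),
        # hence 2 * N * CO * OH * OW * KH * KW.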
def test_move():
"""No float number operation in simple move. So the estimator should raise an error"""
N = 1024
A = te.placeholder((N,))
C = te.compute((N,), lambda i: A[i])
s = te.create_schedule([C.op])
try:
compute_flop(s)
assert False
except RuntimeError:
pass
if __name__ == "__main__":
test_conv()
test_pack_gemm()
test_outer_dot()
test_move()
| https://github.com/zk-ml/tachikoma |