tests/python/integration/test_ewise.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test elementwise integration."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import nvcc
@tvm.testing.requires_gpu
def test_exp():
"""Test scheduling and running exponent."""
# graph
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_a = te.placeholder((arr_length_tvm,), name="A")
placeholder_b = te.compute(placeholder_a.shape, lambda *i: te.exp(placeholder_a(*i)), name="B")
schedule = te.create_schedule(placeholder_b.op)
# create iter var and assign them tags.
num_thread = 8
axis1, axis2 = schedule[placeholder_b].split(placeholder_b.op.axis[0], factor=num_thread)
schedule[placeholder_b].bind(axis1, te.thread_axis("blockIdx.x"))
schedule[placeholder_b].bind(axis2, te.thread_axis("threadIdx.x"))
# one line to build the function.
def check_device(device, host="stackvm"):
if not tvm.testing.device_enabled(host):
return
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fexp = tvm.build(schedule, [placeholder_a, placeholder_b], device, host, name="myexp")
dev = tvm.device(device, 0)
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros(arr_length, dtype=placeholder_b.dtype), dev)
fexp(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), np.exp(buff_a.numpy()), rtol=1e-5)
check_device("opencl -device=intel_graphics")
check_device("cuda", "llvm")
check_device("vulkan")
@tvm.testing.requires_gpu
def test_fmod():
"""Test scheduling and running fmod."""
# graph
def run(dtype):
size_var_n = te.size_var("n")
placeholder_a = te.placeholder((size_var_n,), name="A", dtype=dtype)
placeholder_b = te.placeholder((size_var_n,), name="B", dtype=dtype)
result_c = te.compute(
placeholder_a.shape, lambda *i: te.fmod(placeholder_a(*i), placeholder_b(*i)), name="C"
)
schedule = te.create_schedule(result_c.op)
# create iter var and assign them tags.
num_thread = 8
axis0, axis1 = schedule[result_c].split(result_c.op.axis[0], factor=num_thread)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
target = tvm.target.Target(device)
if "cpu" not in target.keys:
schedule[result_c].bind(axis0, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis1, te.thread_axis("threadIdx.x"))
fmod = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c], device, name="myfmod"
)
# launch the kernel.
value_n = 1024
a_np = (np.random.uniform(size=value_n) * 256).astype(placeholder_a.dtype)
b_np = (np.random.uniform(size=value_n) * 256).astype(placeholder_b.dtype)
# "fix" the values in a and b to avoid the result being too small
b_np += (b_np < 2.0) * 2
a_np[np.abs(np.fmod(a_np, b_np)) < 1] += 1
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros(value_n, dtype=result_c.dtype), dev)
ftimer = fmod.time_evaluator(fmod.entry_name, dev, number=1)
_ = ftimer(buff_a, buff_b, buff_c).mean
np.testing.assert_allclose(
buff_c.numpy(), np.mod(buff_a.numpy(), buff_b.numpy()), rtol=1e-5
)
check_device("cuda")
check_device("opencl -device=intel_graphics")
check_device("metal")
run("float32")
@tvm.testing.requires_gpu
def test_multiple_cache_write():
"""Test multiple cache writes."""
# graph
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_a0 = te.placeholder((arr_length_tvm,), name="A0", dtype="float32")
placeholder_a1 = te.placeholder((arr_length_tvm,), name="A1", dtype="float32")
result_b0, result_b1 = te.compute(
(arr_length_tvm,),
lambda *i: (
placeholder_a0(*i) + placeholder_a1(*i),
placeholder_a0(*i) * placeholder_a1(*i),
),
name="B",
)
result_c = te.compute((arr_length_tvm,), lambda *i: result_b0(*i) + result_b1(*i), name="C")
schedule = te.create_schedule(result_c.op)
# create iter var and assign them tags.
num_thread = 8
cache_b0, _ = schedule.cache_write([result_b0, result_b1], "local")
axis0, axis1 = schedule[result_c].split(result_c.op.axis[0], factor=num_thread)
schedule[result_b0].compute_at(schedule[result_c], axis0)
schedule[cache_b0].compute_at(schedule[result_c], axis0)
schedule[result_c].bind(axis0, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis1, te.thread_axis("threadIdx.x"))
# one line to build the function.
def check_device(device, host="stackvm"):
if not tvm.testing.device_enabled(host):
return
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
return
func = tvm.build(
schedule,
[placeholder_a0, placeholder_a1, result_c],
device,
host,
name="multiple_cache_write",
)
dev = tvm.device(device, 0)
# launch the kernel.
buff_a0 = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a0.dtype), dev)
buff_a1 = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a1.dtype), dev)
buff_c = tvm.nd.array(np.zeros(arr_length, dtype=result_c.dtype), dev)
func(buff_a0, buff_a1, buff_c)
tvm.testing.assert_allclose(
buff_c.numpy(),
buff_a0.numpy() + buff_a1.numpy() + (buff_a0.numpy() * buff_a1.numpy()),
rtol=1e-5,
)
check_device("cuda", "llvm")
check_device("vulkan")
check_device("opencl")
def test_log_pow_llvm():
"""Test log pow using llvm to lower."""
# graph
size_var_n = te.size_var("n")
placeholder_a = te.placeholder((size_var_n,), name="A")
result_b = te.compute(
placeholder_a.shape, lambda *i: te.power(te.log(placeholder_a(*i)), 2.0), name="B"
)
schedule = te.create_schedule(result_b.op)
# create iter var and assign them tags.
schedule[result_b].split(result_b.op.axis[0], factor=32)
# one line to build the function.
if not tvm.testing.device_enabled("llvm"):
return
flog = tvm.build(schedule, [placeholder_a, result_b], "llvm", name="mylog")
dev = tvm.cpu(0)
# launch the kernel.
size_var_n = 1028
buff_a = tvm.nd.array(np.random.uniform(size=size_var_n).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros(size_var_n, dtype=result_b.dtype), dev)
repeat = 10
ftimer = flog.time_evaluator(flog.entry_name, dev, number=1, repeat=repeat)
res = ftimer(buff_a, buff_b)
assert len(res.results) == repeat
tvm.testing.assert_allclose(buff_b.numpy(), np.power(np.log(buff_a.numpy()), 2.0), rtol=1e-5)
@tvm.testing.uses_gpu
def test_popcount():
"""Test popcount."""
def run(dtype):
# graph
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(1024)
placeholder_a = te.placeholder((arr_length_tvm,), name="A", dtype=dtype)
placeholder_b = te.compute(
placeholder_a.shape, lambda *i: tvm.tir.popcount(placeholder_a(*i)), name="B"
)
schedule = te.create_schedule(placeholder_b.op)
# simple schedule
num_thread = 8
axis1, axis2 = schedule[placeholder_b].split(placeholder_b.op.axis[0], factor=num_thread)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
target = tvm.target.Target(device)
if "cpu" not in target.keys:
schedule[placeholder_b].bind(axis1, te.thread_axis("blockIdx.x"))
schedule[placeholder_b].bind(axis2, te.thread_axis("threadIdx.x"))
func = tvm.build(schedule, [placeholder_a, placeholder_b], device)
# launch the kernel.
buff_a = tvm.nd.array(
np.random.randint(low=0, high=1000, size=arr_length, dtype=placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(shape=arr_length, dtype=placeholder_b.dtype), dev)
func(buff_a, buff_b)
tvm.testing.assert_allclose(
buff_b.numpy(), list(map(lambda x: bin(x).count("1"), buff_a.numpy())), rtol=1e-5
)
check_device("llvm")
check_device("cuda")
check_device("opencl")
if dtype == "uint32":
check_device("metal")
check_device("vulkan")
run("uint32")
run("uint64")
@tvm.testing.requires_gpu
def test_add():
"""Test addition."""
def run(dtype):
# graph
size_var_n = te.size_var("n")
placeholder_a = te.placeholder((size_var_n,), name="A", dtype=dtype)
placeholder_b = te.placeholder((size_var_n,), name="B", dtype=dtype)
result_c = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + placeholder_b(*i), name="C"
)
# schedule
schedule = te.create_schedule(result_c.op)
# create iter var and assign them tags.
num_thread = 16
axis_bx, axis_x = schedule[result_c].split(result_c.op.axis[0], factor=num_thread * 4)
axis_tx, axis_x = schedule[result_c].split(axis_x, nparts=num_thread)
_, axis_x = schedule[result_c].split(axis_x, factor=4)
schedule[result_c].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis_tx, te.thread_axis("threadIdx.x"))
schedule[result_c].vectorize(axis_x)
# one line to build the function.
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fadd = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c], device, name="myadd"
)
# launch the kernel.
n = 1024
buff_a = tvm.nd.array(
(np.random.uniform(size=n) * 256).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(
(np.random.uniform(size=n) * 256).astype(placeholder_b.dtype), dev
)
buff_c = tvm.nd.array(np.zeros(n, dtype=result_c.dtype), dev)
ftimer = fadd.time_evaluator(fadd.entry_name, dev, number=1)
_ = ftimer(buff_a, buff_b, buff_c).mean
tvm.testing.assert_allclose(buff_c.numpy(), buff_a.numpy() + buff_b.numpy(), rtol=1e-6)
check_device("opencl")
check_device("cuda")
if dtype == "float32":
check_device("metal")
check_device("vulkan")
run("float32")
run("int32")
run("int64")
run("uint64")
@tvm.testing.requires_gpu
def try_warp_memory():
"""Test using warp memory
skip this in default test because it require higher arch"""
arr_size = 128
placeholder_a = te.placeholder((arr_size,), name="A")
result_b = te.compute((arr_size,), lambda i: placeholder_a[i] + 3, name="B")
warp_size = 32
schedule = te.create_schedule(result_b.op)
cache_read_aa = schedule.cache_read(placeholder_a, "warp", [result_b])
axis_x0, axis_xi = schedule[result_b].split(result_b.op.axis[0], warp_size * 2)
_, axis_xi1 = schedule[result_b].split(axis_xi, factor=warp_size)
thread_axis_tx = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_xi1, thread_axis_tx)
schedule[result_b].bind(axis_x0, te.thread_axis("blockIdx.x"))
schedule[cache_read_aa].compute_at(schedule[result_b], axis_x0)
axis_x0, axis_xi = schedule[cache_read_aa].split(schedule[cache_read_aa].op.axis[0], warp_size)
schedule[cache_read_aa].bind(axis_xi, thread_axis_tx)
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code): # pylint: disable=unused-variable
ptx = nvcc.compile_cuda(code)
return ptx
# one line to build the function.
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
myfunc = tvm.build(schedule, [placeholder_a, result_b], device)
buff_a = tvm.nd.array(
(np.random.uniform(size=arr_size) * 256).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(arr_size, dtype=result_b.dtype), dev)
myfunc(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), buff_a.numpy() + 3, rtol=1e-6)
check_device("cuda")
if __name__ == "__main__":
test_exp()
try_warp_memory()
test_multiple_cache_write()
test_add()
test_log_pow_llvm()
test_popcount()
test_fmod()

tests/python/integration/test_ewise_fpga.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test elementwise ops on fpga."""
import os
import numpy as np
import tvm
import tvm.testing
from tvm import te
os.environ["XCL_EMULATION_MODE"] = "1"
os.environ["CL_CONTEXT_EMULATOR_DEVICE_INTELFPGA"] = "1"
@tvm.register_func
def tvm_callback_vhls_postproc(code):
"""Hook to inspect the Vivado HLS code before actually run it"""
print(code)
return code
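# --- Illustrative note (not part of the original test file) ---
# The postproc hook above only prints the generated HLS source, but whatever
# string it returns is what actually gets compiled, so a hook may also rewrite
# the code. A hypothetical variant that tags the source might look like:
#
#     @tvm.register_func("tvm_callback_vhls_postproc", override=True)
#     def tag_vhls_source(code):
#         return "// inspected by the test harness\n" + code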
def test_exp():
"""Test scheduling and running exp function."""
# graph
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_b = te.placeholder((arr_length_tvm,), name="A")
result_b = te.compute(placeholder_b.shape, lambda *i: te.exp(placeholder_b(*i)), name="B")
schedule = te.create_schedule(result_b.op)
# create iter var and assign them tags.
axis1, _ = schedule[result_b].split(result_b.op.axis[0], nparts=1)
schedule[result_b].bind(axis1, te.thread_axis("pipeline"))
# one line to build the function.
def check_device(device, host="llvm"):
if not tvm.testing.device_enabled(device):
return
dev = tvm.device(device, 0)
fexp = tvm.build(schedule, [placeholder_b, result_b], device, host, name="myexp")
dev = tvm.device(device, 0)
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_b.dtype), dev)
buff_b = tvm.nd.array(np.zeros(arr_length, dtype=result_b.dtype), dev)
fexp(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), np.exp(buff_a.numpy()), rtol=1e-5)
check_device("sdaccel")
if "AWS_PLATFORM" in os.environ:
check_device("sdaccel -device=" + os.environ.get("AWS_PLATFORM"))
check_device("aocl_sw_emu")
def test_multi_kernel():
"""Test scheduling with multiple computes."""
# graph
arr_length = 1024
arr_length_tvm = tvm.runtime.convert(arr_length)
placeholder_a = te.placeholder((arr_length_tvm,), name="A")
placeholder_b = te.placeholder((arr_length_tvm,), name="B")
result_c = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + placeholder_b(*i), name="C"
)
result_d = te.compute(
placeholder_a.shape, lambda *i: placeholder_a(*i) + result_c(*i), name="D"
)
schedule = te.create_schedule(result_d.op)
# create iter var and assign them tags.
axis1, _ = schedule[result_c].split(result_c.op.axis[0], nparts=1)
schedule[result_c].bind(axis1, te.thread_axis("pipeline"))
axis1, _ = schedule[result_d].split(result_d.op.axis[0], nparts=1)
schedule[result_d].bind(axis1, te.thread_axis("pipeline"))
# one line to build the function.
def check_device(device, host="llvm"):
if not tvm.testing.device_enabled(device):
return
dev = tvm.device(device, 0)
fadd = tvm.build(
schedule, [placeholder_a, placeholder_b, result_c, result_d], device, host, name="myadd"
)
dev = tvm.device(device, 0)
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_b.dtype), dev)
buff_c = tvm.nd.array(np.random.uniform(size=arr_length).astype(result_c.dtype), dev)
buff_d = tvm.nd.array(np.random.uniform(size=arr_length).astype(result_d.dtype), dev)
fadd(buff_a, buff_b, buff_c, buff_d)
tvm.testing.assert_allclose(buff_d.numpy(), buff_a.numpy() * 2 + buff_b.numpy(), rtol=1e-5)
check_device("sdaccel")
check_device("aocl_sw_emu")
if __name__ == "__main__":
test_exp()
test_multi_kernel()

tests/python/integration/test_gemm.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test scheduling and running a gemm!"""
import numpy as np
import tvm
import tvm.testing
from tvm import te
@tvm.testing.requires_gpu
def test_gemm():
"""Test the gemm!"""
# graph
dim1_length = 1024
dim_n = tvm.runtime.convert(dim1_length)
dim_m = dim_n
dim_l = dim_n
placeholder_a = te.placeholder((dim_n, dim_l), name="A")
placeholder_b = te.placeholder((dim_m, dim_l), name="B")
axis_k = te.reduce_axis((0, dim_l), name="k")
result_c = te.compute(
(dim_n, dim_m),
lambda ii, jj: te.sum(placeholder_a[ii, axis_k] * placeholder_b[jj, axis_k], axis=axis_k),
name="CC",
)
# schedule
schedule = te.create_schedule(result_c.op)
scale = 8
num_thread = 8
block_factor = scale * num_thread
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis("threadIdx.y")
cache_write = schedule.cache_write(result_c, "local")
cache_read_a = schedule.cache_read(placeholder_a, "shared", [cache_write])
cache_read_b = schedule.cache_read(placeholder_b, "shared", [cache_write])
axis_by, axis_yi = schedule[result_c].split(result_c.op.axis[0], factor=block_factor)
axis_bx, axis_xi = schedule[result_c].split(result_c.op.axis[1], factor=block_factor)
schedule[result_c].reorder(axis_by, axis_bx, axis_yi, axis_xi)
schedule[result_c].bind(axis_by, block_y)
schedule[result_c].bind(axis_bx, block_x)
axis_ty, axis_yi = schedule[result_c].split(axis_yi, nparts=num_thread)
axis_tx, axis_xi = schedule[result_c].split(axis_xi, nparts=num_thread)
schedule[result_c].reorder(axis_ty, axis_tx, axis_yi, axis_xi)
schedule[result_c].bind(axis_ty, thread_y)
schedule[result_c].bind(axis_tx, thread_x)
axis_yo, axis_xo = cache_write.op.axis
schedule[cache_write].reorder(axis_k, axis_yo, axis_xo)
schedule[cache_write].compute_at(schedule[result_c], axis_tx)
schedule[cache_read_a].compute_at(schedule[cache_write], axis_k)
schedule[cache_read_b].compute_at(schedule[cache_write], axis_k)
schedule[cache_read_a].double_buffer()
schedule[cache_read_b].double_buffer()
axis_ty, axis_xi = schedule[cache_read_a].split(
schedule[cache_read_a].op.axis[0], nparts=num_thread
)
axis_tx, axis_xi = schedule[cache_read_a].split(axis_xi, nparts=num_thread)
schedule[cache_read_a].bind(axis_ty, thread_y)
schedule[cache_read_a].bind(axis_tx, thread_x)
axis_ty, axis_xi = schedule[cache_read_b].split(
schedule[cache_read_b].op.axis[0], nparts=num_thread
)
axis_tx, axis_xi = schedule[cache_read_b].split(axis_xi, nparts=num_thread)
schedule[cache_read_b].bind(axis_ty, thread_y)
schedule[cache_read_b].bind(axis_tx, thread_x)
# lowering test
schedule = schedule.normalize()
# one line to build the function.
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
with tvm.target.Target(device):
f = tvm.build(schedule, [placeholder_a, placeholder_b, result_c])
# launch the kernel.
num_n = dim1_length
num_m = num_n
num_l = num_n
a_np = np.random.uniform(size=(num_n, num_l)).astype(placeholder_a.dtype)
b_np = np.random.uniform(size=(num_m, num_l)).astype(placeholder_b.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros((num_n, num_m), dtype=result_c.dtype), dev)
ftimer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = ftimer(buff_a, buff_b, buff_c).mean
print("%s: exec=%g sec/op" % (dev, tcost))
tvm.testing.assert_allclose(buff_c.numpy(), np.dot(a_np, b_np.T), rtol=1e-5)
check_device("vulkan")
check_device("nvptx -mcpu=sm_20")
check_device("rocm")
check_device("metal")
check_device("opencl")
check_device("cuda")
if __name__ == "__main__":
test_gemm()

tests/python/integration/test_legacy_tuning.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the tuner
"""
import logging
import multiprocessing as mp
import textwrap
import tvm
import tvm.relay
import tvm.testing
from tvm import autotvm, te
from tvm.autotvm.measure import measure_methods
from tvm.autotvm.tuner import RandomTuner
from tvm.contrib import tar
from tvm.ir.instrument import pass_instrument
from tvm.ir.transform import PassContext
from tvm.target import Target
from tvm.tir.analysis import _ffi_api as _analysis_ffi_api
def setup_module():
"""Setup the module used for testing."""
@autotvm.template("testing/conv2d_no_batching")
def conv2d_no_batching( # pylint: disable=unused-variable
batch_size, input_h, input_w, channels_in, channels_out, kernel_h, kernel_w
):
"""An example template for testing"""
assert batch_size == 1, "Only consider batch_size = 1 in this template"
data = te.placeholder((batch_size, channels_in, input_h, input_w), name="data")
kernel = te.placeholder((channels_out, channels_in, kernel_h, kernel_w), name="kernel")
axis_rc = te.reduce_axis((0, channels_in), name="rc")
axis_ry = te.reduce_axis((0, kernel_h), name="ry")
axis_rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch_size, channels_out, input_h - kernel_h + 1, input_w - kernel_w + 1),
lambda nn, ff, yy, xx: te.sum(
data[nn, axis_rc, yy + axis_ry, xx + axis_rx]
* kernel[ff, axis_rc, axis_ry, axis_rx],
axis=[axis_rc, axis_ry, axis_rx],
),
tag="conv2d_nchw",
)
schedule = te.create_schedule([conv.op])
output = conv
cache_write_ol = schedule.cache_write(conv, "local")
# create cache stage
cache_read_aa = schedule.cache_read(data, "shared", [cache_write_ol])
cache_read_ww = schedule.cache_read(kernel, "shared", [cache_write_ol])
cache_read_al = schedule.cache_read(cache_read_aa, "local", [cache_write_ol])
cache_read_wl = schedule.cache_read(cache_read_ww, "local", [cache_write_ol])
# tile and bind spatial axes
axis_n, axis_f, axis_y, axis_x = schedule[output].op.axis
cfg = autotvm.get_config()
cfg.define_split("tile_f", cfg.axis(axis_f), num_outputs=4)
cfg.define_split("tile_y", cfg.axis(axis_y), num_outputs=4)
cfg.define_split("tile_x", cfg.axis(axis_x), num_outputs=4)
axis_bf, axis_vf, axis_tf, axis_fi = cfg["tile_f"].apply(schedule, output, axis_f)
axis_by, axis_vy, axis_ty, axis_yi = cfg["tile_y"].apply(schedule, output, axis_y)
axis_bx, axis_vx, axis_tx, axis_xi = cfg["tile_x"].apply(schedule, output, axis_x)
kernel_scope = axis_n # this is the scope to attach global config inside this kernel
schedule[output].bind(axis_bf, te.thread_axis("blockIdx.z"))
schedule[output].bind(axis_by, te.thread_axis("blockIdx.y"))
schedule[output].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[output].bind(axis_vf, te.thread_axis("vthread"))
schedule[output].bind(axis_vy, te.thread_axis("vthread"))
schedule[output].bind(axis_vx, te.thread_axis("vthread"))
schedule[output].bind(axis_tf, te.thread_axis("threadIdx.z"))
schedule[output].bind(axis_ty, te.thread_axis("threadIdx.y"))
schedule[output].bind(axis_tx, te.thread_axis("threadIdx.x"))
schedule[output].reorder(
axis_n,
axis_bf,
axis_by,
axis_bx,
axis_vf,
axis_vy,
axis_vx,
axis_tf,
axis_ty,
axis_tx,
axis_fi,
axis_yi,
axis_xi,
)
schedule[cache_write_ol].compute_at(schedule[output], axis_tx)
# tile and bind reduction axes
axis_n, axis_f, axis_y, axis_x = schedule[cache_write_ol].op.axis
axis_rc, axis_ry, axis_rx = schedule[cache_write_ol].op.reduce_axis
cfg.define_split("tile_rc", cfg.axis(axis_rc), num_outputs=3)
cfg.define_split("tile_ry", cfg.axis(axis_ry), num_outputs=3)
cfg.define_split("tile_rx", cfg.axis(axis_rx), num_outputs=3)
axis_rco, axis_rcm, axis_rci = cfg["tile_rc"].apply(schedule, cache_write_ol, axis_rc)
axis_ryo, axis_rym, axis_ryi = cfg["tile_rx"].apply(schedule, cache_write_ol, axis_ry)
axis_rxo, axis_rxm, axis_rxi = cfg["tile_ry"].apply(schedule, cache_write_ol, axis_rx)
schedule[cache_write_ol].reorder(
axis_rco,
axis_ryo,
axis_rxo,
axis_rcm,
axis_rym,
axis_rxm,
axis_rci,
axis_ryi,
axis_rxi,
axis_n,
axis_f,
axis_y,
axis_x,
)
schedule[cache_read_aa].compute_at(schedule[cache_write_ol], axis_rxo)
schedule[cache_read_ww].compute_at(schedule[cache_write_ol], axis_rxo)
schedule[cache_read_al].compute_at(schedule[cache_write_ol], axis_rxm)
schedule[cache_read_wl].compute_at(schedule[cache_write_ol], axis_rxm)
# cooperative fetching
for load in [cache_read_aa, cache_read_ww]:
axis_n, axis_f, axis_y, axis_x = schedule[load].op.axis
fused = schedule[load].fuse(axis_n, axis_f, axis_y, axis_x)
axis_tz, fused = schedule[load].split(fused, nparts=cfg["tile_f"].size[2])
axis_ty, fused = schedule[load].split(fused, nparts=cfg["tile_y"].size[2])
axis_tx, fused = schedule[load].split(fused, nparts=cfg["tile_x"].size[2])
schedule[load].bind(axis_tz, te.thread_axis("threadIdx.z"))
schedule[load].bind(axis_ty, te.thread_axis("threadIdx.y"))
schedule[load].bind(axis_tx, te.thread_axis("threadIdx.x"))
# tune unroll
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
schedule[output].pragma(
kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val
)
schedule[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
return schedule, [data, kernel, conv]
def teardown_module():
"""Remove the module from the autotvm task tables."""
# TODO(areusch): Tasks should not be registered into a global.
del autotvm.task.task.TASK_TABLE["testing/conv2d_no_batching"]
def get_sample_task(target=tvm.target.cuda(), target_host=None):
"""return a sample task for testing"""
target, target_host = Target.canon_target_and_host(target, target_host)
task = autotvm.task.create(
"testing/conv2d_no_batching", args=(1, 7, 7, 512, 512, 3, 3), target=target
)
return task, target
def run_test_with_all_multiprocessing(func, *args, **kwargs):
"""Check all multiprocessing methods work for the tuning test.
In the past, fork() had the most support, to the detriment of spawn() and forkserver().
As fork() is unavailable or unsafe on some platforms it is good to check all
available methods.
"""
for multiprocessing_method in mp.get_all_start_methods():
old_start_method = mp.get_start_method()
try:
mp.set_start_method(multiprocessing_method, force=True)
func(*args, **kwargs)
finally:
mp.set_start_method(old_start_method, force=True)
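# --- Illustrative note (not part of the original test) ---
# mp.get_all_start_methods() typically returns ["fork", "spawn", "forkserver"]
# on Linux and only ["spawn"] on Windows, so the loop above exercises every
# start method the host platform supports.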
@tvm.testing.parametrize_targets("cuda", "opencl")
def test_tuning_gpu(target):
"""Test gpu tuning."""
def runner(target):
# init task
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
measure_option = autotvm.measure_option(autotvm.LocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=20,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 20
successful_results = [
r
for r in results
if r.error_no == autotvm.MeasureErrorNo.NO_ERROR
# We filter records before building if we know they won't work ahead of time.
# We can't guarantee we get one good record so we count these as success too
or r.error_no == autotvm.MeasureErrorNo.INSTANTIATION_ERROR
]
assert len(successful_results) > 0, f"No successful tuning runs: {results!r}"
run_test_with_all_multiprocessing(runner, target)
@tvm.testing.parametrize_targets("cuda", "opencl")
def test_tuning_gpu_inherits_pass_context(target):
"""Autotvm tuner inherits PassContexts but also adds a gpu verification pass by default.
Test that using PassContext inherits passes properly but also runs gpu verification pass.
"""
@pass_instrument
class PassInstrumentChecker:
"""Pass Instrument that simply sees if it's been run."""
def __init__(self):
self.has_been_run = False
def run_after_pass(self, *_):
self.has_been_run = True
class GPUVerifyPassMocked:
"""Context manager that mocks tir.analysis.verify_gpu_code meant
to verify the pass has been run. This is done by patching the ffi func handles."""
FFI_FUNC_HANDLE = "tir.analysis.verify_gpu_code"
FUNC_NAME = "verify_gpu_code"
def __init__(self) -> None:
self.old_impl = tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
self.has_been_run = False
def gpu_verify_pass_mocked(self):
"""Get the replacement for the gpu verification pass."""
def _gpu_verify_pass_mocked(*args, **kwargs):
self.has_been_run = True
return self.old_impl(*args, **kwargs)
return _gpu_verify_pass_mocked
def __enter__(self):
tvm._ffi.register_func(
self.FFI_FUNC_HANDLE, self.gpu_verify_pass_mocked(), override=True
)
# Also overwrite the python bindings
setattr(
_analysis_ffi_api, self.FUNC_NAME, tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
)
def __exit__(self, *args, **kwargs):
# Restore FFI status back to normal
tvm._ffi.register_func(self.FFI_FUNC_HANDLE, self.old_impl, override=True)
setattr(_analysis_ffi_api, self.FUNC_NAME, self.old_impl)
class OverwrittenBuildFunc(measure_methods._WrappedBuildFunc):
"""BuildFunc that mocks and patches as necessary to test proper passes are run."""
def __call__(self, measure_input, tmp_dir, **kwargs):
instrument = PassInstrumentChecker()
mocked_pass_checker = GPUVerifyPassMocked()
with mocked_pass_checker:
with PassContext(instruments=[instrument]):
regular_result = super().__call__(measure_input, tmp_dir, **kwargs)
# Check instrument has been run, meaning context was inherited by builder
assert instrument.has_been_run
# But also check the gpu verification pass has been run
# (which was not in the inherited ctx)
assert mocked_pass_checker.has_been_run
return regular_result
class MockedLocalBuilder(measure_methods.LocalBuilder):
"""As measure_methods.LocalBuilder but overwrites the PassContext for testing."""
def __init__(
self,
timeout=10,
n_parallel=None,
build_kwargs=None,
build_func="default",
do_fork=False,
runtime=None,
):
# pylint: disable=too-many-function-args
super().__init__(timeout, n_parallel, build_kwargs, build_func, do_fork, runtime)
self.build_func = OverwrittenBuildFunc(tar.tar, runtime)
def runner(target):
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
# Note: we use the MockedLocalBuilder here instead of autotvm.LocalBuilder()
measure_option = autotvm.measure_option(MockedLocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=1,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 1
run_test_with_all_multiprocessing(runner, target)
def test_tuning_cpu():
"""Test tuning on cpu."""
def runner():
ir_mod = tvm.parser.fromtext(
textwrap.dedent(
"""
#[version = "0.0.5"]
def @main(%a : Tensor[(1, 3, 32, 32), float32], %b : Tensor[(3, 3, 5, 5), float32]) {
nn.conv2d(%a, %b, data_layout="NCHW", kernel_layout="OIHW")
}
"""
)
)
tasks = autotvm.task.relay_integration.extract_from_program(
ir_mod, {}, tvm.target.create("llvm")
)
assert len(tasks) == 1, f"Extracted != 1 task from program: {tasks!r}"
task = tasks[0]
measure_option = autotvm.measure_option(autotvm.LocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=20,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 20
successful_results = [r for r in results if r.error_no == autotvm.MeasureErrorNo.NO_ERROR]
assert len(successful_results) > 0, f"No successful tuning runs: {results!r}"
run_test_with_all_multiprocessing(runner)
if __name__ == "__main__":
tvm.testing.main()

tests/python/integration/test_lower.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test workload for lowering and build."""
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
@T.prim_func
def tensorcore_gemm(handle_a: T.handle, handle_b: T.handle, handle_c: T.handle) -> None:
# pylint: disable=missing-function-docstring
# match buffer
match_buffer_a = T.match_buffer(handle_a, [1024, 1024], "float16")
match_buffer_b = T.match_buffer(handle_b, [1024, 1024], "float16")
match_buffer_c = T.match_buffer(handle_c, [1024, 1024], "float32")
# body
for block_idx_x in T.thread_binding(0, 16, "blockIdx.x"):
for block_idx_y in T.thread_binding(0, 8, "blockIdx.y"):
with T.block():
axis_bx, axis_by = T.axis.remap("SS", [block_idx_x, block_idx_y])
shared_a = T.alloc_buffer([1024, 1024], "float16", scope="shared")
shared_b = T.alloc_buffer([1024, 1024], "float16", scope="shared")
wmma_a = T.alloc_buffer([1024, 1024], "float16", scope="wmma.matrix_a")
wmma_b = T.alloc_buffer([1024, 1024], "float16", scope="wmma.matrix_b")
wmma_c = T.alloc_buffer([1024, 1024], "float32", scope="wmma.accumulator")
# pylint: disable=too-many-nested-blocks
for thread_ty in T.thread_binding(0, 2, "threadIdx.y"):
for thread_tz in T.thread_binding(0, 2, "threadIdx.z"):
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(64, axis_bx * 4 + thread_ty * 2 + index_i)
new_axis_vj = T.axis.S(64, axis_by * 8 + thread_tz * 4 + index_jj)
T.reads([])
T.writes(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
match_buffer_c0 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
T.evaluate(
T.tvm_fill_fragment(
match_buffer_c0.data,
16,
16,
16,
index_i * 4 + index_jj,
T.float32(0), # pylint: disable=not-callable
dtype="handle",
)
)
for k_o in range(0, 32):
# copy data from global to shared
for thread_tx in T.thread_binding(0, 32, "threadIdx.x"):
for index_i0, index_j0 in T.grid(1, 4):
for index_j1 in T.vectorized(0, 4):
with T.block():
new_axis_vi = T.axis.S(
1024,
axis_bx * 64
+ thread_ty * 32
+ thread_tx
+ index_i0,
)
new_axis_vj = T.axis.S(
1024,
k_o * 32 + thread_tz * 16 + index_j0 * 4 + index_j1,
)
shared_a[new_axis_vi, new_axis_vj + 8] = match_buffer_a[
new_axis_vi, new_axis_vj
]
for index_i0, index_j0 in T.grid(2, 4):
for index_j1 in T.vectorized(0, 4):
with T.block():
new_axis_vi = T.axis.S(
1024,
axis_by * 128
+ thread_ty * 64
+ thread_tx * 2
+ index_i0,
)
new_axis_vj = T.axis.S(
1024,
k_o * 32 + thread_tz * 16 + index_j0 * 4 + index_j1,
)
shared_b[new_axis_vi, new_axis_vj + 8] = match_buffer_b[
new_axis_vi, new_axis_vj
]
for k_i in range(0, 2):
for index_i in range(0, 2):
with T.block():
new_axis_vi = T.axis.S(
64, axis_bx * 4 + thread_ty * 2 + index_i
)
axis_vk = T.axis.S(64, k_o * 2 + k_i)
T.reads(
shared_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
]
)
T.writes(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
match_buffer_a0 = T.match_buffer(
shared_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
],
(16, 16 + 8),
"float16",
strides=[stride0, stride1],
scope="shared",
offset_factor=1,
)
wmma_a0 = T.match_buffer(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_a",
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
wmma_a0.data,
16,
16,
16,
index_i,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
match_buffer_a0.data,
match_buffer_a0.elem_offset + 8,
match_buffer_a0.strides[0],
1,
dtype="handle",
),
match_buffer_a0.strides[0],
"row_major",
dtype="handle",
)
)
for index_jj in range(0, 4):
with T.block():
new_axis_vj = T.axis.S(
64, axis_by * 8 + thread_tz * 4 + index_jj
)
axis_vk = T.axis.S(64, k_o * 2 + k_i)
T.reads(
shared_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
]
)
T.writes(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
match_buffer_b0 = T.match_buffer(
shared_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16 + 8,
],
(16, 16 + 8),
"float16",
strides=[stride0, stride1],
scope="shared",
offset_factor=1,
)
wmma_b0 = T.match_buffer(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_b",
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
wmma_b0.data,
16,
16,
16,
index_jj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
match_buffer_b0.data,
match_buffer_b0.elem_offset + 8,
match_buffer_b0.strides[0],
1,
dtype="handle",
),
match_buffer_b0.strides[0],
"col_major",
dtype="handle",
)
)
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(
64, axis_bx * 4 + thread_ty * 2 + index_i
)
new_axis_vj = T.axis.S(
64, axis_by * 8 + thread_tz * 4 + index_jj
)
axis_vk = T.axis.R(64, k_o * 2 + k_i)
T.reads(
[
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
]
)
T.writes(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
wmma_a1 = T.match_buffer(
wmma_a[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_a",
offset_factor=1,
)
wmma_b1 = T.match_buffer(
wmma_b[
new_axis_vj * 16 : new_axis_vj * 16 + 16,
axis_vk * 16 : axis_vk * 16 + 16,
],
(16, 16),
"float16",
strides=[16, 1],
scope="wmma.matrix_b",
offset_factor=1,
)
wmma_c1 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
T.evaluate(
T.tvm_mma_sync(
wmma_c1.data,
index_i * 4 + index_jj,
wmma_a1.data,
index_i,
wmma_b1.data,
index_jj,
wmma_c1.data,
index_i * 4 + index_jj,
dtype="handle",
)
)
for index_i, index_jj in T.grid(2, 4):
with T.block():
new_axis_vi = T.axis.S(64, axis_bx * 4 + thread_ty * 2 + index_i)
new_axis_vj = T.axis.S(64, axis_by * 8 + thread_tz * 4 + index_jj)
T.reads(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
T.writes(
match_buffer_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
]
)
stride0 = T.var("int32")
stride1 = T.var("int32")
wmma_c2 = T.match_buffer(
wmma_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[16 * 4, 1],
scope="wmma.accumulator",
offset_factor=1,
)
match_buffer_c1 = T.match_buffer(
match_buffer_c[
new_axis_vi * 16 : new_axis_vi * 16 + 16,
new_axis_vj * 16 : new_axis_vj * 16 + 16,
],
(16, 16),
"float32",
strides=[stride0, stride1],
offset_factor=1,
)
T.evaluate(
T.tvm_store_matrix_sync(
wmma_c2.data,
16,
16,
16,
index_i * 4 + index_jj,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
match_buffer_c1.data,
match_buffer_c1.elem_offset,
match_buffer_c1.strides[0],
1,
dtype="handle",
),
match_buffer_c1.strides[0],
"row_major",
dtype="handle",
)
)
@tvm.testing.requires_cuda
def test_gemm_tensorcore():
"""Test running gemm on tensorcore."""
dev = tvm.device("cuda", 0)
a_np = np.random.uniform(size=(1024, 1024)).astype("float16")
b_np = np.random.uniform(size=(1024, 1024)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.T.astype("float32"))
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros((1024, 1024), dtype="float32"), dev)
myfunc = tvm.build(tensorcore_gemm, target="cuda", name="dense")
myfunc(buff_a, buff_b, buff_c)
tvm.testing.assert_allclose(buff_c.numpy(), c_np, rtol=1e-3)
evaluator = myfunc.time_evaluator(myfunc.entry_name, dev, number=100)
time_elapsed = evaluator(buff_a, buff_b, buff_c).mean
num_flops = 2 * 1024 * 1024 * 1024
gflops = num_flops / (time_elapsed * 1e3) / 1e6
print("gemm with tensor core: %f ms" % (time_elapsed * 1e3))
print("GFLOPS: %f" % gflops)
if __name__ == "__main__":
test_gemm_tensorcore()

tests/python/integration/test_reduce.py (repo: https://github.com/zk-ml/tachikoma)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test scheduling of reduction operations."""
import pytest
import numpy as np
import tvm
from tvm import te, topi
from tvm.driver.build_module import schedule_to_module
import tvm.testing
import tvm.topi.testing
@tvm.testing.requires_gpu
def test_reduce_prims():
"""Test reduction operations."""
def test_prim(reducer, np_reducer):
# graph
size_var_n = tvm.te.size_var("n")
size_var_m = tvm.te.size_var("m")
placeholder_a = te.placeholder((size_var_n, size_var_m), name="A")
result_r = te.compute((size_var_n,), lambda i: tvm.tir.Select((i > 1), 1, 0), name="R")
axis_k = te.reduce_axis((0, size_var_m))
result_b = te.compute(
(size_var_n,),
lambda i: reducer(placeholder_a[i, axis_k], axis=axis_k, where=(result_r[i] == 1)),
name="B",
)
# schedule
schedule = te.create_schedule(result_b.op)
# create iter var and assign them tags.
num_thread = 1
axis_x0, axis_x1 = schedule[result_b].split(result_b.op.axis[0], factor=num_thread)
schedule[result_b].bind(axis_x0, te.thread_axis("blockIdx.x"))
schedule[result_b].bind(axis_x1, te.thread_axis("threadIdx.x"))
schedule[result_r].compute_inline()
# one line to build the function.
def check_device(device, host="llvm"):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
freduce = tvm.build(
schedule,
args=[placeholder_a, result_b],
target=tvm.target.Target(device, host),
name="myreduce",
)
# launch the kernel.
num_n = 1028
num_m = 129
buff_x = tvm.nd.array(
np.random.uniform(size=(num_n, num_m)).astype(placeholder_a.dtype), dev
)
buff_y = tvm.nd.array(np.zeros(num_n, dtype=result_b.dtype), dev)
freduce(buff_x, buff_y)
npy = buff_y.numpy()
npy[:2] = 0
res = np_reducer(buff_x.numpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(npy, res, rtol=1e-4)
check_device("metal")
check_device("vulkan")
check_device("cuda")
check_device("opencl")
check_device("rocm")
test_prim(te.sum, np.sum)
test_prim(tvm.te.min, np.amin)
test_prim(tvm.te.max, np.amax)
def test_init_imm():
"""Test initial values which are immutable in reduction ops."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(), lambda: te.sum(placeholder_a[axis_k], axis=axis_k, init=10.0), name="B"
)
# schedule
schedule_s = te.create_schedule(result_b.op)
# one line to build the function.
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule_s, args=[placeholder_a, result_b])
fsum = tvm.build(fapi, target=target, name="mysum")
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = 10.0 + np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
def test_init():
"""Test initializer which is non-const."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length, arr_length), name="A")
placeholder_c = te.placeholder((arr_length, arr_length), name="C")
placeholder_i = te.placeholder((arr_length, arr_length), name="I")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(arr_length, arr_length),
lambda i, j: te.sum(
placeholder_a[i, axis_k] * placeholder_c[axis_k, j],
axis=axis_k,
init=placeholder_i[i, j],
),
name="B",
)
# schedule
schedule = te.create_schedule(result_b.op)
# one line to build the function.
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_c, placeholder_i, result_b])
print(fapi)
mmult = tvm.build(fapi, target=target, name="mmult")
# launch the kernel.
buff_a = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_a.dtype), dev
)
buff_c = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_c.dtype), dev
)
buff_i = tvm.nd.array(np.random.uniform(size=(num_n, num_n)).astype(result_b.dtype), dev)
buf_b = tvm.nd.array(np.zeros((num_n, num_n), dtype=result_b.dtype), dev)
mmult(buff_a, buff_c, buff_i, buf_b)
res = buff_i.numpy() + np.matmul(buff_a.numpy(), buff_c.numpy())
tvm.testing.assert_allclose(buf_b.numpy(), res, rtol=1e-4)
check_target()
def test_rfactor():
"""Test rfactors."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
placeholder_b = te.compute((), lambda: te.sum(placeholder_a[axis_k], axis=axis_k), name="B")
# schedule
schedule = te.create_schedule(placeholder_b.op)
axis_kf, _ = schedule[placeholder_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(placeholder_b, axis_kf)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
if not tvm.testing.device_enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_b])
fsum = tvm.build(fapi, target=target, name="mysum")
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=placeholder_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
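# --- Illustrative sketch (not part of the original test file) ---
# rfactor splits the reduction into two stages: the factored stage computes
# partial sums over chunks of the reduction axis, and the original stage sums
# those partials. A rough numpy analogue of the nparts=4 schedule above,
# assuming zero-padding so the length divides evenly, is:
def _rfactor_sum_reference_sketch(values, nparts=4):
    """Hypothetical two-stage sum mirroring the rfactor decomposition."""
    chunk = (len(values) + nparts - 1) // nparts
    padded = np.zeros(chunk * nparts, dtype=values.dtype)
    padded[: len(values)] = values
    partial = padded.reshape(nparts, chunk).sum(axis=1)  # factored stage
    return partial.sum()  # original stage reduces the partials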
def test_rfactor_init():
"""Test rfactors with constant inits."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length, arr_length), name="A")
placeholder_c = te.placeholder((arr_length, arr_length), name="C")
placeholder_i = te.placeholder((arr_length, arr_length), name="I")
axis_k = te.reduce_axis((0, arr_length))
result_b = te.compute(
(arr_length, arr_length),
lambda i, j: te.sum(
placeholder_a[i, axis_k] * placeholder_c[axis_k, j],
axis=axis_k,
init=placeholder_i[i, j],
),
name="B",
)
# schedule
schedule = te.create_schedule(result_b.op)
axis_kf, _ = schedule[result_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(result_b, axis_kf, 1)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
if not tvm.runtime.enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_c, placeholder_i, result_b])
print(fapi)
mmult = tvm.build(fapi, target=target, name="mmult")
# launch the kernel.
buff_a = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_a.dtype), dev
)
buff_c = tvm.nd.array(
np.random.uniform(size=(num_n, num_n)).astype(placeholder_c.dtype), dev
)
buff_i = tvm.nd.array(np.random.uniform(size=(num_n, num_n)).astype(result_b.dtype), dev)
buff_b = tvm.nd.array(np.zeros((num_n, num_n), dtype=result_b.dtype), dev)
mmult(buff_a, buff_c, buff_i, buff_b)
res = buff_i.numpy() + np.matmul(buff_a.numpy(), buff_c.numpy())
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
def test_rfactor_factor_axis():
"""Test rfactors across axis."""
num_n = 1027
arr_length = tvm.runtime.convert(num_n)
placeholder_a = te.placeholder((arr_length,), name="A")
axis_k = te.reduce_axis((0, arr_length))
placeholder_b = te.compute((), lambda: te.sum(placeholder_a[axis_k], axis=axis_k), name="B")
# schedule
schedule = te.create_schedule(placeholder_b.op)
axis_kf, _ = schedule[placeholder_b].split(axis_k, nparts=4)
rfactor_bf = schedule.rfactor(placeholder_b, axis_kf, 0)
schedule[rfactor_bf].parallel(rfactor_bf.op.axis[0])
# one line to build the function.
def check_target(target="llvm"):
if not tvm.testing.device_enabled(target):
return
dev = tvm.cpu(0)
fapi = tvm.lower(schedule, args=[placeholder_a, placeholder_b])
fsum = tvm.build(fapi, target=target, name="mysum")
# launch the kernel.
buff_a = tvm.nd.array(np.random.uniform(size=(num_n,)).astype(placeholder_a.dtype), dev)
buff_b = tvm.nd.array(np.zeros((), dtype=placeholder_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=0)
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target()
@tvm.testing.requires_gpu
def test_rfactor_threads():
"""Test rfactors across threads."""
num_n = 1027
num_m = 10
length_n = tvm.runtime.convert(num_n)
length_m = tvm.runtime.convert(num_m)
placeholder_a = te.placeholder((length_m, length_n), name="A")
axis_k = te.reduce_axis((0, length_n))
nthread = 16
result_b = te.compute(
(length_m,),
lambda i: te.sum(placeholder_a[i, axis_k], axis=axis_k, where=(i > 1)),
name="B",
)
# schedule
schedule = te.create_schedule(result_b.op)
_, axis_kf = schedule[result_b].split(axis_k, factor=nthread)
rfactor_bf = schedule.rfactor(result_b, axis_kf)
axis_bx, axis_ty = schedule[result_b].split(schedule[result_b].op.axis[0], factor=nthread)
schedule[result_b].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_b].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_tx, thread_x)
schedule[rfactor_bf].compute_at(schedule[result_b], axis_tx)
schedule[result_b].set_store_predicate(thread_x.var.equal(0))
# one line to build the function.
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a, result_b])
fsum = tvm.build(fapi, target=device, name="mysum")
# launch the kernel.
buff_a = tvm.nd.array(
np.random.uniform(size=(num_m, num_n)).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(num_m, dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=1)
res[:2] = 0
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
check_target("rocm")
@tvm.testing.requires_gpu
def test_rfactor_elemwise_threads():
"""Test rfactor elemwise threads."""
num_n = 1025
num_m = 10
placeholder_a = te.placeholder((num_m, num_n), name="A")
axis_k = te.reduce_axis((0, num_n))
nthread = 16
result_b = te.compute(
(num_m,), lambda i: te.sum(placeholder_a[i, axis_k], axis=axis_k), name="B"
)
result_bb = te.compute((num_m,), lambda i: result_b[i] + 1, name="BB")
result_c = te.compute((num_m,), lambda i: result_bb[i] + 1, name="C")
# schedule
schedule = te.create_schedule(result_c.op)
schedule[result_bb].compute_inline()
axis_bx, axis_ty = schedule[result_c].split(schedule[result_c].op.axis[0], factor=nthread)
_, axis_kf = schedule[result_b].split(axis_k, factor=nthread)
rfactor_bf = schedule.rfactor(result_b, axis_kf)
schedule[result_b].compute_at(schedule[result_c], axis_ty)
schedule[result_c].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_c].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b].bind(axis_tx, thread_x)
schedule[rfactor_bf].compute_at(schedule[result_b], axis_tx)
# Since thread_x is shared across reductions,
# only one of them needs to do the write-back.
schedule[result_b].set_store_predicate(thread_x.var.equal(0))
schedule[result_c].set_store_predicate(thread_x.var.equal(0))
# one line to build the function.
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a, result_c])
fsum = tvm.build(fapi, target=device, name="mysum")
# launch the kernel.
buff_a = tvm.nd.array(
np.random.uniform(size=(num_m, num_n)).astype(placeholder_a.dtype), dev
)
buff_b = tvm.nd.array(np.zeros(num_m, dtype=result_b.dtype), dev)
fsum(buff_a, buff_b)
res = np.sum(buff_a.numpy(), axis=1) + 2
tvm.testing.assert_allclose(buff_b.numpy(), res, rtol=1e-4)
check_target("vulkan")
check_target("cuda")
check_target("metal")
check_target("opencl")
check_target("rocm")
def test_argmax():
"""Test argmax."""
def fcombine(tensor_x, tensor_y):
lhs = tvm.tir.Select((tensor_x[1] >= tensor_y[1]), tensor_x[0], tensor_y[0])
rhs = tvm.tir.Select((tensor_x[1] >= tensor_y[1]), tensor_x[1], tensor_y[1])
return lhs, rhs
def fidentity(tensor1, tensor2):
return tvm.tir.const(-1, tensor1), tvm.te.min_value(tensor2)
argmax = te.comm_reducer(fcombine, fidentity, name="argmax")
size_var_m = te.size_var("m")
size_var_n = te.size_var("n")
idx = te.placeholder((size_var_m, size_var_n), name="idx", dtype="int32")
val = te.placeholder((size_var_m, size_var_n), name="val", dtype="float32")
axis_k = te.reduce_axis((0, size_var_n), "k")
result_t0, result_t1 = te.compute(
(size_var_m,), lambda i: argmax((idx[i, axis_k], val[i, axis_k]), axis=axis_k), name="T"
)
schedule = te.create_schedule(result_t0.op)
def check_target():
device = "cpu"
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
dev = tvm.device(device, 0)
fapi = tvm.lower(schedule, args=[idx, val, result_t0, result_t1])
fargmax = tvm.build(fapi, target="llvm", name="argmax")
height = 12
width = 16
np_idx = np.repeat(np.arange(width, dtype="int32").reshape(1, width), height, axis=0)
np_val = np.random.uniform(size=(height, width)).astype("float32")
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, dev)
nd_val = tvm.nd.array(np_val, dev)
nd_res0 = tvm.nd.array(np.zeros(height, dtype="int32"), dev)
nd_res1 = tvm.nd.array(np.zeros(height, dtype="float32"), dev)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.numpy())
check_target()
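# --- Illustrative sketch (not part of the original test file) ---
# The comm_reducer above reduces (index, value) pairs: fcombine keeps the pair
# with the larger value, and fidentity supplies the identity pair
# (-1, min_value). A hypothetical numpy equivalent for a single row is:
def _argmax_pair_reference_sketch(idx_row, val_row):
    """Return the (index, value) pair of the maximum, mirroring the reducer."""
    k = int(np.argmax(val_row))
    return idx_row[k], val_row[k]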
@tvm.testing.requires_gpu
def test_rfactor_argmax():
"""Test rfactor argmax"""
def fcombine(tensor0, tensor1):
lhs = tvm.tir.Select((tensor0[1] >= tensor1[1]), tensor0[0], tensor1[0])
rhs = tvm.tir.Select((tensor0[1] >= tensor1[1]), tensor0[1], tensor1[1])
return lhs, rhs
def fidentity(tensor0, tensor1):
return tvm.tir.const(-1, tensor0), tvm.te.min_value(tensor1)
argmax = te.comm_reducer(fcombine, fidentity, name="argmax")
num_width = 1027
num_height = 10
width = tvm.runtime.convert(num_width)
height = tvm.runtime.convert(num_height)
placeholder_a0 = te.placeholder((height, width), name="A0", dtype="int32")
placeholder_a1 = te.placeholder((height, width), name="A1", dtype="float32")
axis_k = te.reduce_axis((0, width))
result_b0, result_b1 = te.compute(
(height,),
lambda i: argmax((placeholder_a0[i, axis_k], placeholder_a1[i, axis_k]), axis=axis_k),
name="B",
)
# schedule
schedule = te.create_schedule(result_b0.op)
nthread = 16
_, axis_kf = schedule[result_b0].split(axis_k, factor=nthread)
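    # Note (added): for a tuple reduction, rfactor returns one factored tensor per
    # output; the outputs share a single op, so scheduling through the first covers both.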
rfactor_bf0, _ = schedule.rfactor(result_b0, axis_kf)
axis_bx, axis_ty = schedule[result_b0].split(schedule[result_b0].op.axis[0], factor=nthread)
schedule[result_b0].bind(axis_bx, te.thread_axis("blockIdx.x"))
schedule[result_b0].bind(axis_ty, te.thread_axis("threadIdx.y"))
axis_tx = schedule[result_b0].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
schedule[result_b0].bind(axis_tx, thread_x)
schedule[rfactor_bf0.op].compute_at(schedule[result_b0], axis_tx)
schedule[result_b0].set_store_predicate(thread_x.var.equal(0))
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fapi = tvm.lower(schedule, args=[placeholder_a0, placeholder_a1, result_b0, result_b1])
fargmax = tvm.build(fapi, target=device, name="argmax")
np_idx = np.repeat(
np.arange(num_width, dtype="int32").reshape(1, num_width), num_height, axis=0
)
np_val = np.random.uniform(size=(num_height, num_width)).astype("float32")
np_res = np.argmax(np_val, axis=1)
nd_idx = tvm.nd.array(np_idx, dev)
nd_val = tvm.nd.array(np_val, dev)
nd_res0 = tvm.nd.array(np.zeros(num_height, dtype="int32"), dev)
nd_res1 = tvm.nd.array(np.zeros(num_height, dtype="float32"), dev)
fargmax(nd_idx, nd_val, nd_res0, nd_res1)
tvm.testing.assert_allclose(np_res, nd_res0.numpy())
check_target("cuda")
check_target("vulkan")
check_target("rocm")
@tvm.testing.requires_gpu
def test_warp_reduction1():
"""Test warp reductions."""
nthx = 32
nthy = 4
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, nthx), "threadIdx.x")
thread_y = te.thread_axis((0, nthy), "threadIdx.y")
def check_target(device, m, n):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
# compute
placeholder_a = te.placeholder((m, n), name="A")
axis_k = te.reduce_axis((0, n))
placeholder_b = te.compute(
(m,), lambda i: te.max(placeholder_a[i][axis_k], axis=axis_k), name="B"
)
schedule = te.create_schedule(placeholder_b.op)
# schedule
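        # Note (added): the outer part of the reduction axis is bound to threadIdx.x
        # so the cross-thread reduction can be lowered to warp-level primitives on
        # CUDA/ROCm.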
axis_k = schedule[placeholder_b].op.reduce_axis[0]
axis_ko, _ = schedule[placeholder_b].split(axis_k, nparts=nthx)
schedule[placeholder_b].bind(axis_ko, thread_x)
axis_xo, axis_xi = schedule[placeholder_b].split(
schedule[placeholder_b].op.axis[0], factor=nthy
)
schedule[placeholder_b].bind(axis_xi, thread_y)
schedule[placeholder_b].bind(axis_xo, block_x)
tvm.lower(schedule, [placeholder_a, placeholder_b], simple_mode=True)
# validation
func = tvm.build(schedule, [placeholder_a, placeholder_b], device, name="warp_reduction")
a_np = np.random.uniform(size=(m, n)).astype(placeholder_a.dtype)
b_np = np.zeros((m,), dtype=placeholder_a.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(b_np, dev)
b_np = np.max(a_np, axis=1)
func(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), b_np, rtol=1e-3, atol=1e-3)
check_target("cuda", m=32, n=256)
check_target("cuda", m=10, n=20)
check_target("rocm", m=32, n=256)
check_target("rocm", m=10, n=20)
# This is a bug in normal reduction.
# check_target("cuda", m=10, n=37)
@tvm.testing.requires_gpu
def test_warp_reduction2():
"""Test warp reductions."""
def fcombine(tensor1, tensor2):
return tensor1[0] + tensor2[0], tensor1[1] * tensor2[1]
def fidentity(tensor1, tensor2):
return tvm.tir.const(0, tensor1), tvm.tir.const(1, tensor2)
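    # Note (added): add_mul_reducer reduces two tensors in lock-step: the first
    # output accumulates a sum and the second a product.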
add_mul_reducer = te.comm_reducer(fcombine, fidentity, name="add_mul_reducer")
# compute
num_m = 16
num_n = 256
placeholder_a0 = te.placeholder((num_m, num_n), name="A0", dtype="float32")
    placeholder_a1 = te.placeholder((num_m, num_n), name="A1", dtype="float32")
axis_k = te.reduce_axis((0, num_n), "k")
result0, result1 = te.compute(
(num_m,),
lambda i: add_mul_reducer(
(placeholder_a0[i, axis_k], placeholder_a1[i, axis_k]), axis=axis_k
),
name="T",
)
nthdx, nthdy = 32, 2
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis((0, nthdx), "threadIdx.x")
thread_y = te.thread_axis((0, nthdy), "threadIdx.y")
def check_target(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
# schedule
schedule = te.create_schedule(result0.op)
axis_ko, _ = schedule[result0].split(axis_k, nparts=nthdx)
axis_xo, axis_xi = schedule[result0].split(schedule[result0].op.axis[0], factor=nthdy)
schedule[result0].bind(axis_ko, thread_x)
schedule[result0].bind(axis_xi, thread_y)
schedule[result0].bind(axis_xo, block_x)
# validation
dev = tvm.device(device, 0)
a0_np = np.random.uniform(size=(num_m, num_n)).astype(placeholder_a0.dtype)
a1_np = np.random.uniform(size=(num_m, num_n)).astype(placeholder_a1.dtype)
t0_np = np.zeros((num_m,), dtype=placeholder_a0.dtype)
t1_np = np.zeros((num_m,), dtype=placeholder_a1.dtype)
buff_a0 = tvm.nd.array(a0_np, dev)
buff_a1 = tvm.nd.array(a1_np, dev)
buff_t0 = tvm.nd.array(t0_np, dev)
buff_t1 = tvm.nd.array(t1_np, dev)
func = tvm.build(
schedule, [placeholder_a0, placeholder_a1, result0, result1], device, name="reduction"
)
func(buff_a0, buff_a1, buff_t0, buff_t1)
t0_np = np.sum(a0_np, axis=1)
        t1_np = np.prod(a1_np, axis=1)
tvm.testing.assert_allclose(buff_t0.numpy(), t0_np, rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(buff_t1.numpy(), t1_np, rtol=1e-3, atol=1e-3)
check_target("cuda")
check_target("rocm")
@tvm.testing.requires_cuda
def test_reduce_storage_reuse():
"""Test reduction reuses storage."""
target = tvm.target.Target("cuda")
def run_passes(sch, args):
mod = schedule_to_module(sch, args)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
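        # Note (added): run just enough of the lowering pipeline for StorageRewrite
        # to rewrite buffers before LowerThreadAllreduce, which the checks below inspect.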
return tvm.transform.Sequential(
[
tvm.tir.transform.StorageFlatten(64),
tvm.tir.transform.Simplify(),
tvm.tir.transform.StorageRewrite(),
tvm.tir.transform.LowerThreadAllreduce(),
]
)(mod)
dev = tvm.device(target.kind.name, 0)
shape = (16, 16)
placeholder_a = te.placeholder(shape, dtype="float32", name="A")
placeholder_b = topi.nn.softmax(placeholder_a, axis=1) + 1.0
with tvm.target.Target(target):
schedule = topi.cuda.schedule_softmax(placeholder_b)
mod = run_passes(schedule, [placeholder_a, placeholder_b])
# Due to the storage rewrite pass, the reduction output storage reduce_temp0 can be reused as
# the storage of the next compute.
# Example:
# ...
# tir.tvm_thread_allreduce((uint32)1, normal_reduce_temp0[0], 1, reduce_temp0, threadIdx.x)
# if ((threadIdx.x < 16)) {
# reduce_temp0[0] = (T_softmax_exp[threadIdx.x]/reduce_temp0[0])
# }
# ...
# The LowerThreadAllreduce pass should remap reduce_temp0 on the left hand side of the store
# above, as well as the load on the right hand side.
# Expected output:
# ...
# red_buf0[0] = tir.tvm_warp_shuffle(mask[0], red_buf0[0], 0, 32, 32)
# if ((threadIdx.x < 16)) {
# red_buf0[0] = (T_softmax_exp[threadIdx.x]/red_buf0[0])
# }
# ...
def check_store_dst_remapped(op):
if isinstance(op, tvm.tir.Store):
assert op.buffer_var.name != "reduce_temp0"
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, check_store_dst_remapped)
inp = np.random.uniform(size=shape).astype("float32")
ref = tvm.topi.testing.softmax_python(inp) + 1.0
func = tvm.build(schedule, [placeholder_a, placeholder_b], target)
buff_a = tvm.nd.array(inp, dev)
buff_b = tvm.nd.array(np.zeros(shape, dtype=placeholder_b.dtype), dev)
func(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), ref, rtol=1e-5)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/integration/test_scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test scheduling adn running scan operators."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
@tvm.testing.requires_gpu
def test_scan():
"""Test scan operators."""
size_var_m = te.size_var("m")
size_var_n = te.size_var("n")
placeholder_x = te.placeholder((size_var_m, size_var_n), name="X")
s_state = te.placeholder((size_var_m, size_var_n))
s_init = te.compute((1, size_var_n), lambda _, i: placeholder_x[0, i])
s_update = te.compute(
(size_var_m, size_var_n), lambda t, i: s_state[t - 1, i] + placeholder_x[t, i]
)
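    # Note (added): s_init seeds row 0 with X[0, :] and s_update adds X[t, :] to the
    # previous state row, so the scan computes a cumulative sum along axis 0.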
scan = tvm.te.scan(s_init, s_update, s_state)
# test scan + compute case
res = te.compute((size_var_m, size_var_n), lambda i, j: scan[i, j])
# schedule
schedule = te.create_schedule(res.op)
num_thread = 256
block_x = te.thread_axis(None, "blockIdx.x")
thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
axis_xo, axis_xi = schedule[s_init].split(s_init.op.axis[1], factor=num_thread)
schedule[s_init].bind(axis_xo, block_x)
schedule[s_init].bind(axis_xi, thread_x)
axis_xo, axis_xi = schedule[s_update].split(s_update.op.axis[1], factor=num_thread)
schedule[s_update].bind(axis_xo, block_x)
schedule[s_update].bind(axis_xi, thread_x)
axis_xo, axis_xi = schedule[res].split(res.op.axis[1], factor=num_thread)
schedule[res].bind(axis_xo, block_x)
schedule[res].bind(axis_xi, thread_x)
# one line to build the function.
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("skip because %s is not enabled.." % device)
return
fscan = tvm.build(schedule, [placeholder_x, res], device, name="myscan")
# launch the kernel.
num_n = 1024
num_m = 10
a_np = np.random.uniform(size=(num_m, num_n)).astype(res.dtype)
buff_a = tvm.nd.array(a_np, dev)
buff_b = tvm.nd.array(np.zeros((num_m, num_n), dtype=res.dtype), dev)
fscan(buff_a, buff_b)
tvm.testing.assert_allclose(buff_b.numpy(), np.cumsum(a_np, axis=0))
check_device("vulkan")
check_device("cuda")
check_device("metal")
check_device("opencl")
if __name__ == "__main__":
test_scan()
| https://github.com/zk-ml/tachikoma |
tests/python/integration/test_tuning.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import logging
import tempfile
from typing import List, Optional
import numpy as np # type: ignore
import pytest
import tvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm.contrib import graph_executor
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import generate_input_data
from tvm.target.target import Target
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
@pytest.mark.skip("Integration test")
@pytest.mark.parametrize(
"model_name, input_shape, data_type, target, layout",
[
("resnet_18", [1, 3, 224, 224], "float32", "llvm --num-cores=12", "NHWC"),
("resnet_18", [1, 3, 224, 224], "float32", "nvidia/geforce-rtx-3070", "NHWC"),
],
)
def test_meta_schedule_tune_relay(
model_name: str,
input_shape: List[int],
data_type: str,
target: str,
layout: Optional[str],
):
dev = tvm.cpu() if str(target).startswith("llvm") else tvm.cuda()
data = generate_input_data(input_shape, data_type)
mod, params, (input_name, _, _) = get_network(
name=model_name,
input_shape=input_shape,
layout=layout,
)
target = Target(target)
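    # Note (added): tune the Relay module into a database of schedules, then compile
    # it with the best records found during tuning.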
with tempfile.TemporaryDirectory() as work_dir:
with ms.Profiler() as profiler:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target,
params=params,
work_dir=work_dir,
max_trials_global=2048,
)
rt_mod1 = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=target,
params=params,
)
print(profiler.table())
def get_output(data, lib, dev):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input(input_name, tvm.nd.array(data, device=dev))
module.run()
return module.get_output(0).numpy()
# Check correctness
actual_output = get_output(data, rt_mod1, dev)
print(
f"{model_name} finished tuning and running on {Target(target).kind.name}. "
"Running baseline...",
flush=True,
)
# Compile without meta-schedule for correctness check
baseline_target = "llvm -num-cores=1"
with tvm.transform.PassContext(opt_level=0):
rt_mod2 = relay.build(mod, target=baseline_target, params=params)
expected_output = get_output(data, rt_mod2, tvm.cpu())
print(
f"Basline finished running on {Target(baseline_target).kind.name}. "
"Verifying correctness...",
flush=True,
)
assert np.allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
print(
f"Correctness verified for {model_name} on {Target(target).kind.name}.",
flush=True,
)
if __name__ == """__main__""":
test_meta_schedule_tune_relay(
"resnet_18", [1, 3, 224, 224], "float32", "llvm --num-cores=12", "NHWC"
)
test_meta_schedule_tune_relay(
"resnet_18", [1, 3, 224, 224], "float32", "nvidia/geforce-rtx-3070", None
)
| https://github.com/zk-ml/tachikoma |
tests/python/integration/test_winograd_nnpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test winograd convolution using nnpack impl."""
import numpy as np
from pytest import skip
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm.contrib import nnpack
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
def verify_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
devices,
dilation=1,
add_bias=False,
add_relu=False,
):
"""Verify conv2d nchw workload."""
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation)
)
in_height = in_width = in_size
    placeholder_a = te.placeholder((batch, in_channel, in_height, in_width), name="A")
placeholder_w = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
    a_shape = get_const_tuple(placeholder_a.shape)
    w_shape = get_const_tuple(placeholder_w.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = placeholder_a.dtype
@memoize("topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skipping %s becuase it is not enabled" % device)
print("Running on target: %s" % device)
with tvm.target.Target(device):
result_c = topi.nn.conv2d(
                placeholder_a,
placeholder_w,
stride,
padding,
dilation,
data_layout="NCHW",
out_dtype=dtype,
)
if add_bias:
result_c = topi.add(result_c, bias)
if add_relu:
result_c = topi.nn.relu(result_c)
schedule = topi.generic.schedule_conv2d_nchw([result_c])
buff_a = tvm.nd.array(a_np, dev)
buff_w = tvm.nd.array(w_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros(get_const_tuple(result_c.shape), dtype=result_c.dtype), dev)
if add_bias:
func = tvm.build(
schedule,
                [placeholder_a, placeholder_w, bias, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_b, buff_c)
else:
func = tvm.build(
schedule,
                [placeholder_a, placeholder_w, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_c)
tvm.testing.assert_allclose(buff_c.numpy(), c_np, rtol=1e-4)
for device in devices:
check_device(device)
class WinogradFallback(autotvm.FallbackContext):
"""Winograd fallbacks."""
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = FallbackConfigEntity()
cfg.template_key = "winograd_nnpack_fp32"
self.memory[key] = cfg
return cfg
def test_conv2d_nchw():
"""Verify conv2d nchw winograd works."""
if not tvm.get_global_func(
"tvm.contrib.nnpack.convolution_inference_without_weight_transform", True
):
skip("extern function is not available")
if not nnpack.is_available():
skip("nnpack is not available")
devices = ["llvm -device=arm_cpu"]
autotvm.GLOBAL_SCOPE.silent = True
with WinogradFallback():
# resnet 18 workloads
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1, devices=devices)
# unet workloads
verify_conv2d_nchw(1, 3, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 4, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 96, 24, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 24, 48, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 24, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 12, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 6, 220, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 220, 6, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 12, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 24, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 48, 24, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 24, 96, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 192, 1, 3, 1, 1, add_bias=True, devices=devices)
# relu, bias
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True, devices=devices)
        # weird workloads
verify_conv2d_nchw(1, 3, 3, 3, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 13, 71, 59, 3, 1, 1, devices=devices)
autotvm.GLOBAL_SCOPE.silent = False
if __name__ == "__main__":
import pytest
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/nightly/quantization/test_quantization_accuracy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import tvm
from tvm import relay
from tvm.relay import quantize as qtz
import mxnet as mx
from mxnet import gluon
import logging
import os
import tvm.testing
logging.basicConfig(level=logging.INFO)
Config = namedtuple(
"Config",
[
"model",
"nbit_input",
"dtype_input",
"nbit_output",
"dtype_output",
"global_scale",
"expected_acc",
],
)
def get_val_data(model_name, rec_val, batch_size, num_workers=4):
rec_val = os.path.expanduser(rec_val)
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
return data, label
img_size = 299 if model_name == "inceptionv3" else 224
val_data = mx.io.ImageRecordIter(
path_imgrec=rec_val,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=256,
data_shape=(3, img_size, img_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
)
return val_data, batch_fn
def get_model(model_name, batch_size, qconfig, original=False):
gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
img_size = 299 if model_name == "inceptionv3" else 224
data_shape = (batch_size, 3, img_size, img_size)
mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
logging.debug("original")
logging.debug(mod.astext(show_meta_data=False))
if original:
return mod, params
with qconfig:
logging.debug("current quantize config")
logging.debug(qtz.current_qconfig())
qfunc = qtz.quantize(mod, params)
logging.debug("after quantize")
logging.debug(qfunc.astext(show_meta_data=False))
return qfunc, params
def eval_acc(
model, params, dataset, batch_fn, target=tvm.target.cuda(), device=tvm.cuda(), log_interval=500
):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(model, target, params=params)
# create runtime module
m = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
    # setup evaluation metric
dataset.reset()
batch_size = dataset.batch_size
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
acc_top1.reset()
acc_top5.reset()
# Execute
for i, batch in enumerate(dataset):
data, label = batch_fn(batch, [mx.cpu(0)])
m.set_input("data", tvm.nd.array(data[0].asnumpy()))
m.run()
out_arr = m.get_output(0)
acc_top1.update(label, [mx.nd.array(out_arr.numpy())])
acc_top5.update(label, [mx.nd.array(out_arr.numpy())])
if not (i + 1) % log_interval:
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
nsamples = (i + 1) * batch_size
logging.info("[%d samples] validation: acc-top1=%f acc-top5=%f", nsamples, top1, top5)
logging.info("[final] validation: acc-top1=%f acc-top5=%f", top1, top5)
return top1
@tvm.testing.requires_gpu
def test_quantize_acc(cfg, rec_val):
qconfig = qtz.qconfig(
skip_conv_layers=[0],
nbit_input=cfg.nbit_input,
nbit_weight=cfg.nbit_input,
global_scale=cfg.global_scale,
dtype_input=cfg.dtype_input,
dtype_weight=cfg.dtype_input,
dtype_activation=cfg.dtype_output,
debug_enabled_ops=None,
)
batch_size = 1
model, params = get_model(cfg.model, batch_size, qconfig)
val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)
acc = eval_acc(model, params, val_data, batch_fn)
assert acc > cfg.expected_acc
return acc
if __name__ == "__main__":
# TODO(for user): replace the line with the path to imagenet validation dataset
rec_val = "/scratch/tqchen/imagenet/val.rec"
results = []
configs = [
# TODO: need to fix accuracy and add AutoTVM log
Config(
"mobilenetv2_1.0",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=4.0,
expected_acc=0.666,
),
Config(
"mobilenetv2_1.0",
nbit_input=8,
dtype_input="int8",
nbit_output=16,
dtype_output="int16",
global_scale=4.0,
expected_acc=0.666,
),
Config(
"resnet18_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=16,
dtype_output="int16",
global_scale=8.0,
expected_acc=0.692,
),
Config(
"resnet18_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.692,
),
Config(
"resnet34_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.733,
),
Config(
"resnet50_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.747,
),
Config(
"resnet101_v1",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.756,
),
]
for config in configs:
acc = test_quantize_acc(config, rec_val)
results.append((config, acc))
for res in results:
print(res)
| https://github.com/zk-ml/tachikoma |
tests/python/nightly/quantization/test_quantization_accuracy_for_vit.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import logging
import pytest
pytest.importorskip("onnx")
import onnx
import tvm
from tvm import relay
from tvm.relay import quantize as qtz
import tvm.testing
from test_quantization_accuracy import Config, get_val_data, eval_acc
logging.basicConfig(level=logging.INFO)
def calibrate_dataset(model_name, rec_val, batch_size, calibration_samples):
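    """Yield calibration batches from the validation set until roughly
    `calibration_samples` inputs have been produced."""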
val_data, _ = get_val_data(model_name, rec_val=rec_val, batch_size=batch_size)
val_data.reset()
for i, batch in enumerate(val_data):
if i * batch_size >= calibration_samples:
break
data = batch.data[0].asnumpy()
yield {"data": data}
def download_file(url_base, file_name):
if not os.path.exists(file_name) or not os.path.isfile(file_name):
import urllib.request as urllib2
url = "{}/{}".format(url_base, file_name)
try:
print("download from {}".format(url))
if sys.version_info >= (3,):
urllib2.urlretrieve(url, file_name)
else:
f = urllib2.urlopen(url)
data = f.read()
with open(file_name, "wb") as code:
code.write(data)
except Exception as err:
if os.path.exists(file_name):
os.remove(file_name)
raise Exception("download {} failed due to {}!".format(file_name, repr(err)))
def get_onnx_model(model_name, batch_size, qconfig, original=False, dataset=None):
assert model_name == "vit32", "Only support vit32 model!"
base = "https://github.com/TheGreatCold/tvm-vit/raw/d2aa1e60eef42e2fdedbd1e13aa85ac5faf0a7fc"
logfile = "gtx1660_vit_B32_224.log"
onnx_path = "vit_B32_224.onnx"
download_file(base, logfile)
download_file(base, onnx_path)
onnx_graph = onnx.load(open(onnx_path, "rb"))
data_shape = (batch_size, 3, 224, 224)
mod, params = relay.frontend.from_onnx(onnx_graph, {"data": data_shape})
with tvm.transform.PassContext(opt_level=3):
qfunc = relay.quantize.prerequisite_optimize(mod, params=params)
logging.debug("original")
logging.debug(qfunc.astext(show_meta_data=False))
if original:
return qfunc, params, logfile
with qconfig:
logging.debug("current quantize config")
logging.debug(qtz.current_qconfig())
if dataset is not None:
with tvm.target.cuda():
with tvm.autotvm.apply_history_best(logfile):
qfunc = qtz.quantize(qfunc, params, dataset=dataset)
else:
qfunc = qtz.quantize(qfunc, params)
logging.debug("after quantize")
logging.debug(qfunc.astext(show_meta_data=False))
return qfunc, params, logfile
@tvm.testing.requires_gpu
def test_onnx_quantize_acc(cfg, rec_val, batch_size=1, original=False):
qconfig = qtz.qconfig(
skip_conv_layers=[0],
skip_dense_layer=False,
nbit_input=cfg.nbit_input,
nbit_weight=cfg.nbit_input,
dtype_input=cfg.dtype_input,
dtype_weight=cfg.dtype_input,
dtype_activation=cfg.dtype_output,
debug_enabled_ops=None,
calibrate_mode="percentile",
calibrate_chunk_by=8,
)
dataset = list(calibrate_dataset(cfg.model, rec_val, batch_size, 64))
model, params, logfile = get_onnx_model(
cfg.model, batch_size, qconfig, original=original, dataset=dataset
)
val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)
with tvm.autotvm.apply_history_best(logfile):
acc = eval_acc(model, params, val_data, batch_fn, log_interval=1000)
assert acc > cfg.expected_acc
return acc
if __name__ == "__main__":
# TODO(for user): replace the line with the path to imagenet validation dataset
rec_val = "/scratch/tqchen/imagenet/val.rec"
configs = [
Config(
"vit32",
nbit_input=8,
dtype_input="int8",
nbit_output=32,
dtype_output="int32",
global_scale=8.0,
expected_acc=0.727,
),
]
for config in configs:
# float32 model
acc = test_onnx_quantize_acc(config, rec_val, batch_size=1, original=True)
print("{}-float32: {}".format(config.model, acc))
# int8 model
acc = test_onnx_quantize_acc(config, rec_val, batch_size=1, original=False)
print("{}-int8: {}".format(config.model, acc))
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_aot_create_executor_metadata.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend import Executor
from tvm.relay.backend.aot import CreateExecutorMetadata
from tvm.relay import TensorType
from tvm.tir.usmp.utils import PoolAllocation
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_executor_metadata(executor_metadata, expected_metadata):
assert list(executor_metadata.inputs) == expected_metadata["inputs"]
assert list(executor_metadata.input_tensor_types) == expected_metadata["input_tensor_types"]
assert list(executor_metadata.outputs) == expected_metadata["outputs"]
assert list(executor_metadata.output_tensor_types) == expected_metadata["output_tensor_types"]
assert list(executor_metadata.pools) == expected_metadata["pools"]
assert executor_metadata.devices == expected_metadata["devices"]
assert executor_metadata.executor == expected_metadata["executor"]
assert executor_metadata.mod_name == expected_metadata["mod_name"]
assert executor_metadata.interface_api == expected_metadata["interface_api"]
assert executor_metadata.unpacked_api == expected_metadata["unpacked_api"]
assert executor_metadata.workspace_alignment == expected_metadata["workspace_alignment"]
assert executor_metadata.constant_alignment == expected_metadata["constant_alignment"]
assert set(executor_metadata.pool_inputs.keys()) == set(expected_metadata["pool_inputs"].keys())
assert set(executor_metadata.io_pool_allocations.keys()) == set(
expected_metadata["io_pool_allocations"].keys()
)
def test_create_executor_metadata_single_func():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(
a: T.handle, output: T.handle, workspace: T.Ptr[T.uint8], constants: T.Ptr[T.uint8]
) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
target = Module["__tvm_main__"].attrs["target"]
executor = Executor("aot", {"interface-api": "c"})
workspace_pool_info = AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
3,
)
constant_pool_info = AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
512,
2,
)
io_pool_allocations = {
"a": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
"output": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
}
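    # Note (added): attach the pool allocation info to the module and its main
    # function so that CreateExecutorMetadata can report it in the executor metadata.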
mod = Module.with_attr("io_tensor_pool_allocations", io_pool_allocations)
mod["__tvm_main__"] = mod["__tvm_main__"].with_attr(
"pool_args",
[
constant_pool_info,
workspace_pool_info,
],
)
f = mod["__tvm_main__"]
expected_metadata = {
"inputs": [f.params[0]],
"input_tensor_types": [TensorType((5, 7), "float32")],
"outputs": ["output"],
"output_tensor_types": [TensorType((5, 7), "float32")],
"pools": f.params[2:],
"devices": f.attrs["devices"],
"executor": "aot",
"mod_name": "test_mod",
"interface_api": "c",
"unpacked_api": False,
"workspace_alignment": 16,
"constant_alignment": 1,
"pool_inputs": {
f.params[2]: workspace_pool_info,
f.params[3]: constant_pool_info,
},
"io_pool_allocations": io_pool_allocations,
}
executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
_check_executor_metadata(executor_metadata, expected_metadata)
def test_create_executor_metadata_no_usmp():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(
a: T.handle, output: T.handle
) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
executor = Executor("aot", {"interface-api": "c"})
mod = Module
f = mod["__tvm_main__"]
expected_metadata = {
"inputs": [f.params[0]],
"input_tensor_types": [TensorType((5, 7), "float32")],
"outputs": ["output"],
"output_tensor_types": [TensorType((5, 7), "float32")],
"pools": f.params[2:],
"devices": f.attrs["devices"],
"executor": "aot",
"mod_name": "test_mod",
"interface_api": "c",
"unpacked_api": False,
"workspace_alignment": 16,
"constant_alignment": 1,
"pool_inputs": {},
"io_pool_allocations": {},
}
executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
_check_executor_metadata(executor_metadata, expected_metadata)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_aot_create_function_metadata.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend.aot import CreateFunctionMetadata
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_function_metadata(function_metadata, expected_infos):
for symbol, expected_info in expected_infos.items():
func_info = function_metadata[symbol]
# Check workspace_sizes
key, value = func_info.workspace_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["workspace_sizes"]
# Check io_sizes
key, value = func_info.io_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["io_sizes"]
# Check constant_sizes
key, value = func_info.constant_sizes.items()[0]
assert str(key) == expected_info["target"]
assert value == expected_info["constant_sizes"]
# Check tir_primfuncs
key, value = func_info.tir_primfuncs.items()[0]
assert str(key) == expected_info["target"]
tvm.ir.assert_structural_equal(value, expected_info["tir_primfuncs"])
def test_create_function_metadata_workspace_allocate_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 432,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_allocate_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 140,
"tir_primfuncs": Module["__tvm_main__"],
}
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_pool_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 256,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_pool_only():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 256,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_all_single_func():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 688,
"io_sizes": 280,
"constant_sizes": 652,
"tir_primfuncs": Module["__tvm_main__"],
}
}
target = Module["__tvm_main__"].attrs["target"]
mod = Module.with_attr(
"pool_args",
[
AllocatedPoolInfo(
ConstantPoolInfo(
"flash",
[target],
[ConstantInfo("a", 0, array(np.array([0])))],
),
512,
),
AllocatedPoolInfo(
WorkspacePoolInfo("sram", [target]),
256,
),
],
)
function_metadata = CreateFunctionMetadata(mod, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_multi_funcs():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def __tvm_main__(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
@T.prim_func
def test_fused_add(a: T.handle, b: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod_test_fused_add", "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_0 = T.allocate([140], "int8", "global.workspace")
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("magic", a_buffer.data, b_buffer.data, sid_0, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
expected_infos = {
"__tvm_main__": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 0,
"io_sizes": 280,
"constant_sizes": 0,
"tir_primfuncs": Module["__tvm_main__"],
},
"test_fused_add": {
"target": "llvm -keys=cpu ",
"workspace_sizes": 144,
"io_sizes": 420,
"constant_sizes": 140,
"tir_primfuncs": Module["test_fused_add"],
},
}
function_metadata = CreateFunctionMetadata(Module, 16, 1)
_check_function_metadata(function_metadata, expected_infos)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_c_device_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C Device API Tests"""
import re
from collections import OrderedDict
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.ir.module import IRModule
from tvm.testing.aot import AOTTestModel, generate_ref_data, compile_models
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
@pytest.fixture(name="device_api_main_func")
def fixture_device_api_main_func():
"""Test function generator which generates C Device API calls"""
# Ideally we should have a sample Target registered here
# but we're going to re-use this for now
pytest.importorskip("ethosu.vela")
# pylint: disable=import-outside-toplevel
import tensorflow as tf
import tflite.Model
from tests.python.contrib.test_ethosu.infra import create_test_runner, generate_ref_data_tflite
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
# pylint: enable=import-outside-toplevel
tf.config.run_functions_eagerly(True)
class Model(tf.Module):
@tf.function
def tf_function(self, x):
return tf.nn.max_pool(x, [1, 2], [1, 2], "SAME")
def representative_dataset():
for _ in range(100):
data = np.random.rand(1, 3, 4, 3)
yield [data.astype(np.float32)]
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec([1, 3, 4, 3], dtype=tf.float32)
)
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_graph = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"x": [1, 3, 4, 3]},
dtype_dict={"x": "int8"},
)
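    # Note (added): partition supported operators to the Arm(R) Ethos(TM)-U NPU so
    # the generated main function has to drive it through the C Device API hooks.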
mod = partition_for_ethosu(relay_module, params)
# Generate reference data
input_data, output_data = generate_ref_data_tflite(tflite_graph)
def compile_to_main_func(interface_api="c", use_unpacked_api=True):
test_runner = create_test_runner()
compiled_models = compile_models(
models=AOTTestModel(
module=mod,
inputs=input_data,
outputs=output_data,
),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
main_ir_module = compiled_models[0].executor_factory.lowered_ir_mods.items()[0][1]
main_func = main_ir_module["__tvm_main__"]
return main_func
return compile_to_main_func
@pytest.fixture(name="non_device_api_main_func")
def fixture_non_device_api_main_func():
"""Test function generator which does not generate C Device API calls"""
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
inputs = OrderedDict([("x", x_data), ("y", y_data)])
output_list = generate_ref_data(func, inputs)
def compile_to_main_func(interface_api="c", use_unpacked_api=True):
test_runner = AOT_DEFAULT_RUNNER
compiled_models = compile_models(
models=AOTTestModel(
module=IRModule.from_expr(func),
inputs=inputs,
outputs=output_list,
),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
main_ir_module = list(compiled_models[0].executor_factory.lowered_ir_mods.values())[0]
main_func = main_ir_module["__tvm_main__"]
return main_func
return compile_to_main_func
def test_device_api_hooks_unpacked_api(device_api_main_func):
"""Check for Device API hooks with unpacked internal calls"""
main_func = device_api_main_func(interface_api="c", use_unpacked_api=True)
# Activate Device
assert (
str(main_func.body[0])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUActivate",'
+ " device_context_ethos_u))\n"
)
# Open Device
print("main func", repr(main_func.body))
assert (
str(main_func.body[1][0][0][0])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUOpen",'
+ " device_context_ethos_u))\n"
)
# Device Call
    # We don't need to check exact input and output var names in this test.
# Hence, using a regex to cover any legal I/O name.
regex = re.compile(
r"tir\.tvm_check_return\("
r"0, -1, "
r'tir\.call_extern\("tvmgen_default_ethos_u_main_0", '
r"\w+, \w+, device_context_ethos_u\)\)"
)
assert regex.match(str(main_func.body[1][0][0][1]))
# Close Device
assert (
str(main_func.body[1][0][0][2])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUClose",'
+ " device_context_ethos_u))\n"
)
# Deactivate Device
assert (
str(str(main_func.body[2]))
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUDeactivate",'
+ " device_context_ethos_u))\n"
)
@pytest.mark.skip(
"Skipping this test as this is incorrectly using Arm(R) Ethos(TM)-U NPU "
"with packed calling convention which is not supported by the NPU codegen's "
"TIR to Runtime Hook. We need to use a different target to test this feature"
)
def test_device_api_hooks_packed_api(device_api_main_func):
"""Check for Device API hooks with packed internal calls"""
main_func = device_api_main_func(interface_api="packed", use_unpacked_api=False)
# Activate Device
assert (
str(main_func.body[0][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUActivate",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
# Open Device
assert (
str(main_func.body[1].body.body[0][0][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUOpen",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
# Device Call
assert (
str(main_func.body[1].body.body[0][0][1][0].value)
== "@tir.tvm_call_cpacked("
+ '"tvmgen_default_ethos_u_main_0",'
+ " input: handle, output: handle,"
+ " device_context_ethos_u: handle,"
+ " dtype=int32)"
)
# Close Device
assert (
str(main_func.body[1].body.body[0][0][2].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUClose",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
# Deactivate Device
assert (
str(main_func.body[2][0].value)
== "@tir.tvm_check_return(0, -1, tir.call_extern("
+ '"TVMDeviceEthosUDeactivate",'
+ " device_context_ethos_u: handle,"
+ " dtype=int32))"
)
def test_without_device_api_unpacked_api(non_device_api_main_func):
"""Test a graph without the Device API with the unpacked internal calls"""
main_func = non_device_api_main_func(interface_api="c", use_unpacked_api=True)
assert (
str(main_func.body)
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"tvmgen_default_fused_multiply",'
+ " x_buffer_var, y_buffer_var, output_buffer_var))\n"
)
def test_without_device_api_packed_api(non_device_api_main_func):
"""Test a graph without the Device API with the packed internal calls"""
main_func = non_device_api_main_func(interface_api="packed", use_unpacked_api=False)
assert str(main_func.body) == (
'tir.tvm_call_cpacked("tvmgen_default_fused_multiply", '
"tir.tvm_stack_make_array(x_buffer_var, tir.tvm_stack_make_shape(10, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), " # pylint: disable=line-too-long
"tir.tvm_stack_make_array(y_buffer_var, tir.tvm_stack_make_shape(1, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), " # pylint: disable=line-too-long
"tir.tvm_stack_make_array(output_buffer_var, tir.tvm_stack_make_shape(10, 10), tir.reinterpret((uint64)0), (uint32)2, float32(0), 0), " # pylint: disable=line-too-long
"tir.reinterpret((uint64)0))\n"
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_cpp_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C++ Runtime Tests"""
import re
import textwrap
import numpy as np
import pytest
import tvm
from tvm import IRModule
from tvm import relay
from tvm.relay import backend, testing
from tvm.testing.aot import generate_ref_data
def test_error_c_interface():
"""Checks that an error occurs when using the packed API in combination with C interface"""
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
with pytest.raises(
tvm.TVMError,
match=re.escape(
'Need unpacked-api == false (got: 0) and interface-api == "packed" (got: c) when '
"targeting c++ runtime"
),
):
tvm.relay.build(
IRModule.from_expr(func),
target="llvm",
executor=backend.Executor("aot", {"interface-api": "c"}),
)
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_conv2d(enable_usmp, target_kind):
"""Tests compilation of convolutions"""
relay_model = textwrap.dedent(
"""\
#[version = "0.0.5"]
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
%1 = nn.conv2d(
%data,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
%3 = nn.conv2d(
%2,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%4 = nn.max_pool2d(%3, pool_size=[3, 3]);
%4
}
"""
)
ir_mod = tvm.parser.fromtext(relay_model)
main_func = ir_mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
ref_outputs = generate_ref_data(ir_mod, inputs, params)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
"tir.usmp.enable": enable_usmp,
},
):
mod = tvm.relay.build(
ir_mod,
params=params,
target=target_kind,
executor=backend.Executor("aot", {"interface-api": "packed", "unpacked-api": False}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
runner.set_input(**inputs)
runner.run()
assert (runner.get_output(0).numpy() == list(ref_outputs.values())[0]).all()
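# The export_library -> load_module -> AotModule round trip used above is the same
# pattern exercised by the other C++ runtime tests in this file.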
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_mobilenet(enable_usmp: bool, target_kind: str):
"""Full network test with Mobilenet"""
ir_mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in ir_mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
ref_outputs = generate_ref_data(ir_mod, inputs, params)
with tvm.transform.PassContext(
opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": enable_usmp}
):
mod = tvm.relay.build(
ir_mod,
params=params,
target=target_kind,
executor=backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
runner.set_input(**inputs)
runner.run()
assert (runner.get_output(0).numpy() == list(ref_outputs.values())[0]).all()
def test_module_list():
"""Checks the correct list of module names is generated"""
input_x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(input_x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
mod = tvm.relay.build(
tvm.IRModule.from_expr(tvm.relay.Function([input_x], expr)),
target="c",
executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
mod_name="unusual_module_name_fred",
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11"])
loaded_mod = tvm.runtime.load_module(test_so_path)
list_module_names = loaded_mod.get_function("list_module_names")
names_expected = ["unusual_module_name_fred"]
assert list(sorted(names_expected)) == list(sorted(list_module_names()))
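# The loaded library exposes a "list_module_names" function, which is expected to echo
# back the mod_name passed to tvm.relay.build above.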
def test_create_executor():
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
actual = relay.create_executor(
"aot", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr)), target="c"
).evaluate()(np.array([2], dtype="float32"))
np.isfinite(np.array([3], dtype="float32"))
np.testing.assert_allclose(actual.numpy(), np.array([3], dtype="float32"))
def test_pass_wrong_device_arg():
"""Ensure an error is generated if the incorrect number of devices are passed"""
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(
tvm.IRModule.from_expr(tvm.relay.Function([x], expr)),
target="c",
executor=backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
loaded_mod = tvm.runtime.load_module(test_so_path)
with pytest.raises(tvm.TVMError) as error:
tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0), tvm.cpu(0)))
assert (
"Check failed: devices_.size() == 1 (2 vs. 1) : Expect exactly 1 device passed."
in str(error.value)
)
# TODO: write asserts for the number and type of devices.
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
@pytest.mark.parametrize("input_name", ["input:0", "input@0", "input_0"])
def test_aot_input_name_with_special_character(target_kind: str, input_name: str):
"""Test name transforms in AOT for input names with special characters."""
dtype = "float32"
input_1 = relay.var(input_name, shape=(10, 5), dtype=dtype)
weight = relay.var("weight", shape=(1, 5), dtype=dtype)
output = relay.add(input_1, weight)
func = relay.Function([input_1, weight], output)
input_data = np.random.rand(10, 5).astype(dtype)
weight_data = np.random.rand(1, 5).astype(dtype)
expected_output = input_data + weight_data
params = {"weight": weight_data}
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(
tvm.IRModule.from_expr(func),
target=target_kind,
params=params,
executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
)
temp_dir = tvm.contrib.utils.TempDirectory()
test_so_path = temp_dir / "test.so"
mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
# test both original name and transformed name
for name in ["input_0", input_name]:
loaded_mod = tvm.runtime.load_module(test_so_path)
runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
inputs = {name: input_data}
runner.set_input(**inputs)
input_ind = runner.get_input_index(name)
assert (runner.get_input(input_ind).numpy() == input_data).all()
runner.run()
assert (runner.get_output(0).numpy() == expected_output).all()
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_crt_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C Runtime Tests"""
from collections import OrderedDict
import re
import os
import tarfile
import pathlib
import numpy as np
import pytest
import tvm
from tvm import relay, TVMError
from tvm.contrib import utils
from tvm.ir.module import IRModule
from tvm.relay import testing, transform
from tvm.relay.testing import byoc
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from tvm.micro import model_library_format as mlf
from tvm.micro import export_model_library_format
from tvm.ir.instrument import pass_instrument
from tvm.testing.aot import (
AOTTestModel,
generate_ref_data,
compile_and_run,
compile_models,
create_relay_module_and_inputs_from_tflite_file,
)
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER, parametrize_aot_options
from tvm.micro.testing.utils import get_conv2d_relay_module
def test_error_c_interface_with_packed_api():
"""Checks that an error occurs when using the packed API in combination with C interface"""
interface_api = "c"
use_unpacked_api = False
test_runner = AOT_DEFAULT_RUNNER
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
with pytest.raises(
tvm.TVMError,
match=re.escape(
'Either need interface_api == "packed" (got: c) or '
"unpacked-api == true (got: 0) when targeting "
"c runtime"
),
):
compile_and_run(
AOTTestModel(
module=IRModule.from_expr(func), inputs={}, outputs=generate_ref_data(func, {})
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_conv_with_params(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of convolution with parameters"""
mod = get_conv2d_relay_module()
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_with_params(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of add with parameters"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
input_z = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], input_z)
input_x_data = np.ones((1, 10)).astype("float32")
input_y_data = np.random.uniform(size=(1, 10)).astype("float32")
params = {"x": input_x_data}
inputs = {"y": input_y_data}
output_list = generate_ref_data(func, inputs, params)
compile_and_run(
AOTTestModel(
module=IRModule.from_expr(func),
inputs=inputs,
outputs=output_list,
params=params,
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
"""Test a subgraph with a single conv2d operator."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
def test_packed_global_variables():
"""Check packed global variables in codegen output."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, 32, 3, 3)
interface_api = "packed"
use_unpacked_api = False
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=1)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compiled_models_list = compile_models(
models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=8,
enable_op_fusion=True,
pass_config=AOT_DEFAULT_RUNNER.pass_config,
use_runtime_executor=True,
target=tvm.target.Target("c"),
)
compiled_model = compiled_models_list[0]
tmp_path = utils.tempdir()
base_path = tmp_path.temp_dir
model = compiled_model.model
tar_file = os.path.join(base_path, f"{model.name}.tar")
export_model_library_format(compiled_model.executor_factory, tar_file)
t = tarfile.open(tar_file)
t.extractall(base_path)
file_list = []
for path in (pathlib.Path(base_path) / "codegen" / "host" / "src").iterdir():
if path.is_file():
file_list.append(path)
assert len(file_list) > 0
for path in file_list:
with open(path, "r") as lib_f:
lib1 = lib_f.readlines()
tvmgen_names = []
tvmgen_funcs = []
for line in lib1:
for item in line.split(" "):
# Find all names starting with tvmgen_default
if item.startswith("tvmgen_default"):
# Collect any name starting with tvmgen_default
tvmgen_names.append(item)
# Collect all functions starting with tvmgen_default
tvmgen_funcs += re.findall(r".*(?=\()", item)
# Check that none of the collected tvmgen_default function names has a
# corresponding "<function>_packed" name among the collected symbols.
for func in tvmgen_funcs:
assert f"{func}_packed" not in tvmgen_names
@parametrize_aot_options
def test_concatenate(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of concatenate"""
dtype = "float32"
input_x = relay.var("x", shape=(10, 5), dtype=dtype)
input_y = relay.var("y", shape=(10, 5), dtype=dtype)
input_z = relay.var("z", shape=(), dtype=dtype)
concat_inputs = relay.concatenate((input_x, input_y), axis=1)
func_output = relay.add(input_z, concat_inputs)
# Check result.
func = relay.Function([input_x, input_y, input_z], func_output)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = OrderedDict([("x", x_data), ("y", y_data), ("z", t_data)])
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_nested_tuples(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of functions with nested tuple outputs"""
input_x = relay.var("x", shape=(10,))
output_1 = input_x + relay.const(1.0)
output_2 = output_1 + relay.const(1.0)
output_3 = output_2 + relay.const(1.0)
output_4 = output_3 + relay.const(1.0)
full_output = relay.Tuple(
[output_1, relay.Tuple([relay.Tuple([output_2, output_3]), output_4])]
)
func = relay.Function([input_x], full_output)
x_data = np.random.uniform(size=(10,)).astype(np.float32)
inputs = {"x": x_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_tuple_getitem(interface_api, use_unpacked_api, test_runner):
func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
output_list = generate_ref_data(func, {})
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_id(interface_api, use_unpacked_api, test_runner):
x = relay.var("x", "float32")
ident = relay.Function([x], x)
one = np.array(1.0, "float32")
inputs = {"x": one}
output_list = generate_ref_data(ident, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(ident), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_const(interface_api, use_unpacked_api, test_runner):
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
output_list = generate_ref_data(func, {})
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs={}, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_multiply(interface_api, use_unpacked_api, test_runner):
"""Tests compilation of multiply"""
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
inputs = OrderedDict([("x", x_data), ("y", y_data)])
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_subtract(interface_api, use_unpacked_api, test_runner):
i = relay.var("i", shape=[], dtype="int32")
sub = relay.subtract(i, relay.const(1, dtype="int32"))
func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
i_data = np.array(1, dtype="int32")
inputs = {"i": i_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_tuple_output(interface_api, use_unpacked_api, test_runner):
"""Tests getting items from tuples"""
x = relay.var("x", shape=(6, 9))
y = relay.split(x, 3).astuple()
a = relay.TupleGetItem(y, 0)
b = relay.TupleGetItem(y, 1)
out = relay.Tuple([a, b])
func = relay.Function([x], out)
x_data = np.random.rand(6, 9).astype("float32")
inputs = {"x": x_data}
output_list = generate_ref_data(func, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize(
["debug_calculated_workspaces", "workspace_byte_alignment"], [(True, 1), (True, 16), (False, 1)]
)
def test_mobilenet(debug_calculated_workspaces, workspace_byte_alignment):
"""Full network test with Mobilenet"""
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
# TODO(@Mousius) - Enable memory planning to take into account debug information
debugging_memory_overhead = 1024 * 1024
mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(
module=mod,
inputs=inputs,
outputs=output_list,
params=params,
extra_memory_in_bytes=debugging_memory_overhead,
),
test_runner,
interface_api,
use_unpacked_api,
workspace_byte_alignment=workspace_byte_alignment,
debug_calculated_workspaces=debug_calculated_workspaces,
)
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
"""
This is a simple test to check BYOC capabilities of AOT
with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036
"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOT_DEFAULT_RUNNER
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", shape=(10, 10))
# z0 = x + w0
marked_input_x = compiler_begin(input_x, "ccompiler")
marked_input_w0 = compiler_begin(input_w0, "ccompiler")
add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
# z1 = z0 + w1
marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
marked_w1 = compiler_begin(input_w1, "ccompiler")
add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
# z2 = z0 + z1
final_add = relay.add(end_inner_add, end_outer_add)
relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
mod = tvm.IRModule()
mod["main"] = relay_func
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph("mod_name")(mod)
mod = transform.InferType()(mod)
x_data = [("x", np.random.rand(10, 10).astype("float32"))]
w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
map_inputs = OrderedDict(x_data + w_data)
output_list = generate_ref_data(mod, map_inputs)
compile_and_run(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm_multiple_subgraphs(merge_compiler_regions):
"""This is a test case to check BYOC capabilities of AOT with multiple sub graphs"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOT_DEFAULT_RUNNER
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", shape=(10, 10))
input_w2 = relay.var("w2", shape=(10, 10))
input_w3 = relay.var("w3", shape=(10, 10))
input_w4 = relay.var("w4", shape=(10, 10))
input_w5 = relay.var("w5", shape=(10, 10))
input_w6 = relay.var("w6", shape=(10, 10))
input_w7 = relay.var("w7", shape=(10, 10))
# C compiler
ccompiler_add_1 = relay.add(input_x, input_w0)
ccompiler_sub_1 = relay.subtract(ccompiler_add_1, input_w1)
ccompiler_mul_1 = relay.multiply(ccompiler_sub_1, input_w2)
ccompiler_add_2 = relay.add(input_x, input_w3)
ccompiler_sub_2 = relay.subtract(ccompiler_add_2, input_w4)
ccompiler_mul_2 = relay.multiply(ccompiler_sub_2, input_w5)
# Other parts on TVM
tvm_add = relay.add(input_x, input_w6)
tvm_sub = relay.subtract(tvm_add, input_w7)
concat_outputs = relay.concatenate((ccompiler_mul_1, ccompiler_mul_2, tvm_sub), axis=0)
relay_func = relay.Function(
[input_x, input_w0, input_w1, input_w2, input_w3, input_w4, input_w5, input_w6, input_w7],
concat_outputs,
)
mod = tvm.IRModule()
ann = byoc.CcompilerAnnotator()
mod["main"] = ann.visit(relay_func)
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
mod = tvm.relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
output_list = generate_ref_data(mod, map_inputs)
input_list = [map_inputs["x"]]
input_list.extend([map_inputs["w{}".format(i)] for i in range(8)])
compile_and_run(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_add_name_mangling_with_params(interface_api, use_unpacked_api, test_runner):
"""Checks name mangling works with parameters"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
func_add = relay.add(input_x, input_y)
relay_func = relay.Function([input_x, input_y], func_add)
x_in = np.ones((1, 10)).astype("float32")
y_in = np.random.uniform(size=(1, 10)).astype("float32")
params = {"x": x_in}
inputs = {"y": y_in}
output_list = generate_ref_data(relay_func, inputs, params)
compile_and_run(
AOTTestModel(
name="my_mod",
module=relay_func,
inputs=inputs,
outputs=output_list,
params=params,
),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_multiple_models(interface_api, use_unpacked_api, test_runner):
"""Compiles multiple models to ensure both can be compiled into one output"""
# Identity model without params
x = relay.var("x", "float32")
mod1 = relay.Function([x], x)
one = np.array(1.0, "float32")
inputs1 = {"x": one}
output_list1 = generate_ref_data(mod1, inputs1)
params1 = None
# Convolution model
mod2 = get_conv2d_relay_module()
main_func = mod2["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params2 = {"weight": weight_data}
inputs2 = {"data": input_data}
output_list2 = generate_ref_data(mod2, inputs2, params2)
compile_and_run(
[
AOTTestModel(
name="mod1",
module=mod1,
inputs=inputs1,
outputs=output_list1,
params=params1,
),
AOTTestModel(
name="mod2",
module=mod2,
inputs=inputs2,
outputs=output_list2,
params=params2,
),
],
test_runner,
interface_api,
use_unpacked_api,
)
def test_quant_mobilenet_tfl():
"""Since in AOT we pass directly the output buffer from the user,
in quantized networks sharing the output buffers is not possible.
This is because the output data type is int8 and the intermediate
buffer are int32 or int16. We use mobilenet quantized to stress this
situation and verify that the output buffer sharing is disabled in AOT."""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
tflite_model_file = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/"
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@parametrize_aot_options
def test_transpose(interface_api, use_unpacked_api, test_runner):
"""Test that non-inpleaceable operations (e.g., transpose) do not happen in-place."""
dtype = "float32"
input_x = relay.var("x", shape=(10, 5), dtype=dtype)
input_y = relay.var("y", shape=(10, 5), dtype=dtype)
input_z = relay.var("z", shape=(), dtype=dtype)
first_add = relay.add(input_x, input_y)
transpose_add = relay.transpose(first_add)
final_add = relay.add(transpose_add, input_z)
# Check result.
relay_func = relay.Function([input_x, input_y, input_z], final_add)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = {"x": x_data, "y": y_data, "z": t_data}
output_list = generate_ref_data(relay_func, inputs)
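# Op fusion is disabled below (enable_op_fusion=False), presumably so the transpose stays
# a separate operator call and the non-inplace behaviour remains observable.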
compile_and_run(
AOTTestModel(module=IRModule.from_expr(relay_func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_name_sanitiser():
"""Test that input tensors with special characters in the name don't break compilation"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_DEFAULT_RUNNER
func = relay.var("input-x::2", "float32")
ident = relay.Function([func], func)
one = np.array(1.0, "float32")
inputs = {"input-x::2": one}
output_list = generate_ref_data(ident, inputs)
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_name_sanitiser_name_clash():
"""Test that 2 input tensors with names that clash once sanitized, generates an error"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_DEFAULT_RUNNER
dtype = "float32"
input_non_clashing = relay.var("input::-1", shape=(10, 5), dtype=dtype)
# Next 2 input tensor names will clash once sanitized.
input_clashing_1 = relay.var("input::-2", shape=(10, 5), dtype=dtype)
input_clashing_2 = relay.var("input:--2", shape=(), dtype=dtype)
inner_add = relay.add(input_non_clashing, input_clashing_1)
transpose_add = relay.transpose(inner_add)
final_add = relay.add(transpose_add, input_clashing_2)
# Check result.
func = relay.Function([input_non_clashing, input_clashing_1, input_clashing_2], final_add)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
inputs = {"input::-1": x_data, "input::-2": y_data, "input:--2": t_data}
output_list = generate_ref_data(func, inputs)
with pytest.raises(TVMError, match="Sanitized input tensor name clash"):
compile_and_run(
AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
enable_op_fusion=False,
)
def test_aot_codegen_backend_alloc_workspace_calls():
"""This test checks whether AoT lowering creates TVMBackendAllocWorkspace calls"""
# The %data and %weight shapes in the following primitive Relay should create
# small tensors that would get lowered to stack allocations in the CPU PrimFuncs.
# However, the AoT executor codegen should retain them as TVMBackendAllocWorkspace (TVMBAW) calls.
# pylint: disable=line-too-long
relay_mod = tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {
%0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%1 = fn (%p03: Tensor[(4, 4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {
layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */
};
%2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;
%4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {
layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */
};
%6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */
}
"""
)
# pylint: enable=line-too-long
compiled_test_mods = compile_models(
models=AOTTestModel(module=relay_mod, inputs=None, outputs=None),
interface_api="c",
use_unpacked_api=True,
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
# There should be three allocates created for three primitive relay function
# calls in the main for the above relay snippet.
assert source.count("TVMBackendAllocWorkspace") == 3
@pytest.mark.parametrize("constants_byte_alignment", [8, 16, 32])
def test_constants_alignment(constants_byte_alignment):
"""Test that constants_byte_alignment correctly sets constants byte alignment"""
use_unpacked_api = True
interface_api = "c"
mod, params = testing.mobilenet.get_workload(batch_size=1)
data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
data = np.random.uniform(size=data_shape).astype("float32")
inputs = {"data": data}
output_list = generate_ref_data(mod, inputs, params)
target = f"c -constants-byte-alignment={constants_byte_alignment}"
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api,
use_unpacked_api,
target=tvm.target.Target(target, host=target),
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
assert f'__attribute__((section(".rodata.tvm"), aligned({constants_byte_alignment})))' in source
def test_output_tensor_names():
"""Test that the output names generated match those in the model"""
pytest.importorskip("tflite")
# pylint: disable=import-outside-toplevel
import tensorflow as tf
import tflite.Model
# pylint: enable=import-outside-toplevel
ifm_shape = (1, 299, 299, 3)
padding = "VALID"
strides = (1, 1)
dilation = (1, 1)
kernel_shape = (3, 2)
def create_tflite_graph_two_outs():
"""Create a model with 2 output tensors"""
class Model(tf.Module):
"""Simple TFLite test model"""
@tf.function
def tf_function(self, tf_input_x):
"""Single TFLite function with two convolutions"""
tf_strides = [1, strides[0], strides[1], 1]
filter_shape = [kernel_shape[0], kernel_shape[1], 3, 3]
filter1 = tf.constant(
np.arange(np.prod(filter_shape)).reshape(filter_shape),
dtype=tf.float32,
)
first_conv2d = tf.nn.conv2d(
tf_input_x,
filters=filter1,
strides=tf_strides,
padding=padding,
dilations=dilation,
)
first_conv2d = tf.nn.relu(first_conv2d)
filter2 = tf.constant(
1000 + np.arange(np.prod(filter_shape)).reshape(filter_shape),
dtype=tf.float32,
)
second_conv2d = tf.nn.conv2d(
tf_input_x,
filters=filter2,
strides=strides,
padding=padding,
data_format="NHWC",
dilations=dilation,
)
second_conv2d = tf.nn.relu(second_conv2d)
return first_conv2d, second_conv2d
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph_two_outs()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": "int8"},
)
use_unpacked_api = True
interface_api = "c"
test_runner = AOT_DEFAULT_RUNNER
in_min, in_max = (-128, 127)
data = np.random.randint(in_min, high=in_max, size=ifm_shape, dtype="int8")
input_name = mod["main"].params[0].name_hint
inputs = {input_name: data}
output_list = generate_ref_data(mod, inputs, params)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api,
use_unpacked_api,
)
# Check that the names of the output tensors occur in the source code
source = compiled_test_mods[0].executor_factory.lib.get_source()
for output_name in output_list.keys():
assert output_name in source
@pytest.mark.parametrize(
"workspace_byte_alignment,main_workspace_size",
[
(8, 14880),
(16, 14880),
(256, 15616),
],
)
def test_workspace_calculation(workspace_byte_alignment, main_workspace_size):
"""Checks calculated workspace against known values"""
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": workspace_byte_alignment,
},
)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == main_workspace_size
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
def test_workspace_calculation_cmsis_nn():
"""This tests cmsis_nn codegen for workspace calculation.
This is tested specially because cmsis-nn codegen creates
multiple PrimFuncs per offloaded relay function in a non
-hierarchical manner."""
pytest.importorskip("tflite")
# pylint: disable=import-outside-toplevel
from tvm.relay.op.contrib import cmsisnn
from tvm.contrib.download import download_testdata
# pylint: enable=import-outside-toplevel
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": 16,
"interface-api": "c",
"unpacked-api": True,
},
)
base_url = (
"https://github.com/ARM-software/ML-zoo/raw/"
"48a22ee22325d15d2371a6df24eb7d67e21dcc97"
"/models/keyword_spotting/cnn_small/tflite_int8"
)
file_to_download = "cnn_s_quantized.tflite"
file_saved = "cnn_s_quantized_15Dec2021.tflite"
model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)
mod, _, params = create_relay_module_and_inputs_from_tflite_file(model_file)
mod = cmsisnn.partition_for_cmsisnn(mod, params)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == 14256
def test_aot_codegen_checks_returns():
"""This test checks whether AoT lowering creates calls that check the return value correctly"""
input_x = relay.var("x", shape=(1, 10))
input_y = relay.var("y", shape=(1, 10))
func_add = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], func_add)
compiled_test_mods = compile_models(
models=AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None),
interface_api="c",
use_unpacked_api=True,
)
source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
main_ir_module = compiled_test_mods[0].executor_factory.lowered_ir_mods.items()[0][1]
main_func = main_ir_module["__tvm_main__"]
# Check operator call is wrapped properly
assert (
str(main_func.body[1])
== "tir.tvm_check_return(0, -1, tir.call_extern("
+ '"tvmgen_default_fused_add",'
+ " x_buffer_var, y_buffer_var, output_buffer_var))\n"
)
# TODO(Mousius) - Create a better place for C codegen tests
assert (
"if (tvmgen_default_fused_add(x_buffer_var, y_buffer_var, output_buffer_var) != 0 ) return -1;" # pylint: disable=line-too-long
in source
)
def test_aot_uses_anf():
"""Checks that A-Normal Form is being used in the AOT lowering pipeline."""
input_x = relay.var("x", shape=(1, 10, 10, 10))
input_y = relay.var("y", shape=(1, 10, 10, 10))
func_add = relay.add(input_x, input_y)
func = relay.Function([input_x, input_y], func_add)
@pass_instrument
class CheckANFRuns:
def __init__(self):
self.did_run_anf = False
def run_before_pass(self, _, info):
if info.name == "ToANormalForm":
self.did_run_anf = True
if info.name == "LowerTE":
assert self.did_run_anf, "ToANormalForm pass should run before LowerTE."
check_run_anf = CheckANFRuns()
model = AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None)
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": 8,
"interface-api": "c",
"unpacked-api": True,
},
)
config = {"tir.disable_vectorize": True}
with tvm.transform.PassContext(opt_level=3, config=config, instruments=[check_run_anf]):
tvm.relay.build(
model.module,
tvm.target.Target("c"),
executor=executor,
runtime=runtime,
workspace_memory_pools=None,
params=model.params,
mod_name=model.name,
)
assert check_run_anf.did_run_anf, "Expected ToANormalForm pass to have run."
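# The instrument above only inspects pass names; the check relies on "ToANormalForm"
# being scheduled before "LowerTE" in the AOT build pipeline.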
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_crt_aot_usmp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" This file contains test that use USMP + AoT using C runtime APIs"""
from collections import OrderedDict
import re
import random
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay import testing # pylint: disable=W0611
from tvm.relay import transform
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from tvm import (
WorkspaceMemoryPools,
ConstantMemoryPools,
WorkspacePoolInfo,
ConstantPoolInfo,
PoolInfoProperties,
)
from tvm.micro import model_library_format as mlf
from tvm.micro.testing.aot_test_utils import parametrize_aot_options
from tvm.testing.aot import (
AOTTestModel,
AOTTestRunner,
generate_ref_data,
compile_and_run,
compile_models,
run_and_check,
create_relay_module_and_inputs_from_tflite_file,
)
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls
def _check_for_no_tvm_backendallocworkspace_calls(mod: tvm.runtime.Module):
assert (
is_tvm_backendallocworkspace_calls(mod) is False
), "This is failing because USMP was unable to plan for every tir.allocate node."
# U1 test case
@parametrize_aot_options
def test_synthetic(interface_api, use_unpacked_api, test_runner):
"""
Simple U1 usecase test
"""
mod, params = tvm.relay.testing.synthetic.get_workload()
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {}
for name, _ in shape_dict.items():
if name != "data":
params[name] = np.ones(shape_dict[name]).astype(type_dict[name])
inputs = {"data": input_data}
output_list = generate_ref_data(mod, inputs, params)
config = {
"tir.disable_vectorize": True,
"tir.disable_storage_rewrite": True,
"tir.usmp.enable": True,
"tir.usmp.algorithm": "greedy_by_conflicts",
}
test_runner = AOTTestRunner(
makefile=test_runner.makefile,
prologue=test_runner.prologue,
epilogue=test_runner.epilogue,
includes=test_runner.includes,
parameters=test_runner.parameters,
pass_config={**test_runner.pass_config},
)
test_runner.pass_config.update(config)
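# "tir.usmp.enable" switches on the Unified Static Memory Planner and "tir.usmp.algorithm"
# picks the planning strategy; storage rewrite is disabled here, presumably so that USMP,
# rather than StorageRewrite, handles the tir.allocate nodes.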
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
@pytest.mark.parametrize(
"workspace_byte_alignment,constant_byte_alignment,"
"main_workspace_size,main_constant_size,usmp_algo",
[
(8, 8, 14208, 948, "greedy_by_conflicts"),
(16, 8, 14208, 948, "greedy_by_conflicts"),
(256, 8, 14720, 948, "greedy_by_conflicts"),
(8, 16, 14208, 956, "greedy_by_conflicts"),
(16, 16, 14208, 956, "greedy_by_conflicts"),
(256, 16, 14720, 956, "greedy_by_conflicts"),
(8, 256, 14208, 1804, "greedy_by_conflicts"),
(16, 256, 14208, 1804, "greedy_by_conflicts"),
(256, 256, 14720, 1804, "greedy_by_conflicts"),
(8, 8, 18576, 948, "greedy_by_size"),
(16, 8, 18576, 948, "greedy_by_size"),
(256, 8, 19392, 948, "greedy_by_size"),
(8, 16, 18576, 956, "greedy_by_size"),
(16, 16, 18576, 956, "greedy_by_size"),
(256, 16, 19392, 956, "greedy_by_size"),
(8, 256, 18576, 1804, "greedy_by_size"),
(16, 256, 18576, 1804, "greedy_by_size"),
(256, 256, 19392, 1804, "greedy_by_size"),
(8, 8, 11424, 948, "hill_climb"),
(16, 8, 11424, 948, "hill_climb"),
(256, 8, 11920, 948, "hill_climb"),
(8, 16, 11424, 956, "hill_climb"),
(16, 16, 11424, 956, "hill_climb"),
(256, 16, 11920, 956, "hill_climb"),
(8, 256, 11424, 1804, "hill_climb"),
(16, 256, 11424, 1804, "hill_climb"),
(256, 256, 11920, 1804, "hill_climb"),
],
)
def test_memory_planning(
workspace_byte_alignment,
constant_byte_alignment,
main_workspace_size,
main_constant_size,
usmp_algo,
):
"""Checks calculated workspace against known values"""
random.seed(0)
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": workspace_byte_alignment,
"constant-byte-alignment": constant_byte_alignment,
},
)
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.disable_vectorize": True,
"tir.disable_storage_rewrite": True,
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
},
):
lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
# The workspace_size dictionary will have an entry for both the 'primitive' and 'host'
# targets, though both are identical.
assert (
sum(lib.function_metadata["__tvm_main__"].workspace_sizes.values()) == main_workspace_size
)
assert sum(lib.function_metadata["__tvm_main__"].constant_sizes.values()) == main_constant_size
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
"""Test a subgraph with a single conv2d operator."""
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, weight_shape, 3, 3)
pass_config = {"tir.usmp.enable": True}
test_runner = AOTTestRunner(
makefile=test_runner.makefile,
prologue=test_runner.prologue,
epilogue=test_runner.epilogue,
includes=test_runner.includes,
parameters=test_runner.parameters,
pass_config=pass_config,
)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=wshape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
main_f = relay.Function([data0, weight0], out)
mod = tvm.IRModule()
mod["main"] = main_f
mod = transform.InferType()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
output_list = generate_ref_data(mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
test_runner,
interface_api,
use_unpacked_api,
)
compiled_test_mods = compile_models(
models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
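# compile_and_run above checks numerical correctness; the second compilation via
# compile_models exposes the executor factory so the USMP invariant can be checked
# before the same artifacts are exercised through run_and_check.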
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
"""
This is a simple test to check BYOC capabilities of AOT
with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036
"""
use_unpacked_api = False
interface_api = "packed"
test_runner = AOTTestRunner(pass_config={"tir.usmp.enable": True})
input_x = relay.var("x", shape=(10, 10))
input_w0 = relay.var("w0", shape=(10, 10))
input_w1 = relay.var("w1", shape=(10, 10))
# z0 = x + w0
marked_input_x = compiler_begin(input_x, "ccompiler")
marked_input_w0 = compiler_begin(input_w0, "ccompiler")
add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
# z1 = z0 + w1
marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
marked_w1 = compiler_begin(input_w1, "ccompiler")
add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
# z2 = z0 + z1
final_add = relay.add(end_inner_add, end_outer_add)
relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
mod = tvm.IRModule()
mod["main"] = relay_func
if merge_compiler_regions:
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph("mod_name")(mod)
mod = transform.InferType()(mod)
x_data = [("x", np.random.rand(10, 10).astype("float32"))]
w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
map_inputs = OrderedDict(x_data + w_data)
output_list = generate_ref_data(mod, map_inputs)
compiled_test_mods = compile_models(
AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
MOBILENET_V1_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/"
+ "mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
MOBILENET_V2_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/"
+ "tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
"mobilenet_v2_1.0_224_quant.tflite",
)
@pytest.mark.parametrize(
"model_url, usmp_algo, workspace_size, constant_size",
[
(MOBILENET_V1_URL, "greedy_by_size", 4845696, 8468008),
(MOBILENET_V1_URL, "greedy_by_conflicts", 4845696, 8468008),
(MOBILENET_V1_URL, "hill_climb", 3240064, 8468008),
],
)
def test_tflite_model_u1_usecase(model_url, usmp_algo, workspace_size, constant_size):
"""
This checks for ML models and the memory used by them
when using USMP with different algorithms
"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo}
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
# Checking the workspace size reported in model library format
mlf_memory_map = mlf._build_function_memory_map(
compiled_test_mods[0].executor_factory.function_metadata
)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == workspace_size
assert mlf_memory_map["main"][0]["constants_size_bytes"] == constant_size
# That should match the workspace size that will be codegen'd into the entry point.
allocated_pool_info_size = sum(
[
_.allocated_size
for _ in list(
dict(
compiled_test_mods[0].executor_factory.executor_codegen_metadata.pool_inputs
).values()
)
]
)
assert allocated_pool_info_size == workspace_size + constant_size
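# i.e. the pools recorded in executor_codegen_metadata should account for both the
# planned workspace and the constant pool reported in the model library format.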
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
def _get_workspace_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_WORKSPACE_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_data_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool data macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_DATA"
return prefix + pool_name.upper() + postfix
def _add_module_prefix(suffix: str, model_name="default") -> str:
"""A helper function create struct types"""
return "tvmgen_" + model_name + "_" + suffix
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_single_external_pool(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
pool_name = "my_memory_pool"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"usmp_algo",
[("greedy_by_size"), ("hill_climb")],
)
def test_tflite_model_u3_usecase_conv2d_var_cons(usmp_algo):
"""This checks for inference using workspace and constant pools placed in the application"""
mod = tvm.parser.fromtext(
"""\
#[version = "0.0.5"]
def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
%1 = nn.conv2d(
%data,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
%3 = nn.conv2d(
%2,
%weight,
padding=[2, 2],
channels=3,
kernel_size=[5, 5],
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32");
%4 = nn.max_pool2d(%3, pool_size=[3, 3]);
%4
}
"""
)
main_func = mod["main"]
shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
params = {"weight": weight_data}
inputs = {"data": input_data}
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_mem_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
),
]
)
constant_mem_pools = ConstantMemoryPools(
[
ConstantPoolInfo("my_const_pool_1", [target], []),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
""",
)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_mem_pools,
constant_memory_pools=constant_mem_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_var_cons_ext_pools(model_url, usmp_algo):
"""This checks for inference using one external workspace and one external constant
pools placed in the application"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_mem_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
),
]
)
constant_mem_pools = ConstantMemoryPools(
[
ConstantPoolInfo("my_const_pool_1", [target], []),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_mem_pools,
constant_memory_pools=constant_mem_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u3_usecase_two_external_pools(model_url, usmp_algo):
"""This checks for inference using two external pools placed in the application"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
),
WorkspacePoolInfo("my_memory_pool_2", [target]),
]
)
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
""",
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_urls, usmp_algo",
[
((MOBILENET_V1_URL, MOBILENET_V2_URL), "greedy_by_size"),
],
)
def test_two_models_with_a_single_external_pool(model_urls, usmp_algo):
"""This checks for inference using a single large enough common pool"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo("my_memory_pool", [target])])
test_runner = AOTTestRunner(
pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
prologue=f"""
#define MAX(A, B) ((A > B) ? A : B)
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool[MAX({_get_workspace_size_define_macro("my_memory_pool", "mod1")},{_get_workspace_size_define_macro("my_memory_pool", "mod2")})];
""",
)
tflite_model_file1 = tf_testing.get_workload_official(
model_urls[0][0],
model_urls[0][1],
)
mod1, inputs1, params1 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file1)
output_list1 = generate_ref_data(mod1, inputs1, params1)
tflite_model_file2 = tf_testing.get_workload_official(
model_urls[1][0],
model_urls[1][1],
)
mod2, inputs2, params2 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file2)
output_list2 = generate_ref_data(mod2, inputs2, params2)
compiled_test_mods = compile_models(
[
AOTTestModel(
name="mod1", module=mod1, inputs=inputs1, outputs=output_list1, params=params1
),
AOTTestModel(
name="mod2", module=mod2, inputs=inputs2, outputs=output_list2, params=params2
),
],
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u4_usecase_single_external_pool(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
pool_name = "my_memory_pool"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
input_name, input_data = list(inputs.items())[0]
input_size_bytes = input_data.size * input_data.itemsize
test_runner = AOTTestRunner(
pass_config={
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
"tir.usmp.use_workspace_io": True,
},
prologue=f"""
#include <string.h>
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
.{pool_name} = {pool_name}
}};
struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
""",
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
use_workspace_io=True,
)
@pytest.mark.parametrize(
"model_url, usmp_algo",
[
(MOBILENET_V1_URL, "greedy_by_size"),
],
)
def test_tflite_model_u4_usecase_two_external_pools(model_url, usmp_algo):
"""This checks for inference with USMP using external pool placed in the application"""
pytest.importorskip("tflite")
import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
use_unpacked_api = True
interface_api = "c"
target = tvm.target.Target("c")
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
),
WorkspacePoolInfo("my_memory_pool_2", [target]),
]
)
tflite_model_file = tf_testing.get_workload_official(
model_url[0],
model_url[1],
)
mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
output_list = generate_ref_data(mod, inputs, params)
input_name, input_data = list(inputs.items())[0]
input_size_bytes = input_data.size * input_data.itemsize
test_runner = AOTTestRunner(
pass_config={
"tir.usmp.enable": True,
"tir.usmp.algorithm": usmp_algo,
"tir.usmp.use_workspace_io": True,
},
prologue=f"""
#include <string.h>
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
__attribute__((section(".data.tvm"), aligned(16)))
static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
.my_memory_pool_1 = my_memory_pool_1,
.my_memory_pool_2 = my_memory_pool_2,
}};
struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
""",
)
compiled_test_mods = compile_models(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
pass_config=test_runner.pass_config,
workspace_memory_pools=workspace_memory_pools,
target=target,
)
for compiled_model in compiled_test_mods:
_check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
run_and_check(
models=compiled_test_mods,
runner=test_runner,
interface_api=interface_api,
use_workspace_io=True,
)
def test_incompatible_interface_api_errors():
"""Ensures an error is thrown if not using the C interface API"""
mod, params = tvm.relay.testing.synthetic.get_workload()
target = "c"
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"interface-api": "packed",
},
)
with pytest.raises(
tvm.TVMError,
match=re.escape(
"tir.usmp.use_workspace_io option is only compatible with interface_api c.\n"
"Please use interface_api c to be able to enable tir.usmp.use_workspace_io"
),
):
with tvm.transform.PassContext(
opt_level=3,
config={"tir.usmp.enable": True, "tir.usmp.use_workspace_io": True},
):
tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/aot/test_pass_aot_lower_main.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.relay.backend.aot import AOTLowerMain, CallType
from tvm.script import tir as T
def _make_const(dtype, shape):
return tvm.relay.const(np.zeros(shape).astype(dtype))
def _make_consts(dtype, shapes):
return [_make_const(dtype, shape) for shape in shapes]
def _plan_devices(mod):
host_target = tvm.target.Target("llvm")
prim_target = tvm.target.Target("llvm", host=host_target)
ctxt = tvm.transform.PassContext()
config = tvm.target.make_compilation_config(ctxt, prim_target)
mod = tvm.relay.transform.PlanDevices(config)(mod)
mod = tvm.relay.transform.InferType()(mod)
return mod, config
def _assert_lowered_main(mod, main_func, call_type, print_script=False):
mod, config = _plan_devices(mod)
mod = AOTLowerMain("test_mod", config, call_type)(mod)
if print_script:
print(mod["__tvm_main__"].script())
assert_structural_equal(mod["__tvm_main__"], main_func)
def test_single_call_cpacked():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_single_call_packed():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_check_return(0, -1, T.tvm_call_packed("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.Packed)
def test_single_call_unpacked():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.Unpacked)
def test_constant():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a, meta[relay.Constant][0]) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
init_meta_table={"relay.Constant": _make_consts("float32", [(5, 7)])},
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "global_symbol": "test_mod___tvm_main__", "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
# TODO(@mbaret) There seems to be a TVMScript round-trip bug causing this to fail
@pytest.mark.xfail()
def test_copy_to_output():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%a
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
tmp_read = T.buffer_var("uint8", "")
# buffer definition
tmp_read_1 = T.buffer_decl([T.uint64(140)], dtype="uint8", data=tmp_read)
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
tmp_write: T.Ptr[T.uint8] = output_buffer.data
tmp_write_1 = T.buffer_decl([T.uint64(140)], dtype="uint8", data=tmp_write)
for i in T.serial(140):
tmp_write_1[i] = T.let(tmp_read, a_buffer.data, tmp_read_1[i])
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_two_calls():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
%1 = call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */;
%2 = (%1,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %2) /* ty=Tensor[(5, 7), float32] */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_2 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_output():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) { (%x, %x) }
def @main(%a: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) {
%0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
call_lowered(@test_fused_add, %0) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output0: T.handle, output1: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output0, output1], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output0_buffer = T.match_buffer(output0, [5, 7], dtype="float32", align=16)
output1_buffer = T.match_buffer(output1, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output0_buffer.data, output1_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_intermediate():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) { (%x, %x) }
def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
%1 = call_lowered(@test_fused_add_0, %0);
%2 = (%1.0, %1.1);
call_lowered(@test_fused_add_1, %2)
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_multi_input():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a, %b) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, b: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a, b], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, b_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
let %v1 = call_lowered(@test_fused_add, %0);
%v1
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding_branch():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
let %v0 = call_lowered(@test_fused_add_0, %0);
%1 = (%v0,);
let %v1 = call_lowered(@test_fused_add_0, %1);
%2 = (%v1,);
let %v2 = call_lowered(@test_fused_add_0, %2);
%3 = (%v1, %v2);
let %v3 = call_lowered(@test_fused_add_1, %3);
%v3
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
sid_3 = T.allocate([140], "int8", "global.workspace")
sid_2 = T.allocate([140], "int8", "global.workspace")
sid_1 = T.allocate([140], "int8", "global.workspace")
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
# fmt: on
_assert_lowered_main(mod, func, CallType.CPacked)
def test_device_hooks():
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = (%a,);
%1 = call_lowered(@test_fused_add, %0);
%2 = (%1,);
call_lowered(@test_fused_add, %2)
}
""",
)
# fmt: off
@T.prim_func
def func(a: T.handle, output: T.handle, device_context_example_target_hook: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["example_target_hook"]})
a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
# body
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookActivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
with T.allocate([140], "int8", "global.workspace") as sid_2:
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, device_context_example_target_hook, dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, device_context_example_target_hook, dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookDeactivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
# fmt: on
device_contexts = {}
for gv in mod.get_global_vars():
device_contexts[gv] = "example_target_hook"
mod = mod.with_attr("device_contexts", device_contexts)
_assert_lowered_main(mod, func, CallType.CPacked)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/backend/test_pass_lower_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Exercises the LowerTE pass.
import tvm
import tvm.testing
import logging
logging.basicConfig()
logger = logging.getLogger("test_pass_lower_te")
logger.setLevel(logging.INFO)
# Since the TE compiler needs a good refactor, it has not been exposed as a 'standard' pass
# in relay.transform. For testing, grab it directly.
LowerTE = tvm._ffi.get_global_func("relay.tec.LowerTE")
def transform(mod):
logger.info("Starting module:\n%s", mod)
host_target = tvm.target.Target("llvm")
prim_target = tvm.target.Target("llvm", host=host_target)
ctxt = tvm.transform.PassContext()
config = tvm.target.make_compilation_config(ctxt, prim_target)
mod = tvm.relay.transform.PlanDevices(config)(mod)
mod = tvm.relay.transform.InferType()(mod)
mod = LowerTE("test", config)(mod)
mod = tvm.relay.transform.InferType()(mod)
logger.info("After LowerTE:\n%s", mod)
return mod
# All attempts to use structural equality tests against an expected IRModule parsed from
# Relay text were thwarted by the difficulty of setting up the expected call_lowered attributes
# with the right GlobalVar instances. So the following tests assert structural correctness the hard way.
def test_lower_primitive():
input_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1) -> Tensor[(5, 7), float32] {
add(%x, %y)
};
%0(%a, %a)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
# Expected:
# def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
# %0 = (%a, %a);
# call_lowered(@test_fused_add, %0, metadata={relay_attrs={Primitive=1},all_prim_fn_vars=[@test_fused_add]})
# }
# def @test_fused_add = <lowered PrimFunc>
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "test_fused_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Primitive == 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 1
assert call.attrs.metadata["all_prim_fn_vars"][0].name_hint == "test_fused_add"
test_fused_add = actual_mod["test_fused_add"]
assert isinstance(test_fused_add, tvm.tir.PrimFunc)
def test_lower_compiler():
@tvm._ffi.register_func("relay.ext.test_pass_lower_te")
def relay_ext_test_pass_lower_te(func):
return None
input_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
%0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1, Compiler="test_pass_lower_te", global_symbol="test_add") -> Tensor[(5, 7), float32] {
add(%x, %y)
};
%0(%a, %a)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
# Expected:
# def @main(%a : Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
# %0 = (%a, %a)
# call_lowered(@test_add , %0, metadata={relay_attrs={Primitive=1, Compiler="test_pass_lower_te", global_symbol="test_add"}}, all_prim_fn_vars=[]})
# }
# def @test_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
# add(%x, %y)
# }
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "test_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Primitive == 1
assert call.attrs.metadata["relay_attrs"].Compiler == "test_pass_lower_te"
assert call.attrs.metadata["relay_attrs"].global_symbol == "test_add"
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
test_add = actual_mod["test_add"]
assert isinstance(test_add, tvm.relay.Function)
assert test_add.attrs["Extern"] == 1
def test_lower_extern():
input_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
@my_add(%a, %a)
}
def @my_add(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
add(%x, %y)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
# Expected:
# def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
# %0 = (%a, %a);
# call_lowered(@my_add, %0, metadata={relay_attrs={Extern=1}}, all_prim_fn_vars=[]})
# }
# def @my_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
# add(%x, %y)
# }
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "my_add"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["relay_attrs"].Extern == 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
test_add = actual_mod["my_add"]
assert isinstance(test_add, tvm.relay.Function)
assert test_add.attrs["Extern"] == 1
def test_lower_extern_with_dynamic_shape():
input_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(?, ?), float32] {
@my_dyn(%a, %a)
}
def @my_dyn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(?, ?), float32] {
add(%x, %y)
}
""",
"from_string",
None,
None,
)
actual_mod = transform(input_mod)
# Expected:
# def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(?, ?), float32] {
# %0 = (%a, %a);
# call_lowered(@my_dyn, %0, metadata={prim_shape_fn_var='test_shape_func_add', relay_attrs={Extern=1}, prim_shape_fn_states=[2, 2], prim_shape_fn_num_inputs=2, all_prim_shape_fn_vars=['shape_func_add'], prim_shape_fn_num_outputs=1, all_prim_fn_vars=[]})
# }
# def @my_dyn(%x: Tensor[(5, 7), float32] , %y: Tensor[(5, 7), float32] , Extern=1) -> Tensor[(?, ?), float32] {
# add(%x, %y)
# }
# def @test_shape_func_add = <shape PrimFunc>
main = actual_mod["main"]
call = main.body
assert call.op.name == "call_lowered"
assert len(call.args) == 2
assert call.args[0].name_hint == "my_dyn"
assert len(call.args[1].fields) == 2
assert call.args[1].fields[0].name_hint == "a"
assert call.args[1].fields[1].name_hint == "a"
assert call.attrs.metadata["prim_shape_fn_var"].name_hint == "test_shape_func_add"
assert call.attrs.metadata["relay_attrs"].Extern == 1
assert len(call.attrs.metadata["prim_shape_fn_states"]) == 2
assert call.attrs.metadata["prim_shape_fn_states"][0] == 2
assert call.attrs.metadata["prim_shape_fn_states"][1] == 2
assert call.attrs.metadata["prim_shape_fn_num_inputs"] == 2
assert len(call.attrs.metadata["all_prim_shape_fn_vars"]) == 1
assert call.attrs.metadata["all_prim_shape_fn_vars"][0].name_hint == "test_shape_func_add"
assert call.attrs.metadata["prim_shape_fn_num_outputs"] == 1
assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
my_dyn = actual_mod["my_dyn"]
assert isinstance(my_dyn, tvm.relay.Function)
assert my_dyn.attrs["Extern"] == 1
shape_func_add = actual_mod["test_shape_func_add"]
assert isinstance(shape_func_add, tvm.tir.PrimFunc)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/backend/test_pass_remove_standalone_reshapes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Exercises the RemoveStandaloneReshapes pass.
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
import tvm.testing
from tvm.script import tir as T
HOST_DEVICE = tvm.device("cpu")
HOST_TARGET = tvm.target.Target("llvm")
CPU_DEVICE = tvm.device("cpu")
CPU_TARGET = tvm.target.Target("llvm").with_host(HOST_TARGET)
CPU = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET) # device_type=1
RemoveStandaloneReshapes = tvm._ffi.get_global_func("relay._transform.RemoveStandaloneReshapes")
class MarkReshapeOnlyMutator(ExprMutator):
"""A pass for marking call_lowered as ReshapeOnly where reshapes exist unfused"""
def __init__(self):
ExprMutator.__init__(self)
def visit_call(self, call):
if isinstance(call.args[0], tvm.ir.GlobalVar) and "reshape" in call.args[0].name_hint:
# attrs = {"relay_attrs" : {"relay.reshape_only" : 1}}
dict_attrs = tvm.ir.make_node("DictAttrs", **{"relay.reshape_only": 1})
attrs = tvm.ir.make_node(
"relay.attrs.CallLoweredAttrs", **{"metadata": {"relay_attrs": dict_attrs}}
)
return relay.Call(call.op, call.args, attrs)
return super().visit_call(call)
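# The mutator above is applied to a module's "main" in the tests that need their reshape
# calls marked as reshape_only before running RemoveStandaloneReshapes.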
# Reshape should not be removed if it's the first layer in the network
def test_first_reshape():
mod = tvm.ir.IRModule()
@T.prim_func
def reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
mod[reshape_gv] = reshape_primfunc
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%1 = call_lowered(@reshape, (%x,) );
let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
assert reshapes_present, "Reshape should have been removed."
return
# When the reshape layer is the last one in the network
def test_last_reshape():
mod = tvm.ir.IRModule()
@T.prim_func
def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
D[vi, vj] = A[vi, vk] * B[vj, vk]
@T.prim_func
def reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
mul_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
mod[mul_gv] = mul_primfunc
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
mod[reshape_gv] = reshape_primfunc
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%0 = call_lowered(@multiply, (%x, %y, %z));
let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%1 = call_lowered(@reshape, (%x_12,) );
let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
# Expected main:
##[version = "0.0.5"]
# def @main(%x /* ty=Tensor[(128, 128), float32] */) -> Tensor[(128, 128), float32] {
# %0 = (%x, %y, %z);
# %1 = call_lowered(@multiply, %0);
# let %x_12: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
# let %x_14: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
# %x_14
# }
mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
assert not reshapes_present, "Reshape should have been removed."
return
# When the reshape layer is not marked as reshape_only
def test_fused_reshape():
mod = tvm.ir.IRModule()
@T.prim_func
def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
D[vi, vj] = A[vi, vk] * B[vj, vk]
@T.prim_func
def fused_reshape_primfunc(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
D = T.match_buffer(d, [128, 128])
for i, j in T.grid(128, 128):
D[i, j] = A[i, j]
metatable = {"VirtualDevice": [CPU]}
mul_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
mod[mul_gv] = mul_primfunc
reshape_ty = relay.FuncType(
[
relay.TensorType((128, 128), "float32"),
],
relay.TensorType((128, 128), "float32"),
)
reshape_gv = relay.GlobalVar("fused_reshape", type_annot=reshape_ty)
mod[reshape_gv] = fused_reshape_primfunc
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
%z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
virtual_device=meta[VirtualDevice][0]) {
%0 = call_lowered(@multiply, (%x, %y, %z));
let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%1 = call_lowered(@fused_reshape, (%x_12,) );
let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
%x_14
}
""",
"from_string",
mod,
metatable,
)
# Expected main:
##[version = "0.0.5"]
# def @main(%x /* ty=Tensor[(128, 128), float32] */) -> Tensor[(128, 128), float32] {
# %0 = (%x, %y, %z);
# %1 = call_lowered(@multiply, %0);
# let %x_12: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
# let %x_14: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
# %x_14
# }
mod = RemoveStandaloneReshapes()(mod)
reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
assert reshapes_present, "Reshape should have been removed."
return
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/benchmarking/benchmark_vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Benchmarking Relay VM using models from MXNet."""
import numpy as np
import tvm
from tvm import te
from tvm.contrib import graph_executor
from tvm import relay
from tvm.runtime import container
from tvm.runtime import vm as vm_rt
from tvm.relay import testing
from tvm.relay import vm
def benchmark_execution(
mod,
params,
measure=True,
data_shape=(1, 3, 224, 224),
out_shape=(1, 1000),
dtype="float32",
model="unknown",
):
def get_graph_executor_output(
mod, data, params, target, dev, dtype="float32", number=2, repeat=20
):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("data", data)
m.run()
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
if measure:
print("Evaluate graph executor inference cost of {} on " "{}".format(model, repr(dev)))
ftimer = m.module.time_evaluator("run", dev, number=1, repeat=20)
# Measure in millisecond.
prof_res = np.array(ftimer().results) * 1000
print(
"Mean graph executor inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
return out.numpy()
def get_vm_output(mod, data, params, target, dev, dtype="float32", number=2, repeat=20):
with tvm.transform.PassContext(opt_level=3):
exe = vm.compile(mod, target, params=params)
rly_vm = vm_rt.VirtualMachine(exe, dev)
result = rly_vm.run(data)
if measure:
print("Evaluate vm inference cost of {} on {}".format(model, repr(dev)))
ftimer = rly_vm.module.time_evaluator("invoke", dev, number=number, repeat=repeat)
# Measure in millisecond.
prof_res = np.array(ftimer("main", data).results) * 1000
print(
"Mean vm inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
return result.numpy().astype(dtype)
# random input
data = np.random.uniform(size=data_shape).astype(dtype)
for target, dev in testing.enabled_targets():
tvm_out = get_graph_executor_output(
mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype
)
vm_out = get_vm_output(mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype)
tvm.testing.assert_allclose(vm_out, tvm_out, rtol=1e-5, atol=1e-5)
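# Each test below builds a reference workload from tvm.relay.testing and runs it through
# benchmark_execution to compare graph executor and VM outputs (and optionally report timings).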
def test_mlp():
image_shape = (1, 1, 28, 28)
mod, params = testing.mlp.get_workload(1)
benchmark_execution(mod, params, data_shape=image_shape, out_shape=(1, 10), model="mlp")
def test_vgg():
for n in [11, 16]:
mod, params = testing.vgg.get_workload(1, num_layers=n)
model = "vgg" + str(n)
benchmark_execution(mod, params, model=model)
def test_resnet():
for n in [18, 50]:
mod, params = testing.resnet.get_workload(batch_size=1, num_layers=n)
model = "resnet" + str(n)
benchmark_execution(mod, params, model=model)
def test_squeezenet():
for version in ["1.0", "1.1"]:
mod, params = testing.squeezenet.get_workload(version=version)
model = "squeezenet" + version
benchmark_execution(mod, params, model=model)
def test_inception_v3():
image_shape = (3, 299, 299)
mod, params = testing.inception_v3.get_workload(image_shape=image_shape)
benchmark_execution(mod, params, data_shape=(1, 3, 299, 299), model="inception_v3")
def test_dqn():
image_shape = (1, 4, 84, 84)
mod, params = testing.dqn.get_workload(batch_size=1, image_shape=image_shape)
benchmark_execution(mod, params, data_shape=image_shape, out_shape=(1, 18))
def test_dcgan():
image_shape = (1, 100)
mod, params = testing.dcgan.get_workload(batch_size=1)
benchmark_execution(mod, params, data_shape=image_shape, out_shape=(1, 3, 64, 64))
def test_mobilenet():
mod, params = testing.mobilenet.get_workload(batch_size=1)
benchmark_execution(mod, params, model="mobilenet")
# TODO: enable when the slow build time (several minutes) is fixed.
def test_mobilenet_nhwc():
image_shape = (1, 224, 224, 3)
mod, params = testing.mobilenet.get_workload(
batch_size=1, image_shape=image_shape[1:], layout="NHWC"
)
benchmark_execution(mod, params, measure=False, data_shape=image_shape)
def test_densenet():
mod, params = testing.densenet.get_workload(batch_size=1)
benchmark_execution(mod, params, model="densenet")
if __name__ == "__main__":
test_resnet()
test_vgg()
test_squeezenet()
test_mobilenet()
test_densenet()
test_inception_v3()
test_mlp()
test_dqn()
test_dcgan()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/collage/demo_collage_partitioner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Compares Collage with various other baselines."""
# CAUTION: Requires some changes in python/tvm/autotvm/task/dispatcher.py
# so that AutoTVM tuning records can be cached between runs and between
# models. See https://github.com/mbs-octoml/mbs-tvm/tree/mbs-collage-hacks.
import tvm
import logging
import tempfile
import os
import shutil
import menangerie
# The following are necessary to force global functions or pattern tables to be registered
from tvm.relay.op.contrib.cutlass import partition_for_cutlass
from tvm.contrib.cutlass import num_cutlass_partitions
from tvm.relay.op.contrib.cublas import partition_for_cublas
from tvm.relay.op.contrib.cudnn import partition_for_cudnn
logging.basicConfig(level=logging.INFO)
########### Configuration ###########
###
### Rename to match your hardware, e.g. ..._vt100...
###
TUNING_LOG = "/home/mbs/collage_autotvm_rtx3070.tuninglog"
###
### If true, runs final model under nvprof
###
PROFILE = True
###
### If true, run all models
###
ALL_MODELS = False
###
### If true, run all configurations
###
ALL_CONFIGS = False
###
### How aggressively to look for candidates?
###
TVM_MAX_DEPTH = 8
BYOC_MAX_DEPTH = 8
###
### AutoTVM tuning parameters.
###
AUTOTVM_NUM_TRIALS = 2000
AUTOTVM_EARLY_STOPPING = 600
TIMEOUT = 10
MEASURE_NUMBER = tvm.relay.collage.MEASURE_NUMBER
MEASURE_REPEAT = tvm.relay.collage.MEASURE_REPEAT
WARMUP_MIN_REPEAT_MS = tvm.relay.collage.WARMUP_MIN_REPEAT_MS
HOST = tvm.target.Target("llvm")
CUDA = tvm.target.Target("cuda", HOST)
########### Runtime ###########
# Code to run a model. The actual call to 'run' is appended at compile time.
# We invoke the model as a sub-process so that we can wrap profiling tools around it.
runner_template = f"""
import tvm
import tvm.runtime.vm
import numpy as np
import logging
logging.basicConfig(level=logging.INFO)
MEASURE_NUMBER = {MEASURE_NUMBER}
MEASURE_REPEAT = {MEASURE_REPEAT}
WARMUP_MIN_REPEAT_MS = {WARMUP_MIN_REPEAT_MS}
def arg_for(shape, dtype, device):
return tvm.nd.array(
np.random.rand(*shape).astype(dtype), device=device)
def vm_estimate_seconds(device, vm, args):
vm.benchmark(device, repeat=1, number=1, min_repeat_ms=WARMUP_MIN_REPEAT_MS, **args)
return vm.benchmark(device, repeat=MEASURE_REPEAT, number=MEASURE_NUMBER, min_repeat_ms=0,
**args)
def run(label, name, device, lib_path, code_path, input_shapes, input_dtypes):
logging.info(f"Loading compiled code for {{name}} generated by {{label}} from {{lib_path}} and {{code_path}}...")
loaded_lib = tvm.runtime.load_module(lib_path)
loaded_code = bytearray(open(code_path, "rb").read())
loaded_exe = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)
vm = tvm.runtime.vm.VirtualMachine(loaded_exe, device)
args = {{
input_name: arg_for(input_shapes[input_name], input_dtypes[input_name], device)
for input_name in input_shapes.keys()
}}
logging.info(f"Benchmarking for {{name}} generated by {{label}}...")
profile = vm_estimate_seconds(device, vm, args)
logging.info(f"Benchmarked for {{name}} generated by {{label}}: {{profile}}")
logging.info(f"RESULT: {{label}} | {{name}} | {{profile.median * 1e3}}ms")
if __name__ == "__main__":
"""
########### AutoTVM tuning helpers ###########
def extract_autotvm_tasks(mod, target):
"""Returns TVM kernels to tune for mod and target."""
return tvm.autotvm.task.extract_from_program(mod, target=target, params=None)
def optional_tuning_records(log_filename):
"""Returns existing tuning records, if any."""
if log_filename == "" or not os.path.exists(log_filename):
return tvm.autotvm.task.FallbackContext()
else:
return tvm.autotvm.task.ApplyHistoryBest(log_filename)
def is_already_tuned(task, log_filename):
"""Returns True if we already have a tuning record for task in turning logs in log_filename"""
if not os.path.exists(log_filename):
return False
dispatch_context = tvm.autotvm.task.ApplyHistoryBest(log_filename)
return dispatch_context.contains(task.target, task.workload)
def tune_autotvm_tasks(tasks, log_filename):
"""Appends to log_filename the best strategies for tasks"""
if len(tasks) == 0:
return
measure_option = tvm.autotvm.measure_option(
builder=tvm.autotvm.LocalBuilder(timeout=TIMEOUT),
runner=tvm.autotvm.LocalRunner(
number=MEASURE_NUMBER, repeat=MEASURE_REPEAT, timeout=TIMEOUT, min_repeat_ms=0
),
)
logging.info(
f"Using autotvm tuning for {len(tasks)} tasks with {AUTOTVM_NUM_TRIALS} trials, logging to {log_filename}"
)
# create tmp log file, starting with contents from existing log file
tmp_log_filename = log_filename + ".tmp"
if os.path.exists(tmp_log_filename):
os.remove(tmp_log_filename)
if os.path.exists(log_filename):
logging.info(f"Copying existing log {log_filename} to {tmp_log_filename}")
shutil.copy(log_filename, tmp_log_filename)
for i, task in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
logging.info(f"Considering task {task.name} {prefix}")
if is_already_tuned(task, tmp_log_filename):
logging.info(f"Re-using existing record for {task.name}")
continue
logging.info(f"Using autotvm to tune {task.name}")
tuner_obj = tvm.autotvm.tuner.XGBTuner(task, loss_type="rank")
if os.path.exists(tmp_log_filename):
tuner_obj.load_history(tvm.autotvm.record.load_from_file(tmp_log_filename))
# do tuning
n_trial = min(AUTOTVM_NUM_TRIALS, len(task.config_space))
tuner_obj.tune(
n_trial=n_trial,
early_stopping=AUTOTVM_EARLY_STOPPING,
measure_option=measure_option,
callbacks=[
tvm.autotvm.callback.progress_bar(n_trial, prefix=prefix),
tvm.autotvm.callback.log_to_file(tmp_log_filename),
],
)
# pick best records and copy back to main log file
tvm.autotvm.record.pick_best(tmp_log_filename, log_filename)
os.remove(tmp_log_filename)
logging.info("Done with autotvm tuning")
def autotvm_tune_module(mod, target, log_filename):
if log_filename == "":
logging.info("Not tuning with autotvm since disabled")
return
# Extract and tune any TVM kernels. BYOC partitions will have no tasks extracted.
logging.info("Extracting tasks from overall module")
tasks = extract_autotvm_tasks(mod, target)
logging.info(f"Auto-tuning {len(tasks)} tasks from overall module")
tune_autotvm_tasks(tasks, log_filename)
########### Drivers ###########
def compile_and_benchmark(label, model, targets, dev, tmp_dir):
"""Compile model for target and run it with profiling."""
logging.info(f"Compiling {model['name']} using {label} with {targets}...")
exe = tvm.relay.vm.compile(model["mod"], target=targets, params=model["params"])
lib_path = os.path.join(tmp_dir, "lib.so")
code_path = os.path.join(tmp_dir, "code.ro")
code, lib = exe.save()
logging.info(f"Saving VM code to {code_path}...")
with open(code_path, "wb") as fo:
fo.write(code)
logging.info(f"Exporting library to {lib_path}...")
lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
runner = f"{runner_template} run('{label}', '{model['name']}', tvm.device({dev.device_type}), '{lib_path}', '{code_path}', {model['input_shapes']}, {model['input_dtypes']})\n"
runner_path = os.path.join(tmp_dir, "runner.py")
logging.info(f"Saving runner to {runner_path}...")
with open(runner_path, "w") as fo:
fo.write(runner)
logging.info(f"Invoking runner...")
if PROFILE:
profile_path = os.path.join(tmp_dir, "profile.txt")
os.system(f"nsys nvprof -o {profile_path} python3 {runner_path}")
else:
os.system(f"python3 {runner_path}")
def collage(model):
"""Run the Collage partitioner for a set of CUDA-related targets and profile the result"""
logging.info(f"collage | {model['name']}")
logging.info("-------------- BEGIN ORIGINAL --------------")
logging.info(model["mod"])
logging.info("-------------- END ORIGINAL ----------------")
autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
with optional_tuning_records(TUNING_LOG):
targets = []
targets.append(CUDA)
use_fp16 = model["main_dtype"] == "float16"
targets.append(
tvm.target.Target(f"tensorrt -use_implicit_batch=False -use_fp16={use_fp16}", HOST)
)
tmp_dir = tempfile.mkdtemp()
targets.append(tvm.target.Target(f"cutlass -tmp_dir={tmp_dir}", HOST))
targets.append(tvm.target.Target("cublas", HOST))
targets.append(tvm.target.Target("cudnn", HOST))
config = {
"relay.collage.tvm_max_depth": TVM_MAX_DEPTH,
"relay.collage.byoc_max_depth": BYOC_MAX_DEPTH,
}
logging.info(f"Using PassContext(config={config}")
ctxt = tvm.transform.PassContext(config=config)
config = tvm.target.make_compilation_config(ctxt, targets)
with ctxt:
mod = model["mod"]
mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
logging.info("-------------- BEGIN INDEXED --------------")
logging.info(mod)
logging.info("-------------- END INDEXED ----------------")
mod = tvm.relay.transform.CollagePartition(config)(mod)
partitioned_model = model.copy()
partitioned_model["mod"] = mod
logging.info("-------------- BEGIN PARTITIONED --------------")
logging.info(partitioned_model["mod"])
logging.info("-------------- END PARTITIONED ----------------")
dev = tvm.device(CUDA.get_target_device_type())
compile_and_benchmark("collage", partitioned_model, targets, dev, tmp_dir)
def just_tensorrt(model):
"""Run partition_for_tensorrt, complete the compilation with TVM, and profile the result."""
logging.info(f"just_tensorrt | {model['name']}")
logging.info("-------------- BEGIN ORIGINAL --------------")
logging.info(model["mod"])
logging.info("-------------- END ORIGINAL ----------------")
tmp_dir = tempfile.mkdtemp()
autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
with optional_tuning_records(TUNING_LOG):
logging.info("Partitioning for TensorRT...")
use_fp16 = model["main_dtype"] == "float16"
trt_target = tvm.target.Target(
f"tensorrt -use_implicit_batch=False -use_fp16={use_fp16}", HOST
)
mod = tvm.relay.op.contrib.partition_for_tensorrt(
mod=model["mod"], params=model["params"], target=trt_target
)
partitioned_model = model.copy()
partitioned_model["mod"] = mod
logging.info("-------------- BEGIN PARTITIONED --------------")
logging.info(partitioned_model["mod"])
logging.info("-------------- END PARTITIONED ----------------")
targets = []
targets.append(CUDA)
targets.append(trt_target)
dev = tvm.device(CUDA.get_target_device_type())
compile_and_benchmark("just_tensorrt", partitioned_model, targets, dev, tmp_dir)
def just_cutlass(model):
"""Run partition_for_cutlass, complete the compilation with TVM, and profile the result."""
logging.info(f"just_cutlass | {model['name']}")
logging.info("-------------- BEGIN ORIGINAL --------------")
logging.info(model["mod"])
logging.info("-------------- END ORIGINAL ----------------")
tmp_dir = tempfile.mkdtemp()
autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
with optional_tuning_records(TUNING_LOG):
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
logging.info("Partitioning for CUTLASS...")
mod = tvm.relay.op.contrib.partition_for_cutlass(model["mod"], model["params"])
partitioned_model = model.copy()
partitioned_model["mod"] = mod
logging.info("-------------- BEGIN PARTITIONED --------------")
logging.info(partitioned_model["mod"])
logging.info("-------------- END PARTITIONED ----------------")
targets = []
targets.append(CUDA)
targets.append(tvm.target.Target(f"cutlass -tmp_dir={tmp_dir}", HOST))
dev = tvm.device(CUDA.get_target_device_type())
compile_and_benchmark("just_cutlass", partitioned_model, targets, dev, tmp_dir)
def just_tvm(model):
"""Compile and profile using vanilla TVM."""
logging.info(f"just_tvm | {model['name']}")
logging.info("-------------- BEGIN ORIGINAL --------------")
logging.info(model["mod"])
logging.info("-------------- END ORIGINAL ----------------")
tmp_dir = tempfile.mkdtemp()
autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
with optional_tuning_records(TUNING_LOG):
dev = tvm.device(CUDA.get_target_device_type())
compile_and_benchmark("just_tvm", model, CUDA, dev, tmp_dir)
def tvm_with_libs(model):
"""As for just_tvm, but use the existing -libs mechanism to enable standard CUDA libs."""
logging.info(f"tvm_with_libs | {model['name']}")
logging.info("-------------- BEGIN ORIGINAL --------------")
logging.info(model["mod"])
logging.info("-------------- END ORIGINAL ----------------")
tmp_dir = tempfile.mkdtemp()
cuda_target = tvm.target.Target("cuda -libs=cudnn,cublas", HOST)
autotvm_tune_module(model["mod"], cuda_target, TUNING_LOG)
with optional_tuning_records(TUNING_LOG):
dev = tvm.device(cuda_target.get_target_device_type())
compile_and_benchmark("tvm_with_libs", model, cuda_target, dev, tmp_dir)
########### Runners ###########
def run_all():
"""Run the whole test suite."""
make_models = []
make_models.append(menangerie.resnext50_32x4d)
if ALL_MODELS:
make_models.append(menangerie.resnext50_32x4d_16)
make_models.append(menangerie.gpt2_16)
make_models.append(menangerie.gpt2)
make_models.append(menangerie.mobilenet_16)
make_models.append(menangerie.mobilenet)
make_models.append(menangerie.resnet50_16)
make_models.append(menangerie.resnet50)
run_models = []
if ALL_CONFIGS:
run_models.append(just_tensorrt)
run_models.append(just_tvm)
run_models.append(tvm_with_libs)
run_models.append(collage)
for make_model in make_models:
model = make_model()
for run_model in run_models:
run_model(model)
def run_mini():
"""Run Collage on a tiny GPT2 extract."""
collage(menangerie.gpt2_16_for_cutlass_extract())
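# Usage note (assumption: nsys must be on PATH when PROFILE is True, and TUNING_LOG must
# point at a writable location): run this script directly with python3 to reproduce the
# numbers; swap run_mini() for run_all() below to sweep every model/configuration pair.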
if __name__ == "__main__":
# run_all()
run_mini()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/collage/menangerie.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of Relay models for exercising Collage."""
import tvm
import onnx
import numpy as np
import logging
import tvm.contrib.target.onnx
MODEL_PREFIX = "/home/mbs/gauntlet/models/"
MNIST = {
"name": "mnist",
"filename": "mnist-8.onnx",
"input_shapes": {"Input3": [1, 1, 28, 28]},
"input_dtypes": {"Input3": "float32"},
"main_dtype": "float32",
}
GPT2 = {
"name": "gpt2",
"filename": "gpt2.onnx",
"input_shapes": {"input1": [1, 50, 32]},
"input_dtypes": {"input1": "int64"},
"main_dtype": "float32",
}
RESNET50V2 = {
"name": "resnet50",
"filename": "resnet50-v2-7.onnx",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float32"},
"main_dtype": "float32",
}
MOBILENETV2 = {
"name": "mobilenet",
"filename": "mobilenetv2-1.0.onnx",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float32"},
"main_dtype": "float32",
}
# Note that resnext50_32_4d below was extracted directly from the PyTorch model rather than from an ONNX file.
RESNEXT50_32_4d = {
"name": "resnext50_32_4d",
"filename": "resnext50_32x4d.onnx",
"input_shapes": {"x": [1, 64, 56, 56]},
"input_dtypes": {"x": "float32"},
"main_dtype": "float32",
}
def make_const(dtype, shape):
return tvm.relay.const(np.random.rand(*shape).astype(dtype))
def make_consts(dtype, shapes):
return [make_const(dtype, shape) for shape in shapes]
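# For illustration: make_consts("float32", [(8, 1, 5, 5), (8, 1, 1)]) returns two
# relay.Constant nodes filled with random data; the model builders below bind such lists
# to the meta[relay.Constant][i] references via the metatable passed to tvm.parser.parse.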
def mnist_consts(dtype):
return make_consts(
dtype,
[
(8, 1, 5, 5), # 0
(8, 1, 1), # 1
(16, 8, 5, 5), # 2
(16, 1, 1), # 3
(10, 256), # 4
(1, 10), # 5
],
)
def mnist():
metatable = {"relay.Constant": mnist_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 1, 28, 28), float32]) -> Tensor[(1, 10), float32] {
%0 = nn.pad(%x, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
%1 = nn.conv2d(%0, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=8, kernel_size=[5, 5]);
%2 = add(%1, meta[relay.Constant][1]);
%3 = nn.relu(%2);
%4 = nn.max_pool2d(%3, pool_size=[2, 2], strides=[2, 2], padding=[0, 0, 0, 0]);
%5 = nn.pad(%4, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
%6 = nn.conv2d(%5, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=16, kernel_size=[5, 5]);
%7 = add(%6, meta[relay.Constant][3]);
%8 = nn.relu(%7);
%9 = nn.max_pool2d(%8, pool_size=[3, 3], strides=[3, 3], padding=[0, 0, 0, 0]);
%10 = reshape(%9, newshape=[1, 256]);
%11 = nn.dense(%10, meta[relay.Constant][4], units=None, out_dtype="float32");
add(%11, meta[relay.Constant][5])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "mnist",
"input_shapes": {"x": [1, 1, 28, 28]},
"input_dtypes": {"x": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
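# Sketch of consuming one of these model dictionaries (the real drivers live in the
# partitioner demo script; the target choice here is only illustrative):
#   model = mnist()
#   exe = tvm.relay.vm.compile(model["mod"], target="llvm", params=model["params"])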
def gpt2_consts(dtype):
return make_consts(
dtype,
[
(50257, 768), # 0
(1, 32, 768), # 1
(768,), # 2
(768,), # 3
(2304, 768), # 4
(2304,), # 5
(1, 1, 32, 32), # 6
(1, 1, 32, 32), # 7
(768, 768), # 8
(768,), # 9
(768,), # 10
(768,), # 11
(3072, 768), # 12
(3072,), # 13
(768, 3072), # 14
(768,), # 15
(768,), # 16
(768,), # 17
(2304, 768), # 18
(2304,), # 19
(1, 1, 32, 32), # 20
(1, 1, 32, 32), # 21
(768, 768), # 22
(768,), # 23
(768,), # 24
(768,), # 25
(3072, 768), # 26
(3072,), # 27
(768, 3072), # 28
(768,), # 29
(768,), # 30
(768,), # 31
(2304, 768), # 32
(2304,), # 33
(1, 1, 32, 32), # 34
(1, 1, 32, 32), # 35
(768, 768), # 36
(768,), # 37
(768,), # 38
(768,), # 39
(3072, 768), # 40
(3072,), # 41
(768, 3072), # 42
(768,), # 43
(768,), # 44
(768,), # 45
(2304, 768), # 46
(2304,), # 47
(1, 1, 32, 32), # 48
(1, 1, 32, 32), # 49
(768, 768), # 50
(768,), # 51
(768,), # 52
(768,), # 53
(3072, 768), # 54
(3072,), # 55
(768, 3072), # 56
(768,), # 57
(768,), # 58
(768,), # 59
(2304, 768), # 60
(2304,), # 61
(1, 1, 32, 32), # 62
(1, 1, 32, 32), # 63
(768, 768), # 64
(768,), # 65
(768,), # 66
(768,), # 67
(3072, 768), # 68
(3072,), # 69
(768, 3072), # 70
(768,), # 71
(768,), # 72
(768,), # 73
(2304, 768), # 74
(2304,), # 75
(1, 1, 32, 32), # 76
(1, 1, 32, 32), # 77
(768, 768), # 78
(768,), # 79
(768,), # 80
(768,), # 81
(3072, 768), # 82
(3072,), # 83
(768, 3072), # 84
(768,), # 85
(768,), # 86
(768,), # 87
(2304, 768), # 88
(2304,), # 89
(1, 1, 32, 32), # 90
(1, 1, 32, 32), # 91
(768, 768), # 92
(768,), # 93
(768,), # 94
(768,), # 95
(3072, 768), # 96
(3072,), # 97
(768, 3072), # 98
(768,), # 99
(768,), # 100
(768,), # 101
(2304, 768), # 102
(2304,), # 103
(1, 1, 32, 32), # 104
(1, 1, 32, 32), # 105
(768, 768), # 106
(768,), # 107
(768,), # 108
(768,), # 109
(3072, 768), # 110
(3072,), # 111
(768, 3072), # 112
(768,), # 113
(768,), # 114
(768,), # 115
(2304, 768), # 116
(2304,), # 117
(1, 1, 32, 32), # 118
(1, 1, 32, 32), # 119
(768, 768), # 120
(768,), # 121
(768,), # 122
(768,), # 123
(3072, 768), # 124
(3072,), # 125
(768, 3072), # 126
(768,), # 127
(768,), # 128
(768,), # 129
(2304, 768), # 130
(2304,), # 131
(1, 1, 32, 32), # 132
(1, 1, 32, 32), # 133
(768, 768), # 134
(768,), # 135
(768,), # 136
(768,), # 137
(3072, 768), # 138
(3072,), # 139
(768, 3072), # 140
(768,), # 141
(768,), # 142
(768,), # 143
(2304, 768), # 144
(2304,), # 145
(1, 1, 32, 32), # 146
(1, 1, 32, 32), # 147
(768, 768), # 148
(768,), # 149
(768,), # 150
(768,), # 151
(3072, 768), # 152
(3072,), # 153
(768, 3072), # 154
(768,), # 155
(768,), # 156
(768,), # 157
(2304, 768), # 158
(2304,), # 159
(1, 1, 32, 32), # 160
(1, 1, 32, 32), # 161
(768, 768), # 162
(768,), # 163
(768,), # 164
(768,), # 165
(3072, 768), # 166
(3072,), # 167
(768, 3072), # 168
(768,), # 169
(768,), # 170
(768,), # 171
],
)
def gpt2():
metatable = {"relay.Constant": gpt2_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float32", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float32", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f);
%64 = multiply(%63, 0.044715f);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f);
%69 = add(%67, 1f);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float32", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float32", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f);
%134 = multiply(%133, 0.044715f);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f);
%139 = add(%137, 1f);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float32", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float32", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f);
%204 = multiply(%203, 0.044715f);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f);
%209 = add(%207, 1f);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float32", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float32", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f);
%274 = multiply(%273, 0.044715f);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f);
%279 = add(%277, 1f);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float32", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float32", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f);
%344 = multiply(%343, 0.044715f);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f);
%349 = add(%347, 1f);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float32", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float32", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f);
%414 = multiply(%413, 0.044715f);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f);
%419 = add(%417, 1f);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float32", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float32", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f);
%484 = multiply(%483, 0.044715f);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f);
%489 = add(%487, 1f);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float32", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float32", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f);
%554 = multiply(%553, 0.044715f);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f);
%559 = add(%557, 1f);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float32", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float32", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f);
%624 = multiply(%623, 0.044715f);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f);
%629 = add(%627, 1f);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float32", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float32", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f);
%694 = multiply(%693, 0.044715f);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f);
%699 = add(%697, 1f);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float32", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float32", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f);
%764 = multiply(%763, 0.044715f);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f);
%769 = add(%767, 1f);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float32", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float32", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f);
%834 = multiply(%833, 0.044715f);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f);
%839 = add(%837, 1f);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
def gpt2_16():
metatable = {"relay.Constant": gpt2_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f16);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f16);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float16", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f16);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float16", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f16);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f16);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f16);
%64 = multiply(%63, 0.044715f16);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f16);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f16);
%69 = add(%67, 1f16);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f16);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f16);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float16", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f16);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float16", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f16);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f16);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f16);
%134 = multiply(%133, 0.044715f16);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f16);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f16);
%139 = add(%137, 1f16);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f16);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f16);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float16", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f16);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float16", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f16);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f16);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f16);
%204 = multiply(%203, 0.044715f16);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f16);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f16);
%209 = add(%207, 1f16);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f16);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f16);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float16", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f16);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float16", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f16);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f16);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f16);
%274 = multiply(%273, 0.044715f16);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f16);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f16);
%279 = add(%277, 1f16);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f16);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f16);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float16", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f16);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float16", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f16);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f16);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f16);
%344 = multiply(%343, 0.044715f16);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f16);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f16);
%349 = add(%347, 1f16);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f16);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f16);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float16", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f16);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float16", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f16);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f16);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f16);
%414 = multiply(%413, 0.044715f16);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f16);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f16);
%419 = add(%417, 1f16);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f16);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f16);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float16", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f16);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float16", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f16);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f16);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f16);
%484 = multiply(%483, 0.044715f16);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f16);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f16);
%489 = add(%487, 1f16);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f16);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f16);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float16", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f16);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float16", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f16);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f16);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f16);
%554 = multiply(%553, 0.044715f16);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f16);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f16);
%559 = add(%557, 1f16);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f16);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f16);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float16", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f16);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float16", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f16);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f16);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f16);
%624 = multiply(%623, 0.044715f16);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f16);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f16);
%629 = add(%627, 1f16);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f16);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f16);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float16", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f16);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float16", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f16);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f16);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f16);
%694 = multiply(%693, 0.044715f16);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f16);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f16);
%699 = add(%697, 1f16);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f16);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f16);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float16", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f16);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float16", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f16);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f16);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f16);
%764 = multiply(%763, 0.044715f16);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f16);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f16);
%769 = add(%767, 1f16);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f16);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f16);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float16", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f16);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float16", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f16);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f16);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f16);
%834 = multiply(%833, 0.044715f16);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f16);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f16);
%839 = add(%837, 1f16);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f16);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f16);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_16",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
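
# Sketch, not part of the original fixture: each attention block in the gpt2_16 module
# above computes softmax(Q @ K^T / sqrt(64)) @ V over 12 heads of width 64 (the
# divide-by-8f16 steps are the 1/sqrt(head_dim) scaling). A NumPy reference for one
# batch of heads, ignoring the mask/bias constants applied via meta[relay.Constant]:
def _attention_reference_sketch(q, k, v):
    """q, k, v: float arrays of shape (heads, seq_len, head_dim)."""
    import numpy as np

    scores = q @ np.swapaxes(k, -1, -2) / np.sqrt(q.shape[-1])
    scores -= scores.max(axis=-1, keepdims=True)  # numerically stable softmax
    weights = np.exp(scores)
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ v
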
def gpt2_extract_consts(dtype):
return make_consts(
dtype,
[
(768, 768), # 0
(768,), # 1
(768,), # 2
(768,), # 3
(3072, 768), # 4
(3072,), # 5
(1, 32, 768), # 6
],
)


def gpt2_extract():
metatable = {"relay.Constant": gpt2_extract_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1600, 768), float32]) -> Tensor[(50, 32, 3072), float32] {
%46 = nn.dense(%x, meta[relay.Constant][0], units=768);
%47 = add(%46, meta[relay.Constant][1]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(meta[relay.Constant][6], %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][2]);
%58 = add(%57, meta[relay.Constant][3]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
%61 = add(%60, meta[relay.Constant][5]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f);
%64 = multiply(%63, 0.044715f);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f);
%69 = add(%67, 1f);
%70 = multiply(%68, %69);
%70
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_extract",
"input_shapes": {"x": [1600, 768]},
"input_dtypes": {"x": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
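
# Sketch, illustration only: the %62..%70 chain above is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))); the literal 0.797885 is
# sqrt(2/pi) truncated. A NumPy reference for comparison against the Relay output:
def _gelu_tanh_reference_sketch(x):
    import numpy as np

    return 0.5 * x * (1.0 + np.tanh(0.797885 * (x + 0.044715 * np.power(x, 3))))
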
def gpt2_extract_16():
metatable = {"relay.Constant": gpt2_extract_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1600, 768), float16]) -> Tensor[(50, 32, 3072), float16] {
%46 = nn.dense(%x, meta[relay.Constant][0], units=768);
%47 = add(%46, meta[relay.Constant][1]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(meta[relay.Constant][6], %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f16);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f16);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][2]);
%58 = add(%57, meta[relay.Constant][3]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
%61 = add(%60, meta[relay.Constant][5]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f16);
%64 = multiply(%63, 0.044715f16);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f16);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f16);
%69 = add(%67, 1f16);
%70 = multiply(%68, %69);
%70
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_extract_16",
"input_shapes": {"x": [1600, 768]},
"input_dtypes": {"x": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
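
# Sketch, illustration only: the mean/subtract/power/sqrt/divide chains in the modules
# above are layer normalization written out elementwise over the last axis. A NumPy
# reference, with gamma/beta standing in for the meta[relay.Constant] scale and shift:
def _layer_norm_reference_sketch(x, gamma, beta, eps=1e-5):
    import numpy as np

    mean = x.mean(axis=-1, keepdims=True)
    var = np.power(x - mean, 2).mean(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps) * gamma + beta
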
def gpt2_16_for_cutlass_extract_consts(dtype):
return make_consts(
"float16",
[
(2304, 768), # 0
(2304,), # 1
(600, 32, 64), # 2
(600, 32, 32), # 3
],
)


def gpt2_16_for_cutlass_extract():
metatable = {"relay.Constant": gpt2_16_for_cutlass_extract_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x0: Tensor[(1600, 768), float16],
%x3: Tensor[(600, 32, 64), float16])
-> (Tensor[(1600, 2304), float16], Tensor[(1200, 32, 32), float16]) {
%0 = nn.dense(%x0, meta[relay.Constant][0], units=2304);
%1 = add(%0, meta[relay.Constant][1]);
%2 = nn.batch_matmul(%x3, meta[relay.Constant][2], out_dtype="float16", transpose_b=True);
%3 = (%2, meta[relay.Constant][3]);
%4 = concatenate(%3);
(%1, %4)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_16_for_cutlass_extract",
"input_shapes": {"x0": (1600, 768), "x3": (600, 32, 64)},
"input_dtypes": {"x0": "float16", "x3": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
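
# Sketch, illustration only: per its name, the extract above isolates the dense+bias_add
# and batch_matmul+concatenate patterns (presumably the shapes handed to the CUTLASS
# offload path). Its output types can be checked with Relay type inference alone,
# without building anything:
def _print_extract_types_sketch():
    from tvm import relay

    mod = gpt2_16_for_cutlass_extract()["mod"]
    mod = relay.transform.InferType()(mod)
    print(mod["main"].checked_type.ret_type)
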
def resnet50_consts(dtype):
return make_consts(
dtype,
[
(3,), # 0
(3,), # 1
(3,), # 2
(3,), # 3
(64, 3, 7, 7), # 4
(64,), # 5
(64,), # 6
(64,), # 7
(64,), # 8
(64,), # 9
(64,), # 10
(64,), # 11
(64,), # 12
(64, 64, 1, 1), # 13
(64,), # 14
(64,), # 15
(64,), # 16
(64,), # 17
(64, 64, 3, 3), # 18
(64,), # 19
(64,), # 20
(64,), # 21
(64,), # 22
(256, 64, 1, 1), # 23
(256, 64, 1, 1), # 24
(256,), # 25
(256,), # 26
(256,), # 27
(256,), # 28
(64, 256, 1, 1), # 29
(64,), # 30
(64,), # 31
(64,), # 32
(64,), # 33
(64, 64, 3, 3), # 34
(64,), # 35
(64,), # 36
(64,), # 37
(64,), # 38
(256, 64, 1, 1), # 39
(256,), # 40
(256,), # 41
(256,), # 42
(256,), # 43
(64, 256, 1, 1), # 44
(64,), # 45
(64,), # 46
(64,), # 47
(64,), # 48
(64, 64, 3, 3), # 49
(64,), # 50
(64,), # 51
(64,), # 52
(64,), # 53
(256, 64, 1, 1), # 54
(256,), # 55
(256,), # 56
(256,), # 57
(256,), # 58
(128, 256, 1, 1), # 59
(128,), # 60
(128,), # 61
(128,), # 62
(128,), # 63
(128, 128, 3, 3), # 64
(128,), # 65
(128,), # 66
(128,), # 67
(128,), # 68
(512, 128, 1, 1), # 69
(512, 256, 1, 1), # 70
(512,), # 71
(512,), # 72
(512,), # 73
(512,), # 74
(128, 512, 1, 1), # 75
(128,), # 76
(128,), # 77
(128,), # 78
(128,), # 79
(128, 128, 3, 3), # 80
(128,), # 81
(128,), # 82
(128,), # 83
(128,), # 84
(512, 128, 1, 1), # 85
(512,), # 86
(512,), # 87
(512,), # 88
(512,), # 89
(128, 512, 1, 1), # 90
(128,), # 91
(128,), # 92
(128,), # 93
(128,), # 94
(128, 128, 3, 3), # 95
(128,), # 96
(128,), # 97
(128,), # 98
(128,), # 99
(512, 128, 1, 1), # 100
(512,), # 101
(512,), # 102
(512,), # 103
(512,), # 104
(128, 512, 1, 1), # 105
(128,), # 106
(128,), # 107
(128,), # 108
(128,), # 109
(128, 128, 3, 3), # 110
(128,), # 111
(128,), # 112
(128,), # 113
(128,), # 114
(512, 128, 1, 1), # 115
(512,), # 116
(512,), # 117
(512,), # 118
(512,), # 119
(256, 512, 1, 1), # 120
(256,), # 121
(256,), # 122
(256,), # 123
(256,), # 124
(256, 256, 3, 3), # 125
(256,), # 126
(256,), # 127
(256,), # 128
(256,), # 129
(1024, 256, 1, 1), # 130
(1024, 512, 1, 1), # 131
(1024,), # 132
(1024,), # 133
(1024,), # 134
(1024,), # 135
(256, 1024, 1, 1), # 136
(256,), # 137
(256,), # 138
(256,), # 139
(256,), # 140
(256, 256, 3, 3), # 141
(256,), # 142
(256,), # 143
(256,), # 144
(256,), # 145
(1024, 256, 1, 1), # 146
(1024,), # 147
(1024,), # 148
(1024,), # 149
(1024,), # 150
(256, 1024, 1, 1), # 151
(256,), # 152
(256,), # 153
(256,), # 154
(256,), # 155
(256, 256, 3, 3), # 156
(256,), # 157
(256,), # 158
(256,), # 159
(256,), # 160
(1024, 256, 1, 1), # 161
(1024,), # 162
(1024,), # 163
(1024,), # 164
(1024,), # 165
(256, 1024, 1, 1), # 166
(256,), # 167
(256,), # 168
(256,), # 169
(256,), # 170
(256, 256, 3, 3), # 171
(256,), # 172
(256,), # 173
(256,), # 174
(256,), # 175
(1024, 256, 1, 1), # 176
(1024,), # 177
(1024,), # 178
(1024,), # 179
(1024,), # 180
(256, 1024, 1, 1), # 181
(256,), # 182
(256,), # 183
(256,), # 184
(256,), # 185
(256, 256, 3, 3), # 186
(256,), # 187
(256,), # 188
(256,), # 189
(256,), # 190
(1024, 256, 1, 1), # 191
(1024,), # 192
(1024,), # 193
(1024,), # 194
(1024,), # 195
(256, 1024, 1, 1), # 196
(256,), # 197
(256,), # 198
(256,), # 199
(256,), # 200
(256, 256, 3, 3), # 201
(256,), # 202
(256,), # 203
(256,), # 204
(256,), # 205
(1024, 256, 1, 1), # 206
(1024,), # 207
(1024,), # 208
(1024,), # 209
(1024,), # 210
(512, 1024, 1, 1), # 211
(512,), # 212
(512,), # 213
(512,), # 214
(512,), # 215
(512, 512, 3, 3), # 216
(512,), # 217
(512,), # 218
(512,), # 219
(512,), # 220
(2048, 512, 1, 1), # 221
(2048, 1024, 1, 1), # 222
(2048,), # 223
(2048,), # 224
(2048,), # 225
(2048,), # 226
(512, 2048, 1, 1), # 227
(512,), # 228
(512,), # 229
(512,), # 230
(512,), # 231
(512, 512, 3, 3), # 232
(512,), # 233
(512,), # 234
(512,), # 235
(512,), # 236
(2048, 512, 1, 1), # 237
(2048,), # 238
(2048,), # 239
(2048,), # 240
(2048,), # 241
(512, 2048, 1, 1), # 242
(512,), # 243
(512,), # 244
(512,), # 245
(512,), # 246
(512, 512, 3, 3), # 247
(512,), # 248
(512,), # 249
(512,), # 250
(512,), # 251
(2048, 512, 1, 1), # 252
(2048,), # 253
(2048,), # 254
(2048,), # 255
(2048,), # 256
(1000, 2048), # 257
(1000,), # 258
],
)
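
# Sketch, illustration only: in the shape list above, the 4-D entries are conv2d weights,
# the repeated 1-D quadruples are the (gamma, beta, moving_mean, moving_var) inputs of
# each nn.batch_norm in the module below, and the final (1000, 2048)/(1000,) pair is the
# classifier dense weight and bias. Assuming make_consts returns one relay.Constant per
# shape, a quick sanity check on the count:
def _check_resnet50_const_count_sketch():
    consts = resnet50_consts("float32")
    assert len(consts) == 259  # indices 0..258 are referenced by the module below
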
def resnet50():
metatable = {"relay.Constant": resnet50_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
%0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
%1 = %0.0;
%2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
%3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
%4 = %3.0;
%5 = nn.relu(%4);
%6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
%7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
%8 = %7.0;
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
%12 = %11.0;
%13 = nn.relu(%12);
%14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
%16 = %15.0;
%17 = nn.relu(%16);
%18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%18, %19);
%21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
%22 = %21.0;
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
%26 = %25.0;
%27 = nn.relu(%26);
%28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
%30 = %29.0;
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%33 = add(%32, %20);
%34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
%35 = %34.0;
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
%39 = %38.0;
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
%43 = %42.0;
%44 = nn.relu(%43);
%45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%46 = add(%45, %33);
%47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
%48 = %47.0;
%49 = nn.relu(%48);
%50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
%52 = %51.0;
%53 = nn.relu(%52);
%54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
%56 = %55.0;
%57 = nn.relu(%56);
%58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = add(%58, %59);
%61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%62 = %61.0;
%63 = nn.relu(%62);
%64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%66 = %65.0;
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%70 = %69.0;
%71 = nn.relu(%70);
%72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%73 = add(%72, %60);
%74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%83 = %82.0;
%84 = nn.relu(%83);
%85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%86 = add(%85, %73);
%87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%88 = %87.0;
%89 = nn.relu(%88);
%90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%92 = %91.0;
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%96 = %95.0;
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%99 = add(%98, %86);
%100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%101 = %100.0;
%102 = nn.relu(%101);
%103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%105 = %104.0;
%106 = nn.relu(%105);
%107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%109 = %108.0;
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%113 = add(%111, %112);
%114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
%115 = %114.0;
%116 = nn.relu(%115);
%117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
%119 = %118.0;
%120 = nn.relu(%119);
%121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
%123 = %122.0;
%124 = nn.relu(%123);
%125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%126 = add(%125, %113);
%127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
%128 = %127.0;
%129 = nn.relu(%128);
%130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
%132 = %131.0;
%133 = nn.relu(%132);
%134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
%136 = %135.0;
%137 = nn.relu(%136);
%138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%139 = add(%138, %126);
%140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
%141 = %140.0;
%142 = nn.relu(%141);
%143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%152 = add(%151, %139);
%153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
%154 = %153.0;
%155 = nn.relu(%154);
%156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
%158 = %157.0;
%159 = nn.relu(%158);
%160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
%162 = %161.0;
%163 = nn.relu(%162);
%164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%165 = add(%164, %152);
%166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
%167 = %166.0;
%168 = nn.relu(%167);
%169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
%171 = %170.0;
%172 = nn.relu(%171);
%173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
%175 = %174.0;
%176 = nn.relu(%175);
%177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%178 = add(%177, %165);
%179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
%188 = %187.0;
%189 = nn.relu(%188);
%190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%192 = add(%190, %191);
%193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
%194 = %193.0;
%195 = nn.relu(%194);
%196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
%198 = %197.0;
%199 = nn.relu(%198);
%200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
%202 = %201.0;
%203 = nn.relu(%202);
%204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%205 = add(%204, %192);
%206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
%207 = %206.0;
%208 = nn.relu(%207);
%209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
%211 = %210.0;
%212 = nn.relu(%211);
%213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
%215 = %214.0;
%216 = nn.relu(%215);
%217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%218 = add(%217, %205);
%219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
%220 = %219.0;
%221 = nn.relu(%220);
%222 = nn.global_avg_pool2d(%221);
%223 = reshape(%222, newshape=[0, -1]);
%224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
add(%224, meta[relay.Constant][258])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnet50",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
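
# Sketch, not part of the original fixtures: one way the dicts returned by gpt2_16(),
# resnet50(), etc. could be compiled and exercised with the Relay graph executor. The
# helper name and the default llvm target are assumptions for illustration only.
def _run_model_dict_sketch(model, target="llvm"):
    import numpy as np

    from tvm import relay
    from tvm.contrib import graph_executor

    lib = relay.build(model["mod"], target=target, params=model["params"])
    dev = tvm.device(target, 0)
    runner = graph_executor.GraphModule(lib["default"](dev))
    for name, shape in model["input_shapes"].items():
        dtype = model["input_dtypes"][name]
        runner.set_input(name, np.random.uniform(size=shape).astype(dtype))
    runner.run()
    return runner.get_output(0).numpy()
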
def resnet50_16():
metatable = {"relay.Constant": resnet50_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
%0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
%1 = %0.0;
%2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
%3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
%4 = %3.0;
%5 = nn.relu(%4);
%6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
%7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
%8 = %7.0;
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
%12 = %11.0;
%13 = nn.relu(%12);
%14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
%16 = %15.0;
%17 = nn.relu(%16);
%18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%18, %19);
%21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
%22 = %21.0;
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
%26 = %25.0;
%27 = nn.relu(%26);
%28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
%30 = %29.0;
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%33 = add(%32, %20);
%34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
%35 = %34.0;
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
%39 = %38.0;
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
%43 = %42.0;
%44 = nn.relu(%43);
%45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%46 = add(%45, %33);
%47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
%48 = %47.0;
%49 = nn.relu(%48);
%50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
%52 = %51.0;
%53 = nn.relu(%52);
%54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
%56 = %55.0;
%57 = nn.relu(%56);
%58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = add(%58, %59);
%61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%62 = %61.0;
%63 = nn.relu(%62);
%64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%66 = %65.0;
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%70 = %69.0;
%71 = nn.relu(%70);
%72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%73 = add(%72, %60);
%74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%83 = %82.0;
%84 = nn.relu(%83);
%85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%86 = add(%85, %73);
%87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%88 = %87.0;
%89 = nn.relu(%88);
%90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%92 = %91.0;
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%96 = %95.0;
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%99 = add(%98, %86);
%100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%101 = %100.0;
%102 = nn.relu(%101);
%103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%105 = %104.0;
%106 = nn.relu(%105);
%107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%109 = %108.0;
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%113 = add(%111, %112);
%114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
%115 = %114.0;
%116 = nn.relu(%115);
%117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
%119 = %118.0;
%120 = nn.relu(%119);
%121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
%123 = %122.0;
%124 = nn.relu(%123);
%125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%126 = add(%125, %113);
%127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
%128 = %127.0;
%129 = nn.relu(%128);
%130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
%132 = %131.0;
%133 = nn.relu(%132);
%134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
%136 = %135.0;
%137 = nn.relu(%136);
%138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%139 = add(%138, %126);
%140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
%141 = %140.0;
%142 = nn.relu(%141);
%143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%152 = add(%151, %139);
%153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
%154 = %153.0;
%155 = nn.relu(%154);
%156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
%158 = %157.0;
%159 = nn.relu(%158);
%160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
%162 = %161.0;
%163 = nn.relu(%162);
%164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%165 = add(%164, %152);
%166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
%167 = %166.0;
%168 = nn.relu(%167);
%169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
%171 = %170.0;
%172 = nn.relu(%171);
%173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
%175 = %174.0;
%176 = nn.relu(%175);
%177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%178 = add(%177, %165);
%179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
%188 = %187.0;
%189 = nn.relu(%188);
%190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%192 = add(%190, %191);
%193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
%194 = %193.0;
%195 = nn.relu(%194);
%196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
%198 = %197.0;
%199 = nn.relu(%198);
%200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
%202 = %201.0;
%203 = nn.relu(%202);
%204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%205 = add(%204, %192);
%206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
%207 = %206.0;
%208 = nn.relu(%207);
%209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
%211 = %210.0;
%212 = nn.relu(%211);
%213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
%215 = %214.0;
%216 = nn.relu(%215);
%217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%218 = add(%217, %205);
%219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
%220 = %219.0;
%221 = nn.relu(%220);
%222 = nn.global_avg_pool2d(%221);
%223 = reshape(%222, newshape=[0, -1]);
%224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
add(%224, meta[relay.Constant][258])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnet50_16",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}


def mobilenet_consts(dtype):
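    """Shapes of the MobileNet constants (conv weights and batch-norm parameters), materialized via make_consts in the given dtype."""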
return make_consts(
dtype,
[
(32, 3, 3, 3), # 0
(32,), # 1
(32,), # 2
(32,), # 3
(32,), # 4
(32, 32, 1, 1), # 5
(32,), # 6
(32,), # 7
(32,), # 8
(32,), # 9
(32, 1, 3, 3), # 10
(32,), # 11
(32,), # 12
(32,), # 13
(32,), # 14
(16, 32, 1, 1), # 15
(16,), # 16
(16,), # 17
(16,), # 18
(16,), # 19
(96, 16, 1, 1), # 20
(96,), # 21
(96,), # 22
(96,), # 23
(96,), # 24
(96, 1, 3, 3), # 25
(96,), # 26
(96,), # 27
(96,), # 28
(96,), # 29
(24, 96, 1, 1), # 30
(24,), # 31
(24,), # 32
(24,), # 33
(24,), # 34
(144, 24, 1, 1), # 35
(144,), # 36
(144,), # 37
(144,), # 38
(144,), # 39
(144, 1, 3, 3), # 40
(144,), # 41
(144,), # 42
(144,), # 43
(144,), # 44
(24, 144, 1, 1), # 45
(24,), # 46
(24,), # 47
(24,), # 48
(24,), # 49
(144, 24, 1, 1), # 50
(144,), # 51
(144,), # 52
(144,), # 53
(144,), # 54
(144, 1, 3, 3), # 55
(144,), # 56
(144,), # 57
(144,), # 58
(144,), # 59
(32, 144, 1, 1), # 60
(32,), # 61
(32,), # 62
(32,), # 63
(32,), # 64
(192, 32, 1, 1), # 65
(192,), # 66
(192,), # 67
(192,), # 68
(192,), # 69
(192, 1, 3, 3), # 70
(192,), # 71
(192,), # 72
(192,), # 73
(192,), # 74
(32, 192, 1, 1), # 75
(32,), # 76
(32,), # 77
(32,), # 78
(32,), # 79
(192, 32, 1, 1), # 80
(192,), # 81
(192,), # 82
(192,), # 83
(192,), # 84
(192, 1, 3, 3), # 85
(192,), # 86
(192,), # 87
(192,), # 88
(192,), # 89
(32, 192, 1, 1), # 90
(32,), # 91
(32,), # 92
(32,), # 93
(32,), # 94
(192, 32, 1, 1), # 95
(192,), # 96
(192,), # 97
(192,), # 98
(192,), # 99
(192, 1, 3, 3), # 100
(192,), # 101
(192,), # 102
(192,), # 103
(192,), # 104
(64, 192, 1, 1), # 105
(64,), # 106
(64,), # 107
(64,), # 108
(64,), # 109
(384, 64, 1, 1), # 110
(384,), # 111
(384,), # 112
(384,), # 113
(384,), # 114
(384, 1, 3, 3), # 115
(384,), # 116
(384,), # 117
(384,), # 118
(384,), # 119
(64, 384, 1, 1), # 120
(64,), # 121
(64,), # 122
(64,), # 123
(64,), # 124
(384, 64, 1, 1), # 125
(384,), # 126
(384,), # 127
(384,), # 128
(384,), # 129
(384, 1, 3, 3), # 130
(384,), # 131
(384,), # 132
(384,), # 133
(384,), # 134
(64, 384, 1, 1), # 135
(64,), # 136
(64,), # 137
(64,), # 138
(64,), # 139
(384, 64, 1, 1), # 140
(384,), # 141
(384,), # 142
(384,), # 143
(384,), # 144
(384, 1, 3, 3), # 145
(384,), # 146
(384,), # 147
(384,), # 148
(384,), # 149
(64, 384, 1, 1), # 150
(64,), # 151
(64,), # 152
(64,), # 153
(64,), # 154
(384, 64, 1, 1), # 155
(384,), # 156
(384,), # 157
(384,), # 158
(384,), # 159
(384, 1, 3, 3), # 160
(384,), # 161
(384,), # 162
(384,), # 163
(384,), # 164
(96, 384, 1, 1), # 165
(96,), # 166
(96,), # 167
(96,), # 168
(96,), # 169
(576, 96, 1, 1), # 170
(576,), # 171
(576,), # 172
(576,), # 173
(576,), # 174
(576, 1, 3, 3), # 175
(576,), # 176
(576,), # 177
(576,), # 178
(576,), # 179
(96, 576, 1, 1), # 180
(96,), # 181
(96,), # 182
(96,), # 183
(96,), # 184
(576, 96, 1, 1), # 185
(576,), # 186
(576,), # 187
(576,), # 188
(576,), # 189
(576, 1, 3, 3), # 190
(576,), # 191
(576,), # 192
(576,), # 193
(576,), # 194
(96, 576, 1, 1), # 195
(96,), # 196
(96,), # 197
(96,), # 198
(96,), # 199
(576, 96, 1, 1), # 200
(576,), # 201
(576,), # 202
(576,), # 203
(576,), # 204
(576, 1, 3, 3), # 205
(576,), # 206
(576,), # 207
(576,), # 208
(576,), # 209
(160, 576, 1, 1), # 210
(160,), # 211
(160,), # 212
(160,), # 213
(160,), # 214
(960, 160, 1, 1), # 215
(960,), # 216
(960,), # 217
(960,), # 218
(960,), # 219
(960, 1, 3, 3), # 220
(960,), # 221
(960,), # 222
(960,), # 223
(960,), # 224
(160, 960, 1, 1), # 225
(160,), # 226
(160,), # 227
(160,), # 228
(160,), # 229
(960, 160, 1, 1), # 230
(960,), # 231
(960,), # 232
(960,), # 233
(960,), # 234
(960, 1, 3, 3), # 235
(960,), # 236
(960,), # 237
(960,), # 238
(960,), # 239
(160, 960, 1, 1), # 240
(160,), # 241
(160,), # 242
(160,), # 243
(160,), # 244
(960, 160, 1, 1), # 245
(960,), # 246
(960,), # 247
(960,), # 248
(960,), # 249
(960, 1, 3, 3), # 250
(960,), # 251
(960,), # 252
(960,), # 253
(960,), # 254
(320, 960, 1, 1), # 255
(320,), # 256
(320,), # 257
(320,), # 258
(320,), # 259
(1280, 320, 1, 1), # 260
(1280,), # 261
(1280,), # 262
(1280,), # 263
(1280,), # 264
(1000, 1280, 1, 1), # 265
],
)


def mobilenet():
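    """Returns the MobileNet workload in float32 form, packaged as a test dict (module, input shapes/dtypes, params)."""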
metatable = {"relay.Constant": mobilenet_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
%0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
%1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
%6 = %5.0;
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
%13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
%14 = %13.0;
%15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
%17 = %16.0;
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
%20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
%21 = %20.0;
%22 = nn.relu(%21);
%23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
%25 = %24.0;
%26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
%28 = %27.0;
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
%32 = %31.0;
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
%36 = %35.0;
%37 = add(%36, %25);
%38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
%44 = %43.0;
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
%48 = %47.0;
%49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
%51 = %50.0;
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%55 = %54.0;
%56 = nn.relu(%55);
%57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%59 = %58.0;
%60 = add(%59, %48);
%61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%63 = %62.0;
%64 = nn.relu(%63);
%65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%67 = %66.0;
%68 = nn.relu(%67);
%69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%71 = %70.0;
%72 = add(%71, %60);
%73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%83 = %82.0;
%84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%86 = %85.0;
%87 = nn.relu(%86);
%88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%90 = %89.0;
%91 = nn.relu(%90);
%92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%94 = %93.0;
%95 = add(%94, %83);
%96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%98 = %97.0;
%99 = nn.relu(%98);
%100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
%102 = %101.0;
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
%106 = %105.0;
%107 = add(%106, %95);
%108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
%110 = %109.0;
%111 = nn.relu(%110);
%112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
%114 = %113.0;
%115 = nn.relu(%114);
%116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
%118 = %117.0;
%119 = add(%118, %107);
%120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
%122 = %121.0;
%123 = nn.relu(%122);
%124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
%126 = %125.0;
%127 = nn.relu(%126);
%128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
%130 = %129.0;
%131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
%133 = %132.0;
%134 = nn.relu(%133);
%135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
%137 = %136.0;
%138 = nn.relu(%137);
%139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
%141 = %140.0;
%142 = add(%141, %130);
%143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
%153 = %152.0;
%154 = add(%153, %142);
%155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
%157 = %156.0;
%158 = nn.relu(%157);
%159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
%161 = %160.0;
%162 = nn.relu(%161);
%163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
%165 = %164.0;
%166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
%168 = %167.0;
%169 = nn.relu(%168);
%170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
%172 = %171.0;
%173 = nn.relu(%172);
%174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
%176 = %175.0;
%177 = add(%176, %165);
%178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
%188 = %187.0;
%189 = add(%188, %177);
%190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
%192 = %191.0;
%193 = nn.relu(%192);
%194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
%196 = %195.0;
%197 = nn.relu(%196);
%198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
%199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
%200 = %199.0;
%201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
%202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
%203 = %202.0;
%204 = nn.relu(%203);
%205 = nn.global_avg_pool2d(%204);
%206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
reshape(%206, newshape=[0, -1])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "mobilenet",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}


def mobilenet_16():
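    """Returns the MobileNet workload in float16 form, packaged as a test dict (module, input shapes/dtypes, params)."""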
metatable = {"relay.Constant": mobilenet_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
%0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
%1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
%6 = %5.0;
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
%13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
%14 = %13.0;
%15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
%17 = %16.0;
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
%20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
%21 = %20.0;
%22 = nn.relu(%21);
%23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
%25 = %24.0;
%26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
%28 = %27.0;
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
%32 = %31.0;
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
%36 = %35.0;
%37 = add(%36, %25);
%38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
%44 = %43.0;
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
%48 = %47.0;
%49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
%51 = %50.0;
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%55 = %54.0;
%56 = nn.relu(%55);
%57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%59 = %58.0;
%60 = add(%59, %48);
%61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%63 = %62.0;
%64 = nn.relu(%63);
%65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%67 = %66.0;
%68 = nn.relu(%67);
%69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%71 = %70.0;
%72 = add(%71, %60);
%73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%83 = %82.0;
%84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%86 = %85.0;
%87 = nn.relu(%86);
%88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%90 = %89.0;
%91 = nn.relu(%90);
%92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%94 = %93.0;
%95 = add(%94, %83);
%96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%98 = %97.0;
%99 = nn.relu(%98);
%100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
%102 = %101.0;
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
%106 = %105.0;
%107 = add(%106, %95);
%108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
%110 = %109.0;
%111 = nn.relu(%110);
%112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
%114 = %113.0;
%115 = nn.relu(%114);
%116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
%118 = %117.0;
%119 = add(%118, %107);
%120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
%122 = %121.0;
%123 = nn.relu(%122);
%124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
%126 = %125.0;
%127 = nn.relu(%126);
%128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
%130 = %129.0;
%131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
%133 = %132.0;
%134 = nn.relu(%133);
%135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
%137 = %136.0;
%138 = nn.relu(%137);
%139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
%141 = %140.0;
%142 = add(%141, %130);
%143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
%153 = %152.0;
%154 = add(%153, %142);
%155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
%157 = %156.0;
%158 = nn.relu(%157);
%159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
%161 = %160.0;
%162 = nn.relu(%161);
%163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
%165 = %164.0;
%166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
%168 = %167.0;
%169 = nn.relu(%168);
%170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
%172 = %171.0;
%173 = nn.relu(%172);
%174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
%176 = %175.0;
%177 = add(%176, %165);
%178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
%188 = %187.0;
%189 = add(%188, %177);
%190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
%192 = %191.0;
%193 = nn.relu(%192);
%194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
%196 = %195.0;
%197 = nn.relu(%196);
%198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
%199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
%200 = %199.0;
%201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
%202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
%203 = %202.0;
%204 = nn.relu(%203);
%205 = nn.global_avg_pool2d(%204);
%206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
reshape(%206, newshape=[0, -1])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "mobilenet_16",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}


def batch_norm_extract():
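    """Returns a minimal workload consisting of a single batch_norm whose first output is extracted, as a test dict."""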
consts = make_consts(
"float32",
[
(32,), # 0
(32,), # 1
(32,), # 2
(32,), # 3
],
)
metatable = {"relay.Constant": consts}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%FunctionVar_0: Tensor[(1, 32, 112, 112), float32]) -> Tensor[(1, 32, 112, 112), float32] {
%3 = nn.batch_norm(%FunctionVar_0, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
%3.0
}
""",
"from_string",
None,
metatable,
)
return {
"name": "batch_norm_extract",
"input_shapes": {"FunctionVar_0": [1, 32, 112, 112]},
"input_dtypes": {"FunctionVar_0": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
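

# A minimal usage sketch, not part of the original fixtures: shows how one of the
# workload dicts returned above might be compiled. The helper name `_example_build`
# and the "llvm" target are illustrative assumptions.
def _example_build(workload, target="llvm"):
    from tvm import relay

    with tvm.transform.PassContext(opt_level=3):
        # workload["params"] is None for these fixtures; relay.build accepts that.
        return relay.build(workload["mod"], target=target, params=workload["params"])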


def resnext50_32x4d_consts(dtype):
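    """Shapes of the ResNeXt-50 (32x4d) convolution weights, materialized via make_consts in the given dtype."""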
return make_consts(
dtype,
[
(128, 64, 1, 1), # 0
(128, 4, 3, 3), # 1
(256, 128, 1, 1), # 2
(256, 64, 1, 1), # 3
(128, 256, 1, 1), # 4
(128, 4, 3, 3), # 5
(256, 128, 1, 1), # 6
(128, 256, 1, 1), # 7
(128, 4, 3, 3), # 8
(256, 128, 1, 1), # 9
(256, 256, 1, 1), # 10
(256, 8, 3, 3), # 11
(512, 256, 1, 1), # 12
(512, 256, 1, 1), # 13
(256, 512, 1, 1), # 14
(256, 8, 3, 3), # 15
(512, 256, 1, 1), # 16
(256, 512, 1, 1), # 17
(256, 8, 3, 3), # 18
(512, 256, 1, 1), # 19
(256, 512, 1, 1), # 20
(256, 8, 3, 3), # 21
(512, 256, 1, 1), # 22
(512, 512, 1, 1), # 23
(512, 16, 3, 3), # 24
(1024, 512, 1, 1), # 25
(1024, 512, 1, 1), # 26
(512, 1024, 1, 1), # 27
(512, 16, 3, 3), # 28
(1024, 512, 1, 1), # 29
(512, 1024, 1, 1), # 30
(512, 16, 3, 3), # 31
(1024, 512, 1, 1), # 32
(512, 1024, 1, 1), # 33
(512, 16, 3, 3), # 34
(1024, 512, 1, 1), # 35
(512, 1024, 1, 1), # 36
(512, 16, 3, 3), # 37
(1024, 512, 1, 1), # 38
(512, 1024, 1, 1), # 39
(512, 16, 3, 3), # 40
(1024, 512, 1, 1), # 41
(1024, 1024, 1, 1), # 42
(1024, 32, 3, 3), # 43
(2048, 1024, 1, 1), # 44
(2048, 1024, 1, 1), # 45
(1024, 2048, 1, 1), # 46
(1024, 32, 3, 3), # 47
(2048, 1024, 1, 1), # 48
(1024, 2048, 1, 1), # 49
(1024, 32, 3, 3), # 50
(2048, 1024, 1, 1), # 51
],
)


def resnext50_32x4d():
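    """Returns the ResNeXt-50 (32x4d) workload in float32 form, packaged as a test dict (module, input shapes/dtypes, params)."""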
metatable = {"relay.Constant": resnext50_32x4d_consts("float32")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 64, 56, 56), float32]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnext50_32x4d",
"input_shapes": {"x": [1, 64, 56, 56]},
"input_dtypes": {"x": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
def resnext50_32x4d_16():
metatable = {"relay.Constant": resnext50_32x4d_consts("float16")}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 64, 56, 56), float16]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnext50_32x4d_16",
"input_shapes": {"x": [1, 64, 56, 56]},
"input_dtypes": {"x": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
def describe_onnx(name, filename):
"""Returns the description of the ONNX model at filename, which can be passed to from_onnx to actually load
the model. Note that ? (ie unknown) shape dimensions must be manually changed to concrete dimensions
which are consistent with the overall model."""
onnx_model = onnx.load(MODEL_PREFIX + filename)
input_shapes = {}
input_dtypes = {}
initializer_names = [n.name for n in onnx_model.graph.initializer]
for input_info in onnx_model.graph.input:
if input_info.name not in initializer_names:
_, shape, dtype, _ = tvm.relay.frontend.onnx.get_info(input_info)
if dtype is None:
raise ValueError(f"Unknown dtype on input '{input_info.name}' is not supported.")
input_shapes.update({input_info.name: shape})
input_dtypes.update({input_info.name: dtype})
print(
f"{{'name': '{name}', 'filename': '{filename}', 'input_shapes': {input_shapes}, 'input_dtypes': {input_dtypes}, 'main_dtype': 'float32'}}"
)
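# Illustrative only: the dict printed by describe_onnx is meant to be copied into a model
# table and later handed to from_onnx. The filename and input names below are hypothetical.
#   describe_onnx("mobilenet", "mobilenetv2-7.onnx")
#   # -> {'name': 'mobilenet', 'filename': 'mobilenetv2-7.onnx',
#   #     'input_shapes': {'input': [1, 3, 224, 224]},
#   #     'input_dtypes': {'input': 'float32'}, 'main_dtype': 'float32'}
#   model = from_onnx({"name": "mobilenet", "filename": "mobilenetv2-7.onnx",
#                      "input_shapes": {"input": [1, 3, 224, 224]},
#                      "input_dtypes": {"input": "float32"}, "main_dtype": "float32"})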
def from_onnx(model):
logging.info("-------------------- BEGIN ONNX IMPORT --------------------")
filename = MODEL_PREFIX + model["filename"]
logging.info(f"Loading ONNX model from {filename}")
onnx_model = onnx.load(filename)
logging.info(f"Loaded model from {filename}")
mod, params = tvm.relay.frontend.from_onnx(
onnx_model, model["input_shapes"], freeze_params=True
)
mod = tvm.relay.transform.InferType()(mod)
logging.info("-------------------- END ONNX IMPORT --------------------")
logging.info(f"Imported model:\n{mod}")
logging.info(f"Params:\n{params}")
return {
"name": model["name"],
"input_shapes": model["input_shapes"],
"input_dtypes": model["input_dtypes"],
"mod": mod,
"params": params,
"main_dtype": model["main_dtype"],
}
def to_onnx(model):
logging.info("-------------------- BEGIN ONNX EXPORT --------------------")
short_filename = model["name"] + ".onnx"
filename = MODEL_PREFIX + short_filename
logging.info(f"Saving ONNX model to {filename}")
params = model["params"]
if params is None:
params = {}
tvm.contrib.target.onnx.to_onnx(model["mod"], params, model["name"], path=filename)
logging.info("-------------------- END ONNX EXPORT --------------------")
return {
"name": model["name"],
"filename": short_filename,
"input_shapes": model["input_shapes"],
"input_dtypes": model["input_dtypes"],
"main_dtype": model["main_dtype"],
}
| https://github.com/zk-ml/tachikoma |
tests/python/relay/collage/test_sub_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import logging
import tvm.testing
logging.basicConfig(level=logging.INFO)
partition_for_testing = tvm._ffi.get_global_func("relay.collage.PartitionForTesting")
def print_with_indexes(mod):
mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
print(mod)
def run(in_mod, expected_mod, max_outputs, allow_taps, compiler, map):
expected_mod = tvm.relay.transform.InferType()(expected_mod)
in_mod = tvm.relay.transform.InferType()(in_mod)
in_mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(in_mod)
indexes = [i for l, iss in map.items() for i in iss]
labels = [l for l, iss in map.items() for i in iss]
actual_mod = partition_for_testing(max_outputs, allow_taps, compiler, indexes, labels)(in_mod)
if not tvm.ir.structural_equal(actual_mod, expected_mod, True):
# Print everything in full so we can see what's going on when things fail.
print("Input module:")
print(in_mod)
print("Expected module:")
print(expected_mod)
print("Actual module:")
print(actual_mod)
# Assert again so as to see the actual disagreeing sub-expressions.
tvm.ir.assert_structural_equal(actual_mod, expected_mod, map_free_vars=True)
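# The "// node N" comments in the Relay text below refer to the post-dfs indexes assigned by
# CapturePostDfsIndexInSpans. Each test picks sub-expressions by those indexes and groups them
# under a (possibly empty) composite label via the `map` argument of run().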
def test_single_op():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = add(%c, %d); // node 7
subtract(%0, %1)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = (fn(%x, %y, Compiler="foo") { add(%x, %y) })(%c, %d);
subtract(%0, %1)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [7]})
def test_multi_output():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b); // node 6
%1 = add(%c, %d); // node 7
subtract(%0, %1)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = (fn(%w, %x, %y, %z, Compiler="foo") { (add(%y, %z), add(%w, %x)) })(%c, %d, %a, %b);
%1 = %0.0;
%2 = %0.1;
subtract(%1, %2)
}
"""
)
# No rewrite since 2 outputs
run(input(), input(), 1, False, "foo", {"": [6, 7]})
# Rewrite
run(input(), expected(), 2, False, "foo", {"": [6, 7]})
def test_classic_conv2d_add_relu():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
%c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
%0 = nn.conv2d(%a, %b); // node 8
%1 = add(%0, %c); // node 9
%2 = nn.relu(%1); // node 10
subtract(%2, %d)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
%c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
%2 = (fn(%x, %y, %z, Compiler="foo") {
%0 = nn.conv2d(%x, %y);
%1 = add(%0, %z);
nn.relu(%1)
})(%a, %b, %c);
subtract(%2, %d)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [8, 9, 10]})
def test_diamond_single_output():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]); // node 5
%1 = nn.relu(%0); // node 6
%2 = nn.relu(%1); // node 7
%3 = nn.leaky_relu(%0, alpha=0f); // node 9
add(%2, %3) // node 10
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
(fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
add(%2, %3)
})(%a, %b)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [5, 6, 7, 9, 10]})
def test_diamond_multi_output():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]); // node 5
%1 = nn.relu(%0); // node 6
%2 = nn.relu(%1); // node 7
%3 = nn.leaky_relu(%0, alpha=0f); // node 9
add(%2, %3)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%4 = (fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
(%2, %3)
})(%a, %b);
%5 = %4.0;
%6 = %4.1;
add(%5, %6)
}
"""
)
run(input(), expected(), 2, False, "foo", {"": [5, 6, 7, 9]})
def test_with_tap():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]); // node 5
%1 = nn.relu(%0); // node 6
add(%1, %0)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%2 = (fn (%x, %y, Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
(%0, %1)
})(%a, %b);
%3 = %2.1;
%4 = %2.0;
add(%3, %4)
}
"""
)
# No rewrite since has tap
run(input(), input(), 2, False, "foo", {"": [5, 6]})
# Rewrite
run(input(), expected(), 2, True, "foo", {"": [5, 6]})
def test_no_cycles():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
%0 = add(%a, %b); // node 3
%1 = add(%0, %b);
add(%1, %b) // node 5
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
(fn(%x, %y, Compiler="foo") {
%0 = add(%x, %y);
%1 = add(%0, %y);
add(%1, %y)
})(%a, %b)
}
"""
)
# No rewrite since would create cycle
run(input(), input(), 2, False, "foo", {"": [3, 5]})
# No cycle
run(input(), expected(), 2, False, "foo", {"": [3, 4, 5]})
def test_labels_direct_connection():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a); // node 3
%1 = nn.relu(%0); // node 4
%2 = nn.relu(%1); // node 5
%3 = nn.relu(%1); // node 6
%4 = add(%2, %3); // node 7
%5 = nn.relu(%4); // node 8
%6 = nn.relu(%4); // node 9
%7 = add(%5, %6); // node 10
nn.relu(%7) // node 11
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) {
(fn(%aa: Tensor[(5, 7), float32], Compiler="foo") {
%0 = nn.relu(%aa);
%4 = (fn(%y, Composite="a") {
%1 = nn.relu(%y);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
add(%2, %3)
})(%0);
%7 = (fn(%z, Composite="b") {
%5 = nn.relu(%z);
%6 = nn.relu(%z);
add(%5, %6)
})(%4);
nn.relu(%7)
})(%a)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [3, 11], "a": [4, 5, 6, 7], "b": [8, 9, 10]})
def test_labels_nested_tap():
def input():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a); // node 3
%1 = nn.relu(%0); // node 4
%2 = nn.relu(%1); // node 5
%3 = nn.relu(%1); // node 6
%4 = add(%2, %3); // node 7
%5 = nn.relu(%4); // node 8
%6 = nn.relu(%4); // node 9
%7 = add(%5, %6); // node 10
add(%2, %7) // node 11
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a);
%9 = (fn(%x: Tensor[(5, 7), float32], Compiler="foo") {
%5 = (fn(%y, Composite="a") {
%1 = nn.relu(%y);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
%4 = add(%2, %3);
(%2, %4)
})(%x);
%8 = (fn(%z, Composite="b") {
%6 = nn.relu(%z);
%7 = nn.relu(%z);
add(%6, %7)
})(%5.1);
(%5.0, %8)
})(%0);
add(%9.0, %9.1)
}
"""
)
run(input(), expected(), 2, True, "foo", {"a": [4, 5, 6, 7], "b": [8, 9, 10]})
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Support level10 operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
import random
import tvm.testing
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
def verify_more_dynamic_broadcast_to(x_shape, out_shape):
rank = len(out_shape)
dtype = "float32"
shape_type = "int64"
reshape_shape = relay.Var("shape", relay.ty.TensorType((len(x_shape),), shape_type))
broadcast_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x = relay.Var("x", relay.ty.TensorType((np.prod(x_shape),), dtype))
r = relay.reshape(x, reshape_shape)
z = relay.broadcast_to(r, broadcast_shape)
func = relay.Function([x, reshape_shape, broadcast_shape], z)
x = np.random.uniform(size=np.prod(x_shape)).astype(dtype)
ref_res = np.broadcast_to(np.reshape(x, x_shape), out_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate(func)(
x, np.array(x_shape).astype(shape_type), np.array(out_shape).astype(shape_type)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_more_dynamic_broadcast_to((4, 3), (3, 4, 3))
def verify_broadcast_to(x_shape, out_shape):
rank = len(out_shape)
dtype = "float32"
shape_type = "int64"
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
z = relay.broadcast_to(x, dyn_shape)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([x, dyn_shape], z)
x = np.random.uniform(size=x_shape).astype(dtype)
ref_res = np.broadcast_to(x, out_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate(func)(x, np.array(out_shape).astype(shape_type))
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_broadcast_to((1,), (1, 1, 1))
verify_broadcast_to((1, 1), (4, 1, 1))
verify_broadcast_to((4, 1), (1, 4, 3))
@tvm.testing.uses_gpu
def test_dyn_broadcast_to(executor_kind):
dtype = "uint8"
rank = 3
shape_type = "int64"
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x_shape = (1,)
x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
z = relay.broadcast_to(x, dyn_shape)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([x, dyn_shape], z)
x = np.random.uniform(size=x_shape).astype(dtype)
dyn_shape = (1,) * rank
ref_res = np.broadcast_to(x, dyn_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate(
func
)(x, np.array(dyn_shape).astype(shape_type))
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dyn_one_hot(executor_kind):
def _get_oshape(indices_shape, depth, axis):
oshape = []
true_axis = len(indices_shape) if axis == -1 else axis
ndim = len(indices_shape) + 1
indices_index = 0
for i in range(0, ndim):
if i == true_axis:
oshape.append(depth)
else:
oshape.append(indices_shape[indices_index])
indices_index += 1
return oshape
def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
depth_var = relay.var("depth", relay.TensorType((), "int32"))
on_value_const = relay.const(on_value)
off_value_const = relay.const(off_value)
out = relay.one_hot(indices, on_value_const, off_value_const, depth_var, axis, dtype)
func = relay.Function([indices, depth_var], out)
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
out_relay = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(indices_np, np.array(depth).astype("int32"))
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((3,), 3, 1, 0, -1, "int32")
_verify((3,), 3, 1.0, 0.0, -1, "float32")
_verify((2, 2), 5, 2, -2, 0, "int32")
_verify((2, 2), 5, 0.5, -0.5, 1, "float32")
_verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 dynamic operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm import te
from tvm.relay.testing import enabled_targets
import random
from test_dynamic_op_level3 import verify_func
import tvm.topi.testing
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_dyn_upsampling_run(executor_kind):
def verify_upsampling(dshape, scale_h, scale_w, layout, method, align_corners=False):
if layout == "NCHW":
(n, c, h, w) = dshape
x_data = np.random.uniform(size=(n, c, h, w)).astype("float32")
elif layout == "NHWC":
(n, h, w, c) = dshape
x_data = np.random.uniform(size=(n, h, w, c)).astype("float32")
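# topi's reference resize2d expects "linear"/"cubic", so strip the "bi" prefix from relay's
# "bilinear"/"bicubic" method names before computing the reference result.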
ref_res = tvm.topi.testing.resize2d_python(
x_data,
(scale_h, scale_w),
layout,
method[2:] if method[0:2] == "bi" else method,
"align_corners" if align_corners else "asymmetric",
)
x = relay.Var("x", relay.TensorType(dshape, "float32"))
scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w_var = relay.var("scale_h", relay.TensorType((), "float32"))
z = relay.nn.upsampling(
x, scale_h_var, scale_w_var, method=method, layout=layout, align_corners=align_corners
)
zz = run_infer_type(z)
func = relay.Function([x, scale_h_var, scale_w_var], z)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(
x_data, np.array(scale_h).astype("float32"), np.array(scale_w).astype("float32")
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
verify_upsampling((1, 16, 32, 32), 3, 2.0, "NCHW", "nearest_neighbor")
verify_upsampling((1, 16, 32, 32), 5, 2.0, "NCHW", "bilinear", True)
verify_upsampling((1, 16, 32, 32), 2.0, 6, "NHWC", "nearest_neighbor")
verify_upsampling((1, 16, 32, 32), 2.0, 2.0, "NHWC", "bilinear", True)
# tests upsampling type inference with scale_h passed in as a constant and scale_w as a variable
@tvm.testing.uses_gpu
def test_dyn_upsampling_infer_type_const():
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
data = relay.var("data", relay.TensorType((n, c, h, w), "int8"))
scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling(data, 2.0, scale_w)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, relay.Any(), relay.Any()), "int8")
@tvm.testing.uses_gpu
def test_dyn_upsampling3d_run(executor_kind):
def verify_upsampling3d(
dshape, scale_d, scale_h, scale_w, layout, method, coord_trans="asymmetric"
):
if layout == "NCDHW":
(n, c, d, h, w) = dshape
x_data = np.random.uniform(size=(n, c, d, h, w)).astype("float32")
elif layout == "NDHWC":
(n, d, h, w, c) = dshape
x_data = np.random.uniform(size=(n, d, h, w, c)).astype("float32")
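# topi's reference resize3d expects "linear", so strip the "tri" prefix from relay's
# "trilinear" method name before computing the reference result.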
ref_res = tvm.topi.testing.resize3d_python(
x_data,
(scale_d, scale_h, scale_w),
layout,
method[3:] if method[0:3] == "tri" else method,
coord_trans,
)
x = relay.Var("x", relay.TensorType(dshape, "float32"))
scale_d_var = relay.var("scale_d", relay.TensorType((), "float32"))
scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w_var = relay.var("scale_h", relay.TensorType((), "float32"))
z = relay.nn.upsampling3d(
x,
scale_d_var,
scale_h_var,
scale_w_var,
method=method,
layout=layout,
coordinate_transformation_mode=coord_trans,
)
zz = run_infer_type(z)
func = relay.Function([x, scale_d_var, scale_h_var, scale_w_var], z)
for target, dev in enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(
x_data,
np.array(scale_d).astype("float32"),
np.array(scale_h).astype("float32"),
np.array(scale_w).astype("float32"),
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "NCDHW", "nearest_neighbor")
verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 3.0, 4.0, "NCDHW", "nearest_neighbor")
verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 5.0, 1.0, "NCDHW", "trilinear", "align_corners")
verify_upsampling3d((1, 20, 3, 4, 16), 2.0, 2.0, 2.0, "NDHWC", "nearest_neighbor")
verify_upsampling3d((1, 8, 4, 16, 15), 2.0, 2.0, 2.0, "NDHWC", "trilinear", "align_corners")
# tests upsampling3d type inference with scale_h passed in as a constant and scale_d, scale_w as variables
def test_dyn_upsampling3d_infer_type_const():
n, c, d, h, w = (
te.size_var("n"),
te.size_var("c"),
te.size_var("d"),
te.size_var("h"),
te.size_var("w"),
)
data = relay.var("data", relay.TensorType((n, c, d, h, w), "int8"))
scale_d = relay.Var("scale_h", relay.TensorType((), "float32"))
scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling3d(data, scale_d, 2.0, scale_w, layout="NCDHW", method="trilinear")
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(
(n, c, relay.Any(), relay.Any(), relay.Any()), "int8"
)
@tvm.testing.uses_gpu
def test_dyn_pad(executor_kind):
def verify_pad(dshape, pad_width, pad_val, dtype):
x = relay.var("x", relay.TensorType(dshape, dtype))
ndim = len(dshape)
pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
pad_val_var = relay.var("pad_val_var", relay.TensorType((), dtype))
y = relay.nn.pad(x, pad_width_var, pad_val_var)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
func = relay.Function([x, pad_width_var, pad_val_var], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, pad_width, "constant", constant_values=(((pad_val,) * 2),) * ndim)
pad_width = np.array(pad_width).astype("int64")
verify_func(
executor_kind, func, [data, pad_width, np.array(pad_val).astype(dtype)], ref_res
)
def verify_pad_default_fill(dshape, pad_width, dtype):
x = relay.var("x", relay.TensorType(dshape, dtype))
ndim = len(dshape)
pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
y = relay.nn.pad(x, pad_width_var)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
func = relay.Function([x, pad_width_var], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, pad_width)
pad_width = np.array(pad_width).astype("int64")
verify_func(executor_kind, func, [data, pad_width], ref_res)
verify_pad((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), 2.0, "int32")
verify_pad((2, 7), ((1, 4), (2, 2)), 4.0, "float64")
verify_pad_default_fill((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), "float64")
verify_pad_default_fill((2, 7), ((1, 4), (2, 2)), "int32")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.relay.testing import check_grad, run_infer_type
executor_kind = tvm.testing.parameter("debug", "vm")
def verify_func(executor_kind, func, data, ref_res, target_device=tvm.testing.enabled_targets()):
assert isinstance(data, list)
for target, dev in target_device:
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(*data)
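# Tuple-valued results come back as an ADT container; compare each field against its reference.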
if isinstance(op_res, tvm.runtime.container.ADT):
assert len(op_res) == len(
ref_res
), "TVM and the reference implementation must produce the same number of outputs"
for op_result, ref_result in zip(op_res, ref_res):
tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5)
else:
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
relay.backend.te_compiler.get().clear()
def check_on_vm(target, dev, args, expected_result, mod):
"""
Check that evaluating the main function of `mod` applied to `args` on the Relay VM
produces `expected_result`.
"""
rts_result = relay.create_executor("vm", device=dev, target=target, mod=mod).evaluate()(*args)
tvm.testing.assert_allclose(expected_result, rts_result.numpy())
@tvm.testing.uses_gpu
def test_dyn_reshape(executor_kind):
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType((len(newshape),), "int64"))
z = relay.reshape(x, y)
func = relay.Function([x, y], z)
x_data = np.ones(shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
check_grad(
run_infer_type(func),
inputs=[x_data, np.array(newshape).astype("int64")],
test_inputs=[x_data],
eps=1e-3,
)
verify_func(executor_kind, func, [x_data, np.array(newshape).astype("int64")], ref_res)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
@tvm.testing.uses_gpu
def test_dyn_shape_reshape(executor_kind):
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(newshape, "float32"))
z = relay.reshape(x, relay.shape_of(y))
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
ref_res = np.reshape(x_data, oshape)
check_grad(run_infer_type(func), inputs=[x_data, y_data], eps=1e-3)
verify_func(executor_kind, func, [x_data, y_data], ref_res)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
def test_squeeze(executor_kind):
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
assert axis is not None
np_axis = tuple(axis)
axis = relay.var("axis", relay.TensorType([len(axis)], "int64"))
squeeze = relay.squeeze(x, axis=axis)
func = relay.Function([x, axis], squeeze)
x_data = np.random.random_sample(shape).astype(dtype)
ref_res = np.squeeze(x_data, axis=np_axis)
verify_func(executor_kind, func, [x_data, np.array(np_axis).astype("int64")], ref_res)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
@tvm.testing.uses_gpu
def test_dyn_expand_dims(executor_kind):
def verify_expand_dims(
dshape, dtype, oshape, axis, num_newaxis, target_device=tvm.testing.enabled_targets()
):
# Use 1 to avoid issues with invalid buffer sizes
x = relay.Var("x", relay.TensorType(dshape, dtype))
y = relay.var("axis", shape=[], dtype="int64")
z = relay.expand_dims(x, axis=y, num_newaxis=num_newaxis)
func = relay.Function([x, y], z)
data_np = np.random.uniform(size=dshape).astype(dtype)
axis_np = np.array(axis).astype("int64")
ref_res = data_np.reshape(oshape)
verify_func(executor_kind, func, [data_np, axis_np], ref_res, target_device=target_device)
for dtype in ["float16", "float32"]:
verify_expand_dims((2, 2), dtype, (2, 2, 1), 2, 1)
verify_expand_dims((2, 2), dtype, (2, 1, 2), 1, 1)
verify_expand_dims((2, 2), dtype, (1, 2, 2), 0, 1)
# TODO (AndrewZhaoLuo): investigate why runtimes in non-llvm are extremely slow
# for multiple new axis
llvm_target_only = [x for x in tvm.testing.enabled_targets() if "llvm" in x]
verify_expand_dims((2, 2), dtype, (2, 2, 1, 1), 2, 2, target_device=llvm_target_only)
verify_expand_dims((2, 2), dtype, (2, 1, 1, 1, 2), 1, 3, target_device=llvm_target_only)
verify_expand_dims((2, 2), dtype, (1, 1, 1, 1, 2, 2), 0, 4, target_device=llvm_target_only)
@tvm.testing.uses_gpu
def test_dyn_tile(executor_kind):
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
r = relay.var("reps", relay.TensorType((len(reps),), "float32"))
z = relay.tile(x, r)
func = relay.Function([x, r], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
reps_data = np.array(reps).astype("float32")
verify_func(executor_kind, func, [x_data, reps_data], ref_res)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_dyn_zeros_ones(executor_kind):
def verify_zeros_ones(shape, dtype):
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
rank = len(shape)
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), "int64"))
y = op(dyn_shape, dtype)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([dyn_shape], y)
ref_res = ref(shape, dtype)
verify_func(
executor_kind, func, [np.array(shape).astype("int64")], ref_res.astype("int64")
)
verify_zeros_ones((1, 3), "int64")
verify_zeros_ones((8, 9, 1, 2), "float32")
@tvm.testing.uses_gpu
def test_dyn_full(executor_kind):
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
rank = len(src_shape)
dyn_src_shape = relay.var("dyn_scr_shape", relay.ty.TensorType((rank,), "int64"))
z = relay.full(x, dyn_src_shape, dtype)
func = relay.Function([x, dyn_src_shape], z)
ref_res = np.full(src_shape, fill_value).astype(dtype)
verify_func(
executor_kind,
func,
[np.array(fill_value).astype(dtype), np.array(src_shape).astype("int64")],
ref_res,
)
verify_full(4, (1, 3, 4, 4), "int32")
verify_full(4, (1, 3, 4, 4), "int64")
verify_full(4.0, (2, 50), "float32")
@tvm.testing.uses_gpu
def test_dyn_sparse_to_dense(executor_kind):
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
output_shape_data = np.array(output_shape)
a = relay.var(
"a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
)
b = relay.var(
"b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
)
output_shape_var = relay.var(
"output_shape", relay.TensorType(output_shape_data.shape, str(output_shape_data.dtype))
)
if default_value is None:
args = [a, b, output_shape_var]
d = relay.sparse_to_dense(a, output_shape_var, b)
else:
c = relay.var(
"c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
)
args = [a, b, c, output_shape_var]
d = relay.sparse_to_dense(a, output_shape_var, b, c)
zz = run_infer_type(d)
assert len(zz.checked_type.shape) == len(output_shape)
func = relay.Function(args, d)
if default_value is None:
arguments = [sparse_indices_data, sparse_values_data, output_shape_data]
else:
arguments = [
sparse_indices_data,
sparse_values_data,
default_value_data,
output_shape_data,
]
verify_func(executor_kind, func, arguments, xpected)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
) # nXd
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
) # nXd
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
) # floats
# default value not specified
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
@pytest.mark.parametrize(
"sparse_indices, sparse_values, dense_shape, default_value",
[
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([10], dtype=np.int64),
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
np.array([5], dtype=np.int64),
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
np.array([4], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
np.array([4], dtype=np.int64),
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
np.array([100], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("dtype", [np.int64, np.int32])
@pytest.mark.parametrize("use_dyn", [True, False])
def test_sparse_fill_empty_rows(
sparse_indices, sparse_values, dense_shape, default_value, dtype, use_dyn, executor_kind
):
def ref_sparse_fill_empty_rows(
sparse_indices: np.ndarray,
sparse_values: np.ndarray,
dense_shape: np.ndarray,
default_value: np.ndarray,
) -> None:
"""
This function calculates the expected output of sparse_fill_empty_rows operator given the
inputs.
"""
def check_add_rows(current_idx, limit_idx):
while current_idx < limit_idx:
new_sparse_indices.append([current_idx] + [0] * (num_cols - 1))
new_sparse_values.append(default_value[0])
empty_row_indicator[current_idx] = True
current_idx += 1
return current_idx
current_idx = 0
new_sparse_indices = []
new_sparse_values = []
empty_row_indicator = [False for _ in range(dense_shape[0])]
num_cols = sparse_indices.shape[1]
for sparse_row, sparse_value in zip(sparse_indices, sparse_values):
limit_idx = sparse_row[0]
current_idx = check_add_rows(current_idx, limit_idx)
new_sparse_indices.append(list(sparse_row))
new_sparse_values.append(sparse_value)
current_idx = limit_idx + 1
check_add_rows(current_idx, dense_shape[0])
return new_sparse_indices, new_sparse_values, empty_row_indicator
def verify_sparse_fill_empty_rows(
sparse_indices_np: np.ndarray,
sparse_values_np: np.ndarray,
dense_shape_np: np.ndarray,
default_value_np: np.ndarray,
) -> None:
"""
This function verifies the relay output of sparse_fill_empty_rows with its expected output.
"""
if use_dyn:
sparse_indices = relay.var(
"sparse_indices",
shape=[relay.Any(), relay.Any()],
dtype=str(sparse_indices_np.dtype),
)
sparse_values = relay.var(
"sparse_values",
shape=[relay.Any()],
dtype=str(sparse_values_np.dtype),
)
dense_shape = relay.var(
"dense_shape",
shape=[relay.Any()],
dtype=str(dense_shape_np.dtype),
)
default_value = relay.var(
"default_value",
shape=[relay.Any()],
dtype=str(default_value_np.dtype),
)
else:
sparse_indices = relay.var(
"sparse_indices",
relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
)
sparse_values = relay.var(
"sparse_values",
relay.TensorType(sparse_values_np.shape, str(sparse_values_np.dtype)),
)
dense_shape = relay.var(
"dense_shape",
relay.TensorType(dense_shape_np.shape, str(dense_shape_np.dtype)),
)
default_value = relay.var(
"default_value",
relay.TensorType(default_value_np.shape, str(default_value_np.dtype)),
)
z = relay.sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value)
func = relay.Function([sparse_indices, sparse_values, dense_shape, default_value], z)
ref_res = ref_sparse_fill_empty_rows(
sparse_indices_np,
sparse_values_np,
dense_shape_np,
default_value_np,
)
(
new_sparse_indices_infer_type,
new_sparse_values_infer_type,
empty_row_indicator_infer_type,
) = run_infer_type(z)
assert new_sparse_indices_infer_type.checked_type.dtype == sparse_indices_np.dtype
assert new_sparse_values_infer_type.checked_type.dtype == sparse_indices_np.dtype
assert empty_row_indicator_infer_type.checked_type.dtype == "bool"
verify_func(
executor_kind,
func,
[sparse_indices_np, sparse_values_np, dense_shape_np, default_value_np],
ref_res,
[("llvm", tvm.cpu())],
)
verify_sparse_fill_empty_rows(
sparse_indices.astype(dtype),
sparse_values.astype(dtype),
dense_shape.astype(dtype),
default_value.astype(dtype),
)
def test_dyn_copy():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int64] {
copy(%x)
}
"""
)
x_data = np.random.rand(15, 3).astype("int64")
expected = x_data
check_on_vm(target, dev, [x_data], expected, mod)
def test_dyn_copy_scalar():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%x: int32, %y: Tensor[(?), int32]) -> Tensor[(?), int32] {
%0 = copy(%x);
%1 = expand_dims(%0, axis=0);
%2 = (%y, %1);
concatenate(%2)
}
"""
)
x_data = 3
y_data = np.random.rand(7).astype("int32")
expected = np.concatenate((y_data, np.expand_dims(x_data, axis=0)))
check_on_vm(target, dev, [x_data, y_data], expected, mod)
def test_dyn_cast():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int32] {
cast(%x, dtype="int32")
}
"""
)
x_data = np.random.rand(15, 3).astype("int64")
expected = x_data.astype("int32")
check_on_vm(target, dev, [x_data], expected, mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level4.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
def verify(dshape, begin, end, strides, slice_mode="end", test_ref=True, dtype="int32"):
x = relay.var("x", relay.TensorType(dshape, "float32"))
ndim = len(dshape)
slice_dim = len(begin)
begin = begin if begin else [0] * ndim
end = end if end else list(dshape)[:slice_dim]
if strides:
if len(strides) == 1:
strides = strides * slice_dim
else:
strides = [1] * slice_dim
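# Axes not covered by begin/end are left unsliced, so their extents remain static in the
# inferred output shape (checked against dshape below).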
num_static_axes = len(dshape) - len(begin)
# target numpy result
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(x_data, begin, end, strides, slice_mode)
data = [x_data, np.array(begin, dtype=dtype), np.array(end, dtype=dtype)]
begin = relay.var("begin", shape=[len(begin)], dtype=dtype)
end = relay.var("end", shape=[len(end)], dtype=dtype)
inputs = [x, begin, end]
if strides:
data.append(np.array(strides, dtype=dtype))
strides = relay.var("strides", shape=[len(strides)], dtype=dtype)
inputs.append(strides)
z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
else:
z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)
func = relay.Function(inputs, z)
func = run_infer_type(func)
if num_static_axes > 0:
oshape = run_infer_type(z).checked_type.shape
assert tuple(oshape[-num_static_axes:]) == dshape[-num_static_axes:]
if not test_ref:
return
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
*data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify(
(1, 224, 224, 3),
[0, 20, 20, 0],
[1, 140, 140, 3],
[1, 1, 1, 1],
dtype="int64",
)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], dtype="int16")
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None)
verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify((20, 10, 5), [20, 10, 4], [0, 0, 1], [-1, -3, -2])
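# slice_mode="size": end[i] gives the length of the slice along axis i, and -1 means
# "slice to the end of that axis".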
verify((3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], slice_mode="size", test_ref=False)
verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], slice_mode="size", test_ref=True)
# Slicing along first few axes, where the rest of axes remain static
verify((3, 4, 3), [0], [2], None)
verify((3, 4, 3), [1], [4], [2])
verify((3, 4, 3), [1, 0], [4, 2], [2, 1])
if __name__ == "__main__":
test_dynamic_strided_slice()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level5.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level5 operator test cases.
"""
import math
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
import tvm.testing
executor_kind = tvm.testing.parameter("debug", "vm")
def test_resize2d_infer_type():
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
size = relay.var("size", relay.TensorType((2,), "int8"))
z = relay.image.resize2d(x, size)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, relay.Any(), relay.Any()), "int8")
@tvm.testing.uses_gpu
def test_resize2d(executor_kind):
def verify_resize2d(dshape, scale, method, layout):
if layout == "NHWC":
size = (dshape[1] * scale, dshape[2] * scale)
else:
size = (dshape[2] * scale, dshape[3] * scale)
size = np.array(size).astype("int64")
x_data = np.random.uniform(size=dshape).astype("float32")
x = relay.var("x", relay.TensorType(dshape, "float32"))
size_var = relay.var("size", relay.TensorType((2,), "int64"))
coord_trans = "asymmetric" if method == "nearest_neighbor" else "align_corners"
z = relay.image.resize2d(
x, size_var, None, layout, method, coordinate_transformation_mode=coord_trans
)
zz = run_infer_type(z)
func = relay.Function([x, size_var], z)
ref_res = tvm.topi.testing.resize2d_python(
x_data, (scale, scale), layout, method, coord_trans
)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(x_data, size)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
for method in ["linear", "nearest_neighbor"]:
for layout in ["NCHW", "NHWC"]:
verify_resize2d((1, 4, 4, 4), 2, method, layout)
verify_resize2d((2, 8, 17, 20), 7, method, layout)
if __name__ == "__main__":
test_resize2d_infer_type()
test_resize2d()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/dyn/test_dynamic_op_level6.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level6 operator test cases.
"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
import tvm.testing
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_dynamic_topk(executor_kind):
def verify_topk(k, axis, ret_type, is_ascend, dtype):
shape = (20, 100)
x = relay.var("x", relay.TensorType(shape, "float32"))
k_var = relay.var("x", relay.TensorType((1,), "float32"))
out = relay.topk(x, k_var, axis, ret_type, is_ascend, dtype)
if isinstance(out, relay.expr.TupleWrapper):
out = out.astuple()
func = relay.Function([x, k_var], out)
np_data = np.random.uniform(size=shape).astype("float32")
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
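# k < 1 means keep all elements along the axis, so the reference keeps shape[axis] entries.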
kk = k if k >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype("float32")
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype("float32")
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(np_data, np.array([k]).astype("float32"))
if ret_type == "both":
tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
elif ret_type == "values":
tvm.testing.assert_allclose(op_res.numpy(), np_values)
else:
tvm.testing.assert_allclose(op_res.numpy(), np_indices)
np.random.seed(0)
for k in [0, 1, 5]:
for axis in [0, -1, 1]:
for ret_type in ["both", "values", "indices"]:
verify_topk(k, axis, ret_type, True, "int64")
verify_topk(k, axis, ret_type, False, "float32")
if __name__ == "__main__":
test_dynamic_topk()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/op/annotation/test_annotation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotations."""
import tvm
import tvm.testing
from tvm import relay
import pytest
def test_on_device_via_string():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda")
assert isinstance(call, relay.Call)
assert len(call.args) == 1
assert call.args[0] == x
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.virtual_device.virtual_device_id == 0
assert call.attrs.virtual_device.target is None
assert call.attrs.virtual_device.memory_scope == ""
assert call.attrs.constrain_body
assert not call.attrs.constrain_result
def test_on_device_via_device():
x = relay.Var("x")
call = relay.annotation.on_device(x, tvm.device("cpu"))
assert call.attrs.virtual_device.device_type_int == 1 # ie kDLCPU
def test_on_device_invalid_device():
x = relay.Var("x")
pytest.raises(ValueError, lambda: relay.annotation.on_device(x, "bogus"))
def test_on_device_fixed():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=True)
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.constrain_body
assert call.attrs.constrain_result
def test_on_device_free():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=False, constrain_body=False)
assert call.attrs.virtual_device.device_type_int == -1 # ie kInvalidDeviceType
assert not call.attrs.constrain_body
assert not call.attrs.constrain_result
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/op/test_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for tensor helpers."""
import tvm
import tvm.testing
from tvm import relay
import pytest
def test_device_copy_via_string():
x = relay.var("x")
call = relay.op.device_copy(x, "cuda", "cpu")
assert isinstance(call, relay.Call)
assert len(call.args) == 1
assert call.args[0] == x
assert call.attrs.src_virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.src_virtual_device.virtual_device_id == 0
assert call.attrs.src_virtual_device.target is None
assert call.attrs.src_virtual_device.memory_scope == ""
assert call.attrs.dst_virtual_device.device_type_int == 1 # ie kDLCPU
assert call.attrs.dst_virtual_device.virtual_device_id == 0
assert call.attrs.dst_virtual_device.target is None
assert call.attrs.dst_virtual_device.memory_scope == ""
def test_device_copy_via_device():
x = relay.var("x")
call = relay.op.device_copy(x, tvm.device("cuda"), tvm.device("cpu"))
assert isinstance(call, relay.Call)
assert len(call.args) == 1
assert call.args[0] == x
assert call.attrs.src_virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.dst_virtual_device.device_type_int == 1 # ie kDLCPU
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/test_conv2d_nchw_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
import pytest
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad(target, dtype):
input_shape = (1, 32, 42, 42)
filter_shape = (96, 32, 3, 3)
bias_shape = (1, 96, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=96,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad_pass(target, dtype):
input_shape = (1, 32, 40, 40)
filter_shape = (96, 32, 2, 2)
bias_shape = (1, 96, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=96,
kernel_size=(2, 2),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_35_35_strides(target, dtype):
input_shape = (1, 48, 35, 35)
filter_shape = (64, 48, 5, 5)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[2, 2, 2, 2],
strides=[1, 1],
out_dtype=dtype,
channels=64,
kernel_size=(5, 5),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_resnet50_v2_nchw_3c(target, dtype):
input_shape = (1, 3, 224, 224)
filter_shape = (64, 3, 7, 7)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[3, 3, 3, 3],
strides=[2, 2],
out_dtype=dtype,
channels=64,
kernel_size=(7, 7),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_nchw_3c(target, dtype):
input_shape = (1, 3, 299, 299)
filter_shape = (64, 3, 3, 3)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=64,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_1x1_16c16spatial(target, dtype):
input_shape = (1, 16, 256, 256)
filter_shape = (32, 16, 4, 4)
bias_shape = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4_16c16pad(target, dtype):
input_shape = (1, 32, 256, 256)
filter_shape = (32, 32, 4, 4)
bias_shape = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4x4_16c16pad(target, dtype):
input_shape = (1, 32, 256, 256)
filter_shape = (4, 32, 4, 4)
bias_shape = (1, 4, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_yolov3_v2_nchw_3c(target, dtype):
input_shape = (1, 1024, 13, 13)
filter_shape = (255, 1024, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=255,
kernel_size=(1, 1),
)
mod = relay.Function([A, B], conv)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
initializer("weight", filter_data)
params = {
"weight": tvm.nd.array(filter_data),
}
build_run_compare(mod, params, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_vgg16_winograd_4d(target, dtype):
input_shape = (1, 512, 28, 28)
filter_shape = (512, 512, 3, 3)
bias_shape = (1, 512, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
channels=512,
kernel_size=[3, 3],
out_dtype=dtype,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
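    # Hand-written autotvm tuning log entry for conv2d_nchw_winograd.image2d; it is
    # passed to build_run_compare so that the winograd schedule is used, which the
    # assertion below checks by looking for "winograd" in the compiled graph.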
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 512, 28, 28], "{dtype}"], ["TENSOR", [512, 512, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_conv(target, dtype):
input_shape = (1, 4, 3, 3)
A = relay.var("data", shape=input_shape, dtype=dtype)
filter_shape3 = (8, 4, 3, 3)
bias_shape3 = (8,)
B3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
D = relay.nn.conv2d(
A, B3, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], out_dtype=dtype
)
filter_shape4 = (8, 8, 3, 3)
bias_shape4 = (8,)
B4 = relay.var("weight4", shape=filter_shape4, dtype=dtype)
D = relay.nn.conv2d(
D, B4, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], out_dtype=dtype
)
mod = relay.Function([A, B3, B4], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data3 = np.zeros(filter_shape3).astype(dtype)
bias_data3 = np.zeros(bias_shape3).astype(dtype)
filter_data4 = np.zeros(filter_shape4).astype(dtype)
bias_data4 = np.zeros(bias_shape4).astype(dtype)
initializer("weight", filter_data3)
initializer("bias", bias_data3)
initializer("weight", filter_data4)
initializer("bias", bias_data4)
params1 = {
"weight3": tvm.nd.array(filter_data3),
"weight4": tvm.nd.array(filter_data4),
}
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 4, 3, 3], "{dtype}"], ["TENSOR", [8, 4, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_residual_block(target, dtype):
"""
    - a residual-block-like pattern followed by a convolution, so that a texture is produced after the residual block
    - verification of a scalar value, which should be mapped to the global memory scope
layout_transform (NCHW->NCHW4c)
| <- buffer
conv2d (1) <- to get textures as output
/ \
conv2d (2) |
\ /
add <- add should be fused into conv2d (2)
multiply to scalar <- buffer to the input of multiply scalar value
relu
| <- texture in intermediate tensor
conv2d (3)
relu
| <- buffer
layout_transform (NCHW4c->NCHW)
"""
input_shape = (1, 32, 40, 40)
filter_shape1 = (32, 32, 2, 2)
filter_shape2 = (32, 32, 1, 1)
filter_shape3 = (32, 32, 2, 2)
bias_shape1 = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
conv1 = relay.nn.conv2d(
A,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
D = relay.op.add(conv1, B1)
D = relay.op.nn.relu(D)
conv2 = relay.nn.conv2d(
D,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
D = relay.op.add(conv2, D)
D = D * relay.const(0.15, dtype)
D = relay.op.nn.relu(D)
conv3 = relay.nn.conv2d(
D,
W3,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
D = relay.op.nn.relu(conv3)
mod = relay.Function([A, W1, B1, W2, W3], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data1 = np.zeros(filter_shape1).astype(dtype)
bias_data1 = np.zeros(bias_shape1).astype(dtype)
initializer("weight", filter_data1)
initializer("bias", bias_data1)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
initializer("weight", filter_data2)
filter_data3 = np.zeros(filter_shape3).astype(dtype)
initializer("weight", filter_data3)
params1 = {
"weight1": tvm.nd.array(filter_data1),
"bias1": tvm.nd.array(bias_data1),
"weight2": tvm.nd.array(filter_data2),
"weight3": tvm.nd.array(filter_data3),
}
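    # The expected memory scopes depend on dtype: the float16 and float32 builds
    # produce lists of different length, so both reference variants are kept below.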
if dtype == "float16":
static_memory_scope = [
"global",
"global.texture",
"global.texture-weight",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"global",
"global.texture",
"global.texture-weight",
"",
"",
]
else:
static_memory_scope = [
"global",
"global.texture",
"global.texture-weight",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"",
"",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_concat(target, dtype):
"""
layout_transform (NCHW->NCHW4c)
| <- buffer
conv2d (1) <- to get textures as output
/ \
conv2d (2) conv2d (3)
\ / <- concat does not support textures, there we should have buffers
concatenation
| <- buffer
layout_transform (NCHW4c->NCHW)
"""
input_shape = (1, 32, 40, 40)
filter_shape1 = (96, 32, 2, 2)
filter_shape2 = (32, 96, 2, 2)
filter_shape3 = (5, 96, 2, 2)
bias_shape1 = (1, 96, 1, 1)
bias_shape2 = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
B2 = relay.var("bias2", shape=bias_shape2, dtype=dtype)
# C = relay.nn.relu(A)
conv1 = relay.nn.conv2d(
A,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=96,
kernel_size=(2, 2),
)
D = relay.op.add(conv1, B1)
D = relay.op.nn.relu(D)
conv2 = relay.nn.conv2d(
D,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
conv2 = relay.op.add(conv2, B2)
conv2 = relay.op.nn.relu(conv2)
conv3 = relay.nn.conv2d(
D,
W3,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=5,
kernel_size=(2, 2),
)
t = relay.Tuple([conv2, conv3])
c = relay.op.concatenate(t, axis=1)
mod = relay.Function([A, W1, B1, W2, B2, W3], c)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data1 = np.zeros(filter_shape1).astype(dtype)
bias_data1 = np.zeros(bias_shape1).astype(dtype)
initializer("weight", filter_data1)
initializer("bias", bias_data1)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
bias_data2 = np.zeros(bias_shape2).astype(dtype)
initializer("weight", filter_data2)
initializer("bias", bias_data2)
filter_data3 = np.zeros(filter_shape3).astype(dtype)
initializer("weight", filter_data3)
params1 = {
"weight1": tvm.nd.array(filter_data1),
"bias1": tvm.nd.array(bias_data1),
"weight2": tvm.nd.array(filter_data2),
"bias2": tvm.nd.array(bias_data2),
"weight3": tvm.nd.array(filter_data3),
}
static_memory_scope = [
"",
"global",
"global.texture-weight",
"global.texture-weight",
"global",
"global.texture-weight",
"global.texture-weight",
"",
"",
"",
"",
"",
]
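    # The detailed list above is immediately overridden by an empty list, presumably
    # so that build_run_compare skips the memory-scope comparison for this test.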
static_memory_scope = []
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_pooling_branching_texture_params(target, dtype):
"""
Verification of the pooling and many branches having textures
layout_transform (NCHW->NCHW4c)
| <- buffer
conv2d (0) <- to get textures
| <- textures
pooling
/ \ \ <- textures
conv2d (1) conv2d (2) conv2d (3)
\ / |
add | <- to have the only one output, will be fused
\ /
add <- to have the only one output, will be fused
| <- buffer
layout_transform (NCHW4c->NCHW)
"""
input_shape = (1, 32, 40, 40)
filter_shape0 = (32, 32, 1, 1)
filter_shape1 = (32, 32, 2, 2)
filter_shape2 = (32, 32, 1, 1)
filter_shape3 = (32, 32, 2, 2)
bias_shape1 = (1, 32, 1, 1)
# bias_shape2 = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
W0 = relay.var("weight0", shape=filter_shape0, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
conv0 = relay.nn.conv2d(
A,
W0,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
pool = relay.nn.avg_pool2d(conv0, pool_size=(2, 2), strides=(2, 2))
conv1 = relay.nn.conv2d(
pool,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
conv1 = relay.op.add(conv1, B1)
conv1 = relay.op.nn.relu(conv1)
conv2 = relay.nn.conv2d(
pool,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
conv3 = relay.nn.conv2d(
pool,
W3,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 1, 1, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
conv3 = relay.op.nn.relu(conv3)
res = relay.op.add(conv1, conv2)
res = relay.op.add(res, conv3)
mod = relay.Function([A, W0, W1, B1, W2, W3], res)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data0 = np.zeros(filter_shape0).astype(dtype)
filter_data1 = np.zeros(filter_shape1).astype(dtype)
bias_data1 = np.zeros(bias_shape1).astype(dtype)
initializer("weight", filter_data1)
initializer("bias", bias_data1)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
initializer("weight", filter_data2)
filter_data3 = np.zeros(filter_shape3).astype(dtype)
initializer("weight", filter_data3)
params1 = {
"weight0": tvm.nd.array(filter_data0),
"weight1": tvm.nd.array(filter_data1),
"bias1": tvm.nd.array(bias_data1),
"weight2": tvm.nd.array(filter_data2),
"weight3": tvm.nd.array(filter_data3),
}
static_memory_scope = [
"global",
"global.texture",
"global.texture-weight",
"global.texture",
"global.texture",
"global.texture-weight",
"global.texture-weight",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"global.texture",
"",
"",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_branching_texture_params(target, dtype):
"""
    Verification of passing a texture to several consumers; markup of relay variables
    in primary functions + on_device
layout_transform (NCHW->NCHW4c)
| <- buffer
conv2d (0) <- to get textures
/ \ \ <- here should be textures and textures in params
conv2d (1) conv2d (2) conv2d (3)
\ / |
add | <- to have the only one output
\ /
add <- to have the only one output
| <- buffer
layout_transform (NCHW4c->NCHW)
"""
input_shape = (1, 32, 40, 40)
filter_shape0 = (32, 32, 1, 1)
filter_shape1 = (32, 32, 2, 2)
filter_shape2 = (32, 32, 1, 1)
filter_shape3 = (32, 32, 2, 2)
bias_shape1 = (1, 32, 1, 1)
# bias_shape2 = (1, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
W0 = relay.var("weight0", shape=filter_shape0, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
conv0 = relay.nn.conv2d(
A,
W0,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
conv1 = relay.nn.conv2d(
conv0,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
conv1 = relay.op.add(conv1, B1)
conv1 = relay.op.nn.relu(conv1)
conv2 = relay.nn.conv2d(
conv0,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
conv3 = relay.nn.conv2d(
conv0,
W3,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 1, 1, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(2, 2),
)
conv3 = relay.op.nn.relu(conv3)
res = relay.op.add(conv1, conv2)
res = relay.op.add(res, conv3)
mod = relay.Function([A, W0, W1, B1, W2, W3], res)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data0 = np.zeros(filter_shape0).astype(dtype)
filter_data1 = np.zeros(filter_shape1).astype(dtype)
bias_data1 = np.zeros(bias_shape1).astype(dtype)
initializer("weight", filter_data1)
initializer("bias", bias_data1)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
initializer("weight", filter_data2)
filter_data3 = np.zeros(filter_shape3).astype(dtype)
initializer("weight", filter_data3)
params1 = {
"weight0": tvm.nd.array(filter_data0),
"weight1": tvm.nd.array(filter_data1),
"bias1": tvm.nd.array(bias_data1),
"weight2": tvm.nd.array(filter_data2),
"weight3": tvm.nd.array(filter_data3),
}
static_memory_scope = [
"global",
"global.texture",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"global.texture-weight",
"global.texture-weight",
"global.texture",
"global.texture-weight",
"global.texture",
"",
"",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_different_lowering_same_op(target, dtype):
"""
    Use case for verification of caching compiled functions.
    Three convolutions following one another should be compiled as three different
    entities and lowered differently, because they differ in the memory scopes of
    their input parameters and of their outputs.
layout_transform (NCHW->NCHW4c)
| <- buffer
conv2d (1) <- buffer as input tensor and texture as output
| <- texture
conv2d (2) <- texture as input and texture as output
| <- texture
conv2d (3) <- texture as input and buffer as output
| <- buffer
layout_transform (NCHW4c->NCHW)
"""
input_shape = (1, 32, 40, 40)
filter_shape1 = (32, 32, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
conv1 = relay.nn.conv2d(
A,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
conv2 = relay.nn.conv2d(
conv1,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
conv3 = relay.nn.conv2d(
conv2,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=32,
kernel_size=(1, 1),
)
mod = relay.Function([A, W1], conv3)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data1 = np.zeros(filter_shape1).astype(dtype)
params1 = {
"weight1": tvm.nd.array(filter_data1),
}
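    # Expected memory scopes: buffers ("global"/"") at the graph boundary and textures
    # for the intermediate conv2d outputs, matching the lowering described above.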
static_memory_scope = [
"global",
"global.texture",
"global.texture-weight",
"global.texture",
"global.texture",
"",
"",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_non_rect(target, dtype):
input_shape = (1, 771, 36, 64)
A = relay.var("data", shape=input_shape, dtype=dtype)
filter_shape = (128, 771, 3, 3)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
D = relay.nn.conv2d(
A, B, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], out_dtype=dtype
)
mod = relay.Function([A, B], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
initializer("weight", filter_data)
params1 = {
"weight": tvm.nd.array(filter_data),
}
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256 -texture_spatial_limit=16384 -thread_warp_size=1", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 771, 36, 64], "{dtype}"], ["TENSOR", [128, 771, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 5399, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 16], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 8]], ["tile_rc", "sp", [-1, 193]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_injective_nwo_inputs1(target, dtype):
"""
    Use case for verifying the stability of annotating primary functions that have
    several ops accepting data from outside the primary function.
    The order in which ops are visited while traversing the graph inside a primary
    function can depend on the order of relay graph creation, so the annotation
    mechanism should be robust to the graph traversal order.
    Currently, whether a primary function supports textures depends on whether *any*
    op accepting an input of the function supports textures.
Input
/ \
layout_transform (NCHW->NCHW4c) |
| /
conv2d (1) /
| /
conv2d (2) mean /
/ \ / <- Primary function several head ops
(1)add (2)layout_transform |
| (NCHW4c->NCHW) |
| | \ /
| | (3) add
| | |
layout_transform \ /
(NCHW4c->NCHW) \ /
\ mul
\ /
add
    This test verifies the case when the last op visited is (3), which does not
    support textures, but op (1) does support textures, so the whole function will
    support textures.
"""
input_shape = (1, 4, 40, 40)
filter_shape1 = (4, 4, 3, 3)
filter_shape2 = (4, 4, 3, 3)
filter_shape3 = (4, 4, 3, 3)
A = relay.var("data", shape=input_shape, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
mean = relay.mean(A, axis=1, keepdims=True)
conv1 = relay.nn.conv2d(
A,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=4,
kernel_size=(3, 3),
)
conv2 = relay.nn.conv2d(
conv1,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=4,
kernel_size=(3, 3),
)
ad3 = relay.op.add(conv1, conv2)
ad1 = relay.op.add(mean, conv1)
ad2 = relay.op.multiply(ad1, conv2)
ad4 = relay.op.add(ad3, ad2)
mod = relay.Function([A, W1, W2], ad4)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data1 = np.zeros(filter_shape1).astype(dtype)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
initializer("weight", filter_data1)
initializer("weight", filter_data2)
params1 = {
"weight1": tvm.nd.array(filter_data1),
"weight2": tvm.nd.array(filter_data2),
}
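    # As described above, one boundary op does not support textures but another does,
    # so the primary function is annotated with texture scopes; boundary tensors stay "global".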
static_memory_scope = [
"global",
"global.texture",
"global.texture-nhwc",
"global.texture",
"global.texture-nhwc",
"global.texture",
"global",
"global",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_injective_nwo_inputs2(target, dtype):
"""
    Use case for verifying the stability of annotating primary functions that have
    several ops accepting data from outside the primary function.
    The order in which ops are visited while traversing the graph inside a primary
    function can depend on the order of relay graph creation, so the annotation
    mechanism should be robust to the graph traversal order.
    Currently, whether a primary function supports textures depends on whether *any*
    op accepting an input of the function supports textures.
Input
/ \
layout_transform (NCHW->NCHW4c) |
| /
conv2d (1) /
| /
conv2d (2) mean /
/ \ / <- Primary function several head ops
(1)add (2)layout_transform |
| (NCHW4c->NCHW) |
| | \ /
| | (3) add
| | |
layout_transform \ /
(NCHW4c->NCHW) \ /
\ mul
\ /
add
    This test verifies the case when the last op visited is (1); it supports textures,
    so the whole primary function is considered a function working with textures.
"""
input_shape = (1, 4, 40, 40)
filter_shape1 = (4, 4, 3, 3)
filter_shape2 = (4, 4, 3, 3)
filter_shape3 = (4, 4, 3, 3)
A = relay.var("data", shape=input_shape, dtype=dtype)
W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
mean = relay.mean(A, axis=1, keepdims=True)
conv1 = relay.nn.conv2d(
A,
W1,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=4,
kernel_size=(3, 3),
)
conv2 = relay.nn.conv2d(
conv1,
W2,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[1, 1],
out_dtype=dtype,
channels=4,
kernel_size=(3, 3),
)
ad3 = relay.op.add(conv1, conv2)
ad1 = relay.op.add(mean, conv1)
ad2 = relay.op.multiply(ad1, conv2)
ad4 = relay.op.add(ad2, ad3)
mod = relay.Function([A, W1, W2], ad4)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data1 = np.zeros(filter_shape1).astype(dtype)
filter_data2 = np.zeros(filter_shape2).astype(dtype)
initializer("weight", filter_data1)
initializer("weight", filter_data2)
params1 = {
"weight1": tvm.nd.array(filter_data1),
"weight2": tvm.nd.array(filter_data2),
}
static_memory_scope = [
"global",
"global.texture",
"global.texture-nhwc",
"global.texture",
"global",
"global.texture-nhwc",
"global.texture",
"global",
]
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, static_memory_scope)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/test_conv2d_nhwc_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
import pytest
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_257_257_32x1_1_32_16(target, dtype):
input_shape = (1, 257, 257, 32)
filter_shape = (1, 1, 32, 16)
bias_shape = (filter_shape[-1],)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype=dtype,
channels=filter_shape[-1],
kernel_size=(1, 1),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_257_257_32x1_1_32_16_with_padding(target, dtype):
input_shape = (1, 257, 257, 32)
filter_shape = (1, 1, 32, 16)
bias_shape = (filter_shape[-1],)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[3, 3, 3, 3],
strides=[2, 2],
out_dtype=dtype,
channels=filter_shape[-1],
kernel_size=(1, 1),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4_35_35_32x3_3_144_16(target, dtype):
input_shape = (4, 35, 35, 32)
filter_shape = (3, 3, 32, 16)
bias_shape = (filter_shape[-1],)
kernel_size = (filter_shape[0], filter_shape[1])
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype=dtype,
channels=filter_shape[-1],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_513_513_3x3_3_3_32(target, dtype):
input_shape = (1, 513, 513, 3)
filter_shape = (3, 3, 3, 32)
bias_shape = (filter_shape[-1],)
kernel_size = (filter_shape[0], filter_shape[1])
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype=dtype,
channels=filter_shape[-1],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.ones(filter_shape).astype(dtype)
bias_data = np.ones(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad(target, dtype):
input_shape = (1, 42, 42, 32)
filter_shape = (3, 3, 32, 96)
bias_shape = (1, 1, 1, 96)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=96,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad_pass(target, dtype):
input_shape = (1, 40, 40, 32)
filter_shape = (2, 2, 32, 96)
bias_shape = (1, 1, 1, 96)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=96,
kernel_size=(2, 2),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_35_35_strides(target, dtype):
input_shape = (1, 35, 35, 48)
filter_shape = (5, 5, 48, 64)
bias_shape = (1, 1, 1, 64)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[2, 2, 2, 2],
strides=[1, 1],
out_dtype=dtype,
channels=64,
kernel_size=(5, 5),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_resnet50_v2_nhwc_3c(target, dtype):
input_shape = (1, 224, 224, 3)
filter_shape = (7, 7, 3, 64)
bias_shape = (1, 1, 1, 64)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[3, 3, 3, 3],
strides=[2, 2],
out_dtype=dtype,
channels=64,
kernel_size=(7, 7),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_nhwc_3c(target, dtype):
input_shape = (1, 299, 299, 3)
filter_shape = (3, 3, 3, 64)
bias_shape = (1, 1, 1, 64)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=64,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_1x1_16c16spatial(target, dtype):
input_shape = (1, 128, 128, 16)
filter_shape = (4, 4, 16, 32)
bias_shape = (1, 1, 1, 32)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[0, 0, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4_16c16pad(target, dtype):
input_shape = (1, 256, 256, 32)
filter_shape = (4, 4, 32, 32)
bias_shape = (1, 1, 1, 32)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=32,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4x4_16c16pad(target, dtype):
input_shape = (1, 256, 256, 32)
filter_shape = (4, 4, 32, 4)
bias_shape = (1, 1, 1, 4)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[3, 3, 0, 0],
strides=[2, 2],
out_dtype=dtype,
channels=4,
kernel_size=(4, 4),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_yolov3_v2_nhwc_3c(target, dtype):
input_shape = (1, 13, 13, 1024)
filter_shape = (1, 1, 1024, 255)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[0, 0, 0, 0],
strides=[1, 1],
out_dtype=dtype,
channels=255,
kernel_size=(1, 1),
)
mod = relay.Function([A, B], conv)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
initializer("weight", filter_data)
params = {
"weight": tvm.nd.array(filter_data),
}
build_run_compare(mod, params, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_vgg16_winograd_4d(target, dtype):
input_shape = (1, 28, 28, 512)
filter_shape = (3, 3, 512, 512)
bias_shape = (1, 1, 1, 512)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[1, 1, 1, 1],
channels=512,
kernel_size=[3, 3],
out_dtype=dtype,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(0)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 28, 28, 512], "{dtype}"], ["TENSOR", [3, 3, 512, 512], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_conv(target, dtype):
input_shape = (1, 3, 3, 4)
A = relay.var("data", shape=input_shape, dtype=dtype)
filter_shape3 = (3, 3, 4, 8)
bias_shape3 = (1, 1, 1, 8)
B3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
D = relay.nn.conv2d(
A,
B3,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[1, 1, 1, 1],
channels=8,
kernel_size=[3, 3],
out_dtype=dtype,
)
filter_shape4 = (3, 3, 8, 8)
bias_shape4 = (1, 1, 1, 8)
B4 = relay.var("weight4", shape=filter_shape4, dtype=dtype)
D = relay.nn.conv2d(
D,
B4,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[1, 1, 1, 1],
channels=8,
kernel_size=[3, 3],
out_dtype=dtype,
)
mod = relay.Function([A, B3, B4], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data3 = np.zeros(filter_shape3).astype(dtype)
bias_data3 = np.zeros(bias_shape3).astype(dtype)
filter_data4 = np.zeros(filter_shape4).astype(dtype)
bias_data4 = np.zeros(bias_shape4).astype(dtype)
initializer("weight", filter_data3)
initializer("bias", bias_data3)
initializer("weight", filter_data4)
initializer("bias", bias_data4)
params1 = {
"weight3": tvm.nd.array(filter_data3),
"weight4": tvm.nd.array(filter_data4),
}
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 3, 3, 4], "{dtype}"], ["TENSOR", [3, 3, 4, 8], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_non_rect(target, dtype):
input_shape = (1, 36, 64, 771)
A = relay.var("data", shape=input_shape, dtype=dtype)
filter_shape = (3, 3, 771, 128)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
D = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWIO",
padding=[1, 1, 1, 1],
channels=128,
kernel_size=[3, 3],
out_dtype=dtype,
)
mod = relay.Function([A, B], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
initializer("weight", filter_data)
params1 = {
"weight": tvm.nd.array(filter_data),
}
temp = utils.tempdir()
stat_file = temp.relpath("stat.log")
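    # Same approach as above: a fake tuning record (whose target string also carries
    # -texture_spatial_limit) steers compilation to the winograd schedule for this
    # non-square workload.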
with open(stat_file, "w") as f:
f.write(
f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256 -texture_spatial_limit=16384 -thread_warp_size=1", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 36, 64, 771], "{dtype}"], ["TENSOR", [3, 3, 771, 128], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 5399, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 16], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 8]], ["tile_rc", "sp", [-1, 193]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
)
graph = build_run_compare(
mod, params1, {"data": input_shape}, dtype, target, stat_file=stat_file
)
matches = re.findall("winograd", graph)
assert len(matches) > 0
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/test_depthwise_conv2d_nchw_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from utils.adreno_utils import gpu_preprocess, build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_bias_nchwc(target, dtype):
input_shape = (1, 64, 112, 112)
filter_shape = (64, 1, 3, 3)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[2, 2],
out_dtype=dtype,
channels=64,
groups=64,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_nchwc(target, dtype):
input_shape = (1, 64, 112, 112)
filter_shape = (64, 1, 3, 3)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[2, 2],
out_dtype=dtype,
channels=64,
groups=64,
kernel_size=(3, 3),
)
mod = relay.Function([A, B], conv)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
params1 = {
"weight": tvm.nd.array(filter_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target, [], gpu_preprocess)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_bias_nchw(target, dtype):
input_shape = (1, 64, 112, 112)
filter_shape = (64, 1, 3, 3)
bias_shape = (1, 64, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[2, 2],
out_dtype=dtype,
channels=64,
groups=64,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_repack_bias_nchw(target, dtype):
input_shape = (1, 63, 112, 112)
filter_shape = (63, 1, 3, 3)
bias_shape = (1, 63, 1, 1)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
# C = relay.nn.relu(A)
conv = relay.nn.conv2d(
A,
B,
data_layout="NCHW",
kernel_layout="OIHW",
padding=[1, 1, 1, 1],
strides=[2, 2],
out_dtype=dtype,
channels=63,
groups=63,
kernel_size=(3, 3),
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/test_depthwise_conv2d_nhwc_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from utils.adreno_utils import build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1(target, dtype):
input_shape = (1, 129, 129, 144)
filter_shape = (3, 3, 144, 1)
kernel_size = (filter_shape[0], filter_shape[1])
bias_shape = (filter_shape[2],)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype=dtype,
groups=filter_shape[2],
channels=filter_shape[2],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
    # Only the depthwise convolution itself is compared against the CPU reference;
    # the bias/relu epilogue constructed above is left unused.
    mod = relay.Function([A, B, bias], conv)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_4_35_35_576x3_3_576_1(target, dtype):
input_shape = (4, 35, 35, 576)
filter_shape = (3, 3, 576, 1)
kernel_size = (filter_shape[0], filter_shape[1])
bias_shape = (filter_shape[2],)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype=dtype,
groups=filter_shape[2],
channels=filter_shape[2],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
    # As above, only the convolution output is compared; the bias/relu epilogue is unused.
    mod = relay.Function([A, B, bias], conv)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1_with_padding(target, dtype):
input_shape = (1, 129, 129, 144)
filter_shape = (3, 3, 144, 1)
kernel_size = (filter_shape[0], filter_shape[1])
bias_shape = (filter_shape[2],)
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWOI",
padding=[3, 3, 3, 3],
strides=[2, 2],
out_dtype=dtype,
groups=filter_shape[2],
channels=filter_shape[2],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
# mod, params = relay.testing.init.create_workload(func)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.zeros(filter_shape).astype(dtype)
bias_data = np.zeros(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_1_513_513_7x3_3_7_1(target, dtype):
input_shape = (1, 513, 513, 7)
filter_shape = (3, 3, 7, 1)
bias_shape = (filter_shape[2],)
kernel_size = (filter_shape[0], filter_shape[1])
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype=dtype,
channels=filter_shape[2],
groups=filter_shape[2],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.ones(filter_shape).astype(dtype)
bias_data = np.ones(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_1_513_513_3x3_3_3_1(target, dtype):
input_shape = (1, 513, 513, 3)
filter_shape = (3, 3, 3, 1)
bias_shape = (filter_shape[2],)
kernel_size = (filter_shape[0], filter_shape[1])
A = relay.var("data", shape=input_shape, dtype=dtype)
B = relay.var("weight", shape=filter_shape, dtype=dtype)
bias = relay.var("bias", shape=bias_shape, dtype=dtype)
conv = relay.nn.conv2d(
A,
B,
data_layout="NHWC",
kernel_layout="HWOI",
out_dtype=dtype,
channels=filter_shape[2],
groups=filter_shape[2],
kernel_size=kernel_size,
)
D = relay.op.add(conv, bias)
D = relay.op.nn.relu(D)
mod = relay.Function([A, B, bias], D)
np.random.seed(1)
initializer = relay.testing.init.Xavier()
filter_data = np.ones(filter_shape).astype(dtype)
bias_data = np.ones(bias_shape).astype(dtype)
initializer("weight", filter_data)
initializer("bias", bias_data)
params1 = {
"weight": tvm.nd.array(filter_data),
"bias": tvm.nd.array(bias_data),
}
build_run_compare(mod, params1, {"data": input_shape}, dtype, target)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/test_reduction_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean(target, dtype):
# NCHW
input_shape = (1, 3, 720, 1280)
A = relay.var("data", shape=input_shape, dtype=dtype)
mean = relay.mean(A, axis=1, keepdims=True)
mod = relay.Function([A], mean)
build_run_compare(mod, {}, {"data": input_shape}, dtype, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_argmax(target, dtype):
# NCHW
input_shape = (1, 3, 720, 1280)
A = relay.var("data", shape=input_shape, dtype=dtype)
argmax = relay.op.argmax(A, axis=[1])
mod = relay.Function([A], argmax)
build_run_compare(mod, {}, {"data": input_shape}, dtype, target)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/opencl_texture/utils/adreno_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils for adreno compute/schedules"""
import os
import tvm
import numpy as np
from tvm import relay
from tvm import autotvm
from tvm.relay import testing
from tvm.relay.transform import recast
from tvm.contrib import graph_runtime
import json
def get_cpu_reference(mod, params1, input_shape, inputs):
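    """Build the model for LLVM, run it on the CPU and return the reference outputs."""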
mod_fp32 = recast(mod, "float32", "float32", ops=["nn.conv2d", "add", "nn.relu"])
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod_fp32, "llvm", params=params1)
ctx = tvm.cpu()
m = graph_runtime.create(graph, lib, ctx)
if isinstance(input_shape, dict):
for key in input_shape:
m.set_input(key, inputs[-1])
else:
m.set_input("data", inputs[-1])
m.set_input(**params)
m.run()
return [
m.get_output(0).asnumpy(),
]
# Build the module, run it with OpenCL (host or remote) and on the CPU, then compare results.
def build_run_compare(
tvm_mod,
params1,
input_shape,
dtype="float32",
target="llvm",
static_mem_scopes=[],
gpu_preprocess=None,
stat_file=None,
):
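    """Build tvm_mod for `target`, run it on the host OpenCL device or on a remote
    Android device via RPC, and compare the outputs against a CPU reference.
    Returns the graph JSON string so callers can inspect which kernels were chosen.
    """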
if "TVM_TRACKER_HOST" in os.environ and "TVM_TRACKER_PORT" in os.environ:
rpc_tracker_host = os.environ["TVM_TRACKER_HOST"]
rpc_tracker_port = os.environ["TVM_TRACKER_PORT"]
run_on_host = 0
target_host = "llvm -mtriple=arm64-linux-android"
rpc_tracker_port = int(rpc_tracker_port)
else:
run_on_host = 1
target_host = "llvm"
if gpu_preprocess:
tvm_mod_nchwc = gpu_preprocess(tvm_mod)
else:
tvm_mod_nchwc = tvm_mod
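    # When a tuning log is supplied, build under apply_history_best so tuned schedules
    # (e.g. winograd) are used; otherwise fall back to the default schedules.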
if stat_file is not None:
with autotvm.apply_history_best(stat_file):
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(
tvm_mod_nchwc, target_host=target_host, target=target, params=params1
)
else:
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(
tvm_mod_nchwc, target_host=target_host, target=target, params=params1
)
    # Verify that storage_scope contains the expected texture scopes.
graph_json = json.loads(graph)
if "storage_scope" in graph_json["attrs"]:
assert (
len(static_mem_scopes) == len(graph_json["attrs"]["storage_scope"][1])
or len(static_mem_scopes) == 0
)
else:
assert len(static_mem_scopes) == 0
    for i, mem_scope in enumerate(static_mem_scopes):
        assert mem_scope == graph_json["attrs"]["storage_scope"][1][i]
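    # Execute either on the host OpenCL device or on a remote device obtained from the RPC tracker.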
if run_on_host:
ctx = tvm.opencl()
m = graph_runtime.create(graph, lib, ctx)
else:
from tvm import rpc
from tvm.contrib import utils, ndk
rpc_key = "android"
tracker = rpc.connect_tracker(rpc_tracker_host, rpc_tracker_port)
remote = tracker.request(rpc_key, priority=0, session_timeout=600)
temp = utils.tempdir()
dso_binary = "dev_lib_cl.so"
dso_binary_path = temp.relpath(dso_binary)
ctx = remote.cl(0)
lib.export_library(dso_binary_path, ndk.create_shared)
remote.upload(dso_binary_path)
rlib = remote.load_module(dso_binary)
m = graph_runtime.create(graph, rlib, ctx)
m.set_input(**params)
inputs = []
if isinstance(input_shape, dict):
for key in input_shape:
inputs.append(np.random.normal(size=input_shape[key]).astype(dtype))
m.set_input(key, inputs[-1])
else:
inputs.append(np.random.normal(size=input_shape).astype(dtype))
m.set_input("data", inputs[-1])
m.run()
ref_outputs = get_cpu_reference(tvm_mod, params1, input_shape, inputs)
for i, ref_output in enumerate(ref_outputs):
tvm_output = m.get_output(i)
output = tvm_output.asnumpy()
# for index, x in np.ndenumerate(ref_output):
# if abs(output[index] - x) > 0.01:
# print(index, output[index], x)
np.testing.assert_allclose(output, ref_output, rtol=1e-1, atol=1e-1)
return graph
def gpu_preprocess(tvm_mod):
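    """Convert conv2d layouts to NCHW4c/OIHW4o so the channel-packed layout used by the
    Adreno texture path is exercised."""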
layout_config = relay.transform.LayoutConfig()
desired_layouts = {"nn.conv2d": ["NCHW4c", "OIHW4o"]}
with layout_config:
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
with tvm.transform.PassContext(opt_level=3):
mod = tvm.IRModule.from_expr(tvm_mod)
tvm_mod_nchwc = seq(mod)
return tvm_mod_nchwc
| https://github.com/zk-ml/tachikoma |
tests/python/relay/qnn/test_canonicalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable
import numpy as np
from tvm import relay
from tvm.relay.qnn.op import canonicalizations
class TestIntegerTableLookupTable:
"""Consists of tests testing functionality of creating lookup tables for integer operations."""
def fake_identity_func_numpy(self, arr: np.ndarray):
return arr.astype("float32")
def fake_identity_func_relay(
self,
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_arg=None,
in_scale=relay.const(1.0, dtype="float32"),
in_zero_point=relay.const(0, dtype="int32"),
out_scale=relay.const(1.0, dtype="float32"),
out_zero_point=relay.const(0, dtype="int32"),
in_axis=-1,
out_axis=-1,
in_dtype="uint8",
out_dtype="uint8",
):
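        """Build an integer lookup op for `floating_point_func` and return it together with
        the input values as a numpy array; by default the input covers every representable
        value of `in_dtype`."""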
if input_arg is None:
input_arg = relay.const(np.arange(0, 256, dtype="uint8").view(in_dtype))
return (
canonicalizations.create_integer_lookup_op(
input_arg=input_arg,
floating_point_func=floating_point_func,
in_scale=in_scale,
in_zero_point=in_zero_point,
out_scale=out_scale,
out_zero_point=out_zero_point,
in_axis=in_axis,
out_axis=out_axis,
in_dtype=in_dtype,
out_dtype=out_dtype,
),
input_arg.data.numpy(),
)
def dequantize_numpy(self, np_arr, np_scale=1.0, np_zero_point=0):
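        """Apply the affine dequantization (q - zero_point) * scale."""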
return (np_arr.astype("int32") - np_zero_point) * np_scale
def run_function_test(
self,
in_scale: float,
in_zero_point: int,
out_scale: float,
out_zero_point: int,
in_dtype: str,
out_dtype: str,
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_arg: relay.Expr = None,
rtol=1e-7,
atol=0,
):
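        """Run the lookup-table op as a constant expression and compare its dequantized
        output with `floating_point_func` applied to the dequantized input."""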
relay_lookup, input_arg = self.fake_identity_func_relay(
input_arg=input_arg,
floating_point_func=floating_point_func,
in_scale=relay.const(in_scale, "float32"),
in_zero_point=relay.const(in_zero_point, "int32"),
out_scale=relay.const(out_scale, "float32"),
out_zero_point=relay.const(out_zero_point, "int32"),
in_dtype=in_dtype,
out_dtype=out_dtype,
)
result = canonicalizations.run_const_expr(relay_lookup)
np.testing.assert_allclose(
floating_point_func(
self.dequantize_numpy(input_arg, np_scale=in_scale, np_zero_point=in_zero_point)
),
self.dequantize_numpy(result, np_scale=out_scale, np_zero_point=out_zero_point),
atol=atol,
rtol=rtol,
)
"""Test mapping between different input/output dtypes"""
def test_int8_to_int8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=0,
out_scale=1.0,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_uint8_to_uint8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=128,
in_dtype="uint8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_int8_to_uint8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=0,
out_scale=1.0,
out_zero_point=128,
in_dtype="int8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
)
def test_uint8_to_int8(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=0,
in_dtype="uint8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
)
"""Test different input shapes"""
def test_keep_input_shapes(self):
# input in floating point ~[-2, 2], final output ~[0, 8]
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 8, 8])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 64])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 128])),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=self.fake_identity_func_numpy,
atol=0.03,
rtol=0.01,
)
"""Test mapping with different in/out qparams works."""
def test_different_in_out_qparams(self):
self.run_function_test(
in_scale=1.0,
in_zero_point=128,
out_scale=1.0,
out_zero_point=128,
in_dtype="uint8",
out_dtype="uint8",
floating_point_func=self.fake_identity_func_numpy,
            atol=1,  # values span roughly -128..128, so an absolute error of 1 is small relative to the range
rtol=0,
)
"""Test some simple functions"""
def test_tanh(self):
# 1 / 64 in scale -- input range is ~ (-2, 2), tanh(+-2) ~= +-1
# 1 / 128 out_scale -- output range is ~(-1, 1)
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8")),
in_scale=1 / 64,
in_zero_point=0,
out_scale=1 / 128,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=np.tanh,
atol=0.01,
rtol=0.01,
)
def test_exp(self):
# input in floating point ~[-2, 2], final output ~[0, 8]
self.run_function_test(
input_arg=relay.const(np.arange(-128, 128).astype("int8")),
in_scale=0.015,
in_zero_point=0,
out_scale=16 / 256,
out_zero_point=0,
in_dtype="int8",
out_dtype="int8",
floating_point_func=np.exp,
atol=0.03,
rtol=0.01,
)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_avg_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicPoolTests:
@tvm.testing.requires_corstone300
def test_pool(
self,
pool_type,
shape,
dtype,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
schedule_name,
):
"""Test a subgraph with a single pool operator."""
ishape = shape
input0 = relay.var("input", relay.TensorType(ishape, dtype))
out0 = getattr(relay.op.nn, pool_type)(
input0,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
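        # Two identical modules are created: ref_mod produces reference outputs on the host,
        # while mod is compiled and run on the Corstone-300 FVP.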
input1 = relay.var("input", relay.TensorType(ishape, dtype))
out1 = getattr(relay.op.nn, pool_type)(
input1,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestAvgPool1d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 32, 27), (3,), (2,), 0, 1, "NCW", False, False),
((3, 32, 27), (3,), (2,), 0, 1, "NWC", False, False),
((3, 32, 27), (3,), (2,), 0, 1, "NCW", True, False),
((3, 32, 27), (3,), (2,), 1, 1, "NCW", False, True),
((1, 1, 32), 3, 1, 0, 1, "NCW", False, False),
((1, 4, 20), 3, 2, 2, 1, "NCW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool1d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool2d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NHWC", False, False),
((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, False),
((2, 27, 27, 16), (3, 3), (2, 2), 0, 1, "NHWC", True, False),
((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, True),
((1, 25, 5, 64), (25, 5), (25, 5), 0, 1, "NHWC", False, False),
((1, 3, 3, 256), (3, 3), (3, 3), 0, 1, "NHWC", False, False),
((1, 8, 8, 64), (8, 8), (8, 8), 0, 1, "NHWC", False, False),
((1, 1, 32, 32), (3, 3), 1, 0, 1, "NCHW", False, False),
((1, 4, 32, 20), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool2d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool3d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
(
shape,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
count_include_pad,
) = tvm.testing.parameters(
((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False, False),
)
pool_type = tvm.testing.parameter("avg_pool3d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_conv1d_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv1dTests:
@tvm.testing.requires_corstone300
def test_conv1d(
self,
data_shape,
kernel_size,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv1d_ncw operator."""
ishape = data_shape
wshape = (num_filter, data_shape[1], kernel_size)
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.conv1d(
input0,
weight0,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NCW",
kernel_layout="OIW",
out_dtype="int32",
out_layout="NCW",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.conv1d(
input1,
weight1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NCW",
kernel_layout="OIW",
out_dtype="int32",
out_layout="NCW",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv1d_ncw(BasicConv1dTests):
"""This test is for conv1d_ncw.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((1, 12, 32), 3, 16, 1, 0, 1),
((3, 10, 12), 4, 24, 1, 0, 1),
((1, 7, 7), 3, 5, 1, 0, 1),
((1, 2, 10), 4, 4, 2, (1, 1), 1),
((1, 2, 20), 4, 4, 2, (0, 1), 1),
((1, 4, 16), 1, 12, 1, (1, 0), 1),
((1, 16, 24), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NCW")
schedule_name = tvm.testing.parameter("conv1d_ncw.generic")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_conv1d_nwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv1dTests:
@tvm.testing.requires_corstone300
def test_conv1d(
self,
data_shape,
kernel_size,
kernel_layout,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv1d_nwc operator."""
ishape = data_shape
wshape = (kernel_size, data_shape[-1], num_filter)
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.conv1d(
input0,
weight0,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NWC",
kernel_layout="WIO",
out_dtype="int32",
out_layout="NWC",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
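        # The reference module always uses WIO weights; when the schedule under test
        # expects WOI, the same weight data is transposed to match below.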
input1 = relay.var("input", relay.TensorType(ishape, dtype))
if kernel_layout == "WOI":
weight1 = relay.const(np.moveaxis(weight_data, 1, -1))
else:
weight1 = relay.const(weight_data)
out1 = relay.op.nn.conv1d(
input1,
weight1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout="NWC",
kernel_layout=kernel_layout,
out_dtype="int32",
out_layout="NWC",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv1d_dsp(BasicConv1dTests):
"""This test is for conv1d_dsp schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((4, 32, 16), 3, 12, 1, 0, 1),
((1, 32, 12), 3, 16, 1, 0, 1),
# TODO: The following 4 tests fail due to https://github.com/apache/tvm/issues/11466
# ((3, 12, 10), 4, 24, 1, 0, 1),
# ((1, 7, 7), 3, 5, 1, 0, 1),
# ((1, 10, 2), 4, 4, 2, (1, 1), 1),
# ((1, 20, 2), 4, 4, 2, (0, 1), 1),
((1, 16, 4), 1, 12, 1, (1, 0), 1),
((1, 24, 16), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NWC")
kernel_layout = tvm.testing.parameter("WOI")
schedule_name = tvm.testing.parameter("conv1d_dsp")
class TestConv1d_nwc(BasicConv1dTests):
"""This test is for conv1d_nwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
((4, 32, 16), 3, 12, 1, 0, 1),
((4, 16, 32), 3, 12, 1, 0, 1),
((4, 32, 16), 3, 12, 1, 0, 1),
((1, 32, 12), 3, 16, 1, 0, 1),
((3, 12, 10), 4, 24, 1, 0, 1),
((1, 7, 7), 3, 5, 1, 0, 1),
((1, 10, 2), 4, 4, 2, (1, 1), 1),
((1, 20, 2), 4, 4, 2, (0, 1), 1),
((1, 16, 4), 1, 12, 1, (1, 0), 1),
((1, 24, 16), 1, 32, 3, (2, 2), 1),
)
dtype = tvm.testing.parameter("int8", "int16")
data_layout = tvm.testing.parameter("NWC")
kernel_layout = tvm.testing.parameter("WIO")
schedule_name = tvm.testing.parameter("conv1d_nwc.generic")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for regular conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class Conv2dTests(GeneralizedConv2dTests):
"""Helper for constructing regular Conv2ds. Always sets groups to 1. We set the reference
kernel layout here as we must pick something, but the x86 implementation supports several."""
@fixture
def groups(self):
"""Using a fixture instead of a parameter stops Pytest from adding the (redundant) number of
groups to the name of each test."""
return 1
def setup_method(self):
self.ref_kernel_layout = "HWIO"
class TestConv2d_NHWC_DSP(Conv2dTests):
"""This test is for conv2d_nhwc_dsp.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
# TODO(mehrdadh): Fails due to https://github.com/apache/tvm/issues/11216
# ((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
# ((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
# ((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
# from Keyword Spotting model from MLPerfTiny models
# TODO(mehrdad): Fails due to https://github.com/apache/tvm/issues/11216
# ((1, 49, 10, 1), (10, 4), 64, (2, 2), (4, 1, 5, 1), 1),
# from Visual Wake Word model from MLPerfTiny models
# TODO(mehrdadh): fails due to https://github.com/apache/tvm/issues/11216
# ((1, 96, 96, 3), (3, 3), 8, (2, 2), (0, 0, 1, 1), 1),
# from Image Classification model from MLPerfTiny models
((1, 16, 16, 32), (1, 1), 64, (2, 2), 0, 1),
((4, 16, 16, 8), (5, 5), 8, 2, (0, 4, 4, 0), 1),
((4, 16, 16, 8), (5, 5), 16, 2, (0, 4, 4, 0), 1),
((4, 16, 16, 8), (5, 5), 8, 2, 0, 1),
((4, 16, 16, 8), (5, 5), 16, 2, 0, 1),
((1, 16, 16, 8), (3, 3), 16, 2, (0, 0, 1, 1), 1),
((1, 16, 16, 8), (3, 3), 16, 2, (1, 1, 2, 2), 1),
((1, 16, 16, 8), (5, 5), 16, 2, (3, 3, 2, 2), 1),
((1, 16, 16, 8), (3, 3), 16, 2, (0, 1, 2, 3), 1),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("conv2d_nhwc_dsp.arm_cpu")
class TestConv2d_NHWC_Spatial_Pack(Conv2dTests):
"""This test is for conv2d_nhwc_spatial_pack.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWIO")
out_layout = parameter("NHWC")
schedule_name = parameter("conv2d_nhwc_spatial_pack.arm_cpu")
class TestConv2d_Tensordot(Conv2dTests):
"""This test is for the regular conv2d schedule tensorized using tensordot."""
data_shape, kernel_size, num_filter, strides, padding = parameters(
# Disabled because these kernels are not an integral number of words
# ((1, 32, 32, 1), (3, 3), 12, 1, 0),
# ((1, 32, 10, 3), (3, 3), 16, 1, 0),
# ((1, 96, 96, 3), (3, 3), 8, (2, 2), (0, 0, 1, 1)),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0)),
((1, 16, 16, 32), (1, 1), 64, (2, 2), 0),
((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1)),
((4, 16, 16, 16), (5, 5), 8, 2, 0),
)
dilation = parameter(1)
in_dtype = parameter("int8", "int16", "int32")
data_layout = parameter("NHWC")
kernel_layout = parameter("OHWI")
out_layout = parameter("NHWC", "NCHW")
schedule_name = parameter("conv2d_nhwc_ohwi_dsp.arm_cpu")
class TestConv2d_NCHW_Spatial_Pack(Conv2dTests):
"""This test is for conv2d_nchw_spatial_pack.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation, in_dtype = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int8"),
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int16"),
((1, 16, 16, 32), (3, 3), 12, 1, 0, 1, "int16"),
)
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("conv2d_nchw_spatial_pack.arm_cpu")
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_conv2d_NCHWc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv2dTests:
@tvm.testing.requires_corstone300
def test_conv2d_NCHWc(
self,
data_shape,
kernel_size,
data_layout,
kernel_layout,
num_filter,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single conv2d_NCHWc operator."""
ishape = data_shape
wshape = (num_filter, data_shape[1], *kernel_size)
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.contrib_conv2d_nchwc(
relay.layout_transform(input0, "NCHW", data_layout),
relay.layout_transform(weight0, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
channels=num_filter,
out_dtype="",
out_layout="",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.contrib_conv2d_nchwc(
relay.layout_transform(input1, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
channels=num_filter,
out_dtype="",
out_layout="",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestConv2d_NCHWc(BasicConv2dTests):
"""This test is for conv2d_NCHWc.x86 schedule."""
(
data_shape,
kernel_size,
num_filter,
strides,
padding,
dilation,
dtype,
kernel_layout,
data_layout,
) = tvm.testing.parameters(
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW4i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW2i8o", "NCHW8c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW2i8o", "NCHW8c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW2i8o", "NCHW8c"),
# ResNet18 workloads
# this test does not fit in corstone300 DCTM section.
# ((1, 3, 112, 112), (7, 7), 64, (2, 2), (3, 3), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (3, 3), 64, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (1, 1), 64, (1, 1), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (3, 3), 128, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 64, 28, 28), (1, 1), 128, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (3, 3), 128, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (3, 3), 256, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 128, 14, 14), (1, 1), 256, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (3, 3), 256, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (3, 3), 512, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 256, 7, 7), (1, 1), 512, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
((1, 512, 3, 3), (3, 3), 512, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
)
schedule_name = tvm.testing.parameter("conv2d_NCHWc.x86")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_dense_dsp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicDenseTests:
@tvm.testing.requires_corstone300
def test_dense(self, shape, weight_shape, dtype, schedule_name, enable_bias):
"""Test a subgraph with a single dense operator."""
ishape = shape
wshape = weight_shape
out_dtype = "int32"
units = weight_shape[0]
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
if enable_bias:
bias_data = np.random.randint(low=-10, high=10, size=(wshape[0]), dtype=out_dtype)
input = relay.var("input", relay.TensorType(ishape, dtype))
weight = relay.const(weight_data)
dense = relay.op.nn.dense(
input,
weight,
units=units,
out_dtype=out_dtype,
)
if enable_bias:
bias = relay.const(bias_data)
relay_op = relay.op.nn.bias_add(dense, bias)
else:
relay_op = dense
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
ref_mod = tvm.IRModule.from_expr(relay.Function([input], relay_op))
output_list = generate_ref_data(ref_mod, inputs)
mod = tvm.IRModule.from_expr(relay.Function([input], relay_op))
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestDense(BasicDenseTests):
"""This test is for dense_dsp schedule."""
shape, weight_shape = tvm.testing.parameters(
((8, 128), (32, 128)),
((32, 32), (32, 32)),
((1, 64), (1, 64)),
((11, 2), (2, 2)),
((1, 32), (64, 32)),
((3, 12), (10, 12)),
)
dtype = tvm.testing.parameter("int8", "int16")
schedule_name = tvm.testing.parameter("dense_dsp.arm_cpu")
enable_bias = tvm.testing.parameter(False, True)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for depthwise_conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class DepthwiseConv2dTests(GeneralizedConv2dTests):
"""Helper for constructing depthwise Conv2ds. Sets the reference kernel layout to what x86 code
supports."""
@fixture
def groups(self, data_shape):
"""By definition, a depthwise_conv2d has a number of groups equal to the number of input
channels, so we don't need to specify the number of groups each time."""
return data_shape[3]
def setup_method(self):
self.ref_kernel_layout = "HWOI"
class TestDepthwiseConv2d_NCHW_OIHW(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nchw.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 10, 3, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 1),
((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 2),
((1, 32, 16, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("depthwise_conv2d_nchw.arm_cpu")
class TestDepthwiseConv2d_NHWC_HWOI(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nhwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 64), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("depthwise_conv2d_nhwc.generic")
class TestDepthwiseConv2d_NHWC_HWOI_DSP(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nhwc_dsp.arm_cpu schedule. The tests that are parameterized
by dtype work for both int8 and int16, while the others only work on the specified dtype."""
in_dtype_parameterized_tests = [
# Depthwise_conv2d parameters from MobileNetV1 0.25x
((1, 48, 48, 8), (3, 3), 8, (1, 1), 1),
((1, 48, 48, 16), (3, 3), 16, (2, 2), (1, 1, 0, 0)),
((1, 24, 24, 32), (3, 3), 32, (1, 1), 1),
((1, 24, 24, 32), (3, 3), 32, (2, 2), (1, 1, 0, 0)),
((1, 12, 12, 64), (3, 3), 64, (1, 1), 1),
((1, 12, 12, 64), (3, 3), 64, (2, 2), (1, 1, 0, 0)),
((1, 6, 6, 128), (3, 3), 128, (1, 1), 1),
((1, 6, 6, 128), (3, 3), 128, (2, 2), (1, 1, 0, 0)),
((1, 3, 3, 256), (3, 3), 256, (1, 1), 1),
# Asymmetric and larger kernels
((1, 25, 5, 64), (3, 3), 64, (1, 1), 1),
((1, 24, 24, 8), (5, 5), 8, (1, 1), 1),
((1, 24, 24, 8), (3, 5), 8, (1, 1), 1),
]
data_shape, kernel_size, num_filter, strides, padding, in_dtype = parameters(
# Make a copy of each parameterized test for int8 and one for int16
*map(lambda t: t + ("int8",), in_dtype_parameterized_tests),
*map(lambda t: t + ("int16",), in_dtype_parameterized_tests),
# Test the int16 implementation with channel numbers not divisible by four
((1, 48, 48, 6), (3, 3), 6, (1, 1), 1, "int16"),
)
dilation = parameter(1)
data_layout = parameter("NHWC")
kernel_layout = parameter("HWOI")
out_layout = parameter("NHWC")
schedule_name = parameter("depthwise_conv2d_nhwc_dsp.arm_cpu")
class TestDepthwiseConv2d_Tensordot(DepthwiseConv2dTests):
"""This test is for the depthwise_conv2d schedule tensorized using tensordot."""
data_shape, kernel_size, num_filter, strides, padding, in_dtype = parameters(
# Currently, our schedule requires kernel_w be divisible by the number of simd lanes given
# its dtype. This means 3x3 and 5x5 kernels do not work on int16 or int8 for now. If you had
# to, you could hack around this by padding the data and kernel.
((1, 48, 48, 8), (3, 3), 8, (1, 1), 1, "int32"),
((1, 48, 48, 16), (3, 3), 16, (2, 2), (1, 1, 0, 0), "int32"),
((1, 24, 24, 32), (3, 3), 32, (1, 1), 1, "int32"),
((1, 24, 24, 32), (3, 3), 32, (2, 2), (1, 1, 0, 0), "int32"),
((1, 12, 12, 64), (3, 3), 64, (1, 1), 1, "int32"),
((1, 12, 12, 64), (3, 3), 64, (2, 2), (1, 1, 0, 0), "int32"),
((1, 6, 6, 128), (3, 3), 128, (1, 1), 1, "int32"),
((1, 6, 6, 128), (3, 3), 128, (2, 2), (1, 1, 0, 0), "int32"),
((1, 3, 3, 256), (3, 3), 256, (1, 1), 1, "int32"),
((1, 25, 5, 64), (3, 3), 64, (1, 1), 1, "int32"),
((1, 24, 24, 8), (5, 5), 8, (1, 1), 1, "int32"),
((1, 24, 24, 8), (3, 5), 8, (1, 1), 1, "int32"),
# These "evenly divisible" kernels work on smaller dtypes.
((1, 48, 48, 8), (3, 2), 8, 1, 0, "int16"),
((1, 48, 48, 8), (4, 4), 8, 1, 0, "int8"),
)
dilation = parameter(1)
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NHWC", "NCHW")
schedule_name = parameter("depthwise_conv2d_nchw_oihw_dsp.arm_cpu")
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d_NCHWc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv2dTests:
@tvm.testing.requires_corstone300
def test_depthwise_conv2d_NCHWc(
self,
data_shape,
kernel_size,
data_layout,
kernel_layout,
groups,
strides,
padding,
dilation,
dtype,
schedule_name,
):
"""Test a subgraph with a single depthwise_conv2d_nchwc operator."""
ishape = data_shape
wshape = (data_shape[1], 1, *kernel_size)
weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
groups = groups
input0 = relay.var("input", relay.TensorType(ishape, dtype))
weight0 = relay.const(weight_data)
out0 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input0, "NCHW", data_layout),
relay.layout_transform(weight0, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
weight1 = relay.const(weight_data)
out1 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
relay.layout_transform(input1, "NCHW", data_layout),
relay.layout_transform(weight1, "OIHW", kernel_layout),
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
data_layout=data_layout,
kernel_layout=kernel_layout,
groups=groups,
out_dtype="",
out_layout="",
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestDepthWiseConv2d_NCHWc(BasicConv2dTests):
"""This test is for depthwise_conv2d_NCHWc schedule."""
(
data_shape,
kernel_size,
groups,
strides,
padding,
dilation,
kernel_layout,
data_layout,
) = tvm.testing.parameters(
((1, 16, 32, 32), (3, 3), 16, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i4o", "NCHW4c"),
((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i8o", "NCHW8c"),
)
dtype = tvm.testing.parameter("int8", "int16", "int32")
schedule_name = tvm.testing.parameter("depthwise_conv2d_NCHWc")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_generalized_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper class for testing variations of 2D convolution. Should be used by subclassing
`GeneralizedConv2dTests`, and then setting the arguments using tvm.testing.parameter(s)."""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
def _change_ndarray_layout(arr, src_layout, dst_layout):
"""Makes a copy of an ndarray, reshaping it to a new data layout.
    Parameters
    ----------
arr : numpy.ndarray
The ndarray to be reformatted.
src_layout : str
The current layout of the Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
dst_layout : str
        The desired layout of the new Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
Returns
-------
    dst_array : numpy.ndarray
A copy of the ndarray with the new layout.
"""
assert src_layout.isalpha() and dst_layout.isalpha()
axis_order = [src_layout.index(c) for c in dst_layout]
return np.transpose(arr, axis_order)
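# A minimal usage sketch of _change_ndarray_layout (illustrative shapes only; it relies on the
# numpy import above):
#   nhwc = np.zeros((1, 8, 8, 3))                        # data stored in NHWC layout
#   nchw = _change_ndarray_layout(nhwc, "NHWC", "NCHW")  # axis order becomes (0, 3, 1, 2)
#   assert nchw.shape == (1, 3, 8, 8)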
class GeneralizedConv2dTests:
"""Superclass which can be used to test regular, depthwise, or grouped conv2D. Cannot be used
for 5D data formats (NCHWc and such) as written, but could be extended. Might also be worth
abstracting some of this logic into an even more general class that could be used for other
operators.
Note that data_shape should always be a tuple of length four indicating the data shape in NHWC
format (it will later be reshaped according to the given data_layout), and kernel_size should be
a length two tuple giving the height and width of the kernel.
    This class, like the other base Conv2dTests classes, is not run by Pytest directly, as its
    name does not start with `Test`."""
@tvm.testing.requires_corstone300
def test_conv2d(
self,
data_shape,
kernel_size,
num_filter,
in_dtype,
strides,
padding,
groups,
dilation,
data_layout,
kernel_layout,
out_layout,
schedule_name,
):
"""Test a subgraph with a single conv2d operator."""
ref_input_data = np.random.randint(low=-128, high=127, size=data_shape, dtype=in_dtype)
ref_input_var = relay.var("input", relay.TensorType(data_shape, in_dtype)) # NHWC layout
kernel_shape = (*kernel_size, data_shape[-1] // groups, num_filter) # HWIO layout
ref_kernel_data = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=in_dtype)
"""Our x86 depthwise implementation only supports HWOI with NHWC, so we need to change our
kernel layout to work around this. We can't just change the whole thing to HWIO or
something else, as then group conv2d would not work. Eventually, we should switch to using
TensorFlow to create the reference output so we can ensure our implementation is right.
See https://github.com/apache/tvm/issues/13137 for details."""
ref_relay_op = relay.op.nn.conv2d(
ref_input_var,
relay.const(_change_ndarray_layout(ref_kernel_data, "HWIO", self.ref_kernel_layout)),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout="NHWC",
kernel_layout=self.ref_kernel_layout,
out_dtype="int32",
out_layout="NHWC",
)
ref_module = tvm.IRModule.from_expr(relay.Function([ref_input_var], ref_relay_op))
ref_outputs = generate_ref_data(ref_module, {"input": ref_input_data})
# Reshape output dictionary to match out_layout
assert len(ref_outputs) == 1
output_tensor_name, output_tensor = next(iter(ref_outputs.items()))
ref_outputs[output_tensor_name] = _change_ndarray_layout(output_tensor, "NHWC", out_layout)
test_input_data = _change_ndarray_layout(ref_input_data, "NHWC", data_layout)
test_input_var = relay.var("input", relay.TensorType(test_input_data.shape, in_dtype))
test_kernel_data = _change_ndarray_layout(ref_kernel_data, "HWIO", kernel_layout)
test_relay_op = relay.op.nn.conv2d(
test_input_var,
relay.const(test_kernel_data),
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
dilation=(dilation, dilation),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype="int32",
out_layout=out_layout,
)
test_function = relay.Function([test_input_var], test_relay_op)
test_model = AOTTestModel(
module=tvm.IRModule.from_expr(test_function),
inputs={"input": test_input_data},
outputs=ref_outputs,
)
compile_and_run(
test_model,
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for grouped conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import main, parameter, parameters
class GroupConv2dTests(GeneralizedConv2dTests):
"""Helper for constructing group Conv2ds. Sets the reference kernel layout to what x86 code
supports."""
def setup_method(self):
self.ref_kernel_layout = "HWIO"
class TestGroupConv2d_NCHW_OIHW(GroupConv2dTests):
"""This test is for group_conv2d_nchw.arm_cpu schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 32, 1, (1, 1, 2, 2), 2),
)
groups = parameter(2, 4)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NCHW")
kernel_layout = parameter("OIHW")
out_layout = parameter("NCHW")
schedule_name = parameter("group_conv2d_nchw.arm_cpu")
class TestGroupConv2d_NHWC_HWIO(GroupConv2dTests):
"""This test is for group_conv2d_nhwc.generic schedule."""
data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
((1, 49, 10, 16), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
groups = parameter(2, 4)
in_dtype = parameter("int8", "int16")
data_layout = parameter("NHWC")
kernel_layout = parameter("HWIO")
out_layout = parameter("NHWC")
schedule_name = parameter("group_conv2d_nhwc.generic")
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/strategy/arm_cpu/test_max_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicPoolTests:
@tvm.testing.requires_corstone300
def test_pool(
self,
pool_type,
shape,
dtype,
pool_size,
strides,
padding,
dilation,
layout,
ceil_mode,
schedule_name,
):
"""Test a subgraph with a single max_pool operator."""
ishape = shape
input0 = relay.var("input", relay.TensorType(ishape, dtype))
out0 = getattr(relay.op.nn, pool_type)(
input0,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
)
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
input1 = relay.var("input", relay.TensorType(ishape, dtype))
out1 = getattr(relay.op.nn, pool_type)(
input1,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout=layout,
out_layout="",
ceil_mode=ceil_mode,
)
mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
output_list = generate_ref_data(ref_mod, inputs)
compile_and_run(
AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
runner=AOT_CORSTONE300_RUNNER,
interface_api="c",
use_unpacked_api=True,
target_opts={
"-keys": "arm_cpu",
"-mcpu": "cortex-m7",
},
schedule_name=schedule_name,
)
class TestMaxPool1d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((3, 32, 27), (3,), (2,), 0, 1, "NCW", True),
((1, 32, 1), 3, 1, 0, 1, "NWC", False),
((1, 20, 4), 3, 2, 0, 1, "NWC", False),
)
pool_type = tvm.testing.parameter("max_pool1d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool2d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False),
((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True),
((1, 26, 26, 12), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 11, 11, 32), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 3, 3, 64), (2, 2), (2, 2), 0, 1, "NHWC", False),
((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", False),
((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", False),
((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", True),
((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", True),
)
pool_type = tvm.testing.parameter("max_pool2d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool3d(BasicPoolTests):
"""This test is for pool.arm_cpu schedule."""
shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False),
)
pool_type = tvm.testing.parameter("max_pool3d")
dtype = tvm.testing.parameter("int32")
schedule_name = tvm.testing.parameter("pool.arm_cpu")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_adt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
import numpy as np
prelude = p = Prelude(tvm.IRModule({}))
p.mod.import_from_std("nat.rly")
def count(e):
return count_(p, e)
dev = tvm.device("llvm", 0)
def eval(expr):
# CAUTION: These tests re-process the entire prelude for each test expression.
# Hoisting the create_executor won't improve that since preprocessing won't begin
# until the evaluate.
return create_executor(mod=prelude.mod, device=dev, target="llvm").evaluate(expr)
nat, z, s = prelude.mod.get_type("nat")
double = p.mod.get_global_var("nat_double")
add = p.mod.get_global_var("nat_add")
optional, some, none = prelude.mod.get_type("Option")
rlist, cons, nil = prelude.mod.get_type("List")
hd = p.hd
tl = p.tl
nth = p.nth
update = p.update
length = p.length
map = p.map
foldl = p.foldl
foldr = p.foldr
foldr1 = p.foldr1
sum = p.sum
concat = p.concat
filter = p.filter
zip = p.zip
rev = p.rev
unfoldl = p.unfoldl
unfoldr = p.unfoldr
map_accumr = p.map_accumr
map_accuml = p.map_accuml
tree, rose = prelude.mod.get_type("Tree")
tmap = p.tmap
size = p.size
compose = p.compose
iterate = p.iterate
def to_list(l):
assert isinstance(l, ConstructorValue)
val = l
ret = []
while True:
if val.tag == cons.tag:
ret.append(val.fields[0])
val = val.fields[1]
else:
assert val.tag == nil.tag
break
return ret
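# Illustrative only: to_list walks the Cons/Nil constructor tags, so for a two-element prelude
# list such as eval(cons(z(), cons(z(), nil()))) it returns the two stored nat values, and
# [count(v) for v in to_list(...)] would give [0, 0].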
def tree_to_dict(t):
assert isinstance(t, ConstructorValue)
ret = {}
assert t.tag == rose.tag
ret["member"] = t.fields[0]
ret["children"] = []
for subtree in to_list(t.fields[1]):
l = tree_to_dict(subtree)
ret["children"].append(l)
return ret
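# tree_to_dict yields nested dicts of the form {"member": <value>, "children": [...]}; for
# example, a rose node with two leaf children becomes a dict whose "children" list holds two
# dicts with empty "children" lists of their own.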
def vmobj_to_list(o, dtype="float32"):
if isinstance(o, tvm.nd.NDArray):
return [o.numpy().tolist()]
elif isinstance(o, tvm.runtime.container.ADT):
if len(o) == 0:
tensor_nil = p.get_var("tensor_nil", dtype=dtype)
if tensor_nil.tag == o.tag:
return [0]
return []
result = []
for f in o:
result.extend(vmobj_to_list(f, dtype))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1], dtype)
hd = vmobj_to_list(o.fields[0], dtype)
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
# turns a scalar-valued relay tensor value into a python number
def get_scalar(tv):
return tv.numpy().item()
# @tvm.testing.uses_gpu
def test_nat_value():
assert count(make_nat_value(p, 10)) == 10
assert count(eval(s(s(z())))) == 2
@tvm.testing.uses_gpu
def test_nat_constructor():
func = relay.Function([], z())
test_z = relay.GlobalVar("test_z")
test_sz = relay.GlobalVar("test_sz")
prelude.mod[test_z] = func
func = relay.Function([], s(z()))
prelude.mod[test_sz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_z].body.checked_type == nat()
assert ck_mod[test_sz].body.checked_type == nat()
@tvm.testing.uses_gpu
def test_double():
assert prelude.mod[double].checked_type == relay.FuncType([nat()], nat())
res = eval(double(s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_add():
assert prelude.mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
res = eval(add(s(z()), s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_list_constructor():
test_consz = relay.GlobalVar("test_consz")
func = relay.Function([], cons(z(), nil()))
prelude.mod[test_consz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_consz].body.checked_type == rlist(nat())
@tvm.testing.uses_gpu
def test_hd_tl():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(make_nat_expr(prelude, i), l)
got = []
for i in range(len(expected)):
got.append(count(eval(hd(l))))
l = tl(l)
assert got == expected
@tvm.testing.uses_gpu
def test_nth():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(relay.const(i), l)
for i in range(len(expected)):
nth = prelude.mod.get_global_var("nth")
item = eval(nth(l, relay.const(i)))
assert get_scalar(item) == i
@tvm.testing.uses_gpu
def test_update():
expected = list(range(10))
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(make_nat_expr(prelude, 0), l)
# set value
for i, v in enumerate(expected):
l = update(l, relay.const(i), make_nat_expr(prelude, v))
got = []
for i in range(len(expected)):
got.append(count(eval(nth(l, relay.const(i)))))
assert got == expected
@tvm.testing.uses_gpu
def test_length():
a = relay.TypeVar("a")
assert prelude.mod[length].checked_type == relay.FuncType(
[rlist(a)], relay.scalar_type("int32"), [a]
)
res = eval(length(cons(z(), cons(z(), cons(z(), nil())))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_map():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[map].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), rlist(a)], rlist(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(map(add_one, cons(z(), cons(z(), nil()))))
ones = to_list(res)
assert len(ones) == 2
assert count(ones[0]) == 1 and count(ones[1]) == 1
@tvm.testing.uses_gpu
def test_foldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldl].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], a), a, rlist(b)], a, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
rev_dup = relay.Function([y, x], cons(x, cons(x, y)))
res = eval(
foldl(
rev_dup,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
reversed = to_list(res)
assert len(reversed) == 6
assert count(reversed[0]) == 3 and count(reversed[1]) == 3
assert count(reversed[2]) == 2 and count(reversed[3]) == 2
assert count(reversed[4]) == 1 and count(reversed[5]) == 1
@tvm.testing.uses_gpu
def test_foldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldr].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], b), b, rlist(a)], b, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
identity = relay.Function([x, y], cons(x, y))
res = eval(
foldr(
identity,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
same = to_list(res)
assert len(same) == 3
assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3
@tvm.testing.uses_gpu
def test_foldr1():
a = relay.TypeVar("a")
lhs = prelude.mod[foldr1].checked_type
rhs = relay.FuncType([relay.FuncType([a, a], a), rlist(a)], a, [a])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], add(x, y))
res = eval(
foldr1(
f,
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
assert count(res) == 6
@tvm.testing.uses_gpu
def test_sum():
assert prelude.mod[sum].checked_type == relay.FuncType(
[rlist(relay.scalar_type("int32"))], relay.scalar_type("int32")
)
res = eval(sum(cons(relay.const(1), cons(relay.const(2), nil()))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_concat():
a = relay.TypeVar("a")
assert prelude.mod[concat].checked_type == relay.FuncType([rlist(a), rlist(a)], rlist(a), [a])
l1 = cons(make_nat_expr(prelude, 1), cons(make_nat_expr(prelude, 2), nil()))
l2 = cons(make_nat_expr(prelude, 3), cons(make_nat_expr(prelude, 4), nil()))
res = eval(concat(l1, l2))
catted = to_list(res)
assert len(catted) == 4
assert count(catted[0]) == 1
assert count(catted[1]) == 2
assert count(catted[2]) == 3
assert count(catted[3]) == 4
@tvm.testing.uses_gpu
def test_filter():
a = relay.TypeVar("a")
expected_type = relay.FuncType(
[relay.FuncType([a], relay.scalar_type("bool")), rlist(a)], rlist(a), [a]
)
assert prelude.mod[filter].checked_type == expected_type
x = relay.Var("x", nat())
greater_than_one = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
s, [relay.PatternConstructor(s, [relay.PatternWildcard()])]
),
relay.const(True),
),
relay.Clause(relay.PatternWildcard(), relay.const(False)),
],
),
)
res = eval(
filter(
greater_than_one,
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 3),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 1), nil())),
),
),
),
),
)
)
filtered = to_list(res)
assert len(filtered) == 2
assert count(filtered[0]) == 3
assert count(filtered[1]) == 5
@tvm.testing.uses_gpu
def test_zip():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType([rlist(a), rlist(b)], rlist(relay.TupleType([a, b])), [a, b])
assert prelude.mod[zip].checked_type == expected_type
l1 = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
l2 = cons(nil(), cons(cons(nil(), nil()), cons(cons(nil(), cons(nil(), nil())), nil())))
res = eval(zip(l1, l2))
zipped = to_list(res)
assert len(zipped) == 3
assert count(zipped[0][0]) == 1
assert len(to_list(zipped[0][1])) == 0
assert count(zipped[1][0]) == 2
assert len(to_list(zipped[1][1])) == 1
assert count(zipped[2][0]) == 3
assert len(to_list(zipped[2][1])) == 2
# test truncation
l3 = cons(make_nat_expr(prelude, 4), cons(make_nat_expr(prelude, 5), nil()))
shorter_res = eval(zip(l3, l2))
truncated = to_list(shorter_res)
assert len(truncated) == 2
assert count(truncated[0][0]) == 4
assert len(to_list(truncated[0][1])) == 0
assert count(truncated[1][0]) == 5
assert len(to_list(truncated[1][1])) == 1
l4 = cons(nil(), nil())
shortest_res = eval(zip(l3, l4))
singleton = to_list(shortest_res)
assert len(singleton) == 1
assert count(singleton[0][0]) == 4
assert len(to_list(singleton[0][1])) == 0
@tvm.testing.uses_gpu
def test_rev():
a = relay.TypeVar("a")
assert prelude.mod[rev].checked_type == relay.FuncType([rlist(a)], rlist(a), [a])
res = eval(
rev(
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
)
)
reversed = to_list(res)
assert len(reversed) == 3
assert count(reversed[0]) == 3
assert count(reversed[1]) == 2
assert count(reversed[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
    )
    assert prelude.mod[unfoldr].checked_type == expected_type
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldr(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 3
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
    )
    assert prelude.mod[unfoldl].checked_type == expected_type
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldl(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 1
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 3
@tvm.testing.uses_gpu
def test_map_accumr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accumr].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_acc_to_each = relay.Function([acc, x], relay.Tuple([add(x, acc), add(x, acc)]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accumr(add_acc_to_each, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 6
assert count(new_vals[1]) == 5
assert count(new_vals[2]) == 3
@tvm.testing.uses_gpu
def test_map_accuml():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accuml].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_to_acc = relay.Function([acc, x], relay.Tuple([add(x, acc), x]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accuml(add_to_acc, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 3
assert count(new_vals[1]) == 2
assert count(new_vals[2]) == 1
@tvm.testing.uses_gpu
def test_optional_matching():
x = relay.Var("x")
y = relay.Var("y")
v = relay.Var("v")
condense = relay.Function(
[x, y],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]), cons(v, y)),
relay.Clause(relay.PatternConstructor(none), y),
],
),
)
res = eval(
foldr(
condense,
nil(),
cons(
some(make_nat_expr(prelude, 3)),
cons(none(), cons(some(make_nat_expr(prelude, 1)), nil())),
),
)
)
reduced = to_list(res)
assert len(reduced) == 2
assert count(reduced[0]) == 3
assert count(reduced[1]) == 1
@tvm.testing.uses_gpu
def test_tmap():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[tmap].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), tree(a)], tree(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(tmap(add_one, rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))))
tree_dict = tree_to_dict(res)
assert count(tree_dict["member"]) == 1
assert len(tree_dict["children"]) == 2
for subtree in tree_dict["children"]:
assert count(subtree["member"]) == 1
assert len(subtree["children"]) == 0
@tvm.testing.uses_gpu
def test_size():
a = relay.TypeVar("a")
lhs = prelude.mod[size].checked_type
rhs = relay.FuncType([tree(a)], relay.scalar_type("int32"), [a])
assert lhs == rhs
root = rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))
t = rose(z(), cons(root, cons(root, cons(root, nil()))))
res = eval(size(t))
assert get_scalar(res) == 10
@tvm.testing.uses_gpu
def test_wildcard_match_solo():
x = relay.Var("x", nat())
copy = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternWildcard(), x)]), nat())
res = eval(copy(s(s(s(z())))))
assert count(res) == 3
@tvm.testing.uses_gpu
def test_wildcard_match_order():
x = relay.Var("x", rlist(nat()))
y = relay.Var("y")
a = relay.Var("a")
return_zero = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternWildcard(), z()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(a)]), y
),
relay.Clause(relay.PatternConstructor(nil), s(z())),
],
),
nat(),
)
res = eval(return_zero(cons(s(z()), nil())))
# wildcard pattern is evaluated first
assert count(res) == 0
@tvm.testing.uses_gpu
def test_nested_matches():
a = relay.TypeVar("a")
# TODO(@jroesch): inference should be able to handle this one
x = relay.Var("x", type_annotation=rlist(rlist(a)))
y = relay.Var("y")
w = relay.Var("w")
h = relay.Var("h")
t = relay.Var("t")
flatten = relay.GlobalVar("flatten")
# flatten could be written using a fold, but this way has nested matches
inner_match = relay.Match(
y,
[
relay.Clause(relay.PatternConstructor(nil), flatten(w)),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, flatten(cons(t, w))),
),
],
)
prelude.mod[flatten] = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(nil), nil()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(w)]),
inner_match,
),
],
),
rlist(a),
[a],
)
first_list = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
second_list = cons(
make_nat_expr(prelude, 4),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 6), nil())),
)
final_list = cons(first_list, cons(second_list, nil()))
res = eval(flatten(final_list))
flat = to_list(res)
assert len(flat) == 6
for i in range(6):
assert count(flat[i]) == i + 1
@tvm.testing.uses_gpu
def test_match_full_var():
x = relay.Var("x")
v = relay.Var("v")
id_func = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternVar(v), v)]))
res1 = eval(id_func(nil()))
res2 = eval(id_func(cons(z(), cons(z(), nil()))))
empty = to_list(res1)
assert len(empty) == 0
zeroes = to_list(res2)
assert len(zeroes) == 2
assert count(zeroes[0]) == 0
assert count(zeroes[1]) == 0
@tvm.testing.uses_gpu
def test_nested_pattern_match():
x = relay.Var("x", rlist(nat()))
h1 = relay.Var("h1")
h2 = relay.Var("h2")
t = relay.Var("t")
match = relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternVar(h1),
relay.PatternConstructor(cons, [relay.PatternVar(h2), relay.PatternVar(t)]),
],
),
h2,
),
relay.Clause(relay.PatternWildcard(), z()),
],
)
get_second = relay.Function([x], match)
res = eval(get_second(cons(s(z()), cons(s(s(z())), nil()))))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_compose():
n = relay.Var("n")
inc = relay.Function([n], s(n))
x = relay.Var("x")
res = eval(relay.Call(compose(inc, double), [s(s(z()))]))
assert count(res) == 5
@tvm.testing.uses_gpu
def test_iterate():
expr = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(prelude, 3)])
res = eval(relay.Function([], expr)())
assert count(res) == 12
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_basic_block_normal_form.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay.analysis import check_basic_block_normal_form
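# Basic block normal form requires that any expression shared across branches or scopes be
# let-bound in a scope that dominates all of its uses; the xfail tests below intentionally
# violate this by referencing a shared expression from inside an `if` branch.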
def test_one_block():
x = relay.var("x")
y = relay.add(x, x)
z = relay.add(x, y)
check_basic_block_normal_form(z)
def test_let():
x = relay.var("x")
y = relay.var("y")
body = relay.Let(y, x, y)
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if():
cond = relay.var("cond", dtype="bool", shape=())
shared = relay.var("shared")
true_branch = shared
false_branch = relay.add(shared, shared)
body = relay.If(cond, true_branch, false_branch)
"""
The program below violates basic block normal form, as the scope of %shared
is ambiguous and should not be in that of true branch.
free_var %cond: bool
if (%cond) {
free_var %shared
%shared
} else {
add(%shared, %shared)
}
"""
check_basic_block_normal_form(body)
def test_valid_if():
cond = relay.var("cond", dtype="bool", shape=())
shared = relay.var("shared")
true_branch = shared
false_branch = relay.add(shared, shared)
body = relay.If(cond, true_branch, false_branch)
shared_bound = relay.var("shared_bound", shape=(1,), dtype="float32")
body = relay.Let(shared, shared_bound, body)
"""
The program below uses let binding to control the scope of %shared, which
follows the basic block normal form.
free_var %shared_bound: Tensor[(1), float32]
let %shared = %shared_bound;
free_var %cond: bool
if (%cond) {
%shared
} else {
add(%shared, %shared)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if2():
"""
fn (%x: float32) {
%0 = equal(%x, 2f);
if (%0) {
%1 = add(%x, 1f);
multiply(%1, 2f)
} else {
multiply(%1, 1f)
}
}
"""
x = relay.var("x", shape=(), dtype="float32")
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.add(x, one)
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
func = relay.Function([x], body)
check_basic_block_normal_form(func)
def test_valid_if2():
"""
fn (%x: float32) {
let %v1 = add(%x, 1f);
%0 = equal(%x, 2f);
if (%0) {
multiply(%v1, 2f)
} else {
multiply(%v1, 1f)
}
}
"""
x = relay.var("x", shape=(), dtype="float32")
one = relay.const(1, dtype="float32")
two = relay.const(2, dtype="float32")
v1 = relay.var("v1")
v2 = relay.equal(x, two)
true_branch = relay.multiply(v1, two)
false_branch = relay.multiply(v1, one)
body = relay.If(v2, true_branch, false_branch)
body = relay.Let(v1, relay.add(x, one), body)
func = relay.Function([x], body)
check_basic_block_normal_form(func)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func():
x = relay.var("x", shape=(1,), dtype="float32") # , a)
y = relay.var("y", shape=(1,), dtype="float32") # , a)
z = relay.var("z", shape=(1,), dtype="float32") # , a)
x2 = relay.add(x, x)
func_a = relay.Function([y], relay.add(x2, y)) # , a, [a])
func_b = relay.Function([z], relay.add(x2, z)) # , a, [a])
body = relay.Tuple([func_a, func_b])
body = relay.Function([x], body)
"""
fn (%x: Tensor[(1), float32]) {
%1 = fn (%y: Tensor[(1), float32]) {
%0 = add(%x, %x);
add(%0, %y)
};
%2 = fn (%z: Tensor[(1), float32]) {
add(%0, %z)
};
(%1, %2)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_return():
x = relay.var("x", shape=(1,), dtype="float32") # , a)
y = relay.var("y", shape=(1,), dtype="float32") # , a)
z = relay.var("z", shape=(1,), dtype="float32") # , a)
x2 = relay.add(x, x)
func_a = relay.Function([y], relay.add(x2, y)) # , a, [a])
func_b = relay.Function([z], relay.add(x2, z)) # , a, [a])
body = relay.Tuple([func_a, func_b])
body = relay.Function([x], body)
"""
fn (%x: Tensor[(1), float32]) {
%1 = fn (%y: Tensor[(1), float32]) {
%0 = add(%x, %x);
add(%0, %y)
};
%2 = fn (%z: Tensor[(1), float32]) {
add(%0, %z)
};
(%1, %2)
}
"""
check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_nested():
x = relay.var("x", dtype="float32", shape=(1,))
s = relay.var("s", dtype="float32", shape=(1,))
shared = relay.add(s, s)
func_true = relay.Function([x], relay.add(x, shared))
choice_t = relay.FuncType([], relay.scalar_type("bool"))
f = relay.Var("f", choice_t)
z = relay.Var("z")
body = relay.If(f(), func_true, relay.Function([z], relay.add(z, shared)))
top = relay.Function([f, s], body)
"""
fn (%f: fn () -> bool, %s: Tensor[(1), float32]) {
%0 = %f();
if (%0) {
fn (%x: Tensor[(1), float32]) {
%1 = add(%s, %s);
add(%x, %1)
}
} else {
fn (%z) {
add(%z, %1)
}
}
}
"""
check_basic_block_normal_form(top)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_extract_fake_quantized_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
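# Each test below builds the dequantize -> float op -> quantize pattern that
# list_fake_quantized_op_freqs treats as a "fake quantized" region, then checks which float ops
# were counted inside that region.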
def test_fake_quantize_conv():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.conv2d": 1}
def test_fake_quantize_dense():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 1}
def test_fake_quantize_multiple_regions():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
op = relay.qnn.op.dequantize(op, relay.const(2.0), relay.const(114))
op = relay.op.nn.relu(op)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
w2 = relay.var("w2", shape=[64, 256], dtype="int8")
op = relay.op.nn.dense(
relay.qnn.op.dequantize(op, relay.const(1.0), zero),
relay.qnn.op.dequantize(w2, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
# We expect to ignore this sigmoid op since it's just outside a fake
# quantized region
op = relay.op.sigmoid(op)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 2, "nn.relu": 1}
def test_fake_quantize_maxpool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.max_pool2d(x, [3, 3])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.max_pool2d": 1}
def test_fake_quantize_transpose_reshape():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.transpose(x, [1, 0, 2, 3])
op = relay.op.reshape(op, [3, -1])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"transpose": 1, "reshape": 1}
def test_fake_quantize_concat():
zero = relay.const(0)
inputs = []
for i in range(4):
inputs.append(
relay.qnn.op.dequantize(
relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zero
)
)
concat = relay.op.concatenate(inputs, axis=1)
op = relay.qnn.op.quantize(concat, relay.const(3.5), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"concatenate": 1}
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_extract_fused_functions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
from tvm.relay.testing.synthetic import get_workload
def get_conv_net():
"""This gets the net for a case described in fuse_ops.cc:
conv2d
/ | \
/ | \
op op op
\ | /
\ | /
elemwise add
|
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x2 = relay.nn.conv2d(y, relay.var("w3"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x3 = relay.nn.conv2d(y, relay.var("w4"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(x1, x2)
z = relay.add(x3, z)
return tvm.IRModule.from_expr(z)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract_identity():
mod = get_conv2d()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 1
mod["main"] = mod["main"].with_attr("Primitive", tvm.tir.IntImm("int32", 1))
tvm.ir.structural_equal(list(items.values())[0], mod["main"])
def test_extract_conv_net():
mod = get_conv_net()
items = relay.analysis.extract_fused_functions(mod)
functions = list(items.values())
assert len(functions) == 2
x = functions[0]
y = functions[1]
def is_conv(func):
conv2d = relay.op.op.get("nn.conv2d")
call_node = func.body
return call_node.op == conv2d
def is_conv_add(func):
add = relay.op.op.get("add")
call_node = func.body
maybe_conv_module = tvm.IRModule.from_expr(call_node.args[0])
return call_node.op == add and is_conv(maybe_conv_module["main"])
# Function traversal order isn't obvious, so checking both orders is more consistent
assert (is_conv(x) and is_conv_add(y)) or (is_conv_add(x) and is_conv(y))
def test_extract_resnet():
mod, _params = get_workload()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 7
if __name__ == "__main__":
test_extract_identity()
test_extract_conv_net()
test_extract_resnet()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_extract_intermediate_expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import pytest
import tvm
from tvm import relay
def get_conv_net():
"""This gets the net for:
conv2d
/ |
/ |
conv2d |
\ |
\ |
elemwise add
|
|
|
split
|
|
|
elemwise add
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
tuple_0_add = relay.add(tuple_out[0], relay.const(1, dtype="float32"))
return tvm.IRModule.from_expr(tuple_0_add)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract():
dshape = (1, 1, 5, 1)
def before():
return get_conv_net()
def expected_0():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(y)
def expected_1():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(x1)
def expected_2():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
return tvm.IRModule.from_expr(z)
def expected_3():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out.astuple())
def expected_4():
# check tuple node
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out[0])
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 0), expected_0()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 1), expected_1()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 2), expected_2()
)
assert tvm.ir.structural_equal(
(relay.analysis.extract_intermdeiate_expr(before(), 3)), expected_3()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 4), expected_4()
)
assert tvm.ir.structural_equal(relay.analysis.extract_intermdeiate_expr(before(), 5), before())
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_extract_operators.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import pytest
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
from tvm.relay.testing import run_opt_pass
def get_conv_net():
"""This gets the net for:
conv2d
/ |
/ |
conv2d |
\ |
\ |
elemwise add
|
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
return tvm.IRModule.from_expr(z)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract_identity():
mod = get_conv2d()
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 1
assert op_freqs["nn.conv2d"] == 1
def test_extract_conv_net():
mod = get_conv_net()
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 2
assert op_freqs["add"] == 1
assert op_freqs["nn.conv2d"] == 2
def test_extract_fused():
mod = get_conv_net()
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(3)(mod)
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == 2
assert op_freqs["add"] == 1
assert op_freqs["nn.conv2d"] == 2
def test_extract_resnet():
mod, _params = get_workload()
expected_op_freqs = {
"nn.batch_norm": 19,
"nn.conv2d": 21,
"nn.relu": 18,
"nn.max_pool2d": 1,
"add": 8,
"nn.global_avg_pool2d": 1,
"nn.batch_flatten": 1,
"nn.dense": 1,
"nn.bias_add": 1,
"nn.softmax": 1,
}
op_freqs = relay.analysis.list_op_freqs(mod)
assert len(op_freqs) == len(expected_op_freqs)
assert all([op_freqs[op] == expected_op_freqs[op] for op in expected_op_freqs])
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature, Feature
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import run_infer_type
def test_prelude():
p = Prelude()
feats = detect_feature(p.mod)
assert feats == set(
[
Feature.fVar,
Feature.fGlobalVar,
Feature.fConstant,
Feature.fTuple,
Feature.fTupleGetItem,
Feature.fFunction,
Feature.fOp,
Feature.fCall,
Feature.fLet,
Feature.fIf,
Feature.fConstructor,
Feature.fMatch,
]
)
def test_ad():
shape = (10, 10)
dtype = "float32"
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x + x)
func = run_infer_type(func)
mod = tvm.IRModule.from_expr(gradient(func))
mod = relay.transform.InferType()(mod)
back_func = mod["main"]
feats = detect_feature(back_func)
assert feats == set(
[
Feature.fVar,
Feature.fTuple,
Feature.fTupleGetItem,
Feature.fFunction,
Feature.fOp,
Feature.fCall,
Feature.fLet,
Feature.fRefCreate,
Feature.fRefRead,
Feature.fRefWrite,
]
)
if __name__ == "__main__":
test_prelude()
test_ad()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_analysis_get_calibration_data.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.analysis import get_calibration_data
def check_data_size(mod, data):
assert len(data) == len(mod.functions) - 1
for key, value in mod.functions.items():
if key.name_hint != "main":
assert len(data[key]["inputs"]) == len(value.params)
if isinstance(value.body, relay.Tuple):
assert len(data[key]["outputs"]) == len(value.body.fields)
else:
assert len(data[key]["outputs"]) == 1
def test_simple_graph():
# A module with two subgraphs
mod = tvm.IRModule()
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z0 = x0 + y0
z1 = x0 - y0
z2 = relay.Tuple((z0, z1))
f0 = relay.Function([x0, y0], z2)
f0 = f0.with_attr("Compiler", "test_graph")
g0 = relay.GlobalVar("g0")
mod[g0] = f0
mod = relay.transform.InferType()(mod)
x1 = relay.var("x1", shape=(8, 8))
y1 = relay.var("y1", shape=(8, 8))
z1 = x1 - y1
f1 = relay.Function([x1, y1], z1)
f1 = f1.with_attr("Compiler", "test_graph")
g1 = relay.GlobalVar("g1")
mod[g1] = f1
mod = relay.transform.InferType()(mod)
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
z = relay.var("z", shape=(8, 8))
c0 = relay.Call(g0, [x, y])
c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
fm = relay.Function([x, y, z], c1)
mod["main"] = fm
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
z_data = np.random.rand(8, 8).astype("float32")
data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})
# Check the number and orders
check_data_size(mod, data)
tvm.testing.assert_allclose(data[g0]["inputs"][0].numpy(), x_data)
tvm.testing.assert_allclose(data[g0]["inputs"][1].numpy(), y_data)
tvm.testing.assert_allclose(data[g0]["outputs"][0].numpy(), x_data + y_data)
tvm.testing.assert_allclose(data[g0]["outputs"][1].numpy(), x_data - y_data)
tvm.testing.assert_allclose(data[g1]["inputs"][0].numpy(), x_data + y_data)
tvm.testing.assert_allclose(data[g1]["inputs"][1].numpy(), z_data)
tvm.testing.assert_allclose(data[g1]["outputs"][0].numpy(), x_data + y_data - z_data)
def test_mobilenet_dnnl():
if not tvm.get_global_func("relay.ext.dnnl", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
mod = transform.AnnotateTarget(["dnnl"])(mod)
mod = transform.MergeCompilerRegions()(mod)
mod = transform.PartitionGraph()(mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
data = get_calibration_data(mod, {"data": i_data, **params})
# Check the number and orders
check_data_size(mod, data)
if __name__ == "__main__":
test_simple_graph()
test_mobilenet_dnnl()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_annotated_regions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
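# Helper: verify that the region containing args[0] has the expected target and the
# expected sets of argument, node, and return expressions.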
def check_region(region_set, target, args, nodes, rets):
region = region_set.get_region(args[0])
assert region
assert target == region.target
assert set(args) == set(region.args)
assert set(nodes) == set(region.nodes)
assert set(rets) == set(region.rets)
def test_region_set_creator_diamond():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_1 = compiler_end(O_1, "test_target")
ce_2 = compiler_end(O_1, "test_target")
cb_2 = compiler_begin(ce_1, "test_target")
O_2 = relay.nn.relu(cb_2)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
ce_4 = compiler_end(O_3, "test_target")
diamond = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
diamond, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 4
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, ce_1, ce_2],
[ce_1, ce_2],
)
check_region(
region_set,
"test_target",
[cb_2],
[cb_2, O_2, ce_3],
[ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, ce_4],
[ce_4],
)
def test_region_set_creator_merged():
data = relay.var("data", shape=(10, 10))
cb_1 = compiler_begin(data, "test_target")
O_1 = relay.abs(cb_1)
ce_2 = compiler_end(O_1, "test_target")
O_2 = relay.nn.relu(O_1)
ce_3 = compiler_end(O_2, "test_target")
cb_d = compiler_begin(ce_2, "default")
X = relay.tanh(cb_d)
ce_d = compiler_end(X, "default")
cb_3 = compiler_begin(ce_3, "test_target")
cb_4 = compiler_begin(ce_d, "test_target")
O_3 = relay.add(cb_3, cb_4)
O_4 = relay.add(cb_3, cb_4)
O_5 = relay.Tuple([O_3, O_4])
ce_4 = compiler_end(O_5, "test_target")
merged = relay.Function([data], ce_4)
region_set = relay.analysis.AnnotatedRegionSet(
merged, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
)
assert len(region_set) == 3
check_region(
region_set,
"test_target",
[cb_1],
[cb_1, O_1, O_2, ce_2, ce_3],
[ce_2, ce_3],
)
check_region(
region_set,
"default",
[cb_d],
[cb_d, X, ce_d],
[ce_d],
)
check_region(
region_set,
"test_target",
[cb_3, cb_4],
[cb_3, cb_4, O_3, O_4, O_5, ce_4],
[ce_4],
)
if __name__ == "__main__":
test_region_set_creator_diamond()
test_region_set_creator_merged()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_any.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.loops import while_loop
from tvm.relay.testing import run_infer_type as infer_type
from tvm.topi.testing import searchsorted_ref
from utils import ref_funcs
from utils.assert_diagnostic import DiagnosticTesting
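# Small helpers used throughout: int32(v) wraps a value in an int32 Relay constant, and
# any_dims(n) builds an n-dimensional shape made entirely of relay.Any() placeholders
# (dynamic dimensions).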
def int32(val):
return relay.const(val, "int32")
def any_dims(ndim):
shape = []
for _ in range(ndim):
shape.append(relay.Any())
return tuple(shape)
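# check_result runs the module under both the "debug" and "vm" executors on every
# enabled target (the debug interpreter is skipped on non-CPU devices or when
# only_vm is set) and compares each output against `expected` -- elementwise by
# value, or by shape when assert_shape=True.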
def check_result(
args,
mod,
expected,
flatten=False,
assert_shape=False,
only_vm=False,
targets=None,
disable_targets=None,
):
if not isinstance(expected, list):
expected = [expected]
for kind in ["debug", "vm"]:
targets = targets or tvm.testing.enabled_targets()
for tgt, dev in targets:
if disable_targets and tgt in disable_targets:
continue
if kind == "debug" and (only_vm or dev.device_type != tvm.cpu().device_type):
continue
result = relay.create_executor(kind, mod=mod, device=dev, target=tgt).evaluate()(*args)
if isinstance(result, tvm.runtime.container.ADT):
result = [r.numpy() for r in result]
else:
result = [result.numpy()]
for r, e in zip(result, expected):
if assert_shape:
assert r.shape == e, "Shape mismatch: expect %s but got %s." % (
str(e),
str(r),
)
else:
if flatten:
r = r.flatten()
e = e.flatten()
tvm.testing.assert_allclose(r, e, atol=2e-6)
def verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
dtype = "float32"
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.var("y", shape=y_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], op(x, y))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
y_np = np.random.uniform(size=y_np_shape).astype(dtype)
res_np = np_op(x_np, y_np)
check_result([x_np, y_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_broadcast():
# Test broadcast with 1s
verify_any_broadcast((relay.Any(),), (3, 2), (1,), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (1, 2), (1, 2), (1, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (1, 2), (3, 2), (1, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, 2), (1, 2), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, relay.Any()), (1, 2), (3, 1), relay.add, np.add)
# Test broadcast with values other than 1
verify_any_broadcast((relay.Any(),), (3, 2), (2,), (3, 2), relay.add, np.add)
verify_any_broadcast((relay.Any(), 2), (3, 2), (3, 2), (3, 2), relay.add, np.add)
def verify_any_elemwise(x_shape, x_np_shape, op, np_op):
dtype = "float32"
x = relay.var("x", shape=x_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], op(x))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
res_np = np_op(x_np)
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_elemwise():
verify_any_elemwise((relay.Any(),), (3,), relay.sqrt, np.sqrt)
verify_any_elemwise((relay.Any(), 2), (5, 2), relay.negative, np.negative)
verify_any_elemwise((relay.Any(), relay.Any()), (5, 4), relay.exp, np.exp)
verify_any_elemwise((relay.Any(),), (3,), relay.round, np.round)
@tvm.testing.uses_gpu
def test_any_broadcast_fail():
# Test broadcast with incompatible values at runtime
def check_fail(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
try:
verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op)
except tvm._ffi.base.TVMError:
pass
else:
assert False
check_fail((relay.Any(),), (3, 2), (1,), (4, 2), relay.add, np.add)
check_fail((relay.Any(), 2), (3, 2), (4, 2), (4, 2), relay.add, np.add)
check_fail((relay.Any(), 2), (3, relay.Any()), (1, 2), (4, 1), relay.add, np.add)
check_fail((relay.Any(), 2), (3, 3), (1, 3), (3, 3), relay.add, np.add)
    check_fail((relay.Any(),), (3, 2), (2,), (4, 2), relay.add, np.add)
def verify_any_full_like(x_shape, x_np_shape, relay_op, np_op, dtype="float32"):
x = relay.var("x", shape=x_shape, dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay_op(x))
x_np = np.random.uniform(size=x_np_shape).astype(dtype)
res_np = np_op(x_np)
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_full_like():
# zeros_like, ones_like
verify_any_full_like(any_dims(3), (2, 3, 5), relay.zeros_like, np.zeros_like, "float32")
verify_any_full_like(any_dims(3), (225, 115, 15), relay.zeros_like, np.zeros_like, "float32")
verify_any_full_like(
any_dims(5), (10, 11, 12, 13, 14), relay.zeros_like, np.zeros_like, "int32"
)
verify_any_full_like(any_dims(3), (2, 3, 5), relay.ones_like, np.ones_like, "float32")
verify_any_full_like(any_dims(3), (225, 115, 15), relay.ones_like, np.ones_like, "float32")
verify_any_full_like(any_dims(5), (10, 11, 12, 13, 14), relay.ones_like, np.ones_like, "int32")
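# For zeros/ones the dynamic shape tensor is the only operand; for full, a constant
# fill value is passed in front of the shape tensor.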
def verify_any_full(x_np_shape, relay_op, np_op, dtype="float32", value=None):
x = relay.var("x", shape=(len(x_np_shape),), dtype="int32")
mod = tvm.IRModule()
out = relay_op(x, dtype) if value is None else relay_op(relay.expr.const(value), x, dtype)
mod["main"] = relay.Function([x], out)
res_np = np_op(x_np_shape) if value is None else np_op(x_np_shape, value)
x_np = np.array(x_np_shape).astype("int32")
check_result([x_np], mod, res_np)
@tvm.testing.uses_gpu
def test_any_full():
# zeros, ones, full
verify_any_full((2, 3, 5), relay.zeros, np.zeros, "float32")
verify_any_full((225, 115, 15), relay.zeros, np.zeros, "float32")
verify_any_full((10, 11, 12, 13, 14), relay.zeros, np.zeros, "int32")
verify_any_full((2, 3, 5), relay.ones, np.ones, "float32")
verify_any_full((225, 115, 15), relay.ones, np.ones, "float32")
verify_any_full((10, 11, 12, 13, 14), relay.ones, np.ones, "int32")
verify_any_full((10, 11, 12, 13, 14), relay.full, np.full, "float32", 2.0)
verify_any_full((1, 2, 3, 4), relay.full, np.full, "int32", -2)
@tvm.testing.uses_gpu
def test_any_concat():
x = relay.var("x", shape=(relay.Any(), 2), dtype="float32")
y = relay.var("y", shape=(1, 2), dtype="float32")
xx = x - relay.expr.const(3.0)
yy = y * relay.expr.const(5.0)
z = relay.op.concatenate([xx, yy], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(3, 2)).astype("float32")
y_np = np.random.uniform(size=(1, 2)).astype("float32")
ref = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
check_result([x_np, y_np], mod, ref)
num_inputs = 25
x = [relay.var("x", shape=(relay.Any(),), dtype="float32") for _ in range(num_inputs)]
z = relay.op.concatenate(x, axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function(x, z)
x_np = [np.random.uniform(size=(1,)).astype("float32") for _ in range(num_inputs)]
ref = np.concatenate(x_np, axis=0)
check_result(x_np, mod, ref)
def test_oshape(in_vars, axis, oshape):
z = relay.op.concatenate(in_vars, axis=axis)
mod = tvm.IRModule()
mod["main"] = relay.Function(in_vars, z)
typed_mod = relay.transform.InferType()(mod)
assert typed_mod["main"].body.checked_type == relay.TensorType(oshape, dtype="float32")
x = [relay.var("x", shape=(relay.Any(), 3), dtype="float32") for _ in range(3)]
x.append(relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32"))
test_oshape(x, 0, (relay.Any(), 3))
test_oshape(x, 1, (relay.Any(), relay.Any()))
# [(1, 3), (1, ?)] -> (2, ?)
x = [
relay.var("x", shape=(1, 3), dtype="float32"),
relay.var("x", shape=(1, relay.Any()), dtype="float32"),
]
test_oshape(x, 0, (2, relay.Any()))
test_oshape(x, 1, (1, relay.Any()))
def verify_any_reshape(x_shape, newshape, x_np_shape, out_shape, variable_newshape=False):
x = relay.var("x", shape=x_shape, dtype="float32")
relu_x = relay.nn.relu(x)
data = np.random.uniform(size=x_np_shape).astype("float32")
expected = data.reshape(out_shape)
params = [x]
args = [data]
if variable_newshape:
newshape_var = relay.var("newshape", shape=(len(newshape),), dtype="int64")
params.append(newshape_var)
args.append(np.array(newshape, dtype="int64"))
newshape = newshape_var
y = relay.reshape(relu_x, newshape=newshape)
mod = tvm.IRModule()
mod["main"] = relay.Function(params, y)
check_result(args, mod, expected)
@tvm.testing.uses_gpu
def test_any_reshape():
for variable_newshape in [False, True]:
# Variable newshape only supports that output rank is the same as newshape
verify_any_reshape(any_dims(3), (1, -1), (2, 3, 4), (1, 24), variable_newshape)
verify_any_reshape(any_dims(3), (0, -1), (2, 3, 4), (2, 12), variable_newshape)
verify_any_reshape(any_dims(3), (0, -2), (2, 3, 4), (2, 3, 4))
verify_any_reshape(any_dims(3), (-4, -1, 2, -3), (6, 3, 4), (3, 2, 12))
verify_any_reshape(any_dims(3), (-4, 2, -1, -2), (6, 3, 4), (2, 3, 3, 4))
verify_any_reshape(any_dims(3), (1, -1, 0), (2, 3, 4), (1, 6, 4))
verify_any_reshape(any_dims(3), (-1, 1, 0), (2, 3, 4), (6, 1, 4))
def verify_any_one_hot(indices_shape, indices_np_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", shape=indices_shape, dtype="int32")
on_value_const = relay.const(on_value, dtype)
off_value_const = relay.const(off_value, dtype)
y = relay.one_hot(indices, on_value_const, off_value_const, depth, axis=axis, dtype=dtype)
params = [indices]
mod = tvm.IRModule()
mod["main"] = relay.Function(params, y)
indices_npy = np.random.randint(0, depth, size=indices_np_shape).astype("int32")
out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
args = [indices_npy]
check_result(args, mod, out_npy)
@tvm.testing.uses_gpu
def test_any_one_hot():
verify_any_one_hot(any_dims(1), (3,), 3, 1, 0, -1, "int32")
verify_any_one_hot(any_dims(2), (2, 2), 5, 0.5, -0.5, 1, "float32")
verify_any_one_hot(any_dims(4), (3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
def verify_any_argwhere(x_shape, x_np_shape, dtype="bool"):
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.argwhere(x)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y)
data = np.random.choice([0, 1, 2, 3], size=x_np_shape).astype(dtype)
expected = np.argwhere(data)
check_result([data], mod, expected, flatten=True)
@tvm.testing.uses_gpu
def test_any_argwhere():
verify_any_argwhere(any_dims(1), (5,))
verify_any_argwhere(any_dims(2), (5, 5))
verify_any_argwhere(any_dims(2), (5, 5), "int32")
verify_any_argwhere(any_dims(2), (5, 5), "int8")
verify_any_argwhere(any_dims(3), (5, 5, 5))
verify_any_argwhere(any_dims(4), (5, 5, 5, 5))
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5))
verify_any_argwhere(any_dims(1), (5,), "int32")
verify_any_argwhere(any_dims(3), (5, 5, 5), "int32")
verify_any_argwhere(any_dims(4), (5, 5, 5, 5), "int32")
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5), "int32")
verify_any_argwhere(any_dims(1), (5,), "int8")
verify_any_argwhere(any_dims(3), (5, 5, 5), "int8")
verify_any_argwhere(any_dims(4), (5, 5, 5, 5), "int8")
verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5), "int8")
def verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape):
mod = tvm.IRModule()
data = relay.var("data", shape=data_shape, dtype="float32")
indices = relay.var("indices", shape=indices_shape, dtype="int32")
y = relay.take(data, indices, axis=axis)
mod["main"] = relay.Function([data, indices], y)
data_np = np.random.uniform(size=data_np_shape).astype("float32")
if axis is None:
max_index = data_np.size
else:
max_index = data_np.shape[axis]
indices_np = np.random.randint(max_index, size=indices_np_shape).astype("int32")
ref = np.take(data_np, indices_np, axis=axis)
check_result([data_np, indices_np], mod, ref)
@tvm.testing.uses_gpu
def test_any_take():
verify_any_take(any_dims(2), (1,), 0, (4, 5), (1,))
verify_any_take(any_dims(2), (), 0, (4, 5), ())
verify_any_take(any_dims(2), (), None, (4, 5), ())
verify_any_take(any_dims(3), any_dims(2), 1, (3, 4, 5), (2, 3))
verify_any_take(any_dims(2), any_dims(3), None, (4, 5), (2, 3, 4))
verify_any_take(any_dims(2), any_dims(4), -1, (4, 5), (2, 3, 4, 5))
def verify_any_tile(dshape, reps, np_dshape, np_reps):
mod = tvm.IRModule()
x = relay.var("x", shape=dshape, dtype="float32")
y = relay.tile(x, reps=reps)
mod["main"] = relay.Function([x], y)
x_data = np.random.uniform(size=np_dshape).astype("float32")
ref_res = np.tile(x_data, reps=np_reps)
check_result([x_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_tile():
verify_any_tile(any_dims(3), (3, 2, 1), (2, 3, 4), (3, 2, 1))
verify_any_tile(any_dims(3), (1, 2), (2, 3, 4), (1, 2))
verify_any_tile(any_dims(2), (3, 2, 1), (2, 3), (3, 2, 1))
verify_any_tile(any_dims(3), (1,), (2, 3, 4), (1,))
@tvm.testing.uses_gpu
def test_any_shape_of():
x = relay.var("x", shape=any_dims(2), dtype="float32")
y = relay.shape_of(x)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y)
data = np.random.uniform(size=(3, 4)).astype("float32")
check_result([data], mod, np.array([3, 4]).astype("int64"))
x = relay.var("x", shape=any_dims(3), dtype="float32")
y0 = relay.shape_of(x)
y1 = relay.take(y0, relay.const(1, "int32"))
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y1)
data = np.random.uniform(size=(2, 3, 4)).astype("float32")
check_result([data], mod, np.array(3).astype("int64"))
class TestAnyReduce:
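    # Each entry: (reduce_op, data_shape, axis, exclude, keepdims,
    #              static_data_shape, ref_out_shape)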
config = {
"argmax": (relay.argmax, any_dims(3), None, False, False, (3, 4, 5), ()),
"argmin": (relay.argmin, any_dims(4), 1, False, True, (3, 4, 5, 6), (3, 1, 5, 6)),
"all": (relay.all, any_dims(3), (1, 2), True, False, (3, 4, 5), (4, 5)),
"max": (relay.max, any_dims(4), -1, True, True, (3, 4, 5, 6), (1, 1, 1, 6)),
"min": (relay.min, any_dims(3), (0, 1), False, False, (4, 5, 6), (6,)),
"prod": (relay.prod, any_dims(4), 2, True, True, (3, 4, 5, 6), (1, 1, 5, 1)),
"mean": (relay.mean, any_dims(2), 0, False, False, (1, 2), (2,)),
"variance": (relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6)),
}
(
reduce_op,
data_shape,
axis,
exclude,
keepdims,
static_data_shape,
ref_out_shape,
) = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_any_reduce(
self,
target,
dev,
reduce_op,
data_shape,
axis,
exclude,
keepdims,
static_data_shape,
ref_out_shape,
):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and reduce_op == relay.all:
pytest.xfail("Known failing test case for vulkan runtime")
mod = tvm.IRModule()
dtype = "bool" if reduce_op == relay.all else "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = reduce_op(data, axis, keepdims, exclude)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)])
def verify_any_layout_transform(
data_shape, src_layout, dst_layout, static_data_shape, ref_out_shape
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.layout_transform(data, src_layout, dst_layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_layout_transform():
verify_any_layout_transform(any_dims(4), "NCHW", "NHWC", (3, 4, 5, 6), (3, 5, 6, 4))
verify_any_layout_transform(
any_dims(5), "NCHW16c", "NCHW2c", (1, 2, 8, 8, 16), (1, 16, 8, 8, 2)
)
verify_any_layout_transform(any_dims(5), "NCHW6n", "NHWC", (3, 4, 5, 6, 6), (18, 5, 6, 4))
verify_any_layout_transform(any_dims(4), "NCHW", "NCHW4c", (3, 4, 5, 6), (3, 1, 5, 6, 4))
verify_any_layout_transform((16, 1), "CH", "C4cH", (16, 1), (4, 4, 1))
def verify_any_expand_dims(data_shape, axis, num_newaxis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.expand_dims(data, axis=axis, num_newaxis=num_newaxis)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_expand_dims():
verify_any_expand_dims(any_dims(3), 1, 2, (1, 2, 3), (1, 1, 1, 2, 3))
verify_any_expand_dims(any_dims(3), -1, 2, (1, 2, 3), (1, 2, 3, 1, 1))
def verify_any_transpose(data_shape, axes, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.transpose(data, axes=axes)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.transpose(data_np, axes)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_transpose():
verify_any_transpose(any_dims(3), (1, 0, 2), (10, 3, 2))
verify_any_transpose(any_dims(3), None, (2, 3, 4))
verify_any_transpose(any_dims(6), (0, 1, 3, 2, 5, 4), (11, 12, 2, 1, 9, 17))
verify_any_transpose(any_dims(2), (-1, 0), (3, 2))
def verify_any_squeeze(data_shape, axis, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.squeeze(data, axis=axis)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.squeeze(data_np, axis)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_squeeze():
verify_any_squeeze((relay.Any(), relay.Any(), relay.Any()), (0,), (1, 9, 8))
verify_any_squeeze((1, relay.Any(), relay.Any()), (0,), (1, 9, 8))
verify_any_squeeze(
(1, relay.Any(), relay.Any(), 1, relay.Any(), relay.Any()), (0, 3), (1, 12, 2, 1, 9, 17)
)
@tvm.testing.uses_gpu
def test_any_reshape_like():
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=(relay.Any(), 3, 10), dtype=dtype)
shape_like = relay.var("data", shape=(relay.Any(), 5, 6), dtype=dtype)
y = relay.reshape_like(data, shape_like)
mod["main"] = relay.Function([data, shape_like], y)
data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
shape_like_np = np.random.uniform(size=(3, 5, 6)).astype(dtype)
check_result([data_np, shape_like_np], mod, shape_like_np.shape, assert_shape=True)
def verify_any_conv2d(
data_shape,
kernel_shape,
strides,
padding,
dilation,
static_data_shape,
ref_out_shape,
data_layout="NCHW",
kernel_layout="OIHW",
use_cudnn=False,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d(
data,
kernel,
strides,
padding,
dilation,
kernel_size=kernel_shape[2:4] if kernel_layout == "OIHW" else kernel_shape[0:2],
data_layout=data_layout,
kernel_layout=kernel_layout,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
targets = None
if use_cudnn and tvm.get_global_func("tvm.contrib.cudnn.conv2d.forward", True):
targets = [("cuda -libs=cudnn", tvm.cuda(0))]
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=targets)
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d():
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(1, 1),
(1, 64, 224, 224),
(1, 64, 224, 224),
)
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(2, 2),
(2, 64, 224, 224),
(2, 64, 222, 222),
)
verify_any_conv2d(
(relay.Any(), 64, 224, 224),
(64, 64, 3, 3),
(1, 1),
(1, 1),
(1, 1),
(1, 64, 224, 224),
(1, 64, 224, 224),
use_cudnn=True,
)
verify_any_conv2d(
(relay.Any(), 224, 224, 64),
(3, 3, 64, 64),
(1, 1),
(1, 1),
(1, 1),
(1, 224, 224, 64),
(1, 224, 224, 64),
data_layout="NHWC",
kernel_layout="HWIO",
)
verify_any_conv2d(
(relay.Any(), 224, 224, 64),
(3, 3, 64, 64),
(1, 1),
(1, 1),
(2, 2),
(2, 224, 224, 64),
(2, 222, 222, 64),
data_layout="NHWC",
kernel_layout="HWIO",
)
class TestAnyConv2dNCHWc:
data_shape = tvm.testing.parameter((relay.Any(), 8, 224, 224, 8))
kernel_shape = tvm.testing.parameter((8, 8, 3, 3, 8, 8))
strides = tvm.testing.parameter((1, 1))
padding = tvm.testing.parameter((1, 1))
data_layout = tvm.testing.parameter("NCHW8c")
kernel_layout = tvm.testing.parameter("OIHW8i8o")
out_layout = tvm.testing.parameter("NCHW8c")
dilation, static_data_shape, ref_out_shape = tvm.testing.parameters(
((1, 1), (1, 8, 224, 224, 8), (1, 8, 224, 224, 8)),
((2, 2), (2, 8, 224, 224, 8), (2, 8, 222, 222, 8)),
)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_conv2d_NCHWc(
self,
target,
dev,
data_shape,
kernel_shape,
strides,
padding,
dilation,
data_layout,
kernel_layout,
out_layout,
static_data_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.contrib_conv2d_nchwc(
data,
kernel,
strides,
padding,
dilation,
kernel_size=kernel_shape[2:4],
channels=kernel_shape[0] * kernel_shape[-1],
data_layout=data_layout,
kernel_layout=kernel_layout,
out_layout=out_layout,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result(
[data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
)
def verify_any_conv1d_transpose_ncw(
data_shape,
kernel_shape,
strides,
padding,
dilation,
groups,
static_data_shape,
ref_out_shape,
output_padding,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv1d_transpose(
data,
kernel,
strides,
padding,
dilation,
groups,
kernel_size=kernel_shape[2:],
output_padding=output_padding,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_conv1d_transpose_ncw():
verify_any_conv1d_transpose_ncw(
(relay.Any(), 64, 224),
(64, 192, 3),
(1,),
(1,),
(1,),
1,
(2, 64, 224),
(2, 192, 224),
(0, 0),
)
verify_any_conv1d_transpose_ncw(
(relay.Any(), 32, 224),
(32, 64, 3),
(2,),
(1,),
(1,),
1,
(1, 32, 224),
(1, 64, 448),
(1, 1),
)
def verify_any_conv2d_transpose_nchw(
data_shape,
kernel_shape,
strides,
padding,
dilation,
groups,
static_data_shape,
ref_out_shape,
output_padding,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
y = relay.nn.conv2d_transpose(
data,
kernel,
strides,
padding,
dilation,
groups,
kernel_size=kernel_shape[2:4],
output_padding=output_padding,
)
mod["main"] = relay.Function([data, kernel], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d_transpose_nchw():
verify_any_conv2d_transpose_nchw(
(relay.Any(), 64, 224, 224),
(64, 192, 3, 3),
(1, 1),
(1, 1),
(1, 1),
1,
(2, 64, 224, 224),
(2, 192, 224, 224),
(0, 0),
)
verify_any_conv2d_transpose_nchw(
(relay.Any(), 32, 224, 224),
(32, 64, 3, 3),
(2, 2),
(1, 1),
(1, 1),
1,
(1, 32, 224, 224),
(1, 64, 448, 448),
(1, 1),
)
def verify_any_pool2d(
pool_type,
data_shape,
pool_size,
strides,
dilation,
padding,
layout,
static_data_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
pool_func = relay.nn.max_pool2d if pool_type == "max" else relay.nn.avg_pool2d
data = relay.var("data", shape=data_shape, dtype=dtype)
y = pool_func(data, pool_size, strides, dilation, padding, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_pool2d():
verify_any_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any()),
(3, 3),
(1, 1),
(1, 1),
(1, 1),
"NCHW",
(2, 3, 220, 220),
(2, 3, 220, 220),
)
verify_any_pool2d(
"avg",
(relay.Any(), relay.Any(), relay.Any(), 4),
(1, 1),
(2, 2),
(1, 1),
(0, 0),
"NHWC",
(3, 220, 220, 4),
(3, 110, 110, 4),
)
verify_any_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any(), 4),
(3, 3),
(2, 2),
(1, 1),
(1, 1),
"NCHW4c",
(2, 3, 220, 220, 4),
(2, 3, 110, 110, 4),
)
def verify_any_global_pool2d(pool_type, data_shape, layout, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
pool_func = relay.nn.global_max_pool2d if pool_type == "max" else relay.nn.global_avg_pool2d
data = relay.var("data", shape=data_shape, dtype=dtype)
y = pool_func(data, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_global_pool2d():
verify_any_global_pool2d(
"max", (relay.Any(), 3, relay.Any(), relay.Any()), "NCHW", (2, 3, 220, 220), (2, 3, 1, 1)
)
verify_any_global_pool2d(
"avg", (relay.Any(), relay.Any(), relay.Any(), 4), "NHWC", (3, 220, 220, 4), (3, 1, 1, 4)
)
verify_any_global_pool2d(
"max",
(relay.Any(), 3, relay.Any(), relay.Any(), 4),
"NCHW4c",
(2, 3, 220, 220, 4),
(2, 3, 1, 1, 4),
)
def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.split(data, indices_or_sections, axis)
mod["main"] = relay.Function([data], y.astuple())
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
for kind in ["vm"]:
result = relay.create_executor(kind, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
data_np
)
for ret, ref_ret in zip(result, ref_out_shape):
assert ret.numpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % (
str(ref_ret),
str(ret.numpy().shape),
)
@tvm.testing.uses_gpu
def test_any_split():
verify_any_split((relay.Any(), 4), 2, -1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), 4), 2, 1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), relay.Any()), 2, 1, (9, 4), [(9, 2), (9, 2)])
verify_any_split((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
verify_any_split((relay.Any(), relay.Any()), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)])
verify_any_split((relay.Any(), 12), (8,), 1, (7, 12), [(7, 8), (7, 4)])
verify_any_split((relay.Any(), relay.Any()), (8,), 1, (7, 12), [(7, 8), (7, 4)])
@tvm.testing.uses_gpu
def test_any_batch_flatten():
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=any_dims(3), dtype=dtype)
y = relay.nn.batch_flatten(data)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
ref_out_shape = (3, 30)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
# TODO(tvm-team) Fix dense schedule
@tvm.testing.known_failing_targets("cuda", "vulkan")
class TestAnyDense:
(
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
) = tvm.testing.parameters(
(any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8)),
(any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)),
)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_dense(
self,
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
weight = relay.var("weight", shape=weight_shape, dtype=dtype)
y = relay.nn.dense(data, weight, units)
mod["main"] = relay.Function([data, weight], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
weight_np = np.random.uniform(size=static_weight_shape).astype(dtype)
check_result(
[data_np, weight_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
)
@tvm.testing.parametrize_targets("cuda -libs=cublas")
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_dense_cublas(
self,
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
):
self.test_any_dense(
target,
dev,
data_shape,
weight_shape,
units,
static_data_shape,
static_weight_shape,
ref_out_shape,
)
class TestAnyBatchMatmul:
dtype = tvm.testing.parameter("float32")
executor_kind = tvm.testing.parameter("vm", "debug")
(x_shape, y_shape) = tvm.testing.parameters(
((1, 16, 32), (1, 32, 16)),
((5, 16, 32), (5, 32, 16)),
((5, 16, 32), (5, 32, 20)),
((30, 16, 32), (30, 32, 20)),
)
# any_x = tvm.testing.parameter("none", "batch")
# any_y = tvm.testing.parameter("none", "batch", "all")
any_x, any_y = tvm.testing.parameters(
("none", "batch"), ("none", "all"), ("batch", "none"), ("batch", "batch"), ("batch", "all")
)
transpose_x = tvm.testing.parameter(True, False)
transpose_y = tvm.testing.parameter(True, False)
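    # any_x / any_y control how much of each shape is dynamic:
    # "none": fully static, "batch": only the batch dim is relay.Any(),
    # "all": every dim is relay.Any() (see the fixtures below).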
@tvm.testing.fixture
def x_var_shape(self, x_shape, any_x):
if any_x == "none":
return x_shape
elif any_x == "batch":
return tuple(relay.Any() if i == 0 else size for i, size in enumerate(x_shape))
elif any_x == "all":
return tuple(relay.Any() for _ in x_shape)
@tvm.testing.fixture
def y_var_shape(self, y_shape, any_y):
if any_y == "none":
return y_shape
elif any_y == "batch":
return tuple(relay.Any() if i == 0 else size for i, size in enumerate(y_shape))
elif any_y == "all":
return tuple(relay.Any() for _ in y_shape)
@tvm.testing.known_failing_targets("cuda", "vulkan")
def test_any_batch_matmul(
self,
target,
dev,
x_shape,
y_shape,
any_x,
any_y,
x_var_shape,
y_var_shape,
transpose_x,
transpose_y,
executor_kind,
dtype,
):
if transpose_x:
x_shape = (x_shape[0], x_shape[2], x_shape[1])
x_var_shape = (x_var_shape[0], x_var_shape[2], x_var_shape[1])
if transpose_y:
y_shape = (y_shape[0], y_shape[2], y_shape[1])
y_var_shape = (y_var_shape[0], y_var_shape[2], y_var_shape[1])
x = relay.var("x", relay.TensorType(x_var_shape, dtype))
y = relay.var("y", relay.TensorType(y_var_shape, dtype))
z = relay.nn.batch_matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
func = relay.Function([x, y], z)
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=transpose_x, trans_y=transpose_y)
mod = tvm.ir.IRModule.from_expr(func)
z = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate()(
x_np, y_np
)
tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5)
@tvm.testing.uses_gpu
def verify_any_pad(data_shape, pad_width, static_data_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.pad(data, pad_width)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out = np.pad(data_np, pad_width)
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_pad():
verify_any_pad(any_dims(3), ((0, 0), (1, 1), (2, 2)), (1, 2, 3))
verify_any_pad(any_dims(4), ((1, 0), (1, 3), (0, 2), (9, 0)), (13, 11, 3, 1))
def verify_any_dilate(data_shape, strides, static_data_shape, dilation_value=None):
assert len(data_shape) == len(strides)
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
if dilation_value is None:
y = relay.nn.dilate(data, strides)
else:
y = relay.nn.dilate(data, strides, dilation_value)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
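    # Reference output: dimension i grows to (d_i - 1) * strides[i] + 1, with positions
    # off the stride grid filled by dilation_value (0.0 by default).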
ref_shape = tuple(
(static_data_shape[i] - 1) * strides[i] + 1 for i in range(len(static_data_shape))
)
if dilation_value is None:
dilation_value = 0.0
ref_out = np.ones(shape=ref_shape, dtype=dtype)
ref_out = dilation_value * ref_out
ref_out[tuple(slice(None, None, strides[i]) for i in range(len(data_shape)))] = data_np
check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_dilate():
verify_any_dilate(any_dims(1), (1,), (1,))
verify_any_dilate(any_dims(1), (1,), (5,))
verify_any_dilate(any_dims(1), (5,), (5,))
verify_any_dilate(any_dims(3), (1, 1, 1), (1, 2, 3))
verify_any_dilate(any_dims(3), (1, 1, 2), (1, 2, 3))
verify_any_dilate(any_dims(3), (1, 1, 5), (1, 2, 3))
verify_any_dilate(any_dims(3), (3, 7, 5), (1, 2, 3))
verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4))
verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4), 1.0)
def verify_any_softmax(data_shape, axis, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.softmax(data, axis)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_softmax():
verify_any_softmax(any_dims(3), -1, (1, 2, 3), (1, 2, 3))
verify_any_softmax(any_dims(4), 2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_relu(data_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.relu(data)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_relu():
verify_any_relu(any_dims(3), (1, 2, 3), (1, 2, 3))
verify_any_relu(any_dims(4), (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_prelu(data_shape, alpha, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
alpha = relay.const(np.array([alpha]), dtype=dtype)
y = relay.nn.prelu(data, alpha)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_prelu():
verify_any_prelu(any_dims(3), 1, (1, 2, 3), (1, 2, 3))
verify_any_prelu(any_dims(4), 2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_leaky_relu(data_shape, alpha, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.leaky_relu(data, alpha)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_leaky_relu():
verify_any_leaky_relu(any_dims(3), 0.1, (1, 2, 3), (1, 2, 3))
verify_any_leaky_relu(any_dims(4), 0.2, (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_bias_add(data_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
bias = relay.const(np.random.randn(1), dtype=dtype)
y = relay.nn.bias_add(data, bias)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_bias_add():
verify_any_bias_add(any_dims(3), (1, 2, 3), (1, 2, 3))
verify_any_bias_add(any_dims(4), (13, 11, 3, 1), (13, 11, 3, 1))
def verify_any_topk(data_shape, kval, np_dshape, dtype, ret_type="indices", const_k=False):
mod = tvm.IRModule()
data = relay.var("data", shape=data_shape, dtype=dtype)
np_data = np.random.uniform(size=np_dshape).astype(dtype)
if const_k:
k = relay.const(kval)
args = [data]
in_vals = [np_data]
else:
k = relay.var("k", shape=(), dtype="int32")
args = [data, k]
in_vals = [np_data, kval]
out = relay.topk(data, k, ret_type=ret_type)
if ret_type == "both":
out = out[0]
mod["main"] = relay.Function(args, out)
    np_sorted = np.argsort(-np_data)
    if len(np_dshape) == 2:
        ref_out = np_sorted[:, 0:kval]
    else:
        ref_out = np_sorted[0:kval]
check_result(in_vals, mod, ref_out)
@tvm.testing.uses_gpu
def test_any_topk():
verify_any_topk(any_dims(1), 5, (10,), "float32")
verify_any_topk(any_dims(2), 2, (6, 3), "int32")
verify_any_topk(any_dims(2), 3, (6, 3), "float32", const_k=True)
verify_any_topk(any_dims(1), 0, (0,), "float32", ret_type="both")
def verify_any_get_valid_counts(num_anchor_real, dtype, targets=None):
mod = tvm.IRModule()
batch_size = 1
num_anchor = relay.Any()
data = relay.var("data", shape=(batch_size, num_anchor, 5), dtype=dtype)
np_data = np.random.uniform(size=(batch_size, num_anchor_real, 5)).astype(dtype)
np_out1 = np.zeros(shape=(batch_size,))
np_out2 = np.zeros(shape=np_data.shape).astype(dtype)
np_out3 = np.zeros(shape=(batch_size, num_anchor_real))
score_threshold = 0.95
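    # Build the reference outputs by hand: np_out1 holds the valid count per batch,
    # np_out2 the surviving boxes compacted to the front (remainder -1), and np_out3
    # their original indices (remainder -1).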
for i in range(batch_size):
np_out1[i] = 0
inter_idx = 0
for j in range(num_anchor_real):
score = np_data[i, j, 0]
if score > score_threshold:
for k in range(5):
np_out2[i, inter_idx, k] = np_data[i, j, k]
np_out1[i] += 1
np_out3[i, inter_idx] = j
inter_idx += 1
if j >= np_out1[i]:
for k in range(5):
np_out2[i, j, k] = -1.0
np_out3[i, j] = -1
z = relay.vision.get_valid_counts(data, score_threshold, 0, score_index=0)
mod["main"] = relay.Function([data], z.astuple())
check_result([np_data], mod, [np_out1, np_out2, np_out3], targets=targets)
@tvm.testing.uses_gpu
def test_any_get_valid_counts():
verify_any_get_valid_counts(10, "float32")
# opencl seems to have issues with empty size buffer
# Check failed: err_code == CL_SUCCESS == false: OpenCL Error,
# code=-61: CL_INVALID_BUFFER_SIZE
targets = []
for tgt, dev in tvm.testing.enabled_targets():
if "opencl" not in tgt:
targets.append((tgt, dev))
verify_any_get_valid_counts(0, "float32", targets=targets)
@tvm.testing.uses_gpu
def test_fused_ops():
x = relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32")
y0 = x + relay.const(1.0, "float32")
y1 = y0 * relay.const(2.0, "float32")
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y1)
data = np.random.uniform(size=(5, 4)).astype("float32")
check_result([data], mod, (data + 1) * 2)
@tvm.testing.uses_gpu
def test_arange_with_dynamic_shape():
# m, n, k = relay.ShapeVar('m'), relay.ShapeVar('n'), relay.ShapeVar('k')
m, n, k = relay.Any(), relay.Any(), relay.Any()
x = relay.var("x", shape=(m, n, k), dtype="float32")
y0 = relay.shape_of(x)
y1 = relay.take(y0, relay.const(0, "int32"))
y2 = relay.op.arange(y1, dtype="int32")
y3 = y2 + relay.const(1, dtype="int32")
data = np.random.rand(10, 5, 3).astype("float32")
mod = tvm.IRModule()
mod["main"] = relay.Function([x], y3)
check_result([data], mod, np.array(range(10)).astype("int32") + 1)
def verify_any_random_strided_slice(
begin_shape,
end_shape,
strides_shape,
data_shape,
slice_mode="end",
const_attrs=False,
):
# Generate random numpy input data
np_begin = np.random.randint(2, size=begin_shape, dtype="int32")
np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
np_strides = np.random.randint(
1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32"
)
verify_any_strided_slice(
np_begin, np_end, np_strides, data_shape, slice_mode=slice_mode, const_attrs=const_attrs
)
def verify_any_strided_slice(
np_begin,
np_end,
np_strides,
data_shape,
axes=None,
slice_mode="end",
const_attrs=False,
):
np_data = np.random.uniform(size=data_shape).astype("float32")
# target numpy result
ref_res = tvm.topi.testing.strided_slice_python(
np_data, np_begin, np_end, np_strides, slice_mode, axes
)
# Relay Module
mod = tvm.IRModule()
data = relay.var("data", shape=any_dims(len(data_shape)), dtype="float32")
if const_attrs:
begin = relay.const(np_begin)
end = relay.const(np_end)
strides = relay.const(np_strides)
args = [data]
np_inputs = [np_data]
else:
begin = relay.var("begin", shape=np_begin.shape, dtype="int32")
end = relay.var("end", shape=np_end.shape, dtype="int32")
strides = relay.var("strides", shape=np_strides.shape, dtype="int32")
args = [data, begin, end, strides]
np_inputs = [np_data, np_begin, np_end, np_strides]
y = relay.strided_slice(
data, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
)
mod["main"] = relay.Function(args, y)
check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_strided_slice():
verify_any_random_strided_slice((2,), (2,), (2,), (15, 21))
verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21))
verify_any_random_strided_slice((3,), (3,), (3,), (23, 29, 41))
verify_any_random_strided_slice((4,), (4,), (4,), (40, 50, 60, 70))
verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21), slice_mode="size")
verify_any_random_strided_slice((2,), (2,), (2,), (15, 21), const_attrs=True)
begin = np.array([0, 1000000]).astype("int32")
end = np.array([1000000, -1000000]).astype("int32")
strides = np.array([1, -1]).astype("int32")
verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=False)
verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=True)
verify_any_strided_slice(begin, end, strides, (15, 17, 21), axes=[0, 2], const_attrs=True)
@tvm.testing.uses_gpu
def test_recursive_concat():
"""
fn @concat_loop(%i: int32, %st: (any, 1)) -> (any, 1) {
if (%i < 10) {
            let %i_vec = reshape(%i, newshape=(1, 1))
            let %new_st = concatenate((%st, %i_vec), axis=0)
            concat_loop(%i + 1, %new_st)
} else {
st
}
}
"""
# Initial Values.
i = relay.var("i", shape=(), dtype="int32")
st = relay.var("st", shape=(relay.Any(), 1), dtype="int32")
def _cond(i, st):
return relay.op.min(relay.op.less(i, int32(10)))
def _body(i, st):
i_vec = relay.op.reshape(i, (1, 1))
ret = relay.op.concatenate([st, i_vec], axis=0)
return i + int32(1), ret
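    # while_loop stitches _cond/_body into a recursive Relay function; calling it with
    # the initial values below yields the loop expression.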
loop = while_loop(_cond, [i, st], _body)
start = relay.var("start", shape=(), dtype="int32")
body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
func = relay.Function([start], relay.TupleGetItem(body, 1))
mod = tvm.IRModule()
mod["main"] = func
data = np.array(0.0, dtype="int32")
ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32")
check_result([data], mod, ref)
@tvm.testing.uses_gpu
def test_recursive_concat_with_wrong_annotation():
"""
v0.0.1
fn (%start: int32) {
%7 = {
let %while_loop = fn (%i: int32, %st: Tensor[(1, 1), int32]) {
%0 = less(%i, 10)
%1 = min(%0)
if (%1) {
%2 = add(%i, 1)
%3 = reshape(%i, newshape=[1, 1])
%4 = (%st, %3)
/* The result of concat should be 1,1 but it is 2, 1. */
%5 = concatenate(%4)
%while_loop(%2, %5)
} else {
(%i, %st)
}
}
%6 = reshape(0, newshape=[1, 1])
%while_loop(%start, %6)
}
%7.1
}
"""
# Initial Values.
i = relay.var("i", shape=(), dtype="int32")
st = relay.var("st", shape=(1, 1), dtype="int32")
def _cond(i, st):
return relay.op.min(relay.op.less(i, int32(10)))
def _body(i, st):
i_vec = relay.op.reshape(i, (1, 1))
ret = relay.op.concatenate([st, i_vec], axis=0)
return i + int32(1), ret
loop = while_loop(_cond, [i, st], _body)
start = relay.var("start", shape=(), dtype="int32")
body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
func = relay.Function([start], relay.TupleGetItem(body, 1))
with DiagnosticTesting() as diagnostics:
diagnostics.assert_message(
"The Relay type checker is unable to show the following types match:\n"
" Tensor[(2, 1), int32]\n"
" Tensor[(1, 1), int32]\n"
"In particular:\n"
" dimension 0 conflicts: 2 does not match 1."
)
func = infer_type(func)
@tvm.testing.uses_gpu
def test_tuple_get_item():
mod = tvm.IRModule()
dtype = "float32"
static_data_shape = (9, 4)
data_shape = (relay.Any(), 4)
indices_or_sections = 2
axis = 1
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.split(data, indices_or_sections, axis)
y = relay.expr.TupleGetItem(y.astuple(), 0)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out_shape = (9, 2)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_mixed_input_type():
mod = tvm.IRModule()
dtype = "float32"
static_data_shape = (9, 4)
data_shape = (relay.Any(), 4)
tensor_type = relay.TensorType(data_shape, dtype)
tuple_type = relay.TupleType([tensor_type, tensor_type])
data0 = relay.var("d0", type_annotation=relay.TupleType([tuple_type, tensor_type]))
data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
data_tuple = relay.expr.TupleWrapper(data0, 2)
nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
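    # d0 is a nested tuple ((tensor, tensor), tensor); TupleWrapper gives tuple-style
    # indexing over the Relay tuple value.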
y = nested_data_tuple[1] * data_tuple[1] + data1
mod["main"] = relay.Function([data0, data1], y)
data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
ref_out_shape = (9, 4)
check_result(
[[[data_np0, data_np0], data_np0], data_np1],
mod,
ref_out_shape,
assert_shape=True,
only_vm=True,
)
def verify_any_crop_and_resize(
data_shape,
boxes_shape,
box_indices_shape,
crop_size,
layout,
static_boxes,
static_box_indices_shape,
ref_out_shape,
):
mod = tvm.IRModule()
dtype = "float32"
indices_dtype = "int32"
data = relay.var("data", shape=data_shape, dtype=dtype)
boxes = relay.var("boxes", shape=boxes_shape, dtype=dtype)
box_indices = relay.var("box_indices", shape=box_indices_shape, dtype=indices_dtype)
y = relay.image.crop_and_resize(data, boxes, box_indices, crop_size, layout)
mod["main"] = relay.Function([data, boxes, box_indices], y)
data_np = np.random.uniform(size=data_shape).astype(dtype)
boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
check_result([data_np, boxes_np, box_indices_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_crop_and_resize():
verify_any_crop_and_resize(
data_shape=(1, 234, 234, 256),
boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout="NHWC",
static_boxes=(128, 4),
static_box_indices_shape=(128,),
ref_out_shape=(128, 14, 14, 256),
)
verify_any_crop_and_resize(
data_shape=(1, 256, 234, 234),
boxes_shape=(relay.Any(), 4),
box_indices_shape=(relay.Any(),),
crop_size=(14, 14),
layout="NCHW",
static_boxes=(128, 4),
static_box_indices_shape=(128,),
ref_out_shape=(128, 256, 14, 14),
)
def verify_any_mirror_pad(data_shape, pad_width, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.nn.mirror_pad(data, pad_width)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_mirror_pad():
verify_any_mirror_pad(
data_shape=(1, 256, 232, 232),
pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
static_data_shape=(1, 256, 232, 232),
ref_out_shape=(1, 256, 234, 234),
)
def verify_any_ndarray_size(data_np_shape):
v = relay.var("v", shape=any_dims(len(data_np_shape)), dtype="float32")
n = relay.ndarray_size(v, dtype="int32")
mod = tvm.IRModule()
mod["main"] = relay.Function([v], n)
np_data = np.zeros(data_np_shape, dtype="float32")
ref_res = np.size(np_data)
check_result([np_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_ndarray_size():
verify_any_ndarray_size((2,))
verify_any_ndarray_size((2, 2))
verify_any_ndarray_size((1, 2, 3, 4))
def verify_any_resize2d(data_shape, scale, layout, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
if layout == "NHWC":
size = (data_shape[1] * scale, data_shape[2] * scale)
else:
size = (data_shape[2] * scale, data_shape[3] * scale)
y = relay.image.resize2d(data, size, None, layout)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_resize():
verify_any_resize2d(
data_shape=(relay.Any(), 4, 4, 4),
scale=2,
layout="NHWC",
static_data_shape=(1, 4, 4, 4),
ref_out_shape=(1, 8, 8, 4),
)
verify_any_resize2d(
data_shape=(relay.Any(), 8, 17, 20),
scale=3,
layout="NCHW",
static_data_shape=(2, 8, 17, 20),
ref_out_shape=(2, 8, 51, 60),
)
def verify_any_grid_sample(data_shape, grid_shape, static_data_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
grid = relay.var("grid", shape=grid_shape, dtype=dtype)
y = relay.image.grid_sample(data, grid)
mod["main"] = relay.Function([data, grid], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
grid_np = np.random.uniform(size=grid_shape).astype(dtype)
check_result([data_np, grid_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_grid_sample():
verify_any_grid_sample(
data_shape=(relay.Any(), 4, 16, 32),
grid_shape=(4, 2, 8, 8),
static_data_shape=(4, 4, 16, 32),
ref_out_shape=(4, 4, 8, 8),
)
verify_any_grid_sample(
data_shape=(relay.Any(), 4, 16, 32),
grid_shape=(4, 2, 32, 32),
static_data_shape=(4, 4, 16, 32),
ref_out_shape=(4, 4, 32, 32),
)
def verify_any_affine_grid(num_batch, static_num_batch, target_shape, ref_out_shape):
mod = tvm.IRModule()
dtype = "float32"
data_shape = (num_batch, 2, 3)
static_data_shape = (static_num_batch, 2, 3)
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.image.affine_grid(data, target_shape)
mod["main"] = relay.Function([data], y)
data_np = np.random.uniform(size=static_data_shape).astype(dtype)
check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_affine_grid():
verify_any_affine_grid(
num_batch=relay.Any(),
static_num_batch=1,
target_shape=(16, 32),
ref_out_shape=(1, 2, 16, 32),
)
verify_any_affine_grid(
num_batch=relay.Any(),
static_num_batch=8,
target_shape=(32, 32),
ref_out_shape=(8, 2, 32, 32),
)
def test_any_consecutive_broadcast():
dtype = "float32"
data0 = relay.var("data0", shape=any_dims(2), dtype=dtype)
data1 = relay.var("data1", shape=any_dims(2), dtype=dtype)
data2 = relay.var("data2", shape=any_dims(2), dtype=dtype)
data3 = relay.var("data3", shape=any_dims(2), dtype=dtype)
out0 = data0 + data1
out1 = data0 * data1
out2 = out0 - out1
out3 = data2 + data3
out4 = data2 * data3
out5 = out3 - out4
out6 = out2 * out5
mod = tvm.IRModule()
mod["main"] = relay.Function([data0, data1, data2, data3], out6)
np_data0 = np.random.uniform(size=(1, 4)).astype(dtype)
np_data1 = np.random.uniform(size=(2, 4)).astype(dtype)
np_data2 = np.random.uniform(size=(1, 4)).astype(dtype)
np_data3 = np.random.uniform(size=(2, 4)).astype(dtype)
ref_res = ((np_data0 + np_data1) - (np_data0 * np_data1)) * (
(np_data2 + np_data3) - (np_data2 * np_data3)
)
check_result([np_data0, np_data1, np_data2, np_data3], mod, ref_res)
def test_reshape_concat():
dtype = "float32"
d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
d1 = relay.var("d1", shape=any_dims(3), dtype=dtype)
out = relay.op.concatenate([relay.op.reshape(d0, [-1]), relay.op.reshape(d1, [-1])], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([d0, d1], out)
np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
np_data1 = np.random.uniform(size=(2, 5, 2)).astype(dtype)
ref_res = np.concatenate([np.reshape(np_data0, [-1]), np.reshape(np_data1, [-1])], axis=0)
check_result([np_data0, np_data1], mod, ref_res)
d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
d1 = relay.var("d1", shape=any_dims(2), dtype=dtype)
s0 = relay.var("s0", shape=any_dims(3), dtype=dtype)
s1 = relay.var("s1", shape=any_dims(3), dtype=dtype)
out = relay.op.concatenate(
[relay.op.reshape_like(d0, s0), relay.op.reshape_like(d1, s1)], axis=0
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d0, d1, s0, s1], out)
np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
np_data1 = np.random.uniform(size=(8, 5)).astype(dtype)
np_shape_like0 = np.random.uniform(size=(2, 2, 5)).astype(dtype)
np_shape_like1 = np.random.uniform(size=(4, 2, 5)).astype(dtype)
ref_res = np.concatenate(
[np.reshape(np_data0, np_shape_like0.shape), np.reshape(np_data1, np_shape_like1.shape)],
axis=0,
)
check_result([np_data0, np_data1, np_shape_like0, np_shape_like1], mod, ref_res)
def test_any_adv_index():
data = relay.var("data", shape=(5, relay.Any(), relay.Any()), dtype="float32")
index0 = relay.var("index0", shape=(1, relay.Any()), dtype="int64")
index1 = relay.var("index1", shape=(relay.Any(), 1), dtype="int64")
out = relay.adv_index([data, index0, index1])
mod = tvm.IRModule()
mod["main"] = relay.Function([data, index0, index1], out)
np_data_shape = (5, 5, 10)
np_index0_shape = (1, 4)
np_index1_shape = (4, 1)
np_data = np.random.uniform(size=np_data_shape).astype("float32")
np_index0 = np.random.uniform(0, np_data_shape[0], size=np_index0_shape).astype("int64")
np_index1 = np.random.uniform(0, np_data_shape[0], size=np_index1_shape).astype("int64")
ref_res = np_data[tuple([np_index0, np_index1])]
print(ref_res.shape)
check_result([np_data, np_index0, np_index1], mod, ref_res)
def verify_any_repeat(data_shape, np_dshape, repeats, axis):
mod = tvm.IRModule()
dtype = "float32"
data = relay.var("data", shape=data_shape, dtype=dtype)
y = relay.repeat(data, repeats, axis)
mod["main"] = relay.Function([data], y)
np_data = np.random.uniform(size=np_dshape).astype(dtype)
ref_res = np.repeat(np_data, repeats, axis)
check_result([np_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_repeat():
verify_any_repeat(any_dims(2), (1, 2), 2, 0)
verify_any_repeat(any_dims(1), (3,), 3, -1)
verify_any_repeat(any_dims(4), (2, 1, 1, 4), 4, 2)
def verify_any_stack(data_shape, np_dshape, num_data, axis):
mod = tvm.IRModule()
dtype = "float32"
inputs = []
for i in range(num_data):
inputs.append(relay.var("data{}".format(i), shape=data_shape, dtype=dtype))
y = relay.stack(inputs, axis)
mod["main"] = relay.Function(inputs, y)
np_inputs = []
for _ in range(num_data):
np_inputs.append(np.random.uniform(size=np_dshape).astype(dtype))
ref_res = np.stack(np_inputs, axis)
check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_stack():
verify_any_stack(any_dims(2), (1, 2), 3, 0)
verify_any_stack(any_dims(1), (3,), 4, -1)
verify_any_stack(any_dims(4), (2, 1, 1, 4), 2, 2)
def verify_any_where(
cond_shape, x_shape, y_shape, cond_np_shape, x_np_shape, y_np_shape, y_np_shape_invalid=None
):
dtype = "float32"
cond = relay.var("cond", shape=cond_shape, dtype="bool")
x = relay.var("x", shape=x_shape, dtype=dtype)
y = relay.var("y", shape=y_shape, dtype=dtype)
z = relay.where(cond, x, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([cond, x, y], z)
cond_np = np.random.randn(*cond_np_shape) > 0
x_np = np.random.randn(*x_np_shape).astype(dtype)
y_np = np.random.randn(*y_np_shape).astype(dtype)
expected = np.where(cond_np, x_np, y_np)
check_result([cond_np, x_np, y_np], mod, expected)
    # verify that invalid broadcast shapes are rejected
    if y_np_shape_invalid:
        y_np_bad = np.random.randn(*y_np_shape_invalid).astype(dtype)
        try:
            check_result([cond_np, x_np, y_np_bad], mod, expected)
            raise AssertionError("Expected an invalid broadcast shapes error")
        except tvm.error.TVMError as e:
            error_msg = str(e).split("\n")[-1]
            assert "Invalid broadcast shapes" in error_msg
@tvm.testing.uses_gpu
def test_any_where():
verify_any_where(any_dims(1), (5,), (5,), (5,), (5,), (5,))
verify_any_where(any_dims(1), any_dims(1), (5,), (5,), (5,), (5,))
verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (5,), (5,))
verify_any_where((5,), any_dims(1), any_dims(1), (5,), (5,), (5,))
# where with broadcast
verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (1,), (5,))
verify_any_where(any_dims(1), any_dims(2), any_dims(2), (5,), (5, 5), (5, 5))
verify_any_where(any_dims(1), any_dims(1), any_dims(2), (5,), (5,), (5, 5))
verify_any_where(
any_dims(2), any_dims(2), any_dims(2), (3, 4), (3, 1), (1, 4), y_np_shape_invalid=(2, 4)
)
# Test scalar where in a dynamically shaped graph
x = relay.var("x", shape=any_dims(1), dtype="int64")
y = relay.var("y", shape=any_dims(2), dtype="float32")
left = relay.take(x, relay.const(1, dtype="int32")) + relay.const(4, "int64")
right = relay.const(4, "int64")
where = relay.where(relay.const(False, "bool"), left, right)
z = relay.take(y, where, axis=1)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.randn(2).astype("int64")
y_np = np.random.randn(2, 6).astype("float32")
expected = y_np[:, 4]
check_result([x_np, y_np], mod, expected)
@tvm.testing.uses_gpu
def test_non_max_suppression():
x0 = relay.var("x0", relay.ty.TensorType((1, relay.Any(), 6), "float32"))
x1 = relay.var("x1", relay.ty.TensorType((1,), "int32"))
x2 = relay.var("x2", relay.ty.TensorType((1, relay.Any()), "int32"))
x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
z = relay.vision.non_max_suppression(
x0,
x1,
x2,
x3,
iou_threshold=0.5,
force_suppress=True,
top_k=2,
return_indices=True,
invalid_to_bottom=False,
)
z = z.astuple()
func = relay.Function([x0, x1, x2, x3], z)
mod = tvm.IRModule()
mod["main"] = func
np_data = np.array(
[
[
[0, 0.8, 1, 20, 25, 45],
[1, 0.7, 30, 60, 50, 80],
[0, 0.4, 4, 21, 19, 40],
[2, 0.9, 35, 61, 52, 79],
[1, 0.5, 100, 60, 70, 110],
]
]
).astype("float32")
np_valid_count = np.array([4]).astype("int32")
np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
np_max_output_size = -1
np_indices_result = np.array([[4, 0, -1, -1, -1]])
np_valid_box_count = np.array([[2]]).astype("int32")
check_result(
[np_data, np_valid_count, np_indices, np_max_output_size],
mod,
[np_indices_result, np_valid_box_count],
only_vm=False,
)
np_data = np.zeros((1, 0, 6)).astype("float32")
np_valid_count = np.array([0]).astype("int32")
np_indices = np.zeros((1, 0)).astype("int32")
np_max_output_size = -1
np_indices_result = np.zeros((1, 0))
np_valid_box_count = np.array([[0]]).astype("int32")
check_result(
[np_data, np_valid_count, np_indices, np_max_output_size],
mod,
[np_indices_result, np_valid_box_count],
only_vm=False,
)
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression():
def verify_all_class_non_max_suppression(
boxes_np,
scores_np,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
output_format="onnx",
):
batch_size = boxes_np.shape[0]
num_classes = scores_np.shape[1]
num_boxes = relay.Any()
boxes = relay.var("boxes", relay.ty.TensorType((batch_size, num_boxes, 4), "float32"))
scores = relay.var(
"scores", relay.ty.TensorType((batch_size, num_classes, num_boxes), "float32")
)
nms_out = relay.vision.all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_format
)
if output_format == "onnx":
three = relay.const(np.array([3]), dtype="int64")
begin = relay.const(np.array([0, 0]), dtype="int64")
end = relay.op.concatenate([nms_out[1], three], axis=0)
strides = relay.const(np.array([1, 1]), dtype="int64")
out = relay.op.strided_slice(nms_out[0], begin, end, strides)
mod = tvm.IRModule()
mod["main"] = relay.Function([boxes, scores], out)
check_result([boxes_np, scores_np], mod, [expected])
else:
out = nms_out.tuple_value
mod = tvm.IRModule()
mod["main"] = relay.Function([boxes, scores], out)
check_result([boxes_np, scores_np], mod, expected)
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.5, 0.5, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.8, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = 2
iou_threshold = 0.8
score_threshold = 0.4
expected = np.array([[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 0]])
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
expected = [
np.array(
[[[0, 4], [0, 2], [1, 4], [1, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
),
np.array(
[
[
0.9,
0.6,
0.9,
0.8,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
]
),
np.array([4]),
]
verify_all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
expected,
output_format="tensorflow",
)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 0.9, 1.2],
]
]
).astype(np.float32)
scores = np.array([[[0.2, 0.3], [0.3, 0.2]]]).astype(np.float32)
iou_threshold = 0.3
score_threshold = 0.15
expected = np.array([[0, 0, 1], [0, 1, 0]])
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
# zero box detection case
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
]
]
).astype(np.float32)
scores = np.array([[[0.2]]]).astype(np.float32)
score_threshold = 0.4
expected = np.zeros((0, 3))
verify_all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
)
@tvm.testing.uses_gpu
def test_gather_nd():
def verify_gather_nd(data_shape, indices_shape, data_shape_np, indices_shape_np, batch_dims=0):
x = relay.var("x", relay.TensorType(data_shape, "float32"))
y = relay.var("y", relay.TensorType(indices_shape, "int32"))
z = relay.gather_nd(x, y, batch_dims=batch_dims, index_rank=indices_shape[0])
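        # index_rank makes the size of the indexing tuple (indices.shape[0]) explicit,
        # which gather_nd needs when the indices shape contains dynamic dimensions.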
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
data_np = np.random.uniform(size=data_shape_np).astype("float32")
indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
ref_res = ref_funcs.gather_nd(data_np, indices_np, batch_dims)
check_result([data_np, indices_np], mod, [ref_res])
verify_gather_nd((2, 2), (2, relay.Any()), (2, 2), (2, 3))
verify_gather_nd((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3))
verify_gather_nd((relay.Any(), 2), (1, relay.Any()), (10, 2), (1, 10), 1)
verify_gather_nd(
(relay.Any(), 2, 2, 3, 4), (3, relay.Any(), relay.Any()), (3, 2, 2, 3, 4), (3, 3, 2), 2
)
@tvm.testing.uses_gpu
def test_scatter_nd():
def verify_scatter_nd(data_np, indices_np, updates_np, ref_res):
indices_shape = (2, relay.Any())
updates_shape = (relay.Any(),)
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices = relay.var("indices", relay.TensorType(indices_shape, str(indices_np.dtype)))
updates = relay.var("updates", relay.TensorType(updates_shape, str(updates_np.dtype)))
out = relay.op.scatter_nd(data, indices, updates, "add")
mod = tvm.IRModule()
mod["main"] = relay.Function([data, indices, updates], out)
check_result([data_np, indices_np, updates_np], mod, [ref_res])
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]])
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
verify_scatter_nd(data, indices, updates, out)
@tvm.testing.uses_gpu
def test_gather():
def verify_gather(data_shape, indices_shape, data_shape_np, indices_shape_np, axis):
x = relay.var("x", relay.TensorType(data_shape, "float32"))
y = relay.var("y", relay.TensorType(indices_shape, "int32"))
z = relay.gather(x, axis, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
data_np = np.random.uniform(size=data_shape_np).astype("float32")
indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
ref_res = tvm.topi.testing.gather_python(data_np, axis, indices_np)
check_result([data_np, indices_np], mod, [ref_res])
verify_gather((relay.Any(),), (relay.Any(),), (10,), (10,), 0)
verify_gather((2, 2), (2, relay.Any()), (2, 2), (2, 3), 1)
verify_gather((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3), 1)
verify_gather((relay.Any(), relay.Any()), (relay.Any(), relay.Any()), (2, 3), (1, 3), 0)
@tvm.testing.uses_gpu
def test_searchsorted():
def verify_searchsorted(
sorted_sequence_shape, values_shape, sorted_sequence_shape_np, values_shape_np
):
x = relay.var("x", relay.TensorType(sorted_sequence_shape, "float32"))
y = relay.var("y", relay.TensorType(values_shape, "float32"))
z = relay.searchsorted(x, y)
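        # searchsorted returns, for each query value, the index at which it would be
        # inserted into the sorted sequence (along the innermost axis) to keep it sorted.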
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.sort(np.random.uniform(size=sorted_sequence_shape_np).astype("float32"), axis=-1)
y_np = np.random.uniform(size=values_shape_np).astype("float32")
ref_res = searchsorted_ref(x_np, y_np, False, "int32")
check_result([x_np, y_np], mod, [ref_res])
for shape_np, values_shape_np in zip([(8, 9, 10), (10,), (11,)], [(8, 9, 20), (5,), (8, 9, 7)]):
sorted_sequence_shape = (relay.Any(),) * len(shape_np)
values_shape = (relay.Any(),) * len(values_shape_np)
verify_searchsorted(
sorted_sequence_shape,
values_shape,
shape_np,
values_shape_np,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout rewrite support for whole neural networks"""
import sys
import tempfile
import pytest
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.contrib import graph_executor
import tvm.testing
def get_np_array(var, dtype):
return np.random.randn(*[int(x) for x in var.type_annotation.shape]).astype(dtype)
def get_relay_conv2d(
outc=32,
inc=32,
height=14,
width=14,
kh=3,
kw=3,
batch=1,
pad=0,
stride=1,
dilation=1,
layout="NHWC",
):
dtype = "float32"
if layout == "NHWC":
kernel_layout = "HWIO"
d = relay.var("data", shape=(batch, height, width, inc), dtype=dtype)
w = relay.var("weight", shape=(kh, kw, inc, outc), dtype=dtype)
elif layout == "NCHW":
kernel_layout = "OIHW"
d = relay.var("data", shape=(batch, inc, height, width), dtype=dtype)
w = relay.var("weight", shape=(outc, inc, kh, kw), dtype=dtype)
y = relay.nn.conv2d(
d,
w,
padding=pad,
kernel_size=(kh, kw),
strides=(stride, stride),
dilation=(dilation, dilation),
channels=outc,
groups=1,
data_layout=layout,
kernel_layout=kernel_layout,
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_conv3d(
outc=8,
inc=8,
depth=8,
height=7,
width=7,
kd=1,
kh=1,
kw=1,
batch=1,
pad=0,
stride=1,
dilation=1,
layout="NDHWC",
):
dtype = "float32"
if layout == "NDHWC":
kernel_layout = "DHWIO"
d = relay.var("data", shape=(batch, depth, height, width, inc), dtype=dtype)
w = relay.var("weight", shape=(kd, kh, kw, inc, outc), dtype=dtype)
elif layout == "NCDHW":
kernel_layout = "OIDHW"
d = relay.var("data", shape=(batch, inc, depth, height, width), dtype=dtype)
w = relay.var("weight", shape=(outc, inc, kd, kh, kw), dtype=dtype)
y = relay.nn.conv3d(
d,
w,
padding=pad,
kernel_size=(kd, kh, kw),
strides=(stride, stride, stride),
dilation=(dilation, dilation, dilation),
channels=outc,
groups=1,
data_layout=layout,
kernel_layout=kernel_layout,
)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_dense(m=128, n=128, k=128):
dtype = "float32"
d = relay.var("data", shape=(m, k), dtype=dtype)
w = relay.var("weight", shape=(n, k), dtype=dtype)
y = relay.nn.dense(d, w)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def get_relay_batchmm(batch=4, m=128, n=128, k=128):
dtype = "float32"
d = relay.var("data", shape=(batch, m, k), dtype=dtype)
w = relay.var("weight", shape=(batch, n, k), dtype=dtype)
y = relay.nn.batch_matmul(d, w)
mod = tvm.IRModule()
mod["main"] = relay.Function([d, w], y)
data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
return mod, data, weight
def tune_and_check(mod, data, weight, target, dev):
# Extract tasks from a relay program
tasks, task_weights = auto_scheduler.extract_tasks(
mod, target=target, params={"weight": weight}
)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
# Tune tasks
tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=1,
num_measures_per_round=1,
builder=auto_scheduler.LocalBuilder(timeout=60),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option, search_policy="sketch.random")
# Compile
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
):
lib = relay.build(mod, target=target, params={"weight": weight})
# Compile without auto-scheduler for correctness check
with tvm.transform.PassContext(opt_level=0):
lib2 = relay.build(mod, target=target, params={"weight": weight})
def get_output(data, lib):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
# Check correctness
actual_output = get_output(data, lib)
expected_output = get_output(data, lib2)
tvm.testing.assert_allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
# layout rewriting only works on CPU targets
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d(target, dev):
mod, data, weight = get_relay_conv2d(kh=1, kw=1)
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d_winograd(target, dev):
mod, data, weight = get_relay_conv2d(outc=128, kh=3, kw=3)
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv3d(target, dev):
mod, data, weight = get_relay_conv3d()
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_dense(target, dev):
mod, data, weight = get_relay_dense()
tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_batch_matmul(target, dev):
mod, data, weight = get_relay_batchmm()
tune_and_check(mod, data, weight, target, dev)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_auto_scheduler_task_extraction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test task extraction for auto-scheduler"""
import json
import tempfile
import pytest
import tvm.relay.testing
import tvm.testing
from tvm import _ffi as _ffi_api
from tvm import auto_scheduler, relay
def get_network(name, batch_size=1, layout="NHWC"):
"""Get the symbol definition and random weight of a network"""
    # auto-scheduler prefers the NHWC layout
if layout == "NHWC":
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
elif layout == "NCDHW":
image_shape = (3, 16, 224, 224)
elif layout == "NDHWC":
image_shape = (3, 224, 224, 16)
else:
raise ValueError("Invalid layout: " + layout)
if name == "resnet-18":
mod, params = relay.testing.resnet.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "resnet-50":
mod, params = relay.testing.resnet.get_workload(
num_layers=50, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "winograd-test":
input_shape = [1, 23, 40, 32]
data = relay.var("data", shape=input_shape, dtype="float32")
net = relay.testing.layers.conv2d(
data=data,
channels=128,
kernel_size=3,
strides=1,
padding=1,
data_layout="NHWC",
kernel_layout="HWIO",
name="",
)
bias = relay.var("conv1_bias")
net = relay.nn.bias_add(net, bias, 3)
net = relay.nn.relu(net)
mod, params = relay.testing.create_workload(net)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(
num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
)
elif name == "dcgan":
mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size, layout=layout)
elif name == "mlp":
data = relay.var("data", shape=(batch_size, 32))
fc1 = relay.nn.dense(data, relay.var("fc1_weight"), units=32)
fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
act1 = relay.nn.relu(fc1)
fc2 = relay.nn.dense(act1, relay.var("fc2_weight"), units=32)
fc2 = relay.nn.bias_add(fc2, relay.var("fc2_bias"), axis=-1)
act2 = relay.nn.relu(fc2)
mlp = act2
args = relay.analysis.free_vars(act2)
mlp = relay.Function(args, mlp)
mod, params = relay.testing.init.create_workload(mlp)
else:
raise ValueError("Unsupported network: " + name)
return mod, params
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"params",
[
("mlp", "NHWC", 1, 2),
("resnet-18", "NHWC", 24, 25),
("resnet-18", "NCHW", 24, 25),
("mobilenet", "NHWC", 22, 30),
("mobilenet", "NCHW", 22, 30),
("resnet3d-18", "NCDHW", 23, 24),
("resnet3d-18", "NDHWC", 23, 24),
],
)
def test_task_extraction_cuda(params):
target = tvm.target.Target("cuda")
network, layout, expected_task, expected_weights = params
mod, params = get_network(network, layout=layout)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
for task, weight in zip(tasks, task_weights):
print(task.desc, task.workload_key, weight)
assert len(tasks) == expected_task
assert sum(task_weights) == expected_weights
@pytest.mark.parametrize(
"params",
[
        # Relay FuseOps puts the two conv2ds into separate functions, resulting in two tasks.
        ("basic_func", 2, False),
        # Relay FuseOps will not break the primitive function, resulting in one task.
        ("fused_func", 1, False),
        # A Relay function without complex ops will not form a task by default.
        ("simple_func", 0, False),
        # Every Relay function becomes a task regardless of the ops in its body.
        ("simple_func", 1, True),
        # A Relay function without any reduce op is considered a simple task.
        ("shape_of_func", 0, False),
        ("shape_of_func", 1, True),
        # A Relay function with dynamic shape inputs/outputs will not be extracted.
        ("dyn_shape_func", 0, False),
        # The Conv2D in a Relay function with control flow can still be a task.
        # Also, two identical Conv2Ds should form only one task with weight=2.
        ("control_flow_func", 1, False),
        # The first function, which contains an unsupported op (NMS), will not be extracted.
        ("func_w_unsupported_op", 1, True),
],
)
def test_task_extraction_cpu(params):
ishape = (1, 3, 224, 224)
w1shape = (32, 3, 3, 3)
w2shape = (32, 32, 3, 3)
dtype = "float32"
target = tvm.target.Target("llvm")
def get_func():
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
conv2d = relay.nn.conv2d(data, weight1, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
conv2d = relay.nn.conv2d(relu, weight2, kernel_size=(3, 3), padding=(1, 1))
out = relay.nn.relu(conv2d)
return relay.Function([data, weight1, weight2], out)
def get_fused_func():
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
fused_func = get_func()
        # Mark as primitive so FuseOps leaves this function untouched.
fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
call = relay.Call(fused_func, [data, weight1, weight2])
return relay.Function([data, weight1, weight2], call)
def get_simple_func():
data = relay.var("data", relay.TensorType((1, 2, 3), "float32"))
out = relay.image.affine_grid(data, (150, 150))
return relay.Function([data], out)
def get_shape_of_func():
data = relay.var("data", shape=(relay.Any(), 28, 28), dtype="float32")
out = relay.shape_of(data)
return relay.Function([data], out)
def get_func_with_dynamic_shape():
data = relay.var("data", shape=(relay.Any(), 32), dtype="float32")
out = relay.max(data)
return relay.Function(relay.analysis.free_vars(out), out)
def get_func_with_control_flow():
data = relay.var("data", shape=(1, 3, 224, 224))
weight = relay.var("weight", shape=(3, 3, 3, 3))
eq1 = relay.var("e1", shape=[], dtype="float32")
eq2 = relay.var("e2", shape=[], dtype="float32")
eq = relay.equal(eq1, eq2)
true_branch = relay.zeros(shape=(1, 3, 224, 224), dtype="float32")
false_branch = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=3, padding=(1, 1))
false_branch = relay.nn.conv2d(
false_branch, weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
)
ife = relay.If(eq, true_branch, false_branch)
out = relay.erf(ife)
return relay.Function([data, weight, eq1, eq2], out)
def get_func_with_unsupported_op():
def get_postproc_func():
data = relay.var("data", shape=((1, 3, 6)), dtype=dtype)
out = relay.nn.relu(data)
func = relay.Function([data], out)
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
return func
cls_prob = relay.var("cls_prob", relay.ty.TensorType((1, 3, 3), "float32"))
loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, 3 * 4), "float32"))
anchors = relay.var("anchors", relay.ty.TensorType((1, 3, 4), "float32"))
mtl = relay.vision.multibox_transform_loc(
cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
)
nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
out = relay.Call(get_postproc_func(), [nms])
return relay.Function([cls_prob, loc_pred, anchors], out)
func_map = {
"basic_func": get_func,
"fused_func": get_fused_func,
"simple_func": get_simple_func,
"shape_of_func": get_shape_of_func,
"dyn_shape_func": get_func_with_dynamic_shape,
"control_flow_func": get_func_with_control_flow,
"func_w_unsupported_op": get_func_with_unsupported_op,
}
def verify_task_extraction(func_name, expected_task, include_simple_tasks=False):
func = func_map[func_name]()
mod = tvm.IRModule.from_expr(func)
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"], None, target, include_simple_tasks=include_simple_tasks
)
assert len(tasks) == expected_task
assert len(task_weights) == expected_task
verify_task_extraction(*params)
def test_dump_workload_to_dag_extract_tasks():
mod, _ = get_network("mobilenet", layout="NHWC")
with tempfile.NamedTemporaryFile() as f:
tasks, _ = auto_scheduler.extract_tasks(
mod["main"], None, "llvm", include_simple_tasks=True, dump_workload_to_dag_log=f.name
)
expected = {task.workload_key: str(task.compute_dag) for task in tasks}
actual = json.load(f)
assert expected == actual
def test_custom_hash_func_extract_tasks():
@_ffi_api.register_func("auto_scheduler.compute_dag.hash_func")
def counting_unique_hash(str_dag):
ret = counting_unique_hash.i
counting_unique_hash.i += 1
return ret
counting_unique_hash.i = 0
mod, _ = get_network("mobilenet", layout="NHWC")
tasks, _ = auto_scheduler.extract_tasks(mod["main"], None, "llvm", include_simple_tasks=True)
hash_values = []
for task in tasks:
# task.workload_key should look like
# [43, [3, 3, 1024, 1], [1024], [3, 3, 1024, 1]] where the first int is the result of the hash
# Extract the hash and keep track of every hash
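        # For example, with workload_key '[43, [3, 3, 1024, 1], ...]' the slice
        # workload_key[1:] starts with '43, ...', so the first comma-separated field
        # parses to the integer hash 43.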
hash_value = int(task.workload_key[1:].split(",")[0])
hash_values.append(hash_value)
    # All values are unique, and we know the min and max.
    # This is a sufficient condition to conclude that the hashes come from the counting
    # hash function, spanning 0 to counting_unique_hash.i - 1.
assert len(hash_values) == len(set(hash_values))
assert min(hash_values) == 0
assert max(hash_values) == counting_unique_hash.i - 1
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_auto_scheduler_tuning.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test end-to-end network tuning with auto-scheduler"""
import tempfile
import numpy as np
from tvm import auto_scheduler, relay
from tvm.contrib import graph_executor
import tvm.testing
from test_auto_scheduler_task_extraction import get_network
def tune_network(network, target):
# Extract tasks
mod, params = get_network(network)
target = tvm.target.Target(target)
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
# Tuning
measure_ctx = auto_scheduler.LocalRPCMeasureContext(timeout=60, device=0)
tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=100,
num_measures_per_round=2,
early_stopping=1,
runner=measure_ctx.runner,
builder=auto_scheduler.LocalBuilder(timeout=60),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
tuner.tune(tune_option, search_policy="sketch.random")
del measure_ctx
# Compile with the history best
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib = relay.build(mod, target=target, params=params)
# Also test that multiple log files can be loaded.
with auto_scheduler.ApplyHistoryBest([log_file, log_file]) as best:
assert isinstance(
best, auto_scheduler.dispatcher.ApplyHistoryBest
), "Unable to load multiple log files jointly."
# Confirm iterables can be directly loaded.
loaded_recs = auto_scheduler.dispatcher.load_records(log_file)
with auto_scheduler.ApplyHistoryBest(iter(loaded_recs)) as best:
assert isinstance(
best, auto_scheduler.dispatcher.ApplyHistoryBest
), "Unable to ingest logs from an interator."
# Sample a schedule when missing
with auto_scheduler.ApplyHistoryBestOrSample(None, num_measure=2):
with tvm.transform.PassContext(
opt_level=3, config={"relay.backend.use_auto_scheduler": True}
):
lib2 = relay.build(mod, target=target, params=params)
# Compile without auto-scheduler and any other optimization for correctness check
with tvm.transform.PassContext(opt_level=0):
ref_lib = relay.build(mod, target=target, params=params)
# Check the correctness
def get_output(data, lib):
dev = tvm.cuda()
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
np.random.seed(0)
if network == "mlp":
data = np.random.uniform(size=(1, 32))
elif network == "winograd-test":
data = np.random.uniform(size=(1, 23, 40, 32))
else:
raise ValueError("Unknown network: " + network)
actual_output1 = get_output(data, lib)
actual_output2 = get_output(data, lib2)
expected_output = get_output(data, ref_lib)
tvm.testing.assert_allclose(actual_output1, expected_output, rtol=1e-4, atol=1e-4)
tvm.testing.assert_allclose(actual_output2, expected_output, rtol=1e-4, atol=1e-4)
@tvm.testing.requires_cuda
def test_tuning_cuda():
tune_network("mlp", "cuda")
tune_network("winograd-test", "cuda")
if __name__ == "__main__":
test_tuning_cuda()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_autotvm_task_extraction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test task extraction for autotvm"""
import tvm.relay.testing
from tvm import relay
from tvm import autotvm
def get_network(name, batch_size):
"""Get the symbol definition and random weight of a network"""
input_shape = (batch_size, 3, 224, 224)
if name == "resnet-18":
mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=batch_size)
elif name == "resnet3d-18":
mod, params = relay.testing.resnet_3d.get_workload(num_layers=18, batch_size=batch_size)
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size)
elif name == "dcgan":
mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size)
input_shape = (batch_size, 100)
else:
raise ValueError("Unsupported network: " + name)
return mod, params, input_shape
def test_task_extraction():
target = "llvm"
mod_list = []
params_list = []
conv2d = relay.op.get("nn.conv2d")
conv3d = relay.op.get("nn.conv3d")
conv2d_transpose = relay.op.get("nn.conv2d_transpose")
dense = relay.op.get("nn.dense")
mod, params, _ = get_network("resnet-18", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(conv2d,)
)
assert len(tasks) == 12
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv2d,))
assert len(tasks) == 12
mod, params, _ = get_network("resnet-18", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(dense,)
)
assert len(tasks) == 2
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 2
mod, params, _ = get_network("resnet-18", batch_size=1)
mod_list.append(mod)
params_list.append(params)
tasks = autotvm.task.extract_from_program(
mod["main"], target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 14
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 14
tasks = autotvm.task.extract_from_program(mod, target=target, params=params)
assert len(tasks) == 14
mod, params, _ = get_network("resnet3d-18", batch_size=1)
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv3d,))
assert len(tasks) == 12
mod, params, _ = get_network("mobilenet", batch_size=1)
mod_list.append(mod)
params_list.append(params)
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d, dense)
)
assert len(tasks) == 21
mod, params, _ = get_network("dcgan", batch_size=1)
tasks = autotvm.task.extract_from_program(
mod, target=target, params=params, ops=(conv2d_transpose,)
)
assert len(tasks) == 4
tasks = autotvm.task.extract_from_multiple_program(
mod_list, params_list, target=target, ops=(conv2d,)
)
assert len(tasks) == 31
def test_task_extraction_for_dense_int8_cuda():
target = "cuda"
dense = relay.op.get("nn.dense")
def get_net(batch, in_dim, out_dim, dtype, out_dtype):
data = tvm.relay.var("data", shape=[batch, in_dim], dtype=dtype)
weight = tvm.relay.var("weight", shape=[out_dim, in_dim], dtype=dtype)
out = relay.nn.dense(data, weight, out_dtype=out_dtype)
mod, params = relay.testing.create_workload(out)
return mod, params
mod, params = get_net(1, 16, 32, "float32", "float32")
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 1 and tasks[0].name == "dense_small_batch.gpu"
mod, params = get_net(1, 16, 32, "int8", "int32")
tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
assert len(tasks) == 1 and tasks[0].name == "dense_int8.cuda"
if __name__ == "__main__":
test_task_extraction()
test_task_extraction_for_dense_int8_cuda()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_backend_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from unittest.mock import patch
import tvm
import json
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.op import add
import tvm.testing
from tvm.relay.testing import mlp
from tvm import rpc
from tvm.contrib import utils
# @tq, @jr should we put this in testing ns?
def check_rts(expr, args, expected_result, mod=None):
"""
    Check that evaluating `expr` applied to the arguments produces
    `expected_result` on both the evaluator and the TVM runtime.
Parameters
----------
expr:
The expression to evaluate
args: list of Expr
The arguments to supply the expr.
expected_result:
The expected result of running the expression.
"""
eval_result = relay.create_executor("debug", mod=mod).evaluate(expr)(*args)
rts_result = relay.create_executor("graph", mod=mod).evaluate(expr)(*args)
tvm.testing.assert_allclose(eval_result.numpy(), rts_result.numpy())
tvm.testing.assert_allclose(eval_result.numpy(), expected_result)
def test_add_op_scalar():
"""
test_add_op_scalar:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=()) # Default to float32
y = relay.var("y", shape=()) # Default to float32
func = relay.Function([x, y], add(x, y))
x_y_data = [
(np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")),
(np.float32(10.0), np.float32(1.0)),
(10.0, 1.0),
]
for (x_data, y_data) in x_y_data:
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_scalar_int():
"""
test_add_op_scalar_int:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(), dtype="int32")
y = relay.var("y", shape=(), dtype="int32")
func = relay.Function([x, y], add(x, y))
x_y_data = [
(np.array(10.0, dtype="int32"), np.array(1.0, dtype="int32")),
(np.int32(10), np.int32(1)),
(10, 1),
]
for (x_data, y_data) in x_y_data:
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_tensor():
"""
Program:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(10, 5))
func = relay.Function([x, y], add(x, y))
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(10, 5).astype("float32")
check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_broadcast():
"""
Program:
fn (x, y) {
return x + y;
}
"""
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
func = relay.Function([x, y], add(x, y))
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
check_rts(func, [x_data, y_data], x_data + y_data)
def test_with_params():
x = relay.var("x", shape=(10, 5))
y = relay.var("y", shape=(1, 5))
z = relay.add(x, y)
z = relay.exp(z)
func = relay.Function([x, y], z)
x_data = np.random.rand(10, 5).astype("float32")
y_data = np.random.rand(1, 5).astype("float32")
params = {"y": y_data}
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm", params=params)
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.set_input(**params)
mod.set_input(x=x_data)
mod.run()
res = mod.get_output(0).numpy()
ref_res = np.exp(y_data + x_data)
tvm.testing.assert_allclose(res, ref_res, atol=1e-5, rtol=1e-5)
def test_plan_memory():
# it is sufficient to cycle through two memories.
x = relay.var("x", shape=(10,))
y = relay.var("x", shape=(1,))
y2 = relay.exp(y)
z = relay.add(x, y2)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
z = relay.exp(z)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(0)(mod)
func = mod["main"]
mod = relay.transform.InferType()(mod)
memory_plan = relay.backend._backend.GraphPlanMemory(func)
storage_ids = set()
device_types = set()
storage_sizes = {}
for k, v in memory_plan.expr_to_storage_info.items():
for x in v.storage_ids:
storage_ids.add(x)
storage_sizes[x] = v.storage_sizes
for x in v.device_types:
device_types.add(x)
    # The current rule requires vars to have unique storage ids.
    # Because we don't do in-place updates, we need another
    # two alternating temporary spaces.
assert len(storage_ids) == 4, f"found storage_ids: {storage_ids}"
assert len(device_types) == 1
assert len(storage_sizes) == 4
# Check the specific size of each sid
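    # A (10,) float32 buffer takes 40 bytes and a (1,) float32 buffer takes 4 bytes.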
assert (
storage_sizes[0][0] == 40
and storage_sizes[1][0] == 4
and storage_sizes[2][0] == 4
and storage_sizes[3][0] == 40
)
def test_plan_2d_memory():
"""Verification if GraphPlanMemory manages 2d memory reffered as
global.texture* memory scopes in json file."""
global_virtual_device = tvm.target.VirtualDevice(memory_scope="global")
texture_virtual_device = tvm.target.VirtualDevice(memory_scope="global.texture")
metatable = {
"VirtualDevice": [
global_virtual_device,
texture_virtual_device,
]
}
mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data1: Tensor[(1, 32, 40, 40), float32],
%data2: Tensor[(1, 32, 40, 40), float32]) {
%0 = fn (%a, Primitive=1) {
layout_transform(%a, src_layout="NCHW", dst_layout="NCHW4c")
};
%1 = %0(%data1);
%3 = %0(%data2);
%5 = fn (%a {virtual_device=meta[VirtualDevice][0]}, // global
%b {virtual_device=meta[VirtualDevice][0]}, // global
virtual_device=meta[VirtualDevice][1], // texture
Primitive=1) {
add(%a, %b)
};
%6 = %5(%1, %3);
%7 = fn (%a {virtual_device=meta[VirtualDevice][1]}, // texture
%b {virtual_device=meta[VirtualDevice][0]}, // global
virtual_device=meta[VirtualDevice][1], // texture
Primitive=1) {
add(%a, %b)
};
%8 = %7(%6, %3);
%9 = fn (%a {virtual_device=meta[VirtualDevice][1]}, // texture
%b {virtual_device=meta[VirtualDevice][1]}, // texture
virtual_device=meta[VirtualDevice][1], // texture
Primitive=1) {
add(%a, %b)
};
%10 = %9(%8, %6);
%11 = fn (%a,
virtual_device=meta[VirtualDevice][0], // global
Primitive=1) {
layout_transform(%a, src_layout="NCHW4c", dst_layout="NCHW")
};
%11(%10)
}
""",
"from_string",
None,
metatable,
)
GPU_DEVICE = tvm.device("cuda")
HOST_TARGET = tvm.target.Target("llvm")
GPU_TARGET = tvm.target.Target("cuda").with_host(HOST_TARGET)
GPU = tvm.target.VirtualDevice(GPU_DEVICE, GPU_TARGET) # device_type=2
CTXT = tvm.transform.PassContext(config={"relay.fallback_device_type": GPU.device_type_int})
config = tvm.target.make_compilation_config(CTXT, GPU_TARGET)
mod = relay.transform.InferType()(mod)
# PlanDevices should succeed.
mod = relay.transform.PlanDevices(config)(mod)
func = mod["main"]
memory_plan = relay.backend._backend.GraphPlanMemory(func)
virtual_devices = {}
    # We do not have execution-order information; the only ordering we can rely on
    # here is the storage_id.
    # For the graph above we know that:
    # - there are 8 manageable storages
    # - 5 of them are buffers
    # - 3 of them are textures (2d storages)
    # - 1 buffer is reused; since the data is keyed by storage id, the 4th
    #   storage id is reused and hidden in the virtual_devices map
    # - no textures are reused so far
for k, v in memory_plan.expr_to_storage_info.items():
virtual_devices[v.storage_ids[0]] = v.virtual_devices[0].memory_scope
    # Check the scopes according to the expectations above
assert (
virtual_devices[0] == "global"
and virtual_devices[1] == "global"
and virtual_devices[2] == "global"
and virtual_devices[3] == "global"
and virtual_devices[4] == "global.texture"
and virtual_devices[5] == "global.texture"
and virtual_devices[6] == "global.texture"
)
def test_reshape_nop():
# test that reshape can be turned into nop
x = relay.var("x", shape=(10, 4))
xx = relay.abs(x)
y = relay.expand_dims(xx, axis=1)
t0 = relay.reshape(y, (1, 40))
t1 = relay.abs(y)
z0 = relay.reshape(t0, (2, 20))
z1 = relay.sqrt(t1)
z2 = relay.reshape(t1, (1, 40))
func = relay.Function([x], relay.Tuple([z0, z1, z2]))
x_data = np.random.rand(10, 4).astype("float32")
graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
graph_json_str = graph.get_graph_json()
graph_json = json.loads(graph_json_str)
# reshape must force sharing memory
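    # The two reshape nodes are lowered to __nop and reuse the storage id of their
    # inputs, which is why ids 1 and 2 each appear twice below.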
storage_ids = graph_json["attrs"]["storage_id"][1]
assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
assert graph_json["nodes"][2]["attrs"]["func_name"] == "__nop"
assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"
gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))
gmod.set_input(x=x_data)
gmod.run()
z0_np = x_data.reshape(2, 20)
z1_np = np.sqrt(
np.abs(
x_data.reshape(
10,
1,
4,
)
)
)
z2_np = np.abs(x_data).reshape(1, 40)
tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
@tvm.testing.uses_gpu
def test_gru_like():
def unit(rnn_dim):
X = relay.var("X", shape=(1, rnn_dim))
W = relay.var("y", shape=(3 * rnn_dim, rnn_dim))
matmul = relay.nn.dense(X, W)
splitted = relay.split(matmul, indices_or_sections=3, axis=1)
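        # Combine the three slices with gate-like activations (sigmoid, tanh, exp),
        # mimicking a simplified GRU-style cell.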
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
return relay.Function([X, W], out)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def unit_numpy(X, W):
prod = np.dot(X, W.transpose())
splits = np.split(prod, indices_or_sections=3, axis=1)
return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])
dtype = "float32"
rnn_dim = 1000
x = np.random.rand(1, rnn_dim).astype(dtype)
y = np.random.rand(3 * rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005
out_shape = (1, rnn_dim)
z = unit(rnn_dim)
for target, dev in tvm.testing.enabled_targets():
with tvm.transform.PassContext(opt_level=2):
graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target)
m = graph_executor.create(graph, lib, dev)
m.set_input("X", tvm.nd.array(x.astype(dtype)))
m.set_input("y", tvm.nd.array(y.astype(dtype)))
m.set_input(**params)
m.run()
out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()
ref = unit_numpy(x, y)
tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
def test_compile_nested_tuples():
x = relay.var("x", shape=(10,))
x1 = x + relay.const(1.0)
x2 = x1 + relay.const(1.0)
x3 = x2 + relay.const(1.0)
x4 = x3 + relay.const(1.0)
out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
func = relay.Function([x], out)
graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
x_data = np.random.uniform(size=(10,)).astype(np.float32)
mod.set_input(x=x_data)
mod.run()
assert mod.get_num_outputs() == 4
ref = x_data + 1
for i in range(mod.get_num_outputs()):
out = mod.get_output(i).numpy()
tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
ref = ref + 1
def test_compile_return_empty_tuple():
x = relay.var("x", shape=[16], dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.Tuple([])))
graph, lib, _ = relay.build(mod, "llvm")
mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
mod.run()
@tvm.testing.uses_gpu
def test_compile_fused_identity_cast():
    # a fused function that would be optimized to an identity
x = relay.var("x", shape=[16], dtype="float32")
y = relay.cast(x, "float32")
func1 = relay.Function([x], y).with_attr("Primitive", 1)
# a fused function with param pass-through
x = relay.var("x", shape=[16], dtype="float32")
y = relay.add(x, relay.const(3.14, "float32"))
func2 = relay.Function([x], relay.Tuple([x, y])).with_attr("Primitive", 1)
x_global = relay.var("xx", shape=[16], dtype="float32")
tup = func2(x_global)
y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))
mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))
for target, device in tvm.testing.enabled_targets():
with tvm.transform.PassContext(opt_level=2):
graph, lib, _ = relay.build(mod, target=target)
executor = graph_executor.create(graph, lib, device=device)
executor.run()
def test_graph_executor_nested_tuples():
x, y, z, w = [relay.var(c, shape=(2, 3), dtype="float32") for c in "xyzw"]
out = relay.Tuple([x, relay.Tuple([y, relay.Tuple([z, w])])])
func = relay.Function([x, y, z, w], out)
f = relay.create_executor(
kind="graph", mod=tvm.IRModule.from_expr(func), device=tvm.cpu(0), target="llvm"
).evaluate()
data = [np.random.uniform(size=(2, 3)).astype("float32") for _ in "xyzw"]
out = f(*data)
assert len(out) == 2
tvm.testing.assert_allclose(out[0].numpy(), data[0])
assert len(out[1]) == 2
tvm.testing.assert_allclose(out[1][0].numpy(), data[1])
assert len(out[1][1]) == 2
tvm.testing.assert_allclose(out[1][1][0].numpy(), data[2])
tvm.testing.assert_allclose(out[1][1][1].numpy(), data[3])
def test_graph_executor_api():
dname_0, dname_1 = "data_0", "data_1"
data_0, data_1 = [relay.var(c, shape=(1, 1), dtype="float32") for c in [dname_0, dname_1]]
net = relay.add(data_0, data_1)
func = relay.Function((data_0, data_1), net)
lib = relay.build(tvm.IRModule.from_expr(func), "llvm")
mod = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
assert mod.get_input_index(dname_1) == 1
assert mod.get_input_index(dname_0) == 0
assert mod.get_input_index("Invalid") == -1
shape_dict, dtype_dict = mod.get_input_info()
assert isinstance(shape_dict, tvm.container.Map)
assert isinstance(dtype_dict, tvm.container.Map)
for data in [data_0, data_1]:
name = data.name_hint
ty = data.type_annotation
# verify shape
assert name in shape_dict
assert isinstance(shape_dict[name], tvm.runtime.container.ShapeTuple)
assert shape_dict[name] == tvm.runtime.container.ShapeTuple([i.value for i in ty.shape])
# verify dtype
assert name in dtype_dict
assert isinstance(dtype_dict[name], tvm.runtime.container.String)
assert dtype_dict[name] == ty.dtype
@tvm.testing.requires_llvm
def test_benchmark():
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target="llvm", params=params)
exe = graph_executor.create(lib.get_graph_json(), lib.lib, tvm.cpu())
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
assert result.mean == result.median
assert result.mean > 0
assert len(result.results) == 2
with patch.object(
tvm.runtime.module.Module,
"time_evaluator",
return_value=lambda: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),
) as method:
result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
assert result.mean == 2.5
assert result.median == 2.0
assert result.max == 5
assert result.min == 1
assert result.std == 1.5
@tvm.testing.parametrize_targets("cuda", "llvm")
def test_benchmark_end_to_end(dev, target):
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target=target, params=params)
exe = graph_executor.create(lib.get_graph_json(), lib.lib, dev)
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
result = exe.benchmark(dev, data=data, func_name="run", repeat=2, number=1, end_to_end=True)
assert result.mean > 0
assert len(result.results) == 2
@tvm.testing.requires_cuda
def test_benchmark_end_to_end_rpc():
server = rpc.Server("127.0.0.1")
remote = rpc.connect(server.host, server.port)
mod, params = mlp.get_workload(1)
lib = relay.build(mod, target="cuda", params=params)
temp = utils.tempdir()
path = temp.relpath("library.so")
lib.export_library(path)
remote.upload(path)
rlib = remote.load_module("library.so")
dev = remote.device("cuda")
exe = graph_executor.create(lib.get_graph_json(), rlib, dev)
data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
result = exe.benchmark(dev, data=data, func_name="run", repeat=2, number=1, end_to_end=True)
assert result.mean > 0
assert len(result.results) == 2
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_backend_interpreter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import testing
from tvm import nd
from tvm import relay
from tvm.runtime import container
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
from tvm.relay.scope_builder import ScopeBuilder
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
# TODO(tqchen) add more types once the schedule register is fixed.
for target in ["llvm"]:
dev = tvm.device(target, 0)
if not testing.device_enabled(target):
return
func = relay.create_executor(mod=mod, device=dev, target=target).evaluate(expr)
result = func if args is None else func(*args)
        # use tvm.testing, which also sets atol
testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_tuple_value():
tv = container.tuple_object([relay.const(1), relay.const(2), relay.const(3)])
np.testing.assert_allclose(tv[0].data.numpy(), 1)
np.testing.assert_allclose(tv[1].data.numpy(), 2)
np.testing.assert_allclose(tv[2].data.numpy(), 3)
def test_tuple_getitem():
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
check_eval(func, [], 1)
def test_id():
x = relay.var("x", "float32")
ident = relay.Function([x], x)
one = np.array(1.0, "float32")
check_eval(ident, [one], one)
def test_add_const():
two = relay.add(relay.const(1), relay.const(1))
func = relay.Function([], two)
check_eval(func, [], 2)
def test_mul_param():
x = relay.var("x", shape=(10, 10))
y = relay.var("y", shape=(1, 10))
func = relay.Function([x, y], relay.multiply(x, y))
x_data = np.random.rand(10, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
check_eval(func, [x_data, y_data], x_data * y_data)
def test_equal():
i = relay.var("i", shape=[], dtype="int32")
j = relay.var("i", shape=[], dtype="int32")
z = relay.equal(i, j)
func = relay.Function([i, j], z, ret_type=relay.TensorType([], "bool"))
i_data = relay.const(0, "int32")
j_data = relay.const(0, "int32")
check_eval(func, [i_data, j_data], True)
def test_subtract():
i = relay.var("i", shape=[], dtype="int32")
sub = relay.subtract(i, relay.const(1, dtype="int32"))
func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
i_data = np.array(1, dtype="int32")
check_eval(func, [i_data], 0)
def test_simple_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
mod[sum_up] = func
i_data = np.array(10, dtype="int32")
check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
def test_loop():
mod = tvm.IRModule({})
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
accum = relay.var("accum", shape=[], dtype="int32")
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, "int32"))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
i_data = np.array(10, dtype="int32")
accum_data = np.array(0, dtype="int32")
check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)
def test_ref():
mod = tvm.IRModule()
three_with_ref = relay.GlobalVar("three_with_ref")
i = relay.Var("i")
iv = relay.Var("iv")
u = relay.Var("u")
uv = relay.Var("uv")
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
mod[three_with_ref] = relay.Function([], body)
check_eval(three_with_ref, [], 3, mod=mod)
def test_binds():
x = relay.var("x")
y = relay.add(x, x)
xx = np.ones((10, 20))
res = relay.create_executor().evaluate(y, binds={x: xx}).numpy()
testing.assert_allclose(xx + xx, res)
def test_kwargs_params():
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1, 10))
z = relay.var("z", shape=(1, 10))
f = relay.Function([x, y, z], x + y + z)
x_data = np.random.rand(1, 10).astype("float32")
y_data = np.random.rand(1, 10).astype("float32")
z_data = np.random.rand(1, 10).astype("float32")
params = {"y": y_data, "z": z_data}
res = relay.create_executor().evaluate(f)(x_data, **params)
testing.assert_allclose(res.numpy(), x_data + y_data + z_data)
def test_function_taking_adt_ref_tuple():
mod = tvm.IRModule()
prelude = relay.prelude.Prelude(mod)
_, cons, nil = prelude.mod.get_type("List")
nil_value = ConstructorValue(nil.tag, [], nil)
cons_value = ConstructorValue(
cons.tag,
[nd.array(np.random.rand(1, 10).astype("float32")), nil_value],
cons,
)
ref_value = RefValue(nd.array(np.random.rand(1, 10).astype("float32")))
tuple_value = container.tuple_object(
[nd.array(np.random.rand(1, 10).astype("float32")) for _ in range(10)]
)
id_func = relay.create_executor(mod=mod).evaluate(prelude.id)
res_nil = id_func(nil_value)
assert res_nil.tag == nil_value.tag
assert len(res_nil.fields) == 0
res_cons = id_func(cons_value)
assert res_cons.tag == cons_value.tag
assert len(res_cons.fields) == len(cons_value.fields)
testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy())
assert isinstance(res_cons.fields[1], ConstructorValue)
assert res_cons.fields[1].tag == nil.tag
assert len(res_cons.fields[1].fields) == 0
res_ref = id_func(ref_value)
testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())
res_tuple = id_func(tuple_value)
for i in range(10):
testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())
def test_tuple_passing():
x = relay.var(
"x",
type_annotation=relay.ty.TupleType(
[relay.ty.TensorType((), "int64"), relay.ty.TensorType((), "int64")]
),
)
fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
mod = tvm.IRModule({})
gv = relay.GlobalVar("main")
mod[gv] = fn
mod = relay.transform.InferType()(mod)
dev = tvm.cpu()
target = tvm.target.Target("llvm")
f = relay.create_executor(mod=mod, device=dev, target=target).evaluate(gv)
# First use a Python tuple.
out = f((10, 8))
testing.assert_allclose(out.numpy(), np.array(10))
# Second use a tuple value.
value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])
out = f(value_tuple)
testing.assert_allclose(out.numpy(), np.array(11))
def test_dynamic():
n = 3
m = 2
x = relay.Var("x", relay.TensorType([relay.Any(), m], "float32"))
y = relay.Var("y", relay.TensorType([relay.Any(), m], "float32"))
xx = x - relay.expr.const(3.0)
yy = y * relay.expr.const(5.0)
z = relay.op.concatenate([xx, yy], axis=0)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(n, m)).astype("float32")
y_np = np.random.uniform(size=(n, m)).astype("float32")
expected = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
check_eval(None, [x_np, y_np], expected, mod)
def test_ref_global_from_expr():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
y = relay.Var("y", relay.TensorType([n], "float32"))
mod = tvm.IRModule()
mod["add"] = relay.Function([x, y], relay.add(x, y))
x_np = np.random.uniform(size=(n,)).astype("float32")
y_np = np.random.uniform(size=(n,)).astype("float32")
expected = np.add(x_np, y_np)
expr = relay.Call(mod.get_global_var("add"), [relay.const(x_np), relay.const(y_np)])
check_eval(expr, None, expected, mod)
def test_keyword_args():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
y = relay.Var("y", relay.TensorType([n], "float32"))
z = relay.add(x, y)
mod = tvm.IRModule()
mod["main"] = relay.Function([x, y], z)
x_np = np.random.uniform(size=(n,)).astype("float32")
y_np = np.random.uniform(size=(n,)).astype("float32")
expected = np.add(x_np, y_np)
actual = relay.create_executor(mod=mod).evaluate()(y=y_np, x=x_np)
testing.assert_allclose(actual.numpy(), expected)
# TODO(mbs): Support? Would help reduce wasted work when we need to prepare
# multiple functions w.r.t. the same module.
@pytest.mark.skip(reason="closures are currently not directly Python callable")
def test_functional_returns():
n = 3
x = relay.Var("x", relay.TensorType([n], "float32"))
f = relay.Function([x], x)
t = relay.Tuple([f, f])
c = np.random.rand(n).astype("float32")
result1, result2 = relay.create_executor().evaluate(t)
testing.assert_allclose(result1(c).numpy(), c)
testing.assert_allclose(result2(c).numpy(), c)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
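"""Unit tests for relay.build and the graph executor codegen APIs."""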
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.target.target import Target
from tvm.relay.backend import Runtime, Executor, graph_executor_codegen
@pytest.mark.parametrize(
"test_target,unsupported_config",
[
["c", "-runtime=c"],
["c", "-system-lib=1"],
["c", "-executor=aot"],
["c", "-interface-api=c"],
["c", "-unpacked-api=1"],
["c", "-link-params=1"],
],
)
def test_deprecated_target_parameters(test_target, unsupported_config):
with pytest.raises(ValueError) as e_info:
Target(f"{test_target} {unsupported_config}")
assert f"Cannot recognize '{unsupported_config}" in str(e_info.execption)
def test_build_relay_graph_():
"""Test to build a simple relay graph by using APIs directly"""
def build_graph(mod, target):
target, target_host = tvm.target.Target.canon_target_and_host(target)
mod, _ = relay.optimize(mod, target)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
_, lowered_funcs, _ = grc.codegen(mod, mod["main"])
_ = relay.backend._backend.build(lowered_funcs, target)
def add(shape, dtype):
lhs = relay.var("A", shape=shape, dtype=dtype)
rhs = relay.var("B", shape=shape, dtype=dtype)
out = relay.add(lhs, rhs)
expr = relay.Function((lhs, rhs), out)
mod = tvm.IRModule.from_expr(expr)
return mod
build_graph(add((1, 8), "float32"), tvm.target.Target("llvm"))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_call_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
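"""Unit tests for the Relay call graph analysis (relay.analysis.CallGraph)."""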
import pytest
import tvm
from tvm import relay
def test_callgraph_construct():
mod = tvm.IRModule({})
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
mod["g1"] = relay.Function([x, y], x + y)
call_graph = relay.analysis.CallGraph(mod)
assert "g1" in str(call_graph)
assert tvm.ir.structural_equal(mod, call_graph.module)
def test_print_element():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
mod["g0"] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
mod["g1"] = relay.Function([x1, y1], x1 - y1)
call_graph = relay.analysis.CallGraph(mod)
assert "#refs = 0" in str(call_graph.print_var("g0"))
assert "#refs = 0" in str(call_graph.print_var("g1"))
def test_global_call_count():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], g0(x1, y1))
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.global_call_count(g0) == 0
assert call_graph.global_call_count(g1) == 1
assert call_graph.global_call_count("main") == 2
def test_ref_count():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], x1 - y1)
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.ref_count(g0) == 1
assert call_graph.ref_count(g1) == 1
assert call_graph.ref_count("main") == 0
def test_nested_ref():
mod = tvm.IRModule({})
x0 = relay.var("x0", shape=(2, 3))
y0 = relay.var("y0", shape=(2, 3))
g0 = relay.GlobalVar("g0")
mod[g0] = relay.Function([x0, y0], x0 + y0)
x1 = relay.var("x1", shape=(2, 3))
y1 = relay.var("y1", shape=(2, 3))
g1 = relay.GlobalVar("g1")
mod[g1] = relay.Function([x1, y1], g0(x1, y1))
call_graph = relay.analysis.CallGraph(mod)
p0 = relay.var("p0", shape=(2, 3))
p1 = relay.var("p1", shape=(2, 3))
func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
mod["main"] = func
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.ref_count(g0) == 2
assert call_graph.ref_count(g1) == 1
assert call_graph.ref_count("main") == 0
def test_recursive_func():
mod = tvm.IRModule({})
x = relay.var("x", shape=[], dtype="int32")
fn0 = relay.Function([x], x)
gx = relay.GlobalVar("gx")
mod[gx] = fn0
sum_up = relay.GlobalVar("sum_up")
i = relay.var("i", shape=[], dtype="int32")
sb = relay.ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype="int32"))
global_call = gx(i)
rec_call = relay.Call(sum_up, [one_less]) + global_call
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
func = func.with_attr("Compiler", "a")
mod[sum_up] = func
iarg = relay.var("i", shape=[], dtype="int32")
mod["main"] = relay.Function([iarg], sum_up(iarg))
call_graph = relay.analysis.CallGraph(mod)
assert call_graph.is_recursive(sum_up)
assert call_graph.ref_count(sum_up) == 2
assert call_graph.ref_count(gx) == 1
assert call_graph.ref_count("main") == 0
if __name__ == "__main__":
pytest.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_change_batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
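"""Unit test for the relay.transform.ChangeBatch pass."""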
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import synthetic
from tvm.relay import transform
def test_change_batch_synthetic():
net, params = synthetic.get_workload()
new_net = transform.ChangeBatch({net["main"].params[0]: 0}, batch_size=123)(net)
assert new_net["main"].checked_type.ret_type.shape[0] == 123
if __name__ == "__main__":
test_change_batch_synthetic()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_cmp_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
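"""Checks that Python comparison operators on Relay expressions build the corresponding relay comparison ops."""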
from tvm import relay
a = relay.Var("a")
b = relay.expr.const(1.0, dtype="float32")
c = a < b
d = relay.less(a, b)
assert c.astext() == d.astext()
c = a > b
d = relay.greater(a, b)
assert c.astext() == d.astext()
c = a >= b
d = relay.greater_equal(a, b)
assert c.astext() == d.astext()
c = a <= b
d = relay.less_equal(a, b)
assert c.astext() == d.astext()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_const.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
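"""Unit tests for dtype handling of relay.const and _op.const."""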
import tvm
import numpy as np
from tvm import relay
from tvm.relay.frontend.common import infer_type
from tvm.relay import op as _op
def test_const_dtype():
strides = (1, 1)
np_array = np.array(strides).astype("int32")
strides = _op.const(np_array, dtype="int64")
# strides needs to be autoconverted to int64 on Windows
assert infer_type(strides).checked_type.dtype == np.dtype(np.int64)
a = tvm.nd.array(np.random.randint(0, high=255, size=(2, 3), dtype="uint8"))
a = _op.const(a, dtype="uint8")
aa = a.data.numpy()
assert aa.dtype == np.dtype(np.uint8)
b = _op.const(1, dtype="int8")
bb = b.data.numpy()
assert bb.dtype == np.dtype(np.int8)
kshape = (3, 10, 3, 3)
w = relay.const(np.zeros(kshape, dtype="float32"))
assert w.data.numpy().dtype == np.dtype(np.float32)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_cpp_build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
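"""Unit tests for building Relay modules end to end, covering fp32, fp16, and bf16 paths."""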
import numpy as np
import tvm
from tvm import te
from tvm import relay, runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def test_basic_build():
tgt = "llvm"
dev = tvm.cpu()
# func
a = relay.var("a", dtype="float32", shape=(16, 8))
b = relay.var("b", dtype="float32", shape=(8, 8))
c = relay.var("c", dtype="float32", shape=(16, 8))
x = relay.nn.dense(a, b)
y = relay.nn.relu(x)
z = y + c
func = relay.Function([a, b, c], z)
A = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
B = tvm.nd.array(np.random.uniform(-1, 1, (8, 8)).astype("float32"), device=dev)
C = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
params = {"b": B, "c": C}
# build
targets = {tvm.tir.IntImm("int32", dev.device_type): tgt}
mod = tvm.IRModule.from_expr(func)
func_in_mod = mod["main"]
assert mod["main"] == func_in_mod, "cannot compare function to itself"
lib = relay.build(mod, targets, "llvm", params=params)
assert mod["main"] == func_in_mod, "relay.build changed module in-place"
# test
rt = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
rt.set_input("a", A)
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(
out.numpy(),
np.maximum(np.dot(A.numpy(), B.numpy().T), 0) + C.numpy(),
atol=1e-5,
rtol=1e-5,
)
@tvm.testing.requires_cuda
def test_fp16_build():
dtype = "float16"
dev = tvm.cuda(0)
if dtype == "float16" and not have_fp16(dev.compute_version):
print("skip because gpu does not support fp16")
return
x = relay.var("x", dtype=dtype, shape=(4, 4))
y = relay.var("y", dtype=dtype, shape=(4, 4))
z = x + y
func = relay.Function([x, y], z)
X = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
Y = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
params = {
"x": X,
"y": Y,
}
# build
g_json, mmod, params = relay.build(func, "cuda", params=params)
# test
rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
rt.load_params(runtime.save_param_dict(params))
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(out.numpy(), X.numpy() + Y.numpy(), atol=1e-5, rtol=1e-5)
@tvm.testing.requires_llvm
def test_bf16_build():
data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
weight = relay.var("weight", shape=(64, 3, 7, 7), dtype="float32")
bn_gamma = relay.var("gamma", shape=(64,), dtype="float32")
bn_beta = relay.var("beta", shape=(64,), dtype="float32")
bn_mean = relay.var("mean", shape=(64,), dtype="float32")
bn_var = relay.var("var", shape=(64,), dtype="float32")
params = {
"weight": np.random.uniform(-1, 1, size=(64, 3, 7, 7)).astype("float32"),
"gamma": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"beta": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"mean": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
"var": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
}
conv_bf16 = relay.nn.conv2d(
relay.cast(data, "bfloat16"),
relay.cast(weight, "bfloat16"),
strides=(2, 2),
padding=(3, 3, 3, 3),
channels=64,
kernel_size=(7, 7),
out_dtype="bfloat16",
)
bn_bf16 = relay.nn.batch_norm(
conv_bf16,
relay.cast(bn_gamma, "bfloat16"),
relay.cast(bn_beta, "bfloat16"),
relay.cast(bn_mean, "bfloat16"),
relay.cast(bn_var, "bfloat16"),
)
relu_bf16 = relay.nn.relu(bn_bf16[0])
maxpool_bf16 = relay.nn.max_pool2d(relu_bf16, pool_size=(2, 2), strides=(2, 2))
avgpool_bf16 = relay.nn.avg_pool2d(maxpool_bf16, pool_size=(2, 2), strides=(2, 2))
flattened_bf16 = relay.nn.batch_flatten(avgpool_bf16)
softmax_bf16 = relay.nn.softmax(flattened_bf16)
mod_bf16 = tvm.IRModule.from_expr(softmax_bf16)
with tvm.transform.PassContext(opt_level=3):
relay.build(mod_bf16, target="llvm", params=params)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fp16_conversion(target, dev):
if target == "cuda" and not have_fp16(dev.compute_version):
print("skip because gpu does not support fp16")
return
n = 10
for (src, dst) in [("float32", "float16"), ("float16", "float32")]:
x = relay.var("x", relay.TensorType((n,), src))
y = x.astype(dst)
func = relay.Function([x], y)
# init input
X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)
# build
with tvm.transform.PassContext(opt_level=1):
g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), target)
# test
rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
rt.set_input("x", X)
rt.run()
out = rt.get_output(0)
np.testing.assert_allclose(out.numpy(), X.numpy().astype(dst), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_basic_build()
test_fp16_build()
test_fp16_conversion("llvm", tvm.cpu())
test_bf16_build()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_dataflow_pattern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import
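"""Unit tests for the Relay dataflow pattern language: pattern nodes, matching, rewriting, and partitioning."""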
import numpy as np
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import *
from tvm.relay.testing import run_opt_pass
# NB: these values correspond to the C++ TOpPattern enum; we lose the
# type safety due to the Python/C++ calling convention.
K_ELEMWISE = 0
K_BROADCAST = 1
## NODE TESTS
def test_expr_pattern():
ep = is_expr(relay.var("x", shape=(4, 1)))
assert isinstance(ep, ExprPattern)
assert isinstance(ep.expr, relay.Var)
def test_var_pattern():
v = is_var("x")
assert isinstance(v, VarPattern)
assert v.name == "x"
def test_constant_pattern():
c = is_constant()
assert isinstance(c, ConstantPattern)
def test_wildcard_pattern():
wc = wildcard()
assert isinstance(wc, WildcardPattern)
def test_CallPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
assert isinstance(c, CallPattern)
assert isinstance(c.args[0], WildcardPattern)
assert isinstance(c.args[1], WildcardPattern)
def test_FunctionPattern():
wc1 = wildcard()
wc2 = wildcard()
c = is_op("add")(wc1, wc2)
f = FunctionPattern([wc1, wc2], c)
assert isinstance(f, FunctionPattern)
assert isinstance(f.params[0], WildcardPattern)
assert isinstance(f.params[1], WildcardPattern)
assert isinstance(f.body, CallPattern)
assert isinstance(f.body.args[0], WildcardPattern)
assert isinstance(f.body.args[1], WildcardPattern)
def test_TuplePattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
assert isinstance(t, TuplePattern)
assert isinstance(t.fields[0], WildcardPattern)
assert isinstance(t.fields[1], WildcardPattern)
def test_TupleGetItemPattern():
wc1 = wildcard()
wc2 = wildcard()
t = is_tuple([wc1, wc2])
tgi = is_tuple_get_item(t, 1)
assert isinstance(tgi, TupleGetItemPattern)
assert isinstance(tgi.tuple, TuplePattern)
assert isinstance(tgi.tuple.fields[0], WildcardPattern)
assert isinstance(tgi.tuple.fields[1], WildcardPattern)
def test_AltPattern():
is_add_or_sub = is_op("add") | is_op("subtract")
assert isinstance(is_add_or_sub, AltPattern)
def test_TypePattern():
ttype = relay.TensorType((10, 10), "float32")
ty_pat = has_type(ttype)
assert isinstance(ty_pat, TypePattern)
assert ty_pat.type == ttype
def test_DataTypePattern():
dtype = "float16"
pattern = has_dtype(dtype)
assert isinstance(pattern, DataTypePattern)
assert pattern.dtype == dtype
def test_ShapePattern():
shape = [10, 10]
pattern = has_shape(shape)
assert isinstance(pattern, ShapePattern)
assert tvm.ir.structural_equal(pattern.shape, shape)
def test_AttrPattern():
op = is_op("add").has_attr({"TOpPattern": K_ELEMWISE})
assert isinstance(op, AttrPattern)
assert op.attrs["TOpPattern"] == K_ELEMWISE
def test_IfPattern():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
assert isinstance(pat, IfPattern)
assert isinstance(pat.cond, CallPattern)
assert isinstance(pat.true_branch, VarPattern)
assert isinstance(pat.false_branch, VarPattern)
def test_LetPattern():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
assert isinstance(pat, LetPattern)
assert isinstance(pat.var, VarPattern)
assert isinstance(pat.value, CallPattern)
assert isinstance(pat.body, VarPattern)
## MATCHER TESTS
def test_match_op():
assert is_op("add").match(relay.op.op.get("add"))
def test_no_match_op():
assert not is_op("add").match(relay.op.op.get("subtract"))
def test_match_op_or():
is_add_or_sub = is_op("add") | is_op("subtract")
assert is_add_or_sub.match(relay.op.op.get("add"))
assert is_add_or_sub.match(relay.op.op.get("subtract"))
def test_match_call_commutative():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(is_var("x"), is_var("y"))
assert add_pattern.match(x + y)
assert add_pattern.match(y + x)
mul_pattern = is_op("multiply")(is_var("x"), is_var("y"))
assert mul_pattern.match(x * y)
assert mul_pattern.match(y * x)
def test_no_match_call_commutative():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("subtract")(is_var("x"), is_var("y"))
assert add_pattern.match(x - y)
assert not add_pattern.match(y - x)
add_pattern = is_op("divide")(is_var("x"), is_var("y"))
assert add_pattern.match(x / y)
assert not add_pattern.match(y / x)
def test_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert add_pattern.match(x + y)
# Match call with any number of inputs
call_pattern = wildcard()(None)
assert call_pattern.match(relay.op.nn.relu(x))
assert call_pattern.match(relay.op.add(x, y))
def test_no_match_call():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
assert not add_pattern.match(x - y)
def test_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert func_pattern.match(relay.Function([x, y], x + y))
# Match Function with any number of inputs
func_pattern = FunctionPattern(None, wildcard())
assert func_pattern.match(relay.Function([x], x))
assert func_pattern.match(relay.Function([x, y], x + y))
def test_no_match_func():
x = relay.var("x")
y = relay.var("y")
wc1 = wildcard()
wc2 = wildcard()
func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
assert not func_pattern.match(relay.Function([x, y], x - y))
def test_match_if():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
x = relay.var("x")
y = relay.var("y")
cond = x < y
assert pat.match(relay.expr.If(cond, x, y))
def test_no_match_if():
x = is_var("x")
y = is_var("y")
pat = is_if(is_op("less")(x, y), x, y)
x = relay.var("x")
y = relay.var("y")
assert not pat.match(relay.expr.If(x > y, x, y))
assert not pat.match(relay.expr.If(x < y, y, x))
def test_match_let():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
x = relay.var("x")
y = relay.var("y")
lv = relay.var("let")
cond = x < y
assert pat.match(relay.expr.Let(lv, cond, lv))
def test_no_match_let():
x = is_var("x")
y = is_var("y")
let_var = is_var("let")
pat = is_let(let_var, is_op("less")(x, y), let_var)
x = relay.var("x")
y = relay.var("y")
lv = relay.var("let")
assert not pat.match(relay.expr.Let(lv, x > y, lv))
assert not pat.match(relay.expr.Let(lv, x < y, lv * x))
def test_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
assert pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert pattern.match(relu)
pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.optional(is_op("nn.relu")).optional(is_op("tanh"))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
tanh = relay.op.tanh(conv2d)
tanh2 = relay.op.tanh(relu)
relu2 = relay.op.nn.relu(tanh)
assert pattern.match(conv2d)
assert pattern.match(relu)
assert pattern.match(tanh)
assert pattern.match(tanh2)
assert not pattern.match(relu2)
def test_no_match_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
pattern = is_op("nn.relu")(
is_op("nn.conv2d")(wildcard(), wildcard()).optional(
lambda x: is_op("nn.bias_add")(x, wildcard())
)
)
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
relu = relay.op.tanh(conv2d)
assert not pattern.match(relu)
conv2d = relay.op.nn.dense(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
conv2d = relay.op.nn.conv2d(x, w)
bias_add = conv2d + w
relu = relay.op.nn.relu(bias_add)
assert not pattern.match(relu)
def test_match_const():
conv2d = is_op("nn.conv2d")(wildcard(), is_constant())
pattern = is_op("nn.bias_add")(conv2d, wildcard())
x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.var("w", shape=(3, 3, 3, 3))
b = relay.var("b", shape=(3,))
conv2d = relay.op.nn.conv2d(x, w)
out = relay.op.nn.bias_add(conv2d, b)
func = relay.Function([x, w, b], out)
mod = tvm.IRModule.from_expr(func)
assert not pattern.match(mod["main"].body)
mod["main"] = bind_params_by_name(mod["main"], {"w": tvm.nd.array(np.ones(shape=(3, 3, 3, 3)))})
assert pattern.match(mod["main"].body)
def test_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
assert tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern) # Match any index
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 0))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
# Match tuple with any inputs
tuple_pattern = is_tuple(None)
concat_pattern = is_op("concatenate")(tuple_pattern)
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x,)), axis=0))
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y)), axis=0))
assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y, z)), axis=0))
def test_no_match_tuple():
x = relay.var("x")
y = relay.var("y")
z = relay.op.op.get("add")
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add"), wildcard()))
assert not tuple_pattern.match(relay.expr.Tuple((x, y, z)))
tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
assert not tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
def test_match_type():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert ty_pat.match(x)
def test_no_match_type():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_type(relay.TensorType((10, 10), "float32"))
assert not ty_pat.match(x)
def test_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_dtype("float32")
assert ty_pat.match(x)
def test_no_match_dtype():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_dtype("float32")
assert not ty_pat.match(x)
def test_match_shape():
x = relay.var("x", shape=(10, 10), dtype="float32")
ty_pat = has_shape((10, 10))
assert ty_pat.match(x)
def test_no_match_shape():
x = relay.var("x", shape=(10, 10), dtype="int32")
ty_pat = has_shape((10, 5))
assert not ty_pat.match(x)
def test_match_op_attr():
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert op_pat.match(x + y)
def test_no_match_op_attr():
op = is_op("nn.dense").has_attr({"TOpPattern": K_ELEMWISE})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(relay.op.nn.dense(x, y))
op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
op_pat = op(wildcard(), wildcard())
x = relay.var("x")
y = relay.var("y")
assert not op_pat.match(x - y)
z = relay.var("z")
assert not op_pat.match(relay.Let(z, x + y, z))
def test_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("Composite", "add")
assert pattern.match(f)
def test_no_match_func_attr():
pattern = wildcard().has_attr({"Composite": "add"})
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("RandomTest", "add")
assert not pattern.match(f)
f = relay.Function([x, y], x + y).with_attr("Composite", "conv_bias")
assert not pattern.match(f)
def test_match_call_attr():
# String attr
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NCHW"})
x = relay.var("x")
y = relay.var("y")
assert is_conv2d.match(relay.op.nn.conv2d(x, y))
# Array attr
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
out = relay.op.nn.conv2d(x, y, kernel_size=[3, 3])
assert is_conv2d.match(out)
# non-operator call
attr_dict = {"call_attr": "attr"}
call_has_attr = wildcard()(wildcard()).has_attr(attr_dict)
call_attr = tvm.ir.make_node("DictAttrs", **attr_dict)
a = relay.Var("a")
b = relay.Var("b")
assert call_has_attr.match(relay.Call(a, [b], attrs=call_attr))
# empty attrs should match anything
empty_attrs = tvm.ir.make_node("DictAttrs", **{})
call_has_empty_attrs = wildcard()(wildcard()).has_attr({})
assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=empty_attrs))
assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=call_attr))
def test_no_match_call_attr():
x = relay.var("x")
y = relay.var("y")
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NHWC"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"RandomAttr": "NCHW"})
assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
# Array attr
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
out = relay.op.nn.conv2d(x, y, kernel_size=[2, 1])
assert not is_conv2d.match(out)
# non-operator calls
call_has_attr = wildcard()(wildcard()).has_attr({"call_attr": "attr"})
wrong_key = tvm.ir.make_node("DictAttrs", **{"wrong": "attr"})
wrong_value = tvm.ir.make_node("DictAttrs", **{"call_attr": "wrong"})
empty_attrs = tvm.ir.make_node("DictAttrs", **{})
a = relay.Var("a")
b = relay.Var("b")
# attrs left undefined
assert not call_has_attr.match(relay.Call(a, [b]))
# wrong attrs
assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_key))
assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_value))
assert not call_has_attr.match(relay.Call(a, [b], attrs=empty_attrs))
def test_match_call_attr_dtype():
is_cast = is_op("cast")(wildcard()).has_attr({"dtype": "float32"})
x = relay.var("x")
assert is_cast.match(relay.op.cast(x, "float32"))
def test_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
def test_no_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
# Check
assert not diamond.match(leaky_relu)
assert not diamond.match(relu)
def test_match_fake_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
def test_at_most_one_parent():
# Pattern
P = is_op("nn.conv2d")(wildcard(), wildcard()) # 'parent'
I = is_op("nn.relu")(wildcard()) # 'intermediate' ('path' in the code)
C = is_op("add")(wildcard(), wildcard()) # 'child'
pattern = dominates(P, I, C)
# n6(P)
# / \
# n7 \
# / \
# n8(P) n10(I)
# \ /
# n9(I) /
# \ /
# n11(C)
x = relay.var("x")
w = relay.var("w")
n6 = relay.op.nn.conv2d(x, w) # matches P
n7 = relay.op.tanh(n6) # does not match I
n8 = relay.op.nn.conv2d(n7, w) # matches P
n9 = relay.op.nn.relu(n8) # matches I
n10 = relay.op.nn.relu(n6) # matches I
n11 = relay.add(n9, n10) # matches C
# Does not match: Can't match the parent pattern P at both 8 and 6.
# Note that if we did allow P to be used twice the implementation would
# need to be changed to not 'jump over' n7.
assert not pattern.match(n11)
def test_match_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
# Deeper Branch
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
# Single Branch
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Check
assert diamond.match(out)
# Fuzzy path/nested Diamond
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
assert diamond.match(out)
def test_not_match_dominator():
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Fake Diamond
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# Add op that doesn't match K_ELEMWISE
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# Relu on the input instead of the conv
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(inp)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
# No conv
inp = relay.var("input")
relu = relay.op.nn.relu(inp)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Check
assert not diamond.match(out)
def test_match_typed_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
def test_no_match_typed_dominator():
# Classic Diamond
inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float32"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 1, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Check
assert not diamond.match(out)
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
"float16"
)
reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Check
assert not diamond.match(out)
def test_rewrite():
x = relay.var("x")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
out = rewrite(TestRewrite(), x + y)
assert sub_pattern.match(out)
def test_rewrite_func():
x = relay.var("x")
w = relay.var("w")
y = relay.var("y")
add_pattern = is_op("add")(wildcard(), wildcard())
sub_pattern = is_op("subtract")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
inpf = relay.var("input")
weightf = relay.var("weight")
func = relay.Function(
[inpf, weightf], relay.op.nn.relu(relay.op.nn.conv2d(inpf, weightf)), attrs=None
)
out = rewrite(TestRewrite(), func(x, w) + y)
assert sub_pattern.match(out)
def test_rewrite_func_with_attr():
x = relay.var("x")
y = relay.var("y")
f = relay.Function([x, y], x + y).with_attr("Composite", "add")
a = relay.var("a")
b = relay.var("b")
c = relay.Call(f, [a, b])
c_abs = relay.abs(c)
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = wildcard().has_attr({"Composite": "add"})(wildcard(), wildcard())
def callback(self, pre, post, node_map):
return post.args[0] + post.args[1]
out = rewrite(TestRewrite(), c_abs)
inlined_add_pattern = is_op("abs")(is_op("add")(wildcard(), wildcard()))
assert inlined_add_pattern.match(out)
def test_nested_rewrite():
class PatternCallback(DFPatternCallback):
def __init__(self, pattern):
super(PatternCallback, self).__init__()
self.pattern = pattern
def callback(self, pre, post, node_map):
return post
def gen():
x = relay.var("x")
y = relay.var("y")
y_add = relay.add(y, y)
n0 = relay.add(x, y_add)
n1 = relay.add(x, n0)
return relay.add(n1, n0)
def pattern():
a = wildcard()
b = wildcard()
n0 = is_op("add")(a, b)
n1 = is_op("add")(n0, a)
return is_op("add")(n0, n1)
out = gen()
pat = pattern()
new_out = rewrite(PatternCallback(pat), out)
assert tvm.ir.structural_equal(out, new_out)
def test_not_fuse_multi_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
out = out + conv2d
# Check
assert not diamond.match(out)
class BatchnormCallback(DFPatternCallback):
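"""Rewrite the unfused expression gamma * (x - mean) / sqrt(var + eps) + beta into relay.op.nn.batch_norm."""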
def __init__(self):
super(BatchnormCallback, self).__init__()
self.x = wildcard()
self.var = wildcard()
self.mean = wildcard()
self.beta = wildcard()
self.gamma = wildcard()
self.eps = is_constant()
self.pattern = (
self.gamma * (self.x - self.mean) / is_op("sqrt")(self.var + self.eps) + self.beta
)
def callback(self, pre, post, node_map):
x = node_map[self.x][0]
var = node_map[self.var][0]
mean = node_map[self.mean][0]
beta = node_map[self.beta][0]
gamma = node_map[self.gamma][0]
eps = node_map[self.eps][0]
return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.numpy().item())[0]
def test_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_no_fuse_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
fake_BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
out = rewrite(BatchnormCallback(), fake_BN)
assert tvm.ir.structural_equal(out, fake_BN)
def test_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
bn2 = relay.op.nn.batch_norm(bn, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_partial_fuse_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN2)
bn2 = relay.op.nn.batch_norm(BN, gamma, beta, mean, var, epsilon=1e-5)[0]
assert tvm.ir.structural_equal(out, bn2)
def test_fuse_batchnorm_commutation():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
# commute add
BN = beta + gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5))
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
# associate divide/multiply
BN = (gamma * (x - mean)) / relay.op.sqrt(var + relay.const(1e-5)) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
# associate multiply/divide
BN = gamma * ((x - mean) / relay.op.sqrt(var + relay.const(1e-5))) + beta
out = rewrite(BatchnormCallback(), BN)
assert tvm.ir.structural_equal(
out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
)
def test_quadruple_rewrite_dominator():
class DominatorRemovalCallback(DFPatternCallback):
def __init__(self):
super(DominatorRemovalCallback, self).__init__()
self.inp = wildcard()
self.weight = wildcard()
is_conv2d = is_op("nn.conv2d")(self.inp, self.weight)
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(
wildcard()
) | is_op("add")(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
self.pattern = dominates(is_conv2d, is_unary_elemwise, reduction)
def callback(self, pre, post, node_map):
inp = node_map[self.inp][0]
weight = node_map[self.weight][0]
return relay.op.nn.conv2d(inp, weight)
inp = relay.var("input")
weight = relay.var("weight")
# Classic Diamond
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Deeper Branch
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Single Branch
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
out = relu + tanh
# Fuzzy path/nested Diamond
conv2d = relay.op.nn.conv2d(out, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = tanh + leaky_relu
one = relay.op.nn.conv2d(inp, weight)
two = relay.op.nn.conv2d(one, weight)
three = relay.op.nn.conv2d(two, weight)
four = relay.op.nn.conv2d(three, weight)
assert tvm.ir.structural_equal(DominatorRemovalCallback().rewrite(out), four)
def algebraic_simplify(expr):
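"""Simplify trivial identities (x + 0, x - 0, x * 1, x / 1, 0 * x, 0 / x) via pattern rewriting."""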
zero = is_expr(relay.const(0)) | is_expr(relay.const(0.0))
one = is_expr(relay.const(1)) | is_expr(relay.const(1.0))
class ElwiseNullCallback(DFPatternCallback):
def callback(self, pre, post, node_map):
return node_map[self.x][0] # pylint: disable=no-member
class AddCallback(ElwiseNullCallback):
def __init__(self):
super(AddCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x + zero
class SubCallback(ElwiseNullCallback):
def __init__(self):
super(SubCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x - zero
class MulCallback(ElwiseNullCallback):
def __init__(self):
super(MulCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x * one
class DivCallback(ElwiseNullCallback):
def __init__(self):
super(DivCallback, self).__init__()
self.x = wildcard()
self.pattern = self.x / one
class MulZeroCallback(ElwiseNullCallback):
def __init__(self):
super(MulZeroCallback, self).__init__()
self.x = zero
self.pattern = self.x * wildcard()
class ZeroDivCallback(ElwiseNullCallback):
def __init__(self):
super(ZeroDivCallback, self).__init__()
self.x = zero
self.pattern = self.x / wildcard()
return rewrite(
[
AddCallback(),
SubCallback(),
MulCallback(),
DivCallback(),
MulZeroCallback(),
ZeroDivCallback(),
],
expr,
)
def test_algebraic_simplify():
x = relay.Var("x")
y = relay.Var("y")
one = relay.const(1)
zero = relay.const(0)
onef = relay.const(1.0)
zerof = relay.const(0.0)
assert algebraic_simplify(x + zero) == x
assert algebraic_simplify(x + zerof) == x
assert algebraic_simplify(zero + x) == x
assert algebraic_simplify(zerof + x) == x
assert algebraic_simplify(x - zero) == x
assert algebraic_simplify(x - zerof) == x
assert algebraic_simplify(x * one) == x
assert algebraic_simplify(x * onef) == x
assert algebraic_simplify(one * x) == x
assert algebraic_simplify(onef * x) == x
assert algebraic_simplify(x * zero) == zero
assert algebraic_simplify(x * zerof) == zerof
assert algebraic_simplify(x / one) == x
assert algebraic_simplify(x / onef) == x
assert algebraic_simplify(zero / x) == zero
assert algebraic_simplify(zerof / x) == zerof
assert tvm.ir.structural_equal(
algebraic_simplify((x + zero * y) / one + (y * one) - zero / x), x + y
)
def test_double_partition():
# Pattern 1
conv2d_p = is_op("nn.conv2d")(wildcard(), wildcard())
bias_add_p = is_op("nn.bias_add")(conv2d_p, wildcard())
relu_p = is_op("nn.relu")(bias_add_p)
# Graph
x = relay.var("input")
w = relay.var("weight")
b = relay.var("bias")
w2 = relay.var("weight")
b2 = relay.var("bias")
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
conv2d2 = relay.op.nn.conv2d(relu, w2)
bias_add2 = relay.op.nn.bias_add(conv2d2, b2)
partitioned = bias_add2
for pat, label in [(relu_p, "conv_bias_relu"), (bias_add_p, "conv_bias")]:
partitioned = pat.partition(partitioned, {"Composite": label})
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func0 = (
relay.Function(
[inpf, weightf, biasf],
relay.op.nn.relu(relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)),
)
.with_attr("Composite", "conv_bias_relu")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
)
inpf = relay.var("input")
weightf = relay.var("weight")
biasf = relay.var("bias")
func1 = (
relay.Function(
[inpf, weightf, biasf], relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)
)
.with_attr("Composite", "conv_bias")
.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")
)
expected = func1(func0(x, w, b), w2, b2)
assert tvm.ir.structural_equal(partitioned, expected)
def test_partition_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
# Classic Diamond
inp = relay.var("input")
weight = relay.var("weight")
def generate_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
out = generate_diamond(inp * inp, weight * weight)
# Check
partitioned = diamond.partition(out)
i = relay.Var("input")
w = relay.Var("weight")
f = relay.Function([i, w], generate_diamond(i, w)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_"
)
assert tvm.ir.structural_equal(partitioned, f(inp * inp, weight * weight))
def test_quadruple_partition_dominator():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
"add"
)(wildcard(), wildcard())
reduction = is_op("add")(wildcard(), wildcard())
diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
inp = relay.var("input")
weight = relay.var("weight")
# Classic Diamond
def classic_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
# Deeper Branch
def deeper_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
relu = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return relu + leaky_relu
# Single Branch
def single_branch(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relay.op.nn.relu(relu)
tanh = relay.op.tanh(relu)
return relu + tanh
# Fuzzy path/nested Diamond
def nested_diamond(inp, weight):
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
relu = relu + relu
tanh = relay.op.tanh(relu)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
return tanh + leaky_relu
partitioned = diamond.partition(
nested_diamond(
single_branch(deeper_diamond(classic_diamond(inp, weight), weight), weight), weight
)
)
functions = []
partition_names = [
"nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_nn.leaky_relu_add_",
"nn.conv2d_nn.relu_nn.relu_tanh_add_",
"nn.conv2d_nn.relu_add_tanh_nn.leaky_relu_add_",
]
for i, f in enumerate([classic_diamond, deeper_diamond, single_branch, nested_diamond]):
inpf = relay.var("input")
weightf = relay.var("weight")
functions.append(
relay.Function([inpf, weightf], f(inpf, weightf)).with_attr(
"PartitionedFromPattern", partition_names[i]
)
)
reference = functions[3](
functions[2](functions[1](functions[0](inp, weight), weight), weight), weight
)
assert tvm.ir.structural_equal(partitioned, reference)
def get_BN(x, var, mean, beta, gamma, eps):
return gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
def test_partition_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = get_BN(x, var, mean, beta, gamma, eps)
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
    # Put the arguments in topological order for the reference
f = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN)
reference = f(gamma, x, mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_partition_double_batchnorm():
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
eps = relay.const(1e-5)
BN = gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
BN2 = gamma * (BN - mean) / relay.op.sqrt(var + eps) + beta
xf = relay.var("xf")
varf = relay.var("varf")
meanf = relay.var("meanf")
betaf = relay.var("betaf")
gammaf = relay.var("gammaf")
f1 = relay.Function(
[gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
# The partitioner doesn't replace duplicates, so we use two copies of the function
xf2 = relay.var("xf2")
varf2 = relay.var("varf2")
meanf2 = relay.var("meanf2")
betaf2 = relay.var("betaf2")
gammaf2 = relay.var("gammaf2")
f2 = relay.Function(
[gammaf2, xf2, meanf2, varf2, betaf2], get_BN(xf2, varf2, meanf2, betaf2, gammaf2, eps)
).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
partitioned = BatchnormCallback().pattern.partition(BN2)
reference = f2(gamma, f1(gamma, x, mean, var, beta), mean, var, beta)
assert tvm.ir.structural_equal(partitioned, reference)
def test_overlapping_partitions():
x = wildcard()
gamma = wildcard()
beta = wildcard()
moving_mean = wildcard()
moving_var = wildcard()
bn_node = is_op("nn.batch_norm")(x, gamma, beta, moving_mean, moving_var)
tuple_get_item_node = TupleGetItemPattern(bn_node, 0)
x = relay.var("x")
var = relay.var("var")
mean = relay.var("mean")
beta = relay.var("beta")
gamma = relay.var("gamma")
BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
T1 = BN[0]
T2 = BN[0]
add = T1 + T2
assert tuple_get_item_node.partition(add) == add
def test_partition_overused():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
out = relu + conv2d
assert pattern.partition(out) == out
def test_partition_fuzzy_tuple():
x = relay.var("x")
y = relay.var("y")
z = x + y
tuple_pattern = is_tuple(None)
concat_pattern = is_op("concatenate")(tuple_pattern)
xp = relay.var("xp")
yp = relay.var("yp")
zp = relay.var("zp")
def create_func(args, body):
return relay.Function(args, body).with_attr("PartitionedFromPattern", "Tuple_concatenate_")
def concat(*args):
return relay.op.concatenate(relay.expr.Tuple(args), axis=0)
one = concat_pattern.partition(concat(x))
assert tvm.ir.structural_equal(one, create_func([xp], concat(xp))(x))
two = concat_pattern.partition(concat(x, y))
assert tvm.ir.structural_equal(two, create_func([xp, yp], concat(xp, yp))(x, y))
three = concat_pattern.partition(concat(x, y, z))
assert tvm.ir.structural_equal(three, create_func([xp, yp, zp], concat(xp, yp, zp))(x, y, z))
def test_partition_fuzzy_function_args():
func_pattern = FunctionPattern(None, wildcard() + wildcard())(None) + wildcard()
x = relay.var("x")
y = relay.var("y")
z = relay.var("z")
b = relay.var("b")
xp = relay.var("xp")
yp = relay.var("yp")
zp = relay.var("zp")
def create_func(call):
N = len(call.op.params)
new_params = [relay.var(str(i)) for i in range(N + 1)]
label = "add_FunctionCall_add_"
if N == 3:
label = "add_" + label
return relay.Function(
new_params, relay.Call(call.op, (new_params[0:-1])) + new_params[-1]
).with_attr("PartitionedFromPattern", label)(*([x, y, z][0:N] + [b]))
f1 = relay.Function([xp], xp + xp)(x)
one = func_pattern.partition(f1 + b)
assert tvm.ir.structural_equal(one, create_func(f1))
f2 = relay.Function([xp, yp], xp + yp)(x, y)
two = func_pattern.partition(f2 + b)
assert tvm.ir.structural_equal(two, create_func(f2))
f3 = relay.Function([xp, yp, zp], xp + yp + zp)(x, y, z)
three = func_pattern.partition(f3 + b)
assert tvm.ir.structural_equal(three, create_func(f3))
def test_partition_check():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(is_var("input"), wildcard()))
def check(pre):
return pre.args[0].attrs.data_layout == "NCHW"
x = relay.var("input")
w = relay.var("weight")
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
xf = relay.var("input")
wf = relay.var("weight")
conv2df = relay.op.nn.conv2d(xf, wf)
reluf = relay.op.nn.relu(conv2df)
func = relay.Function([xf, wf], reluf).with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
reference = func(x, w)
partitioned = pattern.partition(relu, check=check)
assert tvm.ir.structural_equal(partitioned, reference)
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
assert relu == pattern.partition(relu, check=check)
def test_partition_check_types():
pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))
def check(pre):
conv = pre.args[0]
return (conv.attrs.data_layout == "NCHW") and bool(conv.checked_type.shape[0] == 1)
x = relay.var("input", shape=(1, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
partitioned = pattern.partition(relu, check=check)
assert partitioned.op.attrs["PartitionedFromPattern"] == "nn.conv2d_nn.relu_"
conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
x = relay.var("input", shape=(2, 10, 10, 10))
w = relay.var("weight", shape=(10, 10, 3, 3))
conv2d = relay.op.nn.conv2d(x, w)
relu = relay.op.nn.relu(conv2d)
relu = run_opt_pass(relu, relay.transform.InferType())
assert relu == pattern.partition(relu, check=check)
def conv_bias_relu(x, w, b):
conv2d = relay.op.nn.conv2d(x, w)
bias_add = relay.op.nn.bias_add(conv2d, b)
relu = relay.op.nn.relu(bias_add)
return relu
def test_partition_option():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = conv2d.optional(lambda x: is_op("nn.bias_add")(x, wildcard()))
pattern1 = is_op("nn.relu")(bias)
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = is_op("nn.bias_add")(conv2d, wildcard())
pattern2 = bias.optional(lambda x: is_op("nn.relu")(x))
relu = conv_bias_relu(x, w, b)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
assert pattern1.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern1.partition(relu))
assert pattern2.match(relu)
assert tvm.ir.structural_equal(func(x, w, b), pattern2.partition(relu))
def test_partition_function():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], is_op("nn.conv2d")(wc_x1, wc_w1))
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
x2 = relay.var("x2")
w2 = relay.var("w2")
b2 = relay.var("b2")
func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
"PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
)
expr2 = func2(x, w, b) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_partition_optional_function():
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern0 = FunctionPattern(
[wc_x1, wc_w1], is_op("sigmoid")(is_op("nn.conv2d")(wc_x1, wc_w1))
)
func_pattern1 = FunctionPattern(
[wc_x1, wc_w1], is_op("nn.relu")(is_op("nn.conv2d")(wc_x1, wc_w1))
)
pattern = func_pattern0(wc_x, wc_w) | func_pattern1(wc_x, wc_w)
func = relay.Function([x1, w1], relay.nn.relu(relay.nn.conv2d(x1, w1)))
expr = func(x, w) + b
x2 = relay.var("x2")
w2 = relay.var("w2")
func2 = relay.Function([x2, w2], func(x2, w2)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.relu_FunctionCall_"
)
expr2 = func2(x, w) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_rewrite_function_with_fuzzy_body():
"""Allow Rewriting a function with a fuzzy body via dominator analysis"""
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = pattern
def callback(self, pre, post, node_map):
return x + w
out = rewrite(TestRewrite(), expr)
assert tvm.ir.structural_equal(out, x + w + b)
def test_partition_function_with_fuzzy_body():
"""
    Allow rewriting a function with a fuzzy body via dominator analysis.
"""
x = relay.var("x")
w = relay.var("w")
b = relay.var("b")
x1 = relay.var("x1")
w1 = relay.var("w1")
wc_x = wildcard()
wc_w = wildcard()
wc_b = wildcard()
wc_x1 = wildcard()
wc_w1 = wildcard()
func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
pattern = func_pattern(wc_x, wc_w) + wc_b
func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
expr = func(x, w) + b + b
x2 = relay.var("x2")
w2 = relay.var("w2")
b2 = relay.var("b2")
func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
"PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
)
expr2 = func2(x, w, b) + b
assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_match_match():
add_pattern = is_op("add")(wildcard(), wildcard())
class TestRewrite(DFPatternCallback):
def __init__(self):
super(TestRewrite, self).__init__()
self.pattern = add_pattern
def callback(self, pre, post, node_map):
return post.args[0] - post.args[1]
mod = tvm.IRModule({})
tvm.relay.prelude.Prelude(mod)
# Apply rewrite on IR including relay.Match
out = rewrite(TestRewrite(), mod["tensor_concatenate_int64"])
assert tvm.ir.structural_equal(mod["tensor_concatenate_int64"], out)
def test_partition_constant_embedding():
x = relay.var("x")
w = relay.var("w")
wc = relay.const(1)
b = relay.var("b")
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
embeded_func = relay.Function([xf, bf], conv_bias_relu(xf, wc, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
xf = relay.var("x")
wf = relay.var("w")
bf = relay.var("b")
lifted_func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
"PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
)
relu = conv_bias_relu(x, w, b)
reluc = conv_bias_relu(x, wc, b)
# Check lifting of wildcard matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(lifted_func(x, wc, b), pattern.partition(reluc))
# Check lifting of input matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(reluc, pattern.partition(reluc)) # Constants are not Inputs
# Check embedding of constant matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant()), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check embedding of constant ExprPatterns
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_expr(wc)), wildcard())
)
assert tvm.ir.structural_equal(relu, pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check lifting/embedding of Alt matches
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var() | is_constant()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
# Check lifting/embedding of Alt matches with the other ordering
pattern = is_op("nn.relu")(
is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant() | is_var()), wildcard())
)
assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
def test_rewrite_once():
# This class recursively removes the arguments to concat until there is nothing left to concatenate.
class ConcatRewriter(DFPatternCallback):
def __init__(self, rewrite_once):
super().__init__(rewrite_once=rewrite_once)
self.pattern = is_op("concatenate")(None)
def callback(self, pre, post, node_map):
concat_args = post.args[0]
# Remove the last argument
new_args = [concat_args[i] for i in range(len(concat_args) - 1)]
if new_args:
return relay.op.concatenate(relay.expr.Tuple(new_args), axis=0)
else:
return concat_args
x = relay.var("x")
y = relay.var("y")
z = relay.var("z")
concat = relay.op.concatenate(relay.expr.Tuple([x, y, z]), axis=0)
# Let the rewriter run recursively
out = rewrite(ConcatRewriter(False), concat)
expected = relay.expr.Tuple([x])
assert tvm.ir.structural_equal(out, expected)
# Run the rewriter once
out = rewrite(ConcatRewriter(True), concat)
expected = relay.op.concatenate(relay.expr.Tuple([x, y]), axis=0)
assert tvm.ir.structural_equal(out, expected)
def test_matched_outside_but_dominated():
"""In this example the pattern matches the nn.conv2d/add/multiply flow. Even though the
add output is consumed by the sigmoid, the sigmoid itself is dominated by the multiply.
So partitioning can proceed, all be it with a duplication of the add."""
in_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
%0 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
%1 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
%2 = expand_dims(%bias, axis=1, num_newaxis=2);
%3 = expand_dims(%2, axis=0);
%4 = nn.conv2d(%0, %1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%5 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
%6 = add(%4, %5);
%7 = sigmoid(%6);
%8 = multiply(%6, %7);
layout_transform(%8, src_layout="NHWC", dst_layout="NCHW")
}
"""
)
expected_mod = tvm.parser.parse(
"""
#[version = "0.0.5"]
def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
%2 = expand_dims(%bias, axis=1, num_newaxis=2);
%3 = expand_dims(%2, axis=0);
%4 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
%5 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
%6 = nn.conv2d(%4, %5, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%7 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
%8 = add(%6, %7);
%9 = sigmoid(%8);
%10 = fn (%FunctionVar_0_0, %FunctionVar_0_1, %FunctionVar_0_2, %FunctionVar_0_3, PartitionedFromPattern="nn.conv2d_add_multiply_") {
%0 = nn.conv2d(%FunctionVar_0_0, %FunctionVar_0_1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
%1 = add(%0, %FunctionVar_0_2);
multiply(%1, %FunctionVar_0_3)
};
%11 = %10(%4, %5, %7, %9);
layout_transform(%11, src_layout="NHWC", dst_layout="NCHW")
}
"""
)
pattern = is_op("multiply")(
is_op("add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard()), wildcard()
)
actual_mod = tvm.IRModule.from_expr(pattern.partition(in_mod["main"]))
actual_mod = relay.transform.InferType()(actual_mod)
tvm.ir.assert_structural_equal(actual_mod, expected_mod)
def test_partition_parallel_branch_with_same_input():
"""In this example, conv2d's two consumer(add and multiply) on two different branches are
merged into one partition, make sure that the partitioned function has no redundant parameters"""
# Pattern
path1 = is_op("multiply")(wildcard(), wildcard())
path2 = is_op("add")(wildcard(), wildcard())
pattern = is_op("add")(path1, path2)
i = relay.Var("input")
w = relay.Var("weight")
l = relay.Var("left")
r = relay.Var("right")
conv2d = relay.op.nn.conv2d(i, w)
branch1 = relay.multiply(l, conv2d)
branch2 = relay.add(conv2d, r)
add = relay.add(branch1, branch2)
lf = relay.Var("leftf")
mf = relay.Var("midf")
rf = relay.Var("rightf")
f = relay.Function([lf, mf, rf], (lf * mf) + (mf + rf)).with_attr(
"PartitionedFromPattern", "multiply_add_add_"
)
partitioned = pattern.partition(add)
reference = f(l, conv2d, r)
assert tvm.ir.structural_equal(partitioned, reference)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_debug.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay import var, const, create_executor
from tvm.relay.op import debug
_test_debug_hit = False
def test_debug():
global _test_debug_hit
x = var("x", shape=(), dtype="int32")
_test_debug_hit = False
def did_exec(x):
global _test_debug_hit
_test_debug_hit = True
prog = debug(x, debug_func=did_exec)
result = create_executor().evaluate(prog, {x: const(1, "int32")})
assert _test_debug_hit
assert result.numpy() == 1
def test_debug_with_expr():
global _test_debug_hit
_test_debug_hit = False
x = var("x", shape=(), dtype="int32")
_test_debug_hit = False
def did_exec(x):
global _test_debug_hit
_test_debug_hit = True
prog = debug(x + x * x, debug_func=did_exec)
result = create_executor().evaluate(prog, {x: const(2, "int32")})
assert _test_debug_hit
assert result.numpy() == 6
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm import TVMError
from tvm.relay.backend import Executor
def test_create_executor():
executor = Executor("aot")
assert executor.name == "aot"
def test_create_executor_with_options():
executor = Executor("aot", {"interface-api": "c"})
assert executor.name == "aot"
assert executor["interface-api"] == "c"
def test_create_executor_with_default():
executor = Executor("graph")
assert not executor["link-params"]
def test_attr_check():
executor = Executor("aot", {"interface-api": "c"})
assert "woof" not in executor
assert "interface-api" in executor
def test_create_executor_not_found():
with pytest.raises(TVMError, match='Executor "woof" is not defined'):
Executor("woof", {})
def test_create_executor_attr_not_found():
with pytest.raises(TVMError, match='Attribute "woof" is not available on this Executor'):
Executor("aot", {"woof": "bark"})
def test_create_executor_attr_type_incorrect():
with pytest.raises(
TVMError,
match='Attribute "interface-api" should have type "runtime.String"'
' but instead found "IntImm"',
):
Executor("aot", {"interface-api": True})
def test_list_executors():
assert "aot" in Executor.list_registered()
@pytest.mark.parametrize("executor", [Executor("aot").name, "aot"])
def test_list_executor_options(executor):
aot_options = Executor.list_registered_options(executor)
assert "interface-api" in aot_options
assert aot_options["interface-api"] == "runtime.String"
def test_list_executor_options_not_found():
with pytest.raises(TVMError, match='Executor "woof" is not defined'):
Executor.list_registered_options("woof")
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_expr_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
def check_visit(expr):
try:
ef = ExprFunctor()
ef.visit(expr)
assert False
except NotImplementedError:
pass
ev = ExprVisitor()
ev.visit(expr)
em = ExprMutator()
assert em.visit(expr)
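# Hedged illustrative sketch (not part of the original tests): user code usually
# subclasses ExprMutator and overrides individual visit_* methods. The class
# below doubles every constant it encounters; its name is illustrative only and
# it is not used by the tests in this file.
class _DoubleConstants(ExprMutator):
    def visit_constant(self, const):
        # Rebuild the constant with its value doubled, keeping the same dtype.
        return relay.const(const.data.numpy() * 2)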
def test_constant():
check_visit(relay.const(1.0))
def test_tuple():
t = relay.Tuple([relay.var("x", shape=())])
check_visit(t)
def test_var():
v = relay.var("x", shape=())
check_visit(v)
def test_global():
v = relay.GlobalVar("f")
check_visit(v)
def test_function():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
params = [x, y]
body = x + y
ret_type = relay.TensorType(())
type_params = []
attrs = None # How to build?
f = relay.Function(params, body, ret_type, type_params, attrs)
check_visit(f)
def test_call():
x = relay.var("x", shape=())
y = relay.var("y", shape=())
call = relay.op.add(x, y)
check_visit(call)
def test_let():
x = relay.var("x", shape=())
value = relay.const(2.0)
body = x + x
l = relay.Let(x, value, body)
check_visit(l)
def test_ite():
cond = relay.var("x", shape=(), dtype="bool")
ite = relay.If(cond, cond, cond)
check_visit(ite)
def test_get_item():
t = relay.Tuple([relay.var("x", shape=())])
t = relay.TupleGetItem(t, 0)
check_visit(t)
def test_ref_create():
r = relay.expr.RefCreate(relay.const(1.0))
check_visit(r)
def test_ref_read():
ref = relay.expr.RefCreate(relay.const(1.0))
r = relay.expr.RefRead(ref)
check_visit(r)
def test_ref_write():
ref = relay.expr.RefCreate(relay.const(1.0))
r = relay.expr.RefWrite(ref, relay.const(2.0))
check_visit(r)
def test_memo():
expr = relay.const(1)
for _ in range(100):
expr = expr + expr
check_visit(expr)
def test_match():
p = relay.prelude.Prelude()
check_visit(p.mod[p.map])
def test_match_completeness():
p = relay.prelude.Prelude()
_, _, nil = p.mod.get_type("List")
for completeness in [True, False]:
match_expr = relay.adt.Match(nil, [], complete=completeness)
result_expr = ExprMutator().visit(match_expr)
# ensure the mutator doesn't mangle the completeness flag
assert result_expr.complete == completeness
if __name__ == "__main__":
test_constant()
test_tuple()
test_var()
test_global()
test_function()
test_call()
test_let()
test_ite()
test_ref_create()
test_ref_read()
test_ref_write()
test_memo()
test_match()
test_match_completeness()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_external_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
import sys
from collections import OrderedDict
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, runtime
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
from utils.external_codegen import (
update_lib,
set_external_func_attr,
parametrize_external_codegen_checks,
parametrize_external_json_codegen_checks,
check_graph_executor_result,
check_vm_result,
)
@parametrize_external_codegen_checks
def test_multi_node_subgraph(check_result):
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
w3 = relay.var("w3", shape=(10, 10))
w4 = relay.var("w4", shape=(10, 10))
w5 = relay.var("w5", shape=(10, 10))
w6 = relay.var("w6", shape=(10, 10))
w7 = relay.var("w7", shape=(10, 10))
# subgraph0
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
subgraph0 = relay.Function([x0, w00, w01, w02], q00)
subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
call0 = relay.Call(subgraph0, [x, w0, w1, w2])
# subgraph1
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
subgraph1 = relay.Function([x1, w10, w11, w12], q10)
subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
call1 = relay.Call(subgraph1, [x, w3, w4, w5])
# Other parts on TVM
z2 = relay.add(x, w6)
q2 = relay.subtract(z2, w7)
r = relay.concatenate((call0, call1, q2), axis=0)
f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
check_result(
mod,
map_inputs,
(30, 10),
np.concatenate(
(
((x_data + w_data[0]) - w_data[1]) * w_data[2],
((x_data + w_data[3]) - w_data[4]) * w_data[5],
x_data + w_data[6] - w_data[7],
),
axis=0,
),
)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op(check_result):
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op_int(check_result):
x = relay.var("x", shape=(8, 8), dtype="int32")
y = relay.var("y", shape=(8, 8), dtype="int32")
x0 = relay.var("x0", shape=(8, 8), dtype="int32")
y0 = relay.var("y0", shape=(8, 8), dtype="int32")
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("int32")
y_data = np.random.rand(8, 8).astype("int32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc(check_result):
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
# subgraph for mul
x0 = relay.var("x0", shape=(2, 2))
y0 = relay.var("y0", shape=(2, 2))
mul = x0 * y0
mul = relay.Function([x0, y0], mul)
mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
call_mul = relay.Call(mul, [y, y])
# subgraph for add
x1 = relay.var("x1", shape=(2, 2))
y1 = relay.var("y1", shape=(2, 2))
add = x1 + y1
add = relay.Function([x1, y1], add)
add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
call_add = relay.Call(add, [x, x])
# subgraph for sub
x2 = relay.var("x2", shape=(2, 2))
y2 = relay.var("y2", shape=(2, 2))
sub = x2 - y2
sub = relay.Function([x2, y2], sub)
sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
call_sub = relay.Call(sub, [call_mul, call_add])
mod = tvm.IRModule.from_expr(call_sub)
x_data = np.random.rand(2, 2).astype("float32")
y_data = np.random.rand(2, 2).astype("float32")
inputs = OrderedDict(
[
("y", y_data),
("x", x_data),
]
)
check_result(mod, inputs, (2, 2), (y_data * y_data) - (x_data + x_data))
# TODO(mbs): The check_aot_executor_result does not support the list-of-targets, mostly because
# tvm.testing.aot.compile_and_run requires the target to be a kind name string, and
# tvm.testing.aot.compile_models requires a single Target object. However, code outside of
# tvm.testing.aot is ready for this more general form.
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_with_target_instance(check_result):
shape = (8, 8)
dtype = "int32"
def make_mod():
x0 = relay.var("x0", shape=shape, dtype=dtype)
y0 = relay.var("y0", shape=shape, dtype=dtype)
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.var("y", shape=shape, dtype=dtype)
call = relay.Call(f, [x, y])
return tvm.IRModule.from_expr(call)
host_target = tvm.target.Target("llvm")
generic_target = tvm.target.Target("llvm", host=host_target)
    # The header attribute is a valid C comment, so compilation proceeds as usual.
good_extern_codegen_target = tvm.target.Target(
{"kind": "ccompiler", "header": "// Good"}, host=host_target
)
# The header attribute is ill-formed, so compilation is expected to fail.
bogus_extern_codegen_target = tvm.target.Target(
{"kind": "ccompiler", "header": "Bogus"}, host=host_target
)
mod = make_mod()
x_data = np.random.rand(*shape).astype(dtype)
y_data = np.random.rand(*shape).astype(dtype)
expected_result = x_data + y_data
inputs = {"x": x_data, "y": y_data}
check_result(
mod, inputs, shape, expected_result, target=[generic_target, good_extern_codegen_target]
)
with pytest.raises(RuntimeError):
check_result(
mod,
inputs,
shape,
expected_result,
target=[generic_target, bogus_extern_codegen_target],
)
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_consts(check_result):
shape = (8, 8)
dtype = "float32"
x = relay.var("x", shape=shape)
y0_data = np.random.uniform(0, 1, shape).astype(dtype)
x0 = relay.var("x0", shape=shape)
y0_const = relay.const(y0_data, dtype)
z = x0 + y0_const
f = relay.Function([x0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x])
mod = tvm.IRModule.from_expr(call)
# Note that while the VMCompiler get_params() will return all 'parameters' from both
# TVM and external codegen compiled code, the GraphExecutor.get_params() will return only
# those from non-external modules. So in the following we'll test by execution rather than
# test by inspection.
x_data = np.random.rand(*shape).astype(dtype)
inputs = {"x": x_data}
expected_result = x_data + y0_data
check_result(mod, inputs, shape, expected_result, target="llvm")
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_padding(check_result):
dtype = "float32"
ishape = (1, 1, 99, 12)
w1shape = (54, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1))
f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data1, weight1])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 54, 50, 6), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1, weight1, weight2], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0, weight0, weight0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_const(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.const(w_data, dtype=dtype)
weight2 = relay.const(w_data, dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(i_data)
check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5)
def test_load_params_with_constants_in_ext_codegen():
    # After binding params and partitioning, graph_module.get_params()
    # might contain parameters that are not graph executor inputs but,
    # for example, constants in an external function.
y_in = np.ones((1,)).astype("float32")
params = {"y": y_in}
mod = tvm.IRModule()
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1,))
xcb = compiler_begin(x, "ccompiler")
ycb = compiler_begin(y, "ccompiler")
z = relay.add(xcb, ycb)
zce = compiler_end(z, "ccompiler")
mod["main"] = relay.Function([x, y], zce)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = relay.transform.PartitionGraph()(mod)
graph_module = relay.build(mod, target="llvm", params=params)
# Params will be stored in metadata module.
assert len(graph_module.get_params()) == 0
lib = update_lib(graph_module.get_lib())
rt_mod = tvm.contrib.graph_executor.create(graph_module.get_graph_json(), lib, tvm.cpu(0))
rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_bind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test bind function."""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm import TVMError
def test_bind_params():
x = relay.var("x")
y = relay.var("y")
z = relay.add(x, y)
f = relay.Function([x, y], z)
fbinded = relay.bind(f, {x: relay.const(1, "float32")})
fexpected = relay.Function([y], relay.add(relay.const(1, "float32"), y))
assert tvm.ir.structural_equal(fbinded, fexpected)
zbinded = relay.bind(z, {y: x})
zexpected = relay.add(x, x)
assert tvm.ir.structural_equal(zbinded, zexpected)
def test_bind_duplicated_params():
a = relay.var("a", shape=(1,))
aa = relay.var("a", shape=(1,))
s = a + aa
func = relay.Function([a, aa], s)
with pytest.raises(TVMError):
relay.build_module.bind_params_by_name(func, {"a": [1.0]})
if __name__ == "__main__":
test_bind_params()
test_bind_duplicated_params()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for module functionality."""
import tvm
from tvm import te
from tvm import relay
from tvm.relay.prelude import Prelude
def constructor_list(p):
list_ctors = p.mod.get_type("List")
optional_ctors = p.mod.get_type("Option")
nat_ctors = p.mod.get_type("nat")
rose_ctors = p.mod.get_type("Tree")
return list_ctors[1:] + optional_ctors[1:] + nat_ctors[1:] + rose_ctors[1:]
def adt_list(p):
list_ctors = p.mod.get_type("List")
optional_ctors = p.mod.get_type("Option")
nat_ctors = p.mod.get_type("nat")
rose_ctors = p.mod.get_type("Tree")
return list_ctors[:1] + optional_ctors[:1] + nat_ctors[:1] + rose_ctors[:1]
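# Hedged clarifying sketch (not part of the original file): get_type returns the
# ADT definition followed by its constructors, e.g. roughly (List, Cons, Nil),
# which is why the helpers above slice with [:1] (the ADT itself) and [1:] (its
# constructors). The function below is illustrative only and never called.
def _get_type_layout_sketch(p):
    list_adt, cons_ctor, nil_ctor = p.mod.get_type("List")
    return list_adt, cons_ctor, nil_ctor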
def test_constructor_tag_round_trip():
mod1 = tvm.IRModule()
p1 = Prelude(mod1)
p1.mod.import_from_std("nat.rly")
mod2 = tvm.IRModule()
p2 = Prelude(mod2)
p2.mod.import_from_std("nat.rly")
# ensure hashes match across modules
ctors1 = constructor_list(p1)
ctors2 = constructor_list(p2)
for i in range(len(ctors1)):
tag = ctors1[i].tag
ctor = mod2.get_constructor(tag)
assert ctor == ctors2[i]
assert ctor.name_hint == ctors1[i].name_hint
def test_constructor_tag_differences():
# ensure that if we have the type data for a given ADT, the tags
# for the constructors of the *same ADT* are simple offsets from
# each other
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
adts = adt_list(p)
for adt in adts:
data = mod[adt]
for i in range(len(data.constructors) - 1):
ctor1 = data.constructors[i]
ctor2 = data.constructors[i + 1]
assert ctor2.tag - ctor1.tag == 1
# make sure there is something present at the MSB
assert ctor1.tag - i != 0
assert ctor2.tag - (i + 1) != 0
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_nodes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test ir"""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.tir.expr import *
from tvm.relay import op
import numpy as np
def check_json_roundtrip(node):
json_str = tvm.ir.save_json(node)
back = tvm.ir.load_json(json_str)
assert tvm.ir.structural_equal(back, node, map_free_vars=True)
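# Hedged usage sketch (not part of the original tests): any Relay IR node can be
# serialized to JSON text and reloaded, and the reloaded node should be
# structurally equal to the original. The helper below is illustrative only.
def _json_roundtrip_sketch():
    node = relay.Var("sketch_var")
    check_json_roundtrip(node)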
# Span
def test_span():
span = relay.Span(None, 1, 2, 3, 4)
assert span.source_name == None
assert span.line == 1
assert span.end_line == 2
assert span.column == 3
assert span.end_column == 4
assert span.same_as(span)
assert span == span
assert isinstance(span, relay.base.Span)
str(span)
# span is not a node so we can't use graph_equal
# to test the round trip
back = tvm.ir.load_json(tvm.ir.save_json(span))
assert back.source_name == span.source_name
assert back.line == span.line
assert back.end_line == span.end_line
assert back.column == span.column
assert back.end_column == span.end_column
def test_constant():
arr = tvm.nd.array(10)
const = relay.Constant(arr)
assert const.data == arr
assert const.span == None
str(const)
check_json_roundtrip(const)
def test_tuple():
fields = tvm.runtime.convert([])
tup = relay.Tuple(fields)
assert tup.fields == fields
assert tup.span == None
str(tup)
check_json_roundtrip(tup)
def test_local_var():
name_hint = "s"
lv = relay.Var(name_hint)
assert lv.name_hint == name_hint
assert lv.type_annotation is None
# assert lv.span == None todo(@jroesch): what do we do about spans
str(lv)
check_json_roundtrip(lv)
t1 = relay.ty.TensorType((), "float")
lv = relay.Var(name_hint, t1)
assert lv.name_hint == name_hint
assert lv.type_annotation == t1
def test_global_var():
name_hint = "g"
gv = relay.GlobalVar(name_hint)
    assert gv.name_hint == name_hint
# assert lv.span == None todo(@jroesch): what do we do about spans
str(gv)
check_json_roundtrip(gv)
def test_function():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.Var(n) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
fn = fn.with_attr("test_attribute", "value")
fn = fn.with_attr("test_attribute1", "value1")
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
assert fn.attrs["test_attribute"] == "value"
assert fn.attrs["test_attribute1"] == "value1"
str(fn)
check_json_roundtrip(fn)
def test_function_attrs():
param_names = ["a", "b", "c", "d"]
params = tvm.runtime.convert([relay.var(n, shape=(5, 2)) for n in param_names])
ret_type = relay.TupleType(tvm.runtime.convert([]))
body = relay.Tuple(tvm.runtime.convert([]))
type_params = tvm.runtime.convert([])
fn = relay.Function(params, body, ret_type, type_params)
model_params = {}
for param in params[:1]:
cty = param.type_annotation
tensor = np.random.rand(*[int(sh) for sh in cty.shape]).astype(cty.dtype)
model_params[param] = relay.Constant(tvm.nd.array(tensor))
fn = fn.with_attr("__params__", model_params)
assert fn.params == params
assert fn.body == body
assert fn.type_params == type_params
assert fn.span == None
str(fn)
check_json_roundtrip(fn)
json_str = tvm.ir.save_json(fn)
fn_after = tvm.ir.load_json(json_str)
model_params_after = fn_after.attrs["__params__"]
after_keys = [item[0] for item in model_params_after.items()]
for key1, key2 in zip(model_params, after_keys):
assert key1.name_hint == key2.name_hint
p1 = model_params[key1]
p2 = model_params_after[key2]
np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy())
def test_call():
op = relay.Var("f")
arg_names = ["a", "b", "c", "d"]
args = tvm.runtime.convert([relay.Var(n) for n in arg_names])
call = relay.Call(op, args, None, None)
assert call.op == op
assert call.args == args
assert call.span == None
str(call)
check_json_roundtrip(call)
def test_let():
lv = relay.Var("x")
ty = None
arr = tvm.nd.array(10)
value = relay.Constant(arr)
# I would prefer that the order of arguments
# matches syntax let x: t = v in b
let = relay.Let(lv, value, lv)
assert let.var == lv
assert let.value == value
assert let.body == lv
assert let.span == None
str(let)
check_json_roundtrip(let)
def test_if():
cond = relay.Var("cond")
left = relay.Var("left")
right = relay.Var("right")
ife = relay.If(cond, left, right)
assert ife.cond == cond
assert ife.true_branch == left
assert ife.false_branch == right
assert ife.span == None
str(ife)
check_json_roundtrip(ife)
def test_tuple_get_item():
tup = relay.Var("tuple")
get = relay.TupleGetItem(tup, 1)
assert get.tuple_value == tup
assert get.index == 1
str(get)
check_json_roundtrip(get)
def test_op():
add = op.op.get("add")
check_json_roundtrip(add)
def test_conv2d_attrs():
data = relay.var("data", shape=(1, 3, 224, 224))
param = relay.var("param", shape=(64, 3, 7, 7))
out = op.nn.conv2d(data, param, strides=(2, 2), padding=(3, 3), channels=64, kernel_size=(7, 7))
check_json_roundtrip(out)
# Commented due to weird memory allocation issue
# def test_large_graph():
# Test large graphs to avoid stack overflow in serialize/deserialize
# size = int(1e5)
# var = [relay.var("var_" + str(i), shape=(2, 3)) for i in range(size)]
# body = var[-1]
# for i in range(size, 1, -1):
# body = relay.Let(var[i - 1], op.add(var[i - 2], var[i - 2]), body)
# func = relay.Function([var[0]], body)
# check_json_roundtrip(func)
if __name__ == "__main__":
test_span()
test_constant()
test_tuple()
test_local_var()
test_global_var()
test_function()
test_function_attrs()
test_call()
test_let()
test_if()
test_tuple_get_item()
test_op()
test_conv2d_attrs()
# Commented due to weird memory allocation issue
    # test_large_graph()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.op import op as _op
def test_op_attr():
log_op = relay.op.get("log")
@tvm.ir.register_op_attr("exp", "ftest")
def test(x):
return x + 1
assert log_op.num_inputs == 1
assert log_op.get_attr("ftest") is None
assert relay.op.get("exp").get_attr("ftest")(1) == 2
def test_op_reset_attr():
"""Tests reset_attr functionality."""
def add1(x):
return x + 1
def add2(x):
return x + 2
# Register fadd1 and fadd2 attributes.
tvm.ir.register_op_attr("exp", "fadd1", add1)
tvm.ir.register_op_attr("log", "fadd1", add1)
tvm.ir.register_op_attr("log", "fadd2", add2)
# Reset log fadd1 attr.
log_op = relay.op.get("log")
log_op.reset_attr("fadd1")
    # Check that the fadd1 attr has been reset.
assert log_op.get_attr("fadd1") is None
    # Check that the fadd1 attrs of other ops are intact.
assert relay.op.get("exp").get_attr("fadd1")(1) == 2
# Check that other attrs of the log op are intact.
assert relay.op.get("log").get_attr("fadd2")(1) == 3
def test_op_temp_attr():
"""Tests reset_attr functionality."""
def add1(x):
return x + 1
def add2(x):
return x + 2
# Set original attr value is add1.
tvm.ir.register_op_attr("sqrt", "ftest", add1)
with TempOpAttr("sqrt", "ftest", add2):
# Check that the attr value is updated to add2.
assert relay.op.get("sqrt").get_attr("ftest")(1) == 3
# Check that the attr value is recovered to add1.
assert relay.op.get("sqrt").get_attr("ftest")(1) == 2
def test_op_level1():
x = relay.Var("x")
for op_name in ["log", "exp", "sqrt", "rsqrt", "tanh"]:
y = getattr(relay, op_name)(x)
assert y.op.name == op_name
assert y.op.support_level == 1
assert y.args[0] == x
def test_op_level3():
x = relay.Var("x")
for op_name in ["ceil", "floor", "trunc", "round", "abs", "negative"]:
y = getattr(relay, op_name)(x)
assert y.op.name == op_name
assert y.op.support_level == 3
assert y.args[0] == x
def test_op_register():
"""Tests register_op functionality."""
op_name = "custom_op"
_op.register(op_name, r"code(Add two tensor with inner broadcasting.)code")
_op.get(op_name).set_num_inputs(2)
_op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
_op.get(op_name).add_argument("data_1", "Tensor", "The input data tensor.")
# call default relation functions
_op.get(op_name).add_type_rel("Identity")
_op.get(op_name).set_support_level(1)
_op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
_op.register_stateful(op_name, False)
assert _op.get(op_name).name == op_name
assert _op.get(op_name).num_inputs == 2
assert _op.get(op_name).get_attr("TOpPattern") == _op.OpPattern.ELEMWISE
assert _op.get(op_name).get_attr("TOpIsStateful") == False
if __name__ == "__main__":
test_op_attr()
test_op_reset_attr()
test_op_temp_attr()
test_op_level1()
test_op_level3()
test_op_register()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
import tvm.relay.testing
from numpy import isclose
from typing import Union
SEMVER = '#[version = "0.0.5"]\n'
BINARY_OPS = {
"*": relay.multiply,
"/": relay.divide,
"+": relay.add,
"-": relay.subtract,
"<": relay.less,
">": relay.greater,
"<=": relay.less_equal,
">=": relay.greater_equal,
"==": relay.equal,
"!=": relay.not_equal,
}
TYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
"int8x4",
"uint1x4",
"float16x4",
}
LIST_DEFN = """
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
def assert_graph_equal(lhs, rhs):
tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True)
def graph_equal(lhs, rhs):
return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True)
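# Both helpers pass map_free_vars=True so that free variables in a parsed expression
# can be matched against the corresponding variables in the hand-built expression.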
def roundtrip_expr(expr):
text = tvm.relay.Expr.astext(expr, show_meta_data=False)
x = tvm.parser.parse_expr(text)
assert_graph_equal(x, expr)
# Testing Utilities for expressions.
def roundtrip(expr):
x = tvm.parser.fromtext(expr.astext())
assert_graph_equal(x, expr)
def parse_text(code):
expr = tvm.parser.parse_expr(code)
roundtrip_expr(expr)
return expr
def parses_as(code, expr):
# type: (str, relay.Expr) -> bool
parsed = parse_text(code)
result = graph_equal(parsed, expr)
return result
# Testing Utilities for full modules.
def parse_module(code):
mod = tvm.parser.parse(SEMVER + code)
roundtrip(mod)
return mod
def assert_parses_as(code, expr):
parsed = parse_text(code)
assert_graph_equal(parsed, expr)
def assert_parse_module_as(code, mod):
mod = tvm.relay.transform.InferType()(mod)
parsed = parse_module(code)
assert_graph_equal(parsed, mod)
def get_scalar(x):
# type: (relay.Constant) -> (Union[float, int, bool])
return x.data.numpy().item()
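# Shared fixtures used throughout the parser tests below.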
int32 = relay.scalar_type("int32")
_ = relay.Var("_")
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)
Y_ANNO = relay.Var("y", int32)
UNIT = relay.Tuple([])
def test_comments():
assert_parses_as(
"""
// This is a line comment!
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
This is still a block comment!
*/
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
/*Block comment is recursive!*/
*/
()
""",
UNIT,
)
def test_int_literal():
assert isinstance(parse_text("1"), relay.Constant)
assert isinstance(parse_text("1").data, tvm.nd.NDArray)
assert get_scalar(parse_text("1")) == 1
assert get_scalar(parse_text("10")) == 10
assert get_scalar(parse_text("0")) == 0
assert get_scalar(parse_text("-100")) == -100
assert get_scalar(parse_text("-05")) == -5
assert get_scalar(parse_text("9223372036854775807")) == 9223372036854775807
assert get_scalar(parse_text("-42i")) == -42
assert get_scalar(parse_text("-42i16")) == -42
assert get_scalar(parse_text("-42i32")) == -42
assert get_scalar(parse_text("-42i64")) == -42
assert_parses_as("-42i16", relay.const(-42, "int16"))
assert_parses_as("-42i32", relay.const(-42, "int32"))
assert_parses_as("-42i", relay.const(-42, "int32"))
assert_parses_as("-42", relay.const(-42, "int32"))
assert_parses_as("-42i64", relay.const(-42, "int64"))
assert_parses_as("2147483647", relay.const(2147483647, "int32"))
assert_parses_as("2147483648", relay.const(2147483648, "int64"))
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("2147483648i32")
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("32768i16")
def test_float_literal():
assert get_scalar(parse_text("1.0f")) == 1.0
assert isclose(get_scalar(parse_text("1.56667f")), 1.56667)
assert get_scalar(parse_text("0.0f")) == 0.0
assert get_scalar(parse_text("-10.0f")) == -10.0
# scientific notation
assert isclose(get_scalar(parse_text("1e-1f")), 1e-1)
assert get_scalar(parse_text("1e+1f")) == 1e1
assert isclose(get_scalar(parse_text("1E-1f")), 1e-1)
assert get_scalar(parse_text("1E+1f")) == 1e1
assert isclose(get_scalar(parse_text("1.0e-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0e+1f")) == 1.0e1
assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0E+1f")) == 1.0e1
assert get_scalar(parse_text("3f16")) == 3.0
assert get_scalar(parse_text("3f32")) == 3.0
assert_parses_as("3f16", relay.const(3.0, "float16"))
assert_parses_as("3f32", relay.const(3.0, "float32"))
assert_parses_as("3f", relay.const(3.0, "float32"))
assert_parses_as("3f64", relay.const(3.0, "float64"))
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("3.40283e+38f32")
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("65505f16")
def test_bool_literal():
assert get_scalar(parse_text("True")) == True
assert get_scalar(parse_text("False")) == False
assert_parses_as("True", relay.const(True, "bool"))
def test_negative():
# need to handle parsing non-literal operations
# assert isinstance(parse_text("let %x = 1; -%x").body, relay.Call)
assert get_scalar(parse_text("--10")) == 10
assert get_scalar(parse_text("---10")) == -10
def test_bin_op():
for bin_op in BINARY_OPS.keys():
assert_parses_as(
"1 {} 1".format(bin_op), BINARY_OPS.get(bin_op)(relay.const(1), relay.const(1))
)
def test_parens():
assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1"))
assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)"))
def test_op_assoc():
assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1"))
assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))"))
def test_vars():
# var
var = parse_text("let %foo = (); %foo")
assert isinstance(var.body, relay.Var)
assert var.body.name_hint == "foo"
# global var
global_var = parse_text("@foo")
assert isinstance(global_var, relay.GlobalVar)
assert global_var.name_hint == "foo"
# operator id
op = parse_text("add")
assert isinstance(op, tvm.ir.Op)
assert op.name == "add"
# operator id with prefix
op = parse_text("nn.global_avg_pool2d")
assert isinstance(op, tvm.ir.Op)
assert op.name == "nn.global_avg_pool2d"
def test_meta_ref():
with pytest.raises(tvm.error.DiagnosticError):
meta_op = parse_text("meta[type_key][1337]")
assert meta_op.attrs.node_type_key == "type_key"
assert meta_op.attrs.node_index == 1337
def test_let():
assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT))
assert_parses_as(
"""
let %x = 1;
let %y = 2;
()
""",
relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)),
)
def test_seq():
assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT))
assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT))
def test_graph():
code = "%0 = (); %1 = 1; (%0, %0, %1)"
assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)]))
def test_graph_single():
assert_parses_as("%1 = (); %1", relay.Tuple([]))
def test_let_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let @x = 1; ()")
def test_let_op():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let x = 1; ()")
def test_tuple():
assert_parses_as("()", relay.Tuple([]))
assert_parses_as("(0,)", relay.Tuple([relay.const(0)]))
assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), relay.const(1)]))
assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)]))
def test_tuple_proj():
x = relay.var("x", shape=())
assert_parses_as(
"free_var %x: float32; %x((%x,).0, %x)",
relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]),
)
def test_func():
# 0 args
assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, []))
# 1 arg
assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, []))
# 2 args
assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, []))
# annotations
assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, []))
# Refactor the attribute syntax and printing.
#
# # attributes
# assert_parses_as(
# "fn (n=5) { () }",
# relay.Function([], UNIT, None, None, tvm.ir.make_node("DictAttrs", n=relay.const(5)))
# )
# TODO(@jmp): Crashes if %x isn't annotated.
def test_defn():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
%x
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_recursive_call():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
@id(%x)
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_ifelse():
assert_parses_as(
"""
if (True) {
0
} else {
1
}
""",
relay.If(relay.const(True), relay.const(0), relay.const(1)),
)
def test_ifelse_scope():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
if (True) {
let %x = ();
()
} else {
%x
}
"""
)
def test_ref():
program = """
#[version = "0.0.5"]
def @main(%x: float32) {
%0 = ref(%x);
ref_write(%0, 1f);
ref_read(%0)
}
"""
tvm.parser.parse(program)
def test_call():
# select the right function to call: simple identifier case
id_func = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
10 * %id(10)
""",
relay.Let(
id_func,
relay.Function([X], X, None, []),
relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])),
),
)
# 0 args
constant = relay.Var("constant")
assert_parses_as(
"""
let %constant = fn () { 0 };
%constant()
""",
relay.Let(
constant,
relay.Function([], relay.const(0), None, []),
relay.Call(constant, [], None, None),
),
)
# 1 arg
id_var = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
%id(1)
""",
relay.Let(
id_var,
relay.Function([X], X, None, []),
relay.Call(id_var, [relay.const(1)], None, None),
),
)
# 2 args
multiply = relay.Var("multiply")
assert_parses_as(
"""
let %multiply = fn (%x, %y) { %x * %y };
%multiply(0, 0)
""",
relay.Let(
multiply,
relay.Function([X, Y], relay.multiply(X, Y), None, []),
relay.Call(multiply, [relay.const(0), relay.const(0)], None, None),
),
)
# anonymous function
assert_parses_as(
"""
(fn (%x) { %x })(0)
""",
relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None),
)
# curried function
curried_mult = relay.Var("curried_mult")
assert_parses_as(
"""
let %curried_mult =
fn (%x) {
fn (%y) {
%x * %y
}
};
%curried_mult(0);
%curried_mult(0)(0)
""",
relay.Let(
curried_mult,
relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []),
relay.Let(
_,
relay.Call(curried_mult, [relay.const(0)], None, None),
relay.Call(
relay.Call(curried_mult, [relay.const(0)], None, None),
[relay.const(0)],
None,
None,
),
),
),
)
# op
assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None))
# Types
def test_incomplete_type():
assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT))
def test_builtin_types():
for builtin_type in TYPES:
parse_text("let %_ : {} = (); ()".format(builtin_type))
def test_tensor_type():
assert_parses_as(
"let %_ : Tensor[(), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(?, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT),
)
def test_function_type():
assert_parses_as(
"""
let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([], int32, [], [])),
relay.Function([], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32], int32, [], [])),
relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
relay.Function(
[relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []
),
UNIT,
),
)
def test_tuple_type():
assert_parses_as(
"""
let %_: () = (); ()
""",
relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT),
)
assert_parses_as(
"""
let %_: (int32,) = (0,); ()
""",
relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT),
)
assert_parses_as(
"""
let %_: (int32, int32) = (0, 1); ()
""",
relay.Let(
relay.Var("_", relay.TupleType([int32, int32])),
relay.Tuple([relay.const(0), relay.const(1)]),
UNIT,
),
)
def test_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { Nil }
""",
mod,
)
def test_adt_any():
code = """
type my_dtype {
my_cons(Tensor[(?, 1), uint16]),
}
"""
mod = parse_module(code)
items = mod.type_definitions.items()
global_type_var, type_data = items[0]
assert global_type_var.name_hint == "my_dtype"
ctors = type_data.constructors
assert len(ctors) == 1
my_cons = ctors[0]
assert my_cons.name_hint == "my_cons"
ty_shape = my_cons.inputs[0].shape
assert isinstance(ty_shape[0], tvm.tir.Any)
assert ty_shape[1] == 1
def test_empty_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { }
""",
mod,
)
def test_multiple_cons_defn():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
prog = relay.TypeData(
list_var,
[typ_var],
[
relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var),
relay.Constructor("Nil", [], list_var),
],
)
mod[list_var] = prog
assert_parse_module_as(LIST_DEFN, mod)
def test_multiple_type_param_defn():
glob_typ_var = relay.GlobalTypeVar("Either")
typ_var_a = relay.TypeVar("A")
typ_var_b = relay.TypeVar("B")
prog = relay.TypeData(
glob_typ_var,
[typ_var_a, typ_var_b],
[
relay.Constructor("Left", [typ_var_a], glob_typ_var),
relay.Constructor("Right", [typ_var_b], glob_typ_var),
],
)
mod = tvm.IRModule()
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Either[A, B] {
Left(A),
Right(B),
}
""",
mod,
)
def test_match():
# pair each match keyword with whether it specifies a complete match or not
match_keywords = [("match", True), ("match?", False)]
for (match_keyword, is_complete) in match_keywords:
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
length_var = relay.GlobalVar("length")
typ_var = relay.TypeVar("A")
input_type = list_var(typ_var)
input_var = relay.Var("xs", input_type)
rest_var = relay.Var("rest")
cons_case = relay.Let(
relay.var("", type_annotation=None),
UNIT,
relay.add(relay.const(1), relay.Call(length_var, [rest_var])),
)
body = relay.Match(
input_var,
[
relay.Clause(
relay.PatternConstructor(
cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)]
),
cons_case,
),
relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)),
],
complete=is_complete,
)
length_func = relay.Function([input_var], body, int32, [typ_var])
mod[length_var] = length_func
assert_parse_module_as(
"""
%s
def @length[A](%%xs: List[A]) -> int32 {
%s (%%xs) {
Cons(_, %%rest : List[A]) => {
();
1 + @length(%%rest)
},
Nil => 0,
}
}
"""
% (LIST_DEFN, match_keyword),
mod,
)
def test_adt_cons_expr():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
make_singleton_var = relay.GlobalVar("make_singleton")
input_var = relay.Var("x", int32)
make_singleton_func = relay.Function(
[input_var], cons_constructor(input_var, nil_constructor()), list_var(int32)
)
mod[make_singleton_var] = make_singleton_func
assert_parse_module_as(
"""
%s
def @make_singleton(%%x: int32) -> List[int32] {
Cons(%%x, Nil)
}
"""
% LIST_DEFN,
mod,
)
def test_duplicate_adt_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_module(
"""
%s
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
% LIST_DEFN
)
def test_duplicate_adt_cons():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Haha { Lmao }
"""
)
def test_duplicate_adt_cons_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Lmao { Ayy }
"""
)
def test_duplicate_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
def @id[A](%x: A) -> A { x }
def @id[A](%x: A) -> A { x }
"""
)
def test_extern_adt_defn():
mod = tvm.IRModule()
extern_var = relay.GlobalTypeVar("T")
typ_var = relay.TypeVar("A")
extern_def = relay.TypeData(extern_var, [typ_var], [])
mod[extern_var] = extern_def
assert_parse_module_as(
"""
extern type T[A]
""",
mod,
)
def test_import_grad():
mod = tvm.IRModule()
mod.import_from_std("gradient.rly")
def test_mlp():
mod, _ = relay.testing.mlp.get_workload(1)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
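# inline_params binds each named parameter of @main to a constant from `params` and
# rebuilds the function over its remaining free variables, so the module round-trips
# through text without separate parameter bindings.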
def inline_params(mod, params):
main_fn = mod["main"]
str_to_var = {}
for param in main_fn.params:
str_to_var[param.name_hint] = param
bind_map = {}
for param in params:
bind_map[str_to_var[param]] = relay.const(params[param])
body = relay.bind(main_fn.body, bind_map)
main_fn = relay.Function(relay.analysis.free_vars(body), body)
mod._add("main", main_fn, True)
return mod
def test_mlp_inlined_params():
mod, params = relay.testing.mlp.get_workload(1)
mod = inline_params(mod, params)
mod = relay.transform.InferType()(mod)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def test_tuple_return_value():
program = """
type Box[T] {
constructor(T)
}
def @example() {
%0 = ();
%1 = constructor(%0);
%2 = constructor(0f);
(%1, %2,)
}
"""
parse_module(program)
def test_parse_if_in_binding():
program = """
def @example(%b: bool) {
%0 = if (%b) {
1
} else {
0
};
%0
}
"""
parse_module(program)
def test_op_string_attr():
call = parse_text(
"""
free_var %x: Tensor[(1, 32, 32, 3), float32];
free_var %y: Tensor[(1, 1, 3, 3), float32];
nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO")
"""
)
assert isinstance(call.op, tvm.ir.Op)
assert call.op.name == "nn.conv2d"
assert call.attrs.data_layout == "NHWC"
assert call.attrs.kernel_layout == "HWIO"
def test_load_prelude():
mod = tvm.IRModule()
mod.import_from_std("prelude.rly")
tvm.parser.parse(mod.astext())
def test_call_attrs():
def get_func(shape, dtype):
x0 = relay.var("data", shape=shape, dtype=dtype)
w0 = relay.var("weight", shape=shape, dtype=dtype)
a = relay.nn.dense(x0, w0)
b = relay.nn.relu(a)
d = relay.add(b, relay.const(1.0, dtype=dtype))
return relay.Function([x0, w0], d)
# build relay graph
shape = (2, 4)
dtype = "float32"
sub_func = get_func(shape, dtype)
p0 = relay.var("p0", shape=shape, dtype=dtype)
p1 = relay.var("p1", shape=shape, dtype=dtype)
attr = tvm.ir.make_node("attrs.TestAttrs", name="func_call_attrs")
call = relay.Call(sub_func, [p0, p1], attrs=attr)
func = relay.Function([p0, p1], call)
# build relay module
mod = tvm.IRModule()
mod["main"] = func
mod = tvm.relay.transform.InferType()(mod)
# assert equal
program = """
def @main(%p0: Tensor[(2, 4), float32], %p1: Tensor[(2, 4), float32]) {
%2 = fn (%data: Tensor[(2, 4), float32], %weight: Tensor[(2, 4), float32]) {
%0 = nn.dense(%data, %weight, units=None);
%1 = nn.relu(%0);
add(%1, 1f)
};
%2(%p0, %p1, name="func_call_attrs", attrs_type_key="attrs.TestAttrs")
}
"""
parsed = parse_module(program)
assert_graph_equal(parsed, mod)
def test_tokenize_inf():
x = relay.var("x", shape=(3, 4), dtype="float32")
y = relay.clip(x, -np.inf, np.inf)
f = relay.Function([x], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.AnnotateSpans()(mod)
def test_func_attrs():
attrs = tvm.ir.make_node("DictAttrs", **{"Primitive": 1, "relay.reshape_only": 1})
x = relay.var("x", shape=(2, 3))
func = relay.Function([x], relay.reshape(x, (-1,)), attrs=attrs)
assert_parses_as(func.astext(), func)
def test_init_module_and_metatable():
init_metatable = {"relay.Constant": [relay.const(np.random.rand(2, 3), dtype="float32")]}
init_module = tvm.parser.fromtext(
SEMVER
+ """
def @f(%y : Tensor[(2, 3), float32]) -> Tensor[(2, 3), float32] {
negative(%y)
}
""",
)
mod = tvm.parser.parse(
SEMVER
+ """
def @main(%x: Tensor[(2, 3), float32]) {
add(@f(%x), meta[relay.Constant][0])
}
""",
"from_string",
init_module,
init_metatable,
)
roundtrip(mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_structural_equal_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import run_opt_pass
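# consistent_equal cross-checks structural_equal in both argument orders and against
# structural_hash; it raises if the results disagree, so the tests catch asymmetric
# equality or hash/equality mismatches rather than silently passing.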
def consistent_equal(x, y, map_free_vars=False):
struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars)
struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars)
xhash = tvm.ir.structural_hash(x, map_free_vars)
yhash = tvm.ir.structural_hash(y, map_free_vars)
if struct_equal0 != struct_equal1:
raise ValueError(
"Non-communicative {} vs {}, sequal0={}, sequal1={}".format(
x, y, struct_equal0, struct_equal1
)
)
# NOTE: hash collisions can happen but should be rare.
# we can confirm that hash collisions don't happen for our test cases
if struct_equal0 != (xhash == yhash):
raise ValueError(
"Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format(
x, y, struct_equal0, xhash, yhash
)
)
return struct_equal0
def test_tensor_type_sequal():
t1 = relay.TensorType((3, 4), "float32")
t2 = relay.TensorType((3, 4), "float32")
t3 = relay.TensorType((3, 4, 5), "float32")
assert t1 == t2
assert t1 != t3
t1 = relay.TensorType((), "float32")
t2 = relay.TensorType((), "float32")
assert t1 == t2
def test_incomplete_type_sequal():
t1 = relay.IncompleteType(relay.TypeKind.ShapeVar)
t2 = relay.IncompleteType(relay.TypeKind.Type)
t3 = relay.IncompleteType(relay.TypeKind.Type)
# only equal when there is pointer equality
assert t2 == t2
assert t1 == t1
assert t1 != t2
assert t2 != t3
def test_type_param_sequal():
t1 = relay.TypeVar("v1", relay.TypeKind.Type)
t2 = relay.TypeVar("v2", relay.TypeKind.ShapeVar)
t3 = relay.TypeVar("v3", relay.TypeKind.Type)
# only pointer equality and eq_map allow equal params
assert t1 == t1
assert t2 == t2
assert t1 != t2 # different kind
assert t1 != t3 # not in eq_map
# function types are the only way to put type params
# in eq map
ft1 = relay.FuncType(
tvm.runtime.convert([]), t1, tvm.runtime.convert([t1]), tvm.runtime.convert([])
)
ft2 = relay.FuncType(
tvm.runtime.convert([]), t3, tvm.runtime.convert([t3]), tvm.runtime.convert([])
)
# actually an invalid type because t2 is wrong kind
ft3 = relay.FuncType(
tvm.runtime.convert([]), t2, tvm.runtime.convert([t2]), tvm.runtime.convert([])
)
assert ft1 == ft2
assert ft1 != ft3 # kinds still do not match
def test_func_type_sequal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tp3 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
tr1 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp1, tp3]), 1, None)
tr2 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp2, tp4]), 1, None)
tr3 = relay.TypeRelation(identity, tvm.runtime.convert([tp1, tp3]), 1, None)
ft = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1]),
)
translate_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp2,
tvm.runtime.convert([tp2, tp4]),
tvm.runtime.convert([tr2]),
)
assert ft == translate_vars
different_args = relay.FuncType(
tvm.runtime.convert([t1]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([tr1])
)
assert ft != different_args
different_order = relay.FuncType(
tvm.runtime.convert([t2, t1]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1]),
)
assert ft != different_order
no_rel = relay.FuncType(
tvm.runtime.convert([t1, t2]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([])
)
assert ft != no_rel
more_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp2,
tvm.runtime.convert([tp1, tp2, tp3]),
tvm.runtime.convert([tr1]),
)
assert ft != more_vars
all_the_vars = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp2, tp3, tp4]),
tvm.runtime.convert([tr1, tr2]),
)
assert ft != all_the_vars
different_rel = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr3]),
)
assert ft != different_rel
more_rels = relay.FuncType(
tvm.runtime.convert([t1, t2]),
tp1,
tvm.runtime.convert([tp1, tp3]),
tvm.runtime.convert([tr1, tr3]),
)
assert ft != more_rels
def test_tuple_type_sequal():
t1 = relay.TensorType((1, 2, 3), "float32")
t2 = relay.TensorType((1, 2, 3, 4), "float32")
tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
tup1 = relay.TupleType(tvm.runtime.convert([t1, t2, tp1]))
tup2 = relay.TupleType(tvm.runtime.convert([t1, t2, tp1]))
tup3 = relay.TupleType(tvm.runtime.convert([t2, t1, tp1]))
tup4 = relay.TupleType(tvm.runtime.convert([t1, t2, tp2]))
# as long as the types are alpha-equal and in the same order,
# the tuples should be alpha-equal
assert tup1 == tup2
assert tup1 != tup3
assert tup1 != tup4
def test_type_relation_sequal():
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
# functions are compared only by pointer equality so
# we need to be sure to use the same pointers
broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))
tr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1)
same = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1)
diff_func = relay.TypeRelation(identity, tvm.runtime.convert([t1, t2]), 1, attr1)
diff_order = relay.TypeRelation(broadcast, tvm.runtime.convert([t2, t1]), 1, attr1)
diff_args = relay.TypeRelation(broadcast, tvm.runtime.convert([t2, t3]), 1, attr1)
diff_attr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr2)
same_attr = relay.TypeRelation(broadcast, tvm.runtime.convert([t1, t2]), 1, attr1_same)
bigger = relay.TypeRelation(identity, tvm.runtime.convert([t1, t3, t2]), 2, attr1)
diff_num_inputs = relay.TypeRelation(identity, tvm.runtime.convert([t1, t3, t2]), 1, attr2)
# func, number of args, input count, and order should be the same
assert tr == same
assert tr != diff_func
assert tr != diff_order
assert tr != diff_args
assert tr != diff_attr
assert tr == same_attr
assert tr != bigger
assert bigger != diff_num_inputs
def test_type_call_sequal():
h1 = relay.GlobalTypeVar("h1")
h2 = relay.GlobalTypeVar("h2")
t1 = relay.TensorType((1, 2), "float32")
t2 = relay.TensorType((1, 2, 3), "float32")
t3 = relay.TensorType((1, 2, 3, 4), "float32")
t4 = relay.TensorType((), "float32")
tc = relay.TypeCall(h1, [t1, t2, t3])
same = relay.TypeCall(h1, [t1, t2, t3])
different_func = relay.TypeCall(h2, [t1, t2, t3])
different_arg = relay.TypeCall(h1, [t1, t2, t4])
fewer_args = relay.TypeCall(h1, [t1, t2])
more_args = relay.TypeCall(h1, [t1, t2, t3, t4])
different_order_args = relay.TypeCall(h1, [t3, t2, t1])
assert tc == same
assert tc != different_func
assert tc != fewer_args
assert tc != more_args
assert tc != different_order_args
def test_constant_sequal():
x = relay.const(1)
y = relay.const(2)
assert consistent_equal(x, x)
assert not consistent_equal(x, y)
assert consistent_equal(x, relay.const(1))
def test_type_node_sequal():
v1 = relay.TypeVar("v1", 6)
v2 = relay.TypeVar("v2", 6)
assert not consistent_equal(v1, v2)
v1 = relay.TypeVar("v1", 0)
v2 = relay.TypeVar("v2", 6)
assert not consistent_equal(v1, v2)
def test_type_node_incompatible_sequal():
v1 = relay.TypeVar("v1", 6)
v2 = relay.Var("v2")
assert not consistent_equal(v1, v2)
def test_expr_node_incompatible_sequal():
v1 = relay.Var("v1")
v2 = relay.PatternVar(relay.Var("v2"))
assert not consistent_equal(v1, v2)
def test_var_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
# normally only pointer equality
assert consistent_equal(v1, v1)
assert not consistent_equal(v1, v2)
# let node allows for setting the eq_map
l1 = relay.Let(v1, relay.const(1), v1)
l2 = relay.Let(v2, relay.const(1), v2)
l3 = relay.Let(v1, relay.const(1), v2)
assert consistent_equal(l1, l2)
assert not consistent_equal(l1, l3)
# type annotations
tt1 = relay.TensorType([], "int32")
tt2 = relay.TensorType([], "int32")
tt3 = relay.TensorType([], "int64")
v3 = relay.Var("v3", tt1)
v4 = relay.Var("v4", tt2)
v5 = relay.Var("v5", tt3)
l4 = relay.Let(v3, relay.const(1), v3)
l5 = relay.Let(v4, relay.const(1), v4)
l6 = relay.Let(v5, relay.const(1), v5)
# same annotations
assert consistent_equal(l4, l5)
# different annotations
assert not consistent_equal(l4, l6)
# one null annotation
assert not consistent_equal(l1, l4)
def test_global_var_sequal():
v1 = relay.GlobalVar("v1")
v2 = relay.GlobalVar("v2")
# only pointer equality suffices (smoke test)
assert consistent_equal(v1, v1)
assert not consistent_equal(v1, v2)
def test_tuple_sequal():
v0 = relay.Var("v0")
v1 = relay.Var("v1")
v2 = relay.Var("v2")
# unit value is a valid tuple
assert consistent_equal(relay.Tuple([]), relay.Tuple([]))
tup = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
same = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
assert consistent_equal(tup, same)
# use the eq_map
let_tup = relay.Let(v1, tup, v1)
let_mapped = relay.Let(
v2, relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]), v2
)
assert consistent_equal(let_tup, let_mapped)
more_fields = relay.Tuple(
[v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)]), v2]
)
assert not consistent_equal(tup, more_fields)
fewer_fields = relay.Tuple([v1, relay.const(2), relay.const(3)])
assert not consistent_equal(tup, fewer_fields)
different_end = relay.Tuple([v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(5)])])
assert not consistent_equal(tup, different_end)
different_start = relay.Tuple(
[v2, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]
)
assert not consistent_equal(tup, different_start)
longer_at_end = relay.Tuple(
[v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4), relay.const(5)])]
)
assert not consistent_equal(tup, longer_at_end)
def test_tuple_get_item_sequal():
x = relay.Var("x")
y = relay.Var("y")
assert not consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(y, 1))
assert not consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 2))
assert consistent_equal(relay.TupleGetItem(x, 1), relay.TupleGetItem(x, 1))
def test_function_attr():
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
func0 = relay.Function([x0, w00, w01, w02], q00)
func0 = func0.with_attr("FuncName", "a")
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
func1 = relay.Function([x1, w10, w11, w12], q10)
func1 = func1.with_attr("FuncName", "b")
assert not consistent_equal(func0, func1)
def test_function_sequal():
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((4, 5, 6), "int8")
tt3 = relay.TupleType([tt1, tt2])
v1 = relay.Var("v1", tt1)
v2 = relay.Var("v2", tt2)
v3 = relay.Var("v3", tt3)
v4 = relay.Var("v4", tt2)
vret = relay.Constant(tvm.nd.array(np.ones(1)))
tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
tp3 = relay.TypeVar("tp3", relay.TypeKind.ShapeVar)
tp4 = relay.TypeVar("tp4", relay.TypeKind.ShapeVar)
basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
basic_tps = [tp1, tp2]
func = relay.Function([v1, v2], v1, tt2, basic_tps)
mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
assert consistent_equal(func, mapped)
fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
assert not consistent_equal(func, fewer_params)
more_params = relay.Function(
[relay.Var("v3", tt1), relay.Var("v4", tt2), relay.Var("v2", tt2)], v4, tt2, basic_tps
)
assert not consistent_equal(func, more_params)
params_unordered = relay.Function([v2, v1], v1, tt2, basic_tps)
assert not consistent_equal(func, params_unordered)
params_mismatch = relay.Function([v1, v3], v1, tt2, basic_tps)
assert not consistent_equal(func, params_mismatch)
# also would not typecheck
ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
assert not consistent_equal(func, ret_type_mismatch)
# also mis-typed
different_body = relay.Function(basic_args, v3, tt2, basic_tps)
assert not consistent_equal(func, different_body)
fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
assert not consistent_equal(func, fewer_type_params)
more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
assert not consistent_equal(func, more_type_params)
type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
assert not consistent_equal(func, type_params_unordered)
different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
assert not consistent_equal(func, different_type_params)
# a well-typed example that also differs in body, ret type, and type params
tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
assert not consistent_equal(func, tupled_example)
# nullable
no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
# both null
assert consistent_equal(no_ret_type, no_ret_type)
# one null
assert not consistent_equal(func, no_ret_type)
assert not consistent_equal(no_ret_type, func)
def test_call_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))
tt1 = relay.TensorType((1, 2, 3), "float32")
tt2 = relay.TensorType((), "int8")
basic_args = [relay.const(1), relay.const(2), v2, relay.Tuple([])]
# manually write out the args to ensure the comparison does not rely on
# pointer equality
call = relay.Call(v1, [relay.const(1), relay.const(2), v2, relay.Tuple([])], attr1, [tt1])
same = relay.Call(v1, basic_args, attr1, [tt1])
assert consistent_equal(call, same)
different_fn = relay.Call(v2, basic_args, attr1, [tt1])
assert not consistent_equal(call, different_fn)
fewer_args = relay.Call(v1, [relay.const(1), relay.const(2), v2], attr1, [tt1])
assert not consistent_equal(call, fewer_args)
reordered_args = relay.Call(
v1, [relay.const(2), relay.const(1), relay.Tuple([]), v2], attr1, [tt1]
)
assert not consistent_equal(call, reordered_args)
different_args = relay.Call(v1, [relay.const(1), relay.const(2), relay.const(3)], attr1, [tt1])
assert not consistent_equal(call, different_args)
more_args = relay.Call(
v1,
[relay.const(1), relay.const(2), v2, relay.Tuple([]), relay.const(3), relay.const(4)],
attr1,
[tt1],
)
assert not consistent_equal(call, more_args)
different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
assert not consistent_equal(call, different_attrs)
same_attrs = relay.Call(v1, basic_args, attr1_same, [tt1])
assert consistent_equal(call, same_attrs)
no_type_args = relay.Call(v1, basic_args, attr1)
assert not consistent_equal(call, no_type_args)
more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
assert not consistent_equal(call, more_type_args)
different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
assert not consistent_equal(call, different_type_arg)
def test_let_sequal():
tt1 = relay.TensorType((), "float32")
tt2 = relay.TensorType((), "int8")
v1 = relay.Var("v1")
v1_wtype = relay.Var("v1", tt1)
v2 = relay.Var("v2")
v3 = relay.Var("v3")
let = relay.Let(v1, relay.const(2), v1)
mapped = relay.Let(v2, relay.const(2), v2)
assert consistent_equal(let, mapped)
mismatched_var = relay.Let(v2, relay.const(2), v3)
assert not consistent_equal(let, mismatched_var)
different_value = relay.Let(v2, relay.const(3), v2)
assert not consistent_equal(let, different_value)
different_body = relay.Let(v2, relay.const(3), relay.const(12))
assert not consistent_equal(let, different_body)
# specified types must match
let_with_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
same_type = relay.Let(v1_wtype, relay.const(2), v1_wtype)
assert consistent_equal(let_with_type, same_type)
assert not consistent_equal(let, let_with_type)
v2 = relay.Var("v1", tt2)
different_type = relay.Let(v2, relay.const(2), v2)
assert not consistent_equal(let_with_type, different_type)
def test_if_sequal():
v1 = relay.Var("v1")
v2 = relay.Var("v2")
if_sample = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
same = relay.If(v1, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert consistent_equal(if_sample, same)
different_cond = relay.If(v2, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
assert not consistent_equal(if_sample, different_cond)
different_true = relay.If(v1, relay.const(2), relay.Tuple([relay.const(2), relay.const(3)]))
assert not consistent_equal(if_sample, different_true)
different_false = relay.If(v1, relay.const(1), relay.Tuple([]))
assert not consistent_equal(if_sample, different_false)
def test_constructor_sequal():
# smoke test: it should be pointer equality
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
_, cons, nil = p.mod.get_type("List")
assert consistent_equal(nil, nil)
assert consistent_equal(cons, cons)
assert not consistent_equal(nil, cons)
def test_match_sequal():
mod = tvm.IRModule()
p = relay.prelude.Prelude(mod)
_, cons, nil = p.mod.get_type("List")
_, none, some = p.mod.get_type("Option")
x = relay.Var("x")
y = relay.Var("y")
nil_case = relay.Clause(relay.PatternConstructor(nil), nil())
cons_case = relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(x), relay.PatternVar(y)]), cons(x, y)
)
z = relay.Var("z")
a = relay.Var("a")
equivalent_cons = relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(z), relay.PatternVar(a)]), cons(z, a)
)
data = cons(relay.const(1), cons(relay.const(2), nil()))
match = relay.Match(data, [nil_case, cons_case])
equivalent = relay.Match(data, [nil_case, equivalent_cons])
empty = relay.Match(data, [])
no_cons = relay.Match(data, [nil_case])
no_nil = relay.Match(data, [cons_case])
different_data = relay.Match(nil(), [nil_case, cons_case])
different_order = relay.Match(data, [cons_case, nil_case])
different_nil = relay.Match(
data, [relay.Clause(relay.PatternConstructor(nil), cons(nil(), nil())), cons_case]
)
different_cons = relay.Match(
data,
[
nil_case,
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
nil(),
),
],
)
another_case = relay.Match(
data, [nil_case, cons_case, relay.Clause(relay.PatternWildcard(), nil())]
)
wrong_constructors = relay.Match(
data,
[
relay.Clause(relay.PatternConstructor(none), nil()),
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), cons(x, nil())),
],
)
tvm.ir.assert_structural_equal(match, match)
assert consistent_equal(match, match)
assert consistent_equal(match, equivalent)
assert not consistent_equal(match, no_cons)
assert not consistent_equal(match, no_nil)
assert not consistent_equal(match, empty)
assert not consistent_equal(match, different_data)
assert not consistent_equal(match, different_order)
assert not consistent_equal(match, different_nil)
assert not consistent_equal(match, different_cons)
assert not consistent_equal(match, another_case)
assert not consistent_equal(match, wrong_constructors)
def test_op_sequal():
# only checks names
op1 = relay.op.get("add")
op2 = relay.op.get("add")
assert consistent_equal(op1, op2)
op3 = relay.op.get("take")
assert not consistent_equal(op1, op3)
def test_graph_equal():
x = relay.var("x")
y0 = relay.add(x, x)
z0 = relay.add(y0, y0)
y1 = relay.add(x, x)
z1 = relay.add(y1, y1)
z3 = relay.add(relay.add(x, x), relay.add(x, x))
assert consistent_equal(z0, z1)
assert consistent_equal(z0, z1)
# z3's dataflow structure is different from z0's:
# z0 is computed from a shared y0 node, while z3 duplicates the add,
# so Relay views them as different programs.
# Check the difference in the text format.
assert not consistent_equal(z0, z3)
def test_hash_unequal():
x1 = relay.var("x1", shape=(10, 10), dtype="float32")
y1 = relay.var("y1", shape=(10, 10), dtype="float32")
func1 = relay.Function([x1, y1], relay.add(x1, y1))
# func2 is exactly same structure with same variables shapes and dtypes
x2 = relay.var("x2", shape=(10, 10), dtype="float32")
y2 = relay.var("y2", shape=(10, 10), dtype="float32")
func2 = relay.Function([x2, y2], relay.add(x2, y2))
assert consistent_equal(func1, func2)
# func3 is same as func1 but with different var shapes
x3 = relay.var("x3", shape=(20, 10), dtype="float32")
y3 = relay.var("y3", shape=(20, 10), dtype="float32")
func3 = relay.Function([x3, y3], relay.add(x3, y3))
assert not consistent_equal(func1, func3)
def test_tuple_match():
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
x = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
a = relay.Var("a")
b = relay.Var("b")
clause = relay.Clause(relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b)
y = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])
assert consistent_equal(x, y)
def test_fn_attribute():
# create function that performs add
a = relay.var("a", shape=(10, 10))
b = relay.var("b", shape=(10, 10))
add = relay.add(a, b)
add_fn = relay.Function([a, b], add)
add_fn = run_opt_pass(add_fn, relay.transform.InferType())
# create function that performs add with test attribute
c = relay.var("c", shape=(10, 10))
d = relay.var("d", shape=(10, 10))
add_1 = relay.add(c, d)
add_1_fn = relay.Function([c, d], add_1)
add_1_fn = add_1_fn.with_attr("TestAttribute", "test")
add_1_fn = run_opt_pass(add_1_fn, relay.transform.InferType())
assert not consistent_equal(add_1_fn, add_fn)
assert not consistent_equal(add_fn, add_1_fn)
def test_fn_vid_map():
def get_fn(with_vid):
x = relay.var("x", shape=(10,), dtype="float32")
f = relay.Function([x], x).with_attr("dict", {x.vid: 1} if with_vid else {x: 1})
return f
assert consistent_equal(get_fn(True), get_fn(True))
assert consistent_equal(get_fn(False), get_fn(False))
def test_lets():
shape = (5, 5)
def func1():
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a0 = sb.let("a0", relay.add(p0, relay.const(1)))
a1 = sb.let("a1", relay.add(p1, relay.const(1)))
a2 = sb.let("a2", relay.add(a0, a1))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
def func2():
# Alpha-converted bindings are structurally equal
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a1 = sb.let("a1", relay.add(p0, relay.const(1)))
a0 = sb.let("a0", relay.add(p1, relay.const(1)))
a2 = sb.let("a2", relay.add(a1, a0))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
def func3():
# But changing the order of bindings is not structurally equal
# (even though algebraically equal)
sb = relay.ScopeBuilder()
p0 = relay.var("p0", shape=shape)
p1 = relay.var("p1", shape=shape)
a1 = sb.let("a1", relay.add(p1, relay.const(1)))
a0 = sb.let("a0", relay.add(p0, relay.const(1)))
a2 = sb.let("a2", relay.add(a1, a0))
sb.ret(a2)
return relay.Function([p0, p1], sb.get())
assert tvm.ir.structural_equal(func1(), func2())
assert not tvm.ir.structural_equal(func1(), func3())
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_text_printer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay import testing
import numpy as np
from tvm.relay import Expr
from tvm.relay.analysis import free_vars
import pytest
DEBUG_PRINT = False
SEMVER = '#[version = "0.0.5"]\n'
def astext(program, unify_free_vars=False):
text = program.astext()
if isinstance(program, Expr):
roundtrip_program = tvm.parser.parse_expr(text)
else:
roundtrip_program = tvm.parser.fromtext(text)
tvm.ir.assert_structural_equal(roundtrip_program, program, map_free_vars=True)
return text
def show(text):
if DEBUG_PRINT:
print("---------------------------")
print(text)
def assert_prints_as(expr, str):
assert astext(expr) == SEMVER + str
def test_scalars():
assert_prints_as(relay.const(42, "int16"), "42i16")
assert_prints_as(relay.const(42, "int32"), "42")
assert_prints_as(relay.const(42, "int64"), "42i64")
assert_prints_as(relay.const(3.0, "float16"), "3f16")
assert_prints_as(relay.const(3.0, "float32"), "3f")
assert_prints_as(relay.const(3.0, "float64"), "3f64")
def test_large_graph():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
for i in range(int(9e4)):
z = relay.add(z, one)
f = relay.Function([x, y], z)
show(astext(f))
def test_func():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.add(z, z)
f = relay.Function([x, y], z)
show(astext(z))
show(astext(f))
def test_mod():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
z = relay.add(x, y)
z = relay.add(z, z)
f = relay.Function([x, y], z)
mod = tvm.IRModule()
mod["myf"] = f
mod = relay.transform.InferType()(mod)
text = astext(mod)
assert "def @myf" in text
assert "def @myf" in str(mod)
assert "add(%0, %0) /* ty=float32 */" in text
assert "add(%0, %0) /* ty=float32 */" in str(mod)
show(mod.astext(annotate=lambda x: str(x.checked_type.dtype) if type(x) == relay.Call else ""))
show(text)
def test_meta_data():
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", shape=(n, c, h, w))
w = relay.var("w")
z = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
f = relay.Function([x, w], z)
text = astext(f, unify_free_vars=True)
text_no_meta = str(f)
assert "channels=2" in text
assert "channels=2" in text_no_meta
assert "meta[tir.SizeVar][0]" in text
assert "meta[tir.SizeVar][0]" in text_no_meta
assert "type_key" in text
assert "type_key" not in text_no_meta
text = astext(relay.const([1, 2, 3]))
assert "meta[relay.Constant][0]" in text
def test_call_attrs():
x = relay.var("x")
# non default args
z = relay.nn.softmax(x, axis=2)
assert "axis=2" in astext(z)
# default args
z = relay.nn.softmax(x)
assert "softmax(%x)" in astext(z)
# non default args
z = relay.expand_dims(x, axis=2, num_newaxis=2)
assert "num_newaxis=2" in astext(z)
def test_let_if_scope():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
cond = relay.var("cond", "bool")
sb = relay.ScopeBuilder()
with sb.if_scope(cond):
v1 = sb.let("v", relay.const(1, "float32"))
v2 = sb.let("v", x)
sb.ret(relay.subtract(v1, v2))
with sb.else_scope():
v3 = relay.var("v")
let2 = relay.Let(v3, y, v3)
sb.ret(relay.add(let2, let2))
result = sb.get()
f = relay.Function([x, y, cond], result)
text = astext(f)
assert text.count("{") == 3
assert "%cond: bool" in text
show(astext(f))
def test_variable_name():
# avoid a pure-number identifier even if the name hint is a pure number
v1 = relay.var("1")
assert "%v1" in astext(v1)
def test_mlp():
net, _ = tvm.relay.testing.mlp.get_workload(batch_size=1)
astext(net)
def test_resnet():
net, _ = tvm.relay.testing.resnet.get_workload(batch_size=1)
astext(net)
def test_mobilenet():
net, _ = tvm.relay.testing.mobilenet.get_workload(batch_size=1)
astext(net)
def test_dqn():
net, _ = tvm.relay.testing.dqn.get_workload(batch_size=1)
astext(net)
def test_dcgan():
net, _ = tvm.relay.testing.dcgan.get_workload(batch_size=1)
astext(net)
def test_lstm():
net, _ = tvm.relay.testing.lstm.get_workload(1, 1)
astext(net)
net, _ = tvm.relay.testing.lstm.get_workload(4, 4)
astext(net)
def test_inception_v3():
net, _ = tvm.relay.testing.inception_v3.get_workload(batch_size=1)
astext(net)
def test_squeezenet():
for version in ["1.0", "1.1"]:
net, _ = tvm.relay.testing.squeezenet.get_workload(batch_size=1, version=version)
astext(net)
def test_densenet():
net, _ = tvm.relay.testing.densenet.get_workload(batch_size=1)
astext(net)
def test_call_node_order():
x = relay.var("x")
y = relay.var("y")
prog = relay.Call(
relay.Function([x], x), [relay.Call(relay.Function([y], y), [relay.const(1)])]
)
assert astext(prog) == SEMVER + (
"%0 = fn (%y) {\n"
" %y\n"
"};\n"
"%1 = %0(1);\n"
"%2 = fn (%x) {\n"
" %x\n"
"};\n"
"%2(%1)"
)
def test_let_inlining():
tup = relay.Tuple([relay.const(0), relay.const(0)])
x = relay.var("x")
assert astext(relay.Let(x, tup, tup)) == SEMVER + ("%0 = (0, 0);\n" "let %x = %0;\n" "%0")
assert astext(relay.Let(x, tup, x)) == SEMVER + ("let %x = (0, 0);\n" "%x")
def test_zeros():
x = relay.op.zeros([], "float32")
astext(x)
def test_unapplied_constructor():
type_def_str = r"""
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
main_def_str = r"""
def @main[A]() -> fn (A, List[A]) -> List[A] {
Cons
}
"""
mod = tvm.parser.parse(SEMVER + type_def_str + main_def_str)
mod_str = str(mod)
# ensure constructors are printed correctly in type definitions (with their
# signature) and as exprs (without their signature)
assert type_def_str.strip() in mod_str
assert main_def_str.strip() in mod_str
def test_null_attribute():
x = relay.var("x")
y = relay.var("y")
z = relay.Function([x], y)
z = z.with_attr("TestAttribute", None)
txt = astext(z)
assert "TestAttribute=None" in txt
def test_span():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add0"), 0, 0, 0, 0)
)
z = relay.add(z, z)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add1"), 0, 0, 0, 0)
)
f = relay.Function([x, y], z)
txt = astext(f)
assert "Add0" in txt
assert "Add1" in txt
def test_optional_info():
c = relay.const(1)
call = relay.add(c, c)
m = tvm.IRModule.from_expr(call)
m = relay.transform.InferType()(m)
txt = astext(m)
assert txt.count("/* ty=int32 */") == 3
def test_slash_in_identifier():
x = relay.var("base/x")
y = relay.var("base/y")
z = x + y
txt = astext(z)
assert "base/x" in txt
assert "base/y" in txt
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_ir_well_formed.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import well_formed
from tvm.relay.prelude import Prelude
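# well_formed rejects programs that bind the same variable more than once; the tests
# below construct expressions that either satisfy or deliberately violate that rule.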
def test_let():
x = relay.Var("x")
assert well_formed(x)
v = relay.Constant(tvm.nd.array(10))
ty = None
let = relay.Let(x, v, x)
assert well_formed(let)
assert not well_formed(relay.Let(x, v, let))
f = relay.Function([x], x, ty)
assert well_formed(f)
assert well_formed(relay.Let(relay.Var("y"), f, relay.Let(relay.Var("z"), f, v)))
def test_tuple():
x = relay.Var("x")
assert well_formed(x)
v = relay.Constant(tvm.nd.array(10))
let = relay.Let(x, v, x)
assert well_formed(let)
assert well_formed(relay.Tuple([v, v]))
assert not well_formed(relay.Tuple([let, relay.Let(x, v, x)]))
def test_tuple_get_item():
t = relay.Var("t")
assert well_formed(relay.TupleGetItem(t, 2))
def test_adt():
mod = tvm.IRModule()
p = Prelude(mod)
_, none, some = p.mod.get_type("Option")
x = relay.Var("x")
some_case = relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), x)
default_case = relay.Clause(relay.PatternVar(x), x)
m0 = relay.Match(none(), [default_case])
m1 = relay.Match(none(), [some_case, default_case])
assert well_formed(m0)
assert not well_formed(m1)
if __name__ == "__main__":
test_let()
test_tuple()
test_tuple_get_item()
test_adt()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_json_compact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm import te
import json
# 0.6 BACKWARDS COMPATIBILITY TESTS
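# Each test hand-writes a 0.6-era JSON node graph and checks that tvm.ir.load_json
# upgrades it to the corresponding current IR node type.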
def test_type_var():
# type var in 0.6
nodes = [
{"type_key": ""},
{"type_key": "relay.TypeVar", "attrs": {"kind": "0", "span": "0", "var": "2"}},
{"type_key": "Variable", "attrs": {"dtype": "int32", "name": "in0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.TypeVar)
assert tvar.name_hint == "in0"
nodes[1]["type_key"] = "relay.GlobalTypeVar"
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalTypeVar)
assert tvar.name_hint == "in0"
def test_var():
# relay.Var in 0.6
nodes = [
{"type_key": ""},
{
"type_key": "relay.Var",
"attrs": {
"_checked_type_": "0",
"span": "0",
"type_annotation": "0",
"vid": "2",
},
},
{"type_key": "relay.Id", "attrs": {"name_hint": "a3"}},
{"type_key": "relay.TensorType", "attrs": {"dtype": "float32", "shape": "4", "span": "0"}},
{"type_key": "Array", "data": [5, 6]},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "16", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "8", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, relay.Var)
assert tvar.name_hint == "a3"
def test_incomplete_type():
nodes = [
{"type_key": ""},
{"type_key": "relay.IncompleteType", "attrs": {"kind": "0", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.IncompleteType)
def test_func_tuple_type():
nodes = [
{"type_key": ""},
{
"type_key": "relay.FuncType",
"attrs": {
"arg_types": "2",
"ret_type": "3",
"span": "0",
"type_constraints": "6",
"type_params": "5",
},
},
{"type_key": "Array"},
{"type_key": "relay.TupleType", "attrs": {"fields": "4", "span": "0"}},
{"type_key": "Array"},
{"type_key": "Array"},
{"type_key": "Array"},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.FuncType)
def test_global_var():
nodes = [
{"type_key": ""},
{
"type_key": "relay.GlobalVar",
"attrs": {"_checked_type_": "0", "name_hint": "x", "span": "0"},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalVar)
nodes = [
{"type_key": ""},
{
"type_key": "GlobalVar",
"attrs": {"_checked_type_": "0", "name_hint": "x", "span": "0"},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
tvar = tvm.ir.load_json(json.dumps(data))
assert isinstance(tvar, tvm.ir.GlobalVar)
def test_op():
nodes = [{"type_key": ""}, {"type_key": "relay.Op", "global_key": "nn.conv2d"}]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
op = tvm.ir.load_json(json.dumps(data))
assert op == relay.op.get("nn.conv2d")
def test_tir_var():
nodes = [
{"type_key": ""},
{"type_key": "Variable", "attrs": {"dtype": "int32", "name": "x", "span": "0"}},
{"type_key": "SizeVar", "attrs": {"dtype": "int32", "name": "y", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
x = tvm.ir.load_json(json.dumps(data))
assert isinstance(x, tvm.tir.Var)
assert x.name == "x"
data["root"] = 2
y = tvm.ir.load_json(json.dumps(data))
assert isinstance(y, tvm.tir.SizeVar)
assert y.name == "y"
def test_str_map():
nodes = [
{"type_key": ""},
{"type_key": "StrMap", "keys": ["z", "x"], "data": [2, 3]},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
{"type_key": "Max", "attrs": {"a": "4", "b": "10", "dtype": "int32", "span": "0"}},
{"type_key": "Add", "attrs": {"a": "5", "b": "9", "dtype": "int32", "span": "0"}},
{"type_key": "Add", "attrs": {"a": "6", "b": "8", "dtype": "int32", "span": "0"}},
{
"type_key": "tir.Var",
"attrs": {"dtype": "int32", "name": "7", "type_annotation": "0", "span": "0"},
},
{"type_key": "runtime.String", "repr_str": "x"},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "1", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
{"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "100", "span": "0"}},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.6.0"},
"b64ndarrays": [],
}
x = tvm.ir.load_json(json.dumps(data))
assert isinstance(x, tvm.ir.container.Map)
assert len(x) == 2
assert "x" in x
assert "z" in x
assert bool(x["z"] == 2)
# 0.7 BACKWARDS COMPATIBILITY TESTS
def test_irmodule_attributes():
nodes = [
{"type_key": ""},
{
"type_key": "IRModule",
"attrs": {
"functions": "0",
"global_type_var_map_": "0",
"global_var_map_": "0",
"source_map": "0",
"type_definitions": "0",
},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.7.0"},
"b64ndarrays": [],
}
mod = tvm.ir.load_json(json.dumps(data))
assert isinstance(mod, tvm.ir.IRModule)
    # IRModule attributes should default to null
assert not mod.attrs
# 0.8 BACKWARDS COMPATIBILITY TESTS
def test_virtual_device():
nodes = [
{"type_key": ""},
{
"type_key": "relay.Function",
"attrs": {
"_checked_type_": "0",
"attrs": "0",
"body": "0",
"params": "0",
"ret_type": "0",
"span": "0",
"type_params": "0",
},
},
]
data = {
"root": 1,
"nodes": nodes,
"attrs": {"tvm_version": "0.8.0"},
"b64ndarrays": [],
}
func = tvm.ir.load_json(json.dumps(data))
assert isinstance(func, relay.Function)
assert not func.virtual_device_
if __name__ == "__main__":
test_op()
test_type_var()
test_var()
test_incomplete_type()
test_func_tuple_type()
test_global_var()
test_tir_var()
    test_str_map()
    test_irmodule_attributes()
    test_virtual_device()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_json_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for JSON codegen and runtime."""
import os
import sys
import numpy as np
import tvm
import tvm.relay.op as reg
import tvm.relay.testing
from tvm import relay, runtime
from tvm.contrib import utils
from tvm.relay import transform
from tvm.relay.backend import te_compiler
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import get_pattern_table
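# Mark a Relay function for offload to an external (BYOC) codegen: "Compiler"
# selects the codegen ("dnnl" in these tests) and "global_symbol" is the symbol
# under which the generated runtime module is registered.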
def set_func_attr(func, compile_name, symbol_name):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compile_name)
func = func.with_attr("global_symbol", symbol_name)
return func
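# Compare a DNNL-partitioned module against an unpartitioned reference: the
# reference result comes from the regular TVM build flow, and the partitioned
# module is then executed through both the VM and the graph executor.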
def check_result(
mod, ref_mod, map_inputs, out_shape, tol=1e-5, target="llvm", device=tvm.cpu(), params=None
):
if sys.platform == "win32":
print("Skip test on Windows for now")
return
# Run the reference result
te_compiler.get().clear()
with tvm.transform.PassContext(opt_level=3):
json, lib, param = relay.build(ref_mod, target=target, params=params)
rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
ref_result = out.numpy()
def check_vm_result():
te_compiler.get().clear()
with relay.build_config(opt_level=3):
exe = relay.vm.compile(mod, target=target, params=params)
code, lib = exe.save()
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, device)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
def check_graph_executor_result():
te_compiler.get().clear()
with relay.build_config(opt_level=3):
json, lib, param = relay.build(mod, target=target, params=params)
rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, device=device)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
check_vm_result()
check_graph_executor_result()
def test_conv2d():
"""Test a subgraph with a single conv2d operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
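    # Each helper returns (partitioned module, reference module, inputs, output shape).
    # The partitioned module routes the conv2d through a "dnnl" external function,
    # while the reference module runs the identical conv2d with the built-in flow.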
def conv2d_direct():
dtype = "float32"
ishape = (1, 1, 99, 12)
w1shape = (54, 1, 3, 3)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(
data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
)
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w1shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(
data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
)
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 54, 50, 6)
def group_conv2d():
dtype = "float32"
ishape = (1, 32, 14, 14)
w2shape = (32, 1, 3, 3)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w2shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w2shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w_data}, (1, 32, 14, 14)
for mod, ref_mod, map_inputs, out_shape in [conv2d_direct(), group_conv2d()]:
check_result(mod, ref_mod, map_inputs, out_shape, tol=1e-5)
def test_add():
"""Test a subgraph with a single add operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (10, 10)
def gen_add():
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
func = relay.Function([data0, data1], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
main_f = relay.Function([data0, data1], glb_var(data0, data1))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
main_f = relay.Function([data0, data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_add()
data0 = np.random.uniform(0, 1, shape).astype(dtype)
data1 = np.random.uniform(0, 1, shape).astype(dtype)
check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_multiply():
"""Test a subgraph with a single add operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (10, 10)
def gen_multiply():
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.multiply(data0, data1)
func = relay.Function([data0, data1], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
main_f = relay.Function([data0, data1], glb_var(data0, data1))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.multiply(data0, data1)
main_f = relay.Function([data0, data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_multiply()
data0 = np.random.uniform(0, 1, shape).astype(dtype)
data1 = np.random.uniform(0, 1, shape).astype(dtype)
check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_relu():
"""Test a subgraph with a single ReLU operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (1, 32, 14, 14)
def gen_relu(shape):
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
func = relay.Function([data0], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
main_f = relay.Function([data0], glb_var(data0))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
main_f = relay.Function([data0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
def check(shape):
mod, ref_mod = gen_relu(shape)
data0 = np.random.uniform(-1, 1, shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data0": data0,
},
shape,
tol=1e-5,
)
check(shape=(1, 32, 14, 14))
check(shape=(1, 32))
def test_dense():
"""Test a subgraph with a single dense operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
a_shape = (1, 512)
b_shape = (1024, 512)
def gen_dense():
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
func = relay.Function([a, b], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
main_f = relay.Function([a, b], glb_var(a, b))
mod["main"] = main_f
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
main_f = relay.Function([a, b], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_dense()
data_a = np.random.uniform(0, 1, a_shape).astype(dtype)
data_b = np.random.uniform(0, 1, b_shape).astype(dtype)
check_result(mod, ref_mod, {"A": data_a, "B": data_b}, (1, 1024), tol=1e-5)
def test_bn():
"""Test a subgraph with a single batch_norm operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
d_shape = (1, 8)
c_shape = (8,)
def gen_bn():
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
main_f = relay.Function(
[data, gamma, beta, moving_mean, moving_var],
glb_var(data, gamma, beta, moving_mean, moving_var),
)
mod["main"] = main_f
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
main_f = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_bn()
data = np.random.uniform(-1, 1, d_shape).astype(dtype)
gamma = np.random.uniform(-1, 1, c_shape).astype(dtype)
beta = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_mean = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_var = np.random.uniform(-1, 1, c_shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"gamma": gamma,
"beta": beta,
"moving_mean": moving_mean,
"moving_var": moving_var,
},
d_shape,
tol=1e-5,
)
def test_multiple_ops():
"""Test a subgraph with multiple operators."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
w2shape = (64, 32, 5, 5)
def get_net():
data = relay.var("data", relay.TensorType(ishape, dtype))
w1 = relay.var("w1", relay.TensorType(w1shape, dtype))
w2 = relay.var("w2", relay.TensorType(w2shape, dtype))
layer = relay.nn.conv2d(data=data, weight=w1, kernel_size=(3, 3), padding=(1, 1))
layer = relay.nn.relu(layer)
layer = relay.nn.conv2d(data=layer, weight=w2, kernel_size=(5, 5), padding=(2, 2))
layer = relay.nn.relu(layer)
main_f = relay.Function([data, w1, w2], layer)
mod = tvm.IRModule()
mod["main"] = main_f
return mod
    def get_partitioned_mod(mod):
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
byoc_pass = tvm.transform.Sequential(
[
remove_bn_pass,
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
return byoc_pass(mod)
ref_mod = get_net()
    mod = get_partitioned_mod(ref_mod)
data = np.random.uniform(0, 1, ishape).astype(dtype)
w1 = np.random.uniform(0, 1, w1shape).astype(dtype)
w2 = np.random.uniform(0, 1, w2shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"w1": w1,
"w2": w2,
},
(1, 64, 14, 14),
tol=1e-5,
)
def test_composite():
"""Test DNNL patterns and there composite functions."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
def conv2d_relu():
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
# Composite function
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
func = relay.Function([in_1, in_2], relu)
func = func.with_attr("Composite", "dnnl.conv2d_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
# Partition function
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2])
p_func = relay.Function([arg_1, arg_2], call)
p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
# Main function
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
main_func = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_func
mod = transform.InferType()(mod)
# Reference module
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
main_func = relay.Function([data, weight], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 32, 14, 14)
def conv2d_bias_relu():
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
bshape = (32, 1, 1)
# Composite function
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
in_3 = relay.var("in_3", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, in_3)
relu = relay.nn.relu(add)
func = relay.Function([in_1, in_2, in_3], relu)
func = func.with_attr("Composite", "dnnl.conv2d_bias_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_add_nn.relu_")
# Partition function
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
arg_3 = relay.var("arg_3", shape=bshape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2, arg_3])
p_func = relay.Function([arg_1, arg_2, arg_3], call)
p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
# Main function
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
main_func = relay.Function([data, weight, bias], glb_var(data, weight, bias))
mod["main"] = main_func
mod = transform.InferType()(mod)
# Reference module
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, bias)
relu = relay.nn.relu(add)
main_func = relay.Function([data, weight, bias], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
b_data = np.random.uniform(0, 1, bshape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data, "bias": b_data}, (1, 32, 14, 14)
for mod, ref_mod, input_maps, out_shape in [conv2d_relu(), conv2d_bias_relu()]:
check_result(mod, ref_mod, input_maps, out_shape, tol=1e-5)
def test_constant():
"""Test the subgraph with (var, const, ...) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
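    # Build a conv2d + batch_norm + relu workload, bind its parameters as constants,
    # then verify the DNNL-partitioned module against the reference once batch_norm
    # is simplified and the constants are folded.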
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, 32, 3, 3)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3), padding=(1, 1))
bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
out = bn_output[0]
out = relay.nn.relu(out)
func = relay.Function(relay.analysis.free_vars(out), out)
ref_mod, params = tvm.relay.testing.create_workload(func)
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
dnnl_patterns = get_pattern_table("dnnl")
composite_partition = tvm.transform.Sequential(
[
transform.MergeComposite(dnnl_patterns),
transform.AnnotateTarget("dnnl"),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
ref_mod = remove_bn_pass(ref_mod)
mod = composite_partition(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"data": i_data}, (1, 32, 14, 14), tol=1e-5)
def test_partial_constant():
"""Test the subgraph with (const, var, const, var) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (10, 10)
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=ishape, dtype=dtype)
in_3 = relay.var("in_3", shape=ishape, dtype=dtype)
in_4 = relay.var("in_4", shape=ishape, dtype=dtype)
add1 = relay.add(in_1, in_2)
add2 = relay.add(add1, in_3)
add3 = relay.add(add2, in_3)
add4 = relay.add(add3, in_3)
func = relay.Function([in_1, in_2, in_3, in_4], add4)
ref_mod = tvm.IRModule.from_expr(func)
ref_mod = relay.transform.InferType()(ref_mod)
data1 = np.random.uniform(0, 1, ishape).astype(dtype)
data3 = np.random.uniform(0, 1, ishape).astype(dtype)
params = {
"in_1": tvm.nd.array(data1, device=tvm.cpu(0)),
"in_3": tvm.nd.array(data3, device=tvm.cpu(0)),
}
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
opt_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
mod = opt_pass(ref_mod)
data2 = np.random.uniform(0, 1, ishape).astype(dtype)
data4 = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"in_2": data2, "in_4": data4}, (10, 10), tol=1e-5)
if __name__ == "__main__":
test_conv2d()
test_add()
test_multiply()
test_relu()
test_dense()
test_bn()
test_multiple_ops()
test_composite()
test_constant()
test_partial_constant()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_layer_count.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay.testing import resnet
from tvm.relay.analysis import count_layers
def test_layer_count():
def verify(num_layers):
# Load a resnet with a known number of layers.
mod, _ = resnet.get_workload(num_layers=num_layers)
# Count the number of conv and dense layers.
count = count_layers(mod, valid_ops=["nn.conv2d", "nn.dense"])
assert count == num_layers
verify(18)
verify(50)
if __name__ == "__main__":
test_layer_count()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_memory_passes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
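# Evaluate a function on the VM twice -- once under a PassContext that disables
# the MemoryPlan pass and once with the default pipeline -- and check both
# results against a plain numpy implementation of the same computation.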
def check_memory_plan(func, check_fn):
# Build Module
    mod = tvm.IRModule.from_expr(func)
# Convert arguments.
args = []
for param in func.params:
param = param.type_annotation
sh = [int(sh) for sh in param.shape]
data = np.random.rand(*sh).astype(param.dtype)
args.append(tvm.nd.array(data))
# TODO(mbs): Why does the executor need to be shared? Seems wrong.
ex = relay.create_executor("vm", mod)
# Compute without memory planning.
no_plan_result = ex.evaluate()(*args)
# Compute with memory planning.
with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]):
plan_result = ex.evaluate()(*args)
# Compute Python result.
py_res = check_fn(*[arg.numpy() for arg in args])
# First check that the two VM results agree.
np.testing.assert_allclose(no_plan_result.numpy(), plan_result.numpy())
# Finally check that the results match the Python result.
np.testing.assert_allclose(plan_result.numpy(), py_res)
def storage_type(mod):
return relay.TypeCall(mod.get_global_type_var("Storage"), [])
def test_tyck_alloc_storage():
mod = tvm.IRModule()
mod.import_from_std("core.rly")
def test_tyck_alloc_tensor():
mod = tvm.IRModule()
mod.import_from_std("core.rly")
sto = relay.Var("x", storage_type(mod))
sh = relay.const(np.array([1, 2]), dtype="int64")
at = relay.op.memory.alloc_tensor(sto, relay.const(0, dtype="int64"), sh)
mod["main"] = relay.Function([sto], at)
relay.transform.InferType()(mod)
def check_add(x):
return x + x
def test_add():
x = relay.var("x", shape=(2,))
z = x + x
func = relay.Function(
[
x,
],
z,
)
check_memory_plan(func, check_add)
def check_add_sub(x, y):
z = x + x
return z - y
def test_add_sub():
x = relay.var("x", shape=(10,))
y = relay.var("y", shape=(10,))
z = x + x
z = z - y
func = relay.Function([x, y], z)
check_memory_plan(func, check_add_sub)
def check_no_fuse(x, y, w):
z = x + y
return np.matmul(z, np.transpose(w))
def test_no_fuse():
x = relay.var("x", shape=(5, 1))
y = relay.var("y", shape=(5, 1))
w = relay.var("w", shape=(5, 1))
z = x + y
out = relay.op.nn.dense(z, w)
func = relay.Function([x, y, w], out)
check_memory_plan(func, check_no_fuse)
if __name__ == "__main__":
test_tyck_alloc_tensor()
test_add()
test_add_sub()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_name_mangling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
import tvm.relay as relay
import tvm.relay.backend.utils as utils
import pytest
def test_mangle_mod_name():
assert utils.mangle_module_name("default") == "tvmgen_default"
assert utils.mangle_module_name("ccompiler") == "tvmgen_ccompiler"
assert utils.mangle_module_name("1234"), "tvmgen_1234"
assert utils.mangle_module_name(""), "tvmgen"
assert utils.mangle_module_name(None), "tvmgen"
    with pytest.raises(ValueError):
        utils.mangle_module_name("\u018e")
    with pytest.raises(ValueError):
        utils.mangle_module_name("\xf1")
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_name_supply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import relay
from tvm.ir import GlobalVar, structural_equal
from tvm.ir.supply import NameSupply
from tvm.ir.supply import GlobalVarSupply
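# NameSupply generates names that are unique under a given prefix; GlobalVarSupply
# does the same for GlobalVars and can be seeded from a NameSupply or an IRModule.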
def test_name_supply():
name_supply = NameSupply("prefix")
name_supply.reserve_name("test")
assert name_supply.contains_name("test")
assert name_supply.fresh_name("test") == "prefix_test_1"
assert name_supply.contains_name("test_1")
assert not name_supply.contains_name("test_1", False)
assert not name_supply.contains_name("test_2")
def test_global_var_supply_from_none():
var_supply = GlobalVarSupply()
global_var = GlobalVar("test")
var_supply.reserve_global(global_var)
assert structural_equal(var_supply.unique_global_for("test"), global_var)
assert not structural_equal(var_supply.fresh_global("test"), global_var)
def test_global_var_supply_from_name_supply():
name_supply = NameSupply("prefix")
var_supply = GlobalVarSupply(name_supply)
global_var = GlobalVar("test")
var_supply.reserve_global(global_var)
assert structural_equal(var_supply.unique_global_for("test", False), global_var)
assert not structural_equal(var_supply.unique_global_for("test"), global_var)
def test_global_var_supply_from_ir_mod():
x = relay.var("x")
y = relay.var("y")
mod = tvm.IRModule()
global_var = GlobalVar("test")
mod[global_var] = relay.Function([x, y], relay.add(x, y))
var_supply = GlobalVarSupply(mod)
second_global_var = var_supply.fresh_global("test", False)
assert structural_equal(var_supply.unique_global_for("test", False), global_var)
assert not structural_equal(var_supply.unique_global_for("test"), global_var)
assert not structural_equal(second_global_var, global_var)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_name_transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License" you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm import TVMError
from tvm.relay.backend.name_transforms import (
to_c_function_style,
to_c_variable_style,
to_c_constant_style,
prefix_name,
prefix_generated_name,
)
from tvm.runtime.name_transforms import sanitize_name
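# These transforms rewrite TVM-prefixed names into C-style identifiers: CamelCase
# for functions, lower_snake_case for variables, UPPER_SNAKE_CASE for constants.
# sanitize_name replaces characters that are not valid in an identifier with "_".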
def test_to_c_function_style():
assert to_c_function_style("TVM_Woof") == "TVMWoof"
assert to_c_function_style("TVM_woof") == "TVMWoof"
assert to_c_function_style("TVM_woof_woof") == "TVMWoofWoof"
assert to_c_function_style("TVMGen_woof_woof") == "TVMGenWoofWoof"
# Incorrect prefix
with pytest.raises(TVMError, match="Function not TVM prefixed"):
to_c_function_style("Cake_Bakery")
with pytest.raises(TVMError, match="Function name is empty"):
to_c_function_style("")
def test_to_c_variable_style():
assert to_c_variable_style("TVM_Woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof") == "tvm_woof"
assert to_c_variable_style("TVM_woof_Woof") == "tvm_woof_woof"
# Incorrect prefix
with pytest.raises(TVMError, match="Variable not TVM prefixed"):
to_c_variable_style("Cake_Bakery")
with pytest.raises(TVMError, match="Variable name is empty"):
to_c_variable_style("")
def test_to_c_constant_style():
assert to_c_constant_style("TVM_Woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof") == "TVM_WOOF"
assert to_c_constant_style("TVM_woof_Woof") == "TVM_WOOF_WOOF"
with pytest.raises(TVMError, match="Constant not TVM prefixed"):
to_c_constant_style("Cake_Bakery")
with pytest.raises(TVMError):
to_c_constant_style("")
def test_prefix_name():
assert prefix_name("Woof") == "TVM_Woof"
assert prefix_name(["Woof"]) == "TVM_Woof"
assert prefix_name(["woof"]) == "TVM_woof"
assert prefix_name(["woof", "moo"]) == "TVM_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_name([""])
def test_prefix_generated_name():
assert prefix_generated_name("Woof") == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
assert prefix_generated_name(["woof"]) == "TVMGen_woof"
assert prefix_generated_name(["woof", "moo"]) == "TVMGen_woof_moo"
with pytest.raises(TVMError, match="Name is empty"):
prefix_generated_name("")
with pytest.raises(TVMError, match="Name segments empty"):
prefix_generated_name([])
with pytest.raises(TVMError, match="Name segment is empty"):
prefix_generated_name([""])
def test_sanitize_name():
assert sanitize_name("+_+ ") == "____"
assert sanitize_name("input+") == "input_"
assert sanitize_name("input-") == "input_"
assert sanitize_name("input++") == "input__"
assert sanitize_name("woof:1") == "woof_1"
with pytest.raises(TVMError, match="Name is empty"):
sanitize_name("")
def test_combined_logic():
assert (
to_c_function_style(prefix_name(["Device", "target", "Invoke"])) == "TVMDeviceTargetInvoke"
)
assert to_c_function_style(prefix_generated_name(["model", "Run"])) == "TVMGenModelRun"
assert to_c_variable_style(prefix_name(["Device", "target", "t"])) == "tvm_device_target_t"
assert (
to_c_variable_style(prefix_generated_name(["model", "Devices"])) == "tvmgen_model_devices"
)
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_fast_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import scipy
from scipy import special
import tvm
import tvm.testing
import tvm.relay as relay
from tvm import topi
from tvm import te
from tvm.contrib import graph_executor
from tvm.topi import testing
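# With the FastMath pass forced on, exp/erf/tanh/softmax should be lowered to
# their fast_* approximations; each case checks the fused function name in the
# graph JSON and compares the numeric output against a numpy/scipy reference.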
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fastmath(target, dev):
def test_apply(relay_op, name, f_numpy, low, high, step, dtype="float32"):
a_np = np.arange(low, high, step).astype(dtype).reshape((1, -1))
b_np = f_numpy(a_np)
x = relay.var("x", shape=a_np.shape, dtype="float32")
y = relay_op(x)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
graph, lib, params = relay.build(mod, target=target, params=None)
        # Check that the fast-math op has been converted: the fused function name
        # should appear in the graph JSON.
func_name = "tvmgen_default_fused_" + name
        # When tvm.testing.parametrize_targets lists multiple targets, the built
        # function may get a "_1" suffix, so match the name as a substring.
assert func_name in graph
m = graph_executor.create(graph, lib, dev)
# Set inputs
m.set_input("x", tvm.nd.array(a_np, dev))
m.set_input(**params)
# Execute
m.run()
# Get outputs
tvm_output = m.get_output(0)
tvm.testing.assert_allclose(tvm_output.numpy(), b_np, rtol=1e-5, atol=1e-5)
test_apply(relay.exp, "fast_exp", np.exp, low=-88, high=88, step=0.01)
test_apply(relay.erf, "fast_erf", scipy.special.erf, low=-10, high=10, step=0.01)
test_apply(relay.tanh, "fast_tanh", np.tanh, low=-10, high=10, step=0.01)
test_apply(
relay.nn.fast_softmax,
"nn_fast_softmax",
tvm.topi.testing.softmax_python,
low=-10,
high=10,
step=0.01,
)
if __name__ == "__main__":
test_fastmath()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_grad_level1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te, relay
from tvm.relay.testing import check_grad, run_infer_type
from tvm.relay.transform import gradient
executor_kind = tvm.testing.parameter("debug")
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
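# Each config entry pairs a Relay unary op with its analytic gradient: given the
# upstream gradient g, the reference computes g * d(op)/dx, which is compared
# against the gradient produced by relay.transform.gradient.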
class TestUnaryOp:
config = {
"log": (tvm.relay.log, lambda x, g: g * (1 / x)),
"exp": (tvm.relay.exp, lambda x, g: g * np.exp(x)),
"sigmoid": (tvm.relay.sigmoid, lambda x, g: g * sigmoid(x) * (1 - sigmoid(x))),
"tanh": (tvm.relay.tanh, lambda x, g: g * (1 - np.tanh(x) * np.tanh(x))),
"sqrt": (tvm.relay.sqrt, lambda x, g: g * 0.5 * np.power(x, -0.5)),
"abs": (tvm.relay.abs, lambda x, g: np.where(x < 0, -g, g)),
"relu": (relay.nn.relu, lambda x, g: np.where(x < 0, np.zeros_like(x), g)),
"erf": (tvm.relay.erf, lambda x, g: g * (2.0 / (np.pi ** (0.5)) * np.exp(-x * x))),
"cos": (tvm.relay.cos, lambda x, g: g * -1.0 * np.sin(x)),
"sin": (tvm.relay.sin, lambda x, g: g * np.cos(x)),
"tan": (tvm.relay.tan, lambda x, g: g * (1.0 / (np.cos(x) ** 2))),
"atan": (tvm.relay.atan, lambda x, g: g * (1 / (1 + np.power(x, 2.0)))),
"log2": (tvm.relay.log2, lambda x, g: g * (1 / (np.log(2) * x))),
"log10": (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))),
"cosh": (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))),
"sinh": (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))),
"asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x**2) ** (1.0 / 2.0))),
"acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x**2.0) ** (1.0 / 2.0))),
"acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x**2 - 1.0) ** (1.0 / 2.0))),
"asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x**2 + 1.0) ** (1.0 / 2.0))),
"atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x**2 - 1.0))),
}
relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
dtype = tvm.testing.parameter("float32", "float64")
shape = tvm.testing.parameter((10, 4))
def test_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
known_breaks = {
"float32": [
tvm.relay.erf,
tvm.relay.tan,
tvm.relay.atan,
tvm.relay.log10,
tvm.relay.cosh,
tvm.relay.sinh,
tvm.relay.asin,
tvm.relay.acos,
tvm.relay.acosh,
tvm.relay.asinh,
tvm.relay.atanh,
],
"float64": [
tvm.relay.log,
tvm.relay.exp,
tvm.relay.sigmoid,
tvm.relay.tanh,
tvm.relay.sqrt,
tvm.relay.erf,
tvm.relay.cos,
tvm.relay.sin,
tvm.relay.tan,
tvm.relay.atan,
tvm.relay.log2,
tvm.relay.log10,
tvm.relay.cosh,
tvm.relay.sinh,
tvm.relay.asin,
tvm.relay.acos,
tvm.relay.acosh,
tvm.relay.asinh,
tvm.relay.atanh,
],
}
if relay_op in known_breaks[dtype]:
pytest.xfail(f"{dtype} {relay_op.__name__} not yet supported on Vulkan runtime")
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
g = relay.var("g", tp)
y = relay_op(x) * g
fwd_func = relay.Function([x, g], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data_in = np.random.rand(*shape).astype(dtype)
grad_in = np.random.rand(*shape).astype(dtype)
ref_grad_out = ref_func(data_in, grad_in)
op_res, (op_grad, _) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data_in, grad_in)
np.testing.assert_allclose(op_grad.numpy(), ref_grad_out, rtol=0.01)
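# For binary ops the reference returns both partial derivatives (dz/dx, dz/dy),
# so each input gradient of the transformed function is checked separately.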
class TestBinaryOp:
config = {
"add": (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]),
"subtract": (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]),
"multiply": (relay.multiply, lambda x, y: [y, x]),
"divide": (relay.divide, lambda x, y: [1 / y, -x / (y**2)]),
}
relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
dtype = tvm.testing.parameter("float32", "float64")
shape = tvm.testing.parameter((5, 10, 5))
def test_binary_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
t = relay.TensorType(shape, dtype=dtype)
x = relay.var("x", t)
y = relay.var("y", t)
z = relay_op(x, y)
x_data = np.random.rand(*shape).astype(t.dtype)
y_data = np.random.rand(*shape).astype(t.dtype)
ref_grad0, ref_grad1 = ref_func(x_data, y_data)
fwd_func = relay.Function([x, y], z)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
op_res, (op_grad0, op_grad1) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(x_data, y_data)
np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01)
np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01)
def test_softmax_grad(executor_kind, target, dev):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
pytest.xfail("Known failure on vulkan")
data = relay.var("data", relay.TensorType((1, 16), "float64"))
fwd_func = relay.Function([data], relay.nn.softmax(data))
check_grad(fwd_func, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_log_softmax_grad(executor_kind, target, dev):
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
pytest.xfail("Known failure on vulkan")
data = relay.var("data", relay.TensorType((2, 16), "float64"))
fwd_func = relay.Function([data], relay.nn.log_softmax(data))
check_grad(fwd_func, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
class TestBiasAddGrad:
d_shape, b_shape, axis = tvm.testing.parameters(
((1, 16), (16,), 1),
((1, 8, 2, 2), (8,), 1),
((1, 2, 2, 8), (8,), 3),
((4, 8), (8,), 1),
)
def test_bias_add(self, executor_kind, target, dev, d_shape, b_shape, axis):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
bias = relay.var("bias", relay.TensorType(b_shape, "float32"))
fwd_func = relay.Function([data, bias], relay.nn.bias_add(data, bias, axis=axis))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_expand_dims_grad(executor_kind, target, dev):
data = relay.var("data", shape=(2, 3), dtype="float64")
fwd_func = relay.Function([data], relay.expand_dims(data, axis=1, num_newaxis=2))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_concatenate_grad(executor_kind, target, dev):
x = relay.var("x", shape=(2, 2, 5))
y = relay.var("y", shape=(2, 1, 5))
z = relay.var("z", shape=(2, 4, 5))
fwd_func = relay.Function([x, y, z], relay.concatenate([x, y, z], axis=1))
check_grad(fwd_func, target_devices=[(target, dev)], executor_kind=executor_kind)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_grad_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad
index_dtype = tvm.testing.parameter("int32", "int64")
val_dtype = tvm.testing.parameter("float32", "float64")
executor_kind = tvm.testing.parameter("debug")
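# check_grad compares the gradients computed by Relay's gradient transform
# against numerical (finite-difference) estimates; eps and scale control the
# perturbation size and the magnitude of the random inputs.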
def test_cross_entropy_grad(executor_kind, target, dev, val_dtype):
target = tvm.target.Target(target)
if target.kind.name == "vulkan" and val_dtype == "float64":
# GLSL.std.450's Log implementation only takes 16/32-bit floats.
pytest.xfail("Known failing test case for vulkan runtime")
x = relay.var("x", shape=(2, 5), dtype=val_dtype)
y = relay.var("y", shape=(2, 5), dtype=val_dtype)
check_grad(
relay.Function([x, y], relay.op.nn.cross_entropy(x, y)),
eps=0.01,
scale=0.1,
mean=1,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_cross_entropy_with_logits_grad(executor_kind, target, dev, val_dtype):
x = relay.var("x", shape=(2, 5), dtype=val_dtype)
y = relay.var("y", shape=(2, 5), dtype=val_dtype)
check_grad(
relay.Function([x, y], relay.op.nn.cross_entropy_with_logits(x, y)),
eps=0.01,
scale=0.1,
mean=1,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_checkpoint(executor_kind, target, dev):
inputs = [relay.var("x{}".format(i), shape=(1,)) for i in range(4)]
output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3]))
check_grad(
relay.Function(inputs, relay.annotation.checkpoint(output)), executor_kind=executor_kind
)
scope = relay.ScopeBuilder()
out_tuple = scope.let(
"out_tuple",
relay.Tuple([relay.add(inputs[0], inputs[1]), relay.multiply(inputs[2], inputs[3])]),
)
scope.ret(
relay.subtract(
relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),
relay.TupleGetItem(out_tuple, 1),
)
)
out_single = scope.get()
check_grad(
relay.Function(inputs, out_single),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
class TestBatchMatmulGrad:
a_shape, b_shape, transpose_a, transpose_b = tvm.testing.parameters(
((2, 3, 5), (2, 5, 4), False, False),
((2, 3, 5), (2, 4, 5), False, True),
((2, 5, 3), (2, 5, 4), True, False),
((2, 5, 3), (2, 4, 5), True, True),
)
def test_batch_matmul_grad(
self, executor_kind, target, dev, a_shape, b_shape, transpose_a, transpose_b
):
tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
check_grad(
relay.Function(
[tensor_a, tensor_b],
relay.op.nn.batch_matmul(
tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b
),
),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_reverse_reshape_grad(executor_kind, target, dev):
x = relay.var("x", shape=(3, 4, 5), dtype="float64")
check_grad(
relay.Function([x], relay.op.reverse_reshape(x, (-1, 0))),
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
def test_one_hot_grad(executor_kind, target, dev, index_dtype, val_dtype):
indices_shape = (3, 4)
depth = 5
axis = -1
inputs = [
np.random.randint(depth, size=indices_shape, dtype=index_dtype),
np.array(np.random.randn() * 1e-5).astype(val_dtype),
np.array(np.random.randn() * 1e-5).astype(val_dtype),
]
test_inputs = inputs[1:]
indices = relay.var("indices", shape=indices_shape, dtype=index_dtype)
on_val = relay.var("on_val", shape=tuple(), dtype=val_dtype)
off_val = relay.var("off_val", shape=tuple(), dtype=val_dtype)
y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype)
f = relay.Function([indices, on_val, off_val], y)
check_grad(
f,
inputs=inputs,
test_inputs=test_inputs,
target_devices=[(target, dev)],
executor_kind=executor_kind,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_grad_level2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from tvm import topi
import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass
from tvm.relay.transform import gradient
import tvm.testing
executor_kind = tvm.testing.parameter("debug")
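# The pooling gradient tests feed an all-ones output gradient and compare the
# input gradient produced by relay.transform.gradient against
# tvm.topi.testing.pool_grad_nchw.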
def verify_max_pool2d_grad(executor_kind, x_shape, pool_size, strides, padding, ceil_mode):
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.max_pool2d(
x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="max",
ceil_mode=ceil_mode,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_max_pool2d_grad(executor_kind):
verify_max_pool2d_grad(
executor_kind,
(1, 4, 16, 16),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
)
verify_max_pool2d_grad(
executor_kind,
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
)
def verify_avg_pool2d_grad(
x_shape,
pool_size,
strides,
padding,
ceil_mode,
count_include_pad,
executor_kind,
dtype="float32",
):
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in x_shape], dtype=dtype)
y = tvm.relay.nn.avg_pool2d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype(dtype)
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="avg",
ceil_mode=ceil_mode,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_avg_pool2d_grad(executor_kind):
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
count_include_pad=True,
executor_kind=executor_kind,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
executor_kind=executor_kind,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
executor_kind=executor_kind,
dtype="int32",
)
def verify_global_avg_pool2d_grad(executor_kind, x_shape):
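    """Check the global_avg_pool2d gradient by treating it as an average pool whose
    window covers the full spatial extent of the input."""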
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.global_avg_pool2d(x)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=(x_shape[2], x_shape[3]),
strides=(1, 1),
padding=[0, 0, 0, 0],
pool_type="avg",
ceil_mode=False,
)
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_global_avg_pool2d_grad(executor_kind):
verify_global_avg_pool2d_grad(executor_kind, (1, 4, 16, 16))
verify_global_avg_pool2d_grad(executor_kind, (1, 8, 8, 24))
def verify_conv2d_grad(
dshape, wshape, strides, padding, dilation, groups=1, mode="higher_order", executor_kind="vm"
):
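    """Numerically check conv2d gradients with respect to both data and weight via check_grad."""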
dtype = "float32"
data = relay.var("data", shape=dshape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
conv = relay.nn.conv2d(
data,
weight,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
out_dtype=dtype,
)
fwd_func = relay.Function([data, weight], conv)
check_grad(fwd_func, mode=mode, executor_kind=executor_kind)
@tvm.testing.uses_gpu
def test_conv2d_grad(executor_kind):
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 1, 1), [1, 1], [0, 0], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16), (16, 4, 1, 1), [2, 2], [0, 0], [1, 1], executor_kind=executor_kind
)
verify_conv2d_grad(
(1, 4, 16, 16),
(16, 4, 3, 3),
[1, 1],
[1, 1],
[1, 1],
mode="first_order",
executor_kind=executor_kind,
)
def verify_dense_grad(d_shape, w_shape, executor_kind):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
weight = relay.var("weight", relay.TensorType(w_shape, "float32"))
fwd_func = relay.Function([data, weight], relay.nn.dense(data, weight))
check_grad(fwd_func, executor_kind=executor_kind)
def test_dense_grad(executor_kind):
verify_dense_grad((1, 8), (16, 8), executor_kind)
verify_dense_grad((1, 4), (3, 4), executor_kind)
verify_dense_grad((5, 4), (3, 4), executor_kind)
def verify_matmul_grad(a_shape, b_shape, transpose_a, transpose_b, executor_kind):
tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
fwd_func = relay.Function(
[tensor_a, tensor_b],
relay.nn.matmul(tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b),
)
check_grad(fwd_func, executor_kind=executor_kind)
def test_matmul_grad(executor_kind):
verify_matmul_grad((1, 8), (8, 16), False, False, executor_kind)
verify_matmul_grad((4, 1), (4, 3), True, False, executor_kind)
verify_matmul_grad((4, 5), (3, 4), True, True, executor_kind)
def verify_batch_flatten_grad(d_shape, executor_kind):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], relay.nn.batch_flatten(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_batch_flatten_grad(executor_kind):
verify_batch_flatten_grad((1, 2, 3, 4), executor_kind)
verify_batch_flatten_grad((1, 8), executor_kind)
def verify_conv2d_backward_weight(
executor_kind, dy_shape, x_shape, kernel_size, stride, padding, groups=1, out_channels=None
):
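    """Compare conv2d_backward_weight against the python reference, running the
    legalized function on llvm and the original op through cuDNN when available."""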
dtype = "float32"
dy = relay.var("dy", shape=dy_shape, dtype=dtype)
x = relay.var("x", shape=x_shape, dtype=dtype)
dw_func = relay.Function(
[dy, x],
relay.nn.conv2d_backward_weight(
dy,
x,
strides=stride,
padding=padding,
kernel_size=kernel_size,
groups=groups,
channels=out_channels,
out_dtype=dtype,
),
)
dw_func_legalized = run_opt_pass(dw_func, relay.transform.Legalize())
for dw, target in [(dw_func_legalized, "llvm"), (dw_func, "cuda -libs=cudnn")]:
if "cudnn" in target and not tvm.contrib.cudnn.exists():
continue
dev = tvm.device(target, 0)
dy_np = np.random.randn(*dy_shape).astype(dtype)
x_np = np.random.randn(*x_shape).astype(dtype)
dw_np = (
relay.create_executor(executor_kind, device=dev, target=target)
.evaluate(dw)(dy_np, x_np)
.numpy()
)
ref_dw_np = tvm.topi.testing.conv2d_backward_weight_python(
dy_np, x_np, kernel_size, stride, padding, groups=groups, channels=out_channels
)
np.testing.assert_allclose(dw_np, ref_dw_np, rtol=1e-4, atol=1e-4)
def test_conv2d_backward_weight(executor_kind):
verify_conv2d_backward_weight(
executor_kind, (2, 8, 32, 32), (2, 4, 32, 32), (3, 3), (1, 1), (1, 1)
)
verify_conv2d_backward_weight(
executor_kind, (2, 16, 15, 15), (2, 3, 32, 32), (3, 3), (2, 2), (0, 0)
)
verify_conv2d_backward_weight(
executor_kind,
(1, 16, 32, 32),
(1, 16, 32, 32),
(3, 3),
(1, 1),
(1, 1),
groups=16,
out_channels=16,
)
def test_conv2d_backward_weight_infer_type():
# From https://github.com/apache/tvm/pull/10439
depthwise_conv_code = """
fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 1, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
%0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=3, channels=3, kernel_size=[3, 3]);
nn.bias_add(%0, %v0_bias)
}
"""
normal_conv_code = """
fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 3, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
%0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=1, channels=3, kernel_size=[3, 3]);
nn.bias_add(%0, %v0_bias)
}
"""
SEMVER = '#[version = "0.0.5"]\n'
for code in [normal_conv_code, depthwise_conv_code]:
expr = tvm.parser.parse_expr(SEMVER + code)
fmod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(fmod)
bwd_expr = relay.transform.gradient(mod["main"], mode="first_order")
bwd_mod = tvm.IRModule.from_expr(bwd_expr)
bwd_mod = relay.transform.InferType()(bwd_mod)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_grad_level3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass, _np_randn_from_type
from tvm.relay.transform import gradient
import tvm.testing
executor_kind = tvm.testing.parameter("debug")
@tvm.testing.uses_gpu
def test_clip(executor_kind):
for dtype in ("float32", "float64"):
ref = lambda x: np.where(
x > 10.0, np.zeros_like(x), np.where(x < 1.0, np.zeros_like(x), np.ones_like(x))
)
x = relay.var("x", relay.TensorType((10, 4), dtype))
y = tvm.relay.clip(x, 1.0, 10.0)
data = np.random.rand(10, 4).astype(dtype) * 11.0
ref_grad = ref(data)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
for target, dev in tvm.testing.enabled_targets():
op_res, (op_grad,) = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
def verify_transpose_grad(d_shape, axes=None, executor_kind="vm"):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], relay.transpose(data, axes=axes))
check_grad(fwd_func, executor_kind=executor_kind)
def test_transpose_grad(executor_kind):
verify_transpose_grad((1, 2, 3, 4), executor_kind=executor_kind)
verify_transpose_grad((1, 2, 3, 4), axes=(0, 2, 3, 1), executor_kind=executor_kind)
def test_negative_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float32"))
fwd_func = relay.Function([data], relay.negative(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float32"))
fwd_func = relay.Function([data], relay.cast(data, "float64"))
check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_like_grad(executor_kind):
data = relay.var("data", shape=(10, 4), dtype="float32")
like = relay.var("like", shape=(1,), dtype="float64")
fwd_func = relay.Function([data, like], relay.cast_like(data, like))
check_grad(fwd_func, executor_kind=executor_kind)
def test_copy_grad(executor_kind):
data = relay.var("data", relay.TensorType((10, 4), "float64"))
fwd_func = relay.Function([data], relay.copy(data))
check_grad(fwd_func, executor_kind=executor_kind)
def test_take_grad(executor_kind):
data_dtype = relay.TensorType((3, 4, 5), "float64")
data = relay.var("data", data_dtype)
indices = relay.var("indices", relay.TensorType((relay.Any(),), "int32"))
inputs = [_np_randn_from_type(data_dtype, scale=1e-5), np.array([1, 2], dtype="int32")]
test_inputs = [inputs[0]]
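    # only the floating-point data is listed in test_inputs; the integer indices
    # have no meaningful gradient to check numerically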
# take on axis
fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=1))
check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
# take on flattened
fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=None))
check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
def test_stack_grad(executor_kind):
args = [relay.var(c, shape=(2, 3, 4), dtype="float64") for c in "xyz"]
fwd_func = relay.Function(args, relay.stack(args, axis=0))
check_grad(fwd_func, executor_kind=executor_kind)
def test_squeeze_grad(executor_kind):
data = relay.var("data", shape=(2, 1, 1, 3, 4, 1), dtype="float64")
fwd_func = relay.Function([data], relay.squeeze(data))
fwd_func_subset = relay.Function([data], relay.squeeze(data, axis=[1, -1]))
check_grad(fwd_func, executor_kind=executor_kind)
check_grad(fwd_func_subset, executor_kind=executor_kind)
def test_arange_grad(executor_kind):
# TODO: testing arange numerically is strange because two-sided approx can
# produce different output shapes
dtype = "float64"
start = relay.var("start", relay.TensorType((), dtype))
stop = relay.var("stop", relay.TensorType((), dtype))
step = relay.var("step", relay.TensorType((), dtype))
values = [np.array(v, dtype=dtype) for v in [2.5, 9.5, 1.8]]
fwd_func = relay.Function([start, stop, step], relay.arange(start, stop, step, dtype))
check_grad(fwd_func, inputs=values, executor_kind=executor_kind)
def test_gather_nd_grad(executor_kind):
data = relay.var("data", relay.TensorType((2, 3), "float64"))
indices = relay.var("indices", relay.TensorType((2, 4), "int64"))
fwd = relay.Function([data, indices], relay.gather_nd(data, indices))
data_np = np.random.rand(2, 3).astype("float64")
indices_np = np.array([[0, 1, 1, 0], [0, 1, 0, 0]], dtype="int64")
check_grad(
fwd, inputs=[data_np, indices_np], test_inputs=[data_np], executor_kind=executor_kind
)
def test_reshape_like_grad(executor_kind):
data = relay.var("data", shape=(2, 3, 4), dtype="float32")
shape_like = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
fwd_func = relay.Function([data, shape_like], relay.reshape_like(data, shape_like))
check_grad(fwd_func, executor_kind=executor_kind)
def test_zeros_ones_grad_const_ints():
# when shape is static (i.e. not an input), there is no gradient at all
static_ty = relay.TensorType([2, 3, 4], dtype="float32")
expected_ty = relay.TupleType([static_ty, relay.TupleType([])])
for op in [relay.zeros, relay.ones]:
fwd_func = relay.Function([], op(static_ty.concrete_shape, static_ty.dtype))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty)
def test_zeros_ones_grad_const_expr():
# when shape is static (i.e. not an input), there is no gradient at all
shape_const = relay.const(np.array([2, 3, 4]), dtype="int32") * relay.const(1, dtype="int32")
static_ty = relay.TensorType([2, 3, 4], dtype="float32")
dyn_ty = relay.TensorType([relay.Any(), relay.Any(), relay.Any()], dtype="float32")
expected_ty_static = relay.TupleType([static_ty, relay.TupleType([])])
expected_ty_dyn = relay.TupleType([dyn_ty, relay.TupleType([])])
for op in [relay.zeros, relay.ones]:
# with DynamicToStatic, the shape should be concretized
fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
fwd_func = run_opt_pass(fwd_func, relay.transform.DynamicToStatic())
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_static)
fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_dyn)
def test_zeros_ones_grad_dynamic(executor_kind):
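    # the forward result should match np.zeros/np.ones and the gradient with
    # respect to the dynamic shape input should be all zeros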
rank = np.random.randint(low=1, high=5, dtype="int32")
dyn_shape = np.random.randint(low=1, high=4, size=(rank,), dtype="int32")
shape_data = relay.var("shape_data", shape=(rank,), dtype="int32")
for op, op_ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
fwd_func = relay.Function([shape_data], op(shape_data, dtype="float32"))
bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
for target, dev in tvm.testing.enabled_targets():
res, (grad,) = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
bwd_func
)(dyn_shape)
tvm.testing.assert_allclose(res.numpy(), op_ref(dyn_shape, dtype="float32"))
tvm.testing.assert_allclose(grad.numpy(), np.zeros((rank,), dtype="int32"))
if __name__ == "__main__":
pytest.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_grad_level4.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad, _np_randn_from_type
executor_kind = tvm.testing.parameter("debug")
def verify_reduction_grad(executor_kind, red_fn, d_shape, axis=None, keepdims=False, exclude=False):
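    """Numerically check a reduction op's gradient for the given axis/keepdims/exclude settings."""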
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], red_fn(data, axis=axis, keepdims=keepdims, exclude=exclude))
check_grad(fwd_func, executor_kind=executor_kind)
def test_reduction_grad(executor_kind):
def _unbiased_variance(x, axis=None, keepdims=False, exclude=False):
return relay.variance(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)
for op in (relay.sum, relay.variance, _unbiased_variance, relay.mean):
verify_reduction_grad(executor_kind, op, (4, 2))
verify_reduction_grad(executor_kind, op, (4, 2), axis=-1, keepdims=True)
verify_reduction_grad(executor_kind, op, (4, 2, 1), axis=(1, 2), exclude=True)
verify_reduction_grad(executor_kind, op, (4, 2, 1), axis=1)
def verify_max_grad(executor_kind, d_shape, axis=None, keepdims=False, exclude=False):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function(
[data], relay.max(data, axis=axis, keepdims=keepdims, exclude=exclude)
)
check_grad(fwd_func, scale=1e-3, executor_kind=executor_kind)
def test_max_grad(executor_kind):
verify_max_grad(executor_kind, (10, 10), axis=None)
verify_max_grad(executor_kind, (10, 10), axis=-1)
verify_max_grad(executor_kind, (6, 3, 2), axis=(1, 2), keepdims=True)
verify_max_grad(executor_kind, (5, 4, 3), axis=(0, 2), exclude=True)
def test_where_grad(executor_kind):
cond_type = relay.TensorType((2, 3, 4), "int32")
lhs_type = relay.TensorType((1, 3, 4), "float32")
rhs_type = relay.TensorType((2, 1, 4), "float32")
inputs = [
np.random.randint(2, size=cond_type.concrete_shape, dtype=cond_type.dtype),
_np_randn_from_type(lhs_type, scale=1e-5),
_np_randn_from_type(rhs_type, scale=1e-5),
]
cond = relay.var("cond", type_annotation=cond_type)
lhs = relay.var("lhs", type_annotation=lhs_type)
rhs = relay.var("rhs", type_annotation=rhs_type)
fwd_func = relay.Function([cond, lhs, rhs], relay.where(cond, lhs, rhs))
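    # gradients are only checked for lhs and rhs; the integer condition tensor has no gradient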
check_grad(fwd_func, inputs=inputs, test_inputs=inputs[1:], executor_kind=executor_kind)
def test_less_equal_grad(executor_kind):
x_type = relay.TensorType((2, 3, 4), "float32")
y_type = relay.TensorType((3, 1), "float32")
# We need to generate inputs far apart to get correct numerical gradients
# (otherwise adding epsilon may change comparison result). The gradient
# should always be zero for both inputs.
inputs = [
np.random.choice([-1, 1], size=x_type.concrete_shape).astype(x_type.dtype),
np.random.choice([-2, 2], size=y_type.concrete_shape).astype(y_type.dtype),
]
x = relay.var("x", type_annotation=x_type)
y = relay.var("y", type_annotation=y_type)
fwd_func = relay.Function([x, y], relay.less_equal(x, y))
check_grad(fwd_func, inputs=inputs, test_inputs=inputs, eps=1e-6, executor_kind=executor_kind)
def test_not_equal_grad(executor_kind):
x_type = relay.TensorType((2, 3, 4), "float32")
y_type = relay.TensorType((3, 1), "float32")
# We need to generate inputs far apart to get correct numerical gradients
# (otherwise adding epsilon may change comparison result). The gradient
# should always be zero for both inputs.
inputs = [
np.random.choice([-1, 1], size=x_type.concrete_shape).astype(x_type.dtype),
np.random.choice([-2, 2], size=y_type.concrete_shape).astype(y_type.dtype),
]
x = relay.var("x", type_annotation=x_type)
y = relay.var("y", type_annotation=y_type)
fwd_func = relay.Function([x, y], relay.not_equal(x, y))
check_grad(fwd_func, inputs=inputs, test_inputs=inputs, eps=1e-6, executor_kind=executor_kind)
def test_strided_slice_grad(executor_kind):
def check(sh, dtype, begin, end, strides, slice_mode):
x = relay.var("x", shape=sh, dtype=dtype)
f = relay.Function(
[x],
relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode),
)
check_grad(f, executor_kind=executor_kind)
check((2, 3, 4), "float32", (0, 1, 0), (-1, -1, 1), (1, 1, 1), "size")
check((2, 3, 4), "float32", (0, 1, 0), (2, 3, 1), (1, 1, 1), "end")
# check that strides are properly ignored when using "size" mode
check((2, 3, 4), "float32", (0, 0, 0), (-1, -1, -1), (1, 1, 2), "size")
check((2, 3, 4), "float32", (0, 0, 0), (2, 3, 4), (1, 1, 2), "end")
if __name__ == "__main__":
pytest.main()
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_level1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
import scipy.special
from tvm import relay
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
executor_kind = tvm.testing.parameter("graph", "vm")
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
def rsqrt(x):
one = np.ones_like(x)
return one / np.sqrt(x)
class TestUnaryOp:
# Tuple of (operator, reference op, supports fp16)
op_list = {
"log": (tvm.relay.log, np.log, True),
"exp": (tvm.relay.exp, np.exp, True),
"erf": (tvm.relay.erf, scipy.special.erf, True),
"sqrt": (tvm.relay.sqrt, np.sqrt, True),
"rqsrt": (tvm.relay.rsqrt, rsqrt, True),
"sigmoid": (tvm.relay.sigmoid, sigmoid, True),
"tanh": (tvm.relay.tanh, np.tanh, False),
"relu": (relay.nn.relu, relu, True),
"cos": (tvm.relay.cos, np.cos, True),
"sin": (tvm.relay.sin, np.sin, True),
"tan": (tvm.relay.tan, np.tan, False),
"atan": (tvm.relay.atan, np.arctan, False),
"ceil": (tvm.relay.ceil, np.ceil, True),
"floor": (tvm.relay.floor, np.floor, True),
"trunc": (tvm.relay.trunc, np.trunc, True),
"round": (tvm.relay.round, np.round, False),
}
dtype = tvm.testing.parameter("float16", "float32")
relay_op, ref_func, supports_fp16 = tvm.testing.parameters(
*op_list.values(), ids=op_list.keys()
)
def test_unary_op(self, target, dev, relay_op, ref_func, supports_fp16, dtype):
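        """Check printing, type inference, and numerical accuracy of a unary op
        against its NumPy/SciPy reference on the given target."""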
target = tvm.target.Target(target)
if dtype == "float16":
if target.kind.name == "cuda":
if not have_fp16(tvm.cuda(0).compute_version):
pytest.xfail(
"No float16 support on local cuda device (compute_version != 5.3 and < 6.0)"
)
elif target.kind.name == "vulkan" and not target.attrs.get("supports_float16", False):
pytest.xfail("No float16 support on vulkan target (supports_float16=False)")
elif not supports_fp16:
pytest.xfail(f"No float16 support on {target.kind.name} target")
if target.kind.name == "vulkan" and relay_op in [
tvm.relay.erf,
tvm.relay.tan,
tvm.relay.atan,
]:
pytest.xfail(f"Vulkan runtime doesn't yet support {relay_op}")
shape = (10, 4)
tp = relay.TensorType(shape, dtype=dtype)
x = relay.var("x", type_annotation=tp)
y = relay_op(x)
# test printer
assert ("{}(%x)".format(y.op.name)) in y.astext()
# test type inference
yy = run_infer_type(y)
assert yy.checked_type == tp
if ref_func is not None:
data = np.random.rand(*shape).astype(dtype)
ref_res = ref_func(data).astype(dtype)
func = relay.Function([x], y)
            # use the graph executor by default for testing, as we need to
            # create the function explicitly to avoid constant folding.
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
tolerance = 1e-2 if dtype == "float16" else 1e-5
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=tolerance)
@tvm.testing.uses_gpu
def test_binary_op():
def inst(vars, sh):
return [vars.get(s, s) for s in sh]
def check_binary_op(opfunc, ref, dtype):
# TODO(@jroesch): this piece of code improperly uses type variables.
n = te.var("n")
s1 = (5, n, 5)
s2 = (n, 1)
t1 = relay.TensorType(s1)
t2 = relay.TensorType(s2)
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
# test printer
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(dtype)
y_data = np.random.rand(5, 10, 5).astype(dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, dev in tvm.testing.enabled_targets():
                # use the graph executor by default for testing, as we need to
                # create the function explicitly to avoid constant folding.
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01, atol=1e-3)
for opfunc, ref in [
(relay.add, np.add),
(relay.subtract, np.subtract),
(relay.multiply, np.multiply),
(relay.divide, np.divide),
(relay.floor_divide, np.floor_divide),
(relay.floor_mod, np.fmod),
]:
for dtype in ["float16", "float32"]:
check_binary_op(opfunc, ref, dtype)
@tvm.testing.uses_gpu
def test_expand_dims():
# based on topi test
def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
x = relay.Var("x", relay.TensorType(dshape, dtype))
func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = data.reshape(oshape)
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
for dtype in ["float16", "float32"]:
verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_bias_add():
for dtype in ["float16", "float32"]:
xshape = (10, 2, 3, 4)
bshape = (2,)
rtol = 1e-2 if dtype == "float16" else 1e-5
x = relay.var("x", shape=xshape, dtype=dtype)
bias = relay.var("bias", dtype=dtype)
z = relay.nn.bias_add(x, bias)
zz = run_infer_type(z)
assert "axis=" not in zz.astext()
assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
func = relay.Function([x, bias], z)
x_data = np.random.uniform(size=xshape).astype(dtype)
y_data = np.random.uniform(size=bshape).astype(dtype)
ref_res = x_data + y_data.reshape((2, 1, 1))
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, y_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol)
def test_bias_add_type_failure():
def assert_failure(expr):
try:
run_infer_type(expr)
except tvm._ffi.base.TVMError:
return
else:
assert False
for axis in (0, -1, -3, 1):
assert_failure(relay.nn.bias_add(relay.const(1), relay.const(2), axis=axis))
def test_expand_dims_infer_type():
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d), dtype=dtype)
y = relay.expand_dims(x, axis=2)
assert "axis=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)
@tvm.testing.uses_gpu
def test_softmax():
for shape in [(10, 4), (10, 5, 4)]:
for dtype in ["float16", "float32"]:
# Softmax accuracy for float16 is poor
if dtype == "float16":
continue
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.softmax(x, axis=1)
assert "nn.softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.softmax_python(x_data, axis=1)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_log_softmax():
for shape in [(10, 4), (10, 5, 4)]:
for dtype in ["float16", "float32"]:
# Softmax accuracy for float16 is poor
if dtype == "float16":
continue
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.log_softmax(x, axis=1)
assert "nn.log_softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = tvm.topi.testing.log_softmax_python(x_data, axis=1)
for target, dev in tvm.testing.enabled_targets():
if target == "nvptx":
continue
op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data
)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_concatenate(executor_kind):
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", shape=(n, t, d))
y = relay.var("y", shape=(n, t, d))
z = relay.concatenate((x, y), axis=-1)
assert "axis=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
x = relay.exp(x)
z = relay.concatenate((x, y), axis=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
z = relay.concatenate((x, y), axis=1)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t + t, 100))
        # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError).
try:
x = relay.var("p1", shape=(2, 5))
y = relay.var("p2", shape=(2, 3))
c = relay.concatenate([x, y], axis=0)
func = relay.Function([x, y], c)
zz = run_infer_type(func)
except tvm._ffi.base.TVMError:
pass
else:
assert False
x = relay.var("x", shape=(10, 5), dtype=dtype)
y = relay.var("y", shape=(10, 5), dtype=dtype)
t = relay.var("z", shape=(), dtype=dtype)
z = relay.concatenate((x, y), axis=1)
z = relay.add(z, t)
# Check result.
func = relay.Function([x, y, t], z)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
for target, dev in tvm.testing.enabled_targets():
if (
dtype == "float16"
and target == "cuda"
and not have_fp16(tvm.cuda(0).compute_version)
):
continue
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data, t_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_dropout(executor_kind):
for dtype in ["float16", "float32"]:
n, t, d = te.size_var("n"), te.size_var("t"), te.size_var("d")
input_ty = relay.TensorType((n, t, d), dtype)
x = relay.var("x", input_ty)
y = relay.nn.dropout(x, rate=0.75)
assert "rate=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == input_ty
in_np = np.random.random([4, 5, 6]).astype("float32")
x = relay.const(in_np)
y = relay.nn.dropout(x, rate=0.5)
func = relay.Function([], y)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
tvm.testing.assert_allclose(op_res.numpy(), in_np, rtol=0.01)
def test_batch_norm():
for dtype in ["float16", "float32"]:
# beta and gamma ignored
data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
beta = relay.var("beta", relay.TensorType((2,), dtype))
gamma = relay.var("gamma", relay.TensorType((2,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert "center=" in yy.astext()
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.TensorType((3, 2, 1), dtype),
relay.TensorType((2,), dtype),
relay.TensorType((2,), dtype),
]
)
)
        # axis=0
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((3, 2, 1), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
# axis=-1
data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False
)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, 2, 3), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype),
]
)
)
def do_concat_test(shapes, t_shape, dtype, axis, dev, target):
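    """Concatenate the given inputs along `axis`, add a broadcast tensor of shape
    `t_shape`, and compare the graph and debug executors against NumPy."""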
varsToConcat = []
inputData = []
pos = 0
for s in shapes:
varsToConcat.append(relay.var("x{}".format(pos), shape=s))
inputData.append(np.random.rand(*s).astype(dtype))
pos += 1
t = relay.var("z", shape=t_shape, dtype=dtype)
z = relay.concatenate(varsToConcat, axis=axis)
z = relay.add(z, t)
params = varsToConcat
params.append(t)
func = relay.Function(params, z)
t_data = np.random.uniform(low=-10, high=10, size=t_shape).astype(dtype)
ref_res = np.concatenate((tuple(inputData)), axis=axis) + t_data
mod = tvm.IRModule.from_expr(func)
executor = relay.create_executor("graph", mod=mod, device=dev, target=target)
op_res1 = executor.evaluate()(*inputData, t_data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=0.000001)
op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(
*inputData, t_data
)
tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=0.000001)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate1(target, dev):
np.random.seed(471)
maxNumDimensions = 6
shape = [4, 32, 16, 1, 31, 20, 21, 8, 28, 7] # just randomly selected 10 numbers
for dtype in ["float32"]:
for dimsNum in range(1, maxNumDimensions):
np.random.shuffle(shape)
for axis in range(0, dimsNum): # range should be (-dimsNum + 1, dimsNum)
numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
shapes = []
                # the code below normalizes the axis index; for some reason TVM reports an error if the axis is negative
normalizedAxis = axis
if axis < 0:
normalizedAxis += dimsNum
finalSize = 0
for i in range(0, numToConcat):
shp = tuple(shape[:dimsNum])
finalSize += shape[(i % len(shape))]
shapes.append(
shp[:normalizedAxis]
+ tuple([shape[(i % len(shape))]])
+ shp[normalizedAxis + 1 :]
)
t_shape = shp[:normalizedAxis] + tuple([finalSize]) + shp[normalizedAxis + 1 :]
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate2(target, dev):
# test to cover cases (1, .. , x, 1, .. , 1)
np.random.seed(13)
maxNumDimensions = 6
shape = [8, 3, 25, 33, 12, 29, 5, 11, 29, 11] # just randomly selected 10 numbers
ind = 0
for dtype in ["float32"]:
for dimsNum in range(2, maxNumDimensions):
np.random.shuffle(shape)
for axis in range(-dimsNum + 1, dimsNum): # range should be (-dimsNum + 1, dimsNum)
numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
shapes = []
                # the code below normalizes the axis index; for some reason TVM reports an error if the axis is negative
normalizedAxis = axis
if axis < 0:
normalizedAxis += dimsNum
finalSize = 0
for i in range(0, numToConcat):
axisVal = [1] * dimsNum
axisVal[axis] = shape[(ind % len(shape))]
ind += 1
finalSize += axisVal[axis]
shapes.append(tuple(axisVal))
temp = [1] * dimsNum
temp[axis] = finalSize
t_shape = tuple(temp)
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate3(target, dev):
np.random.seed(477)
for dtype in ["float32"]:
axis = -2
ending = 1
shapes = [[3, 2, 1, ending], [3, 2, 1, ending]]
t_shape = [3, 2, 2, ending]
do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate4(target, dev):
np.random.seed(7)
x_shape = (2, 1)
x = relay.var("x", shape=x_shape, dtype="int64")
concat = relay.concatenate([x], axis=1)
f = relay.Function([x], concat)
x_val = np.array([[33], [13]], dtype="int64")
graph = relay.create_executor("graph", device=tvm.cpu(), target="llvm")
op_res = graph.evaluate(f)(x_val)
ref_res = np.concatenate([x_val], axis=1)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.000001)
def test_batch_norm_fold_const():
axis = 1
dtype = "float32"
shape = [4, 5, 6]
data_np = np.random.random(shape).astype(dtype)
beta_np = np.random.random(shape[axis]).astype(dtype)
gamma_np = np.random.random(shape[axis]).astype(dtype)
moving_mean_np = np.random.random(shape[axis]).astype(dtype)
moving_var_np = np.random.random(shape[axis]).astype(dtype)
data = relay.var("data", relay.TensorType(shape, dtype))
beta = relay.var("beta", relay.TensorType((shape[1],), dtype))
gamma = relay.var("gamma", relay.TensorType((shape[1],), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((shape[1],), dtype))
moving_var = relay.var("moving_var", relay.TensorType((shape[1],), dtype))
out = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis).astuple()
func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
out_const = relay.nn.batch_norm(
relay.const(data_np),
relay.const(gamma_np),
relay.const(beta_np),
relay.const(moving_mean_np),
relay.const(moving_var_np),
axis=axis,
).astuple()
func_const = relay.Function([], out_const)
# Build the module with constants to have FoldConstant transform batch_norm.
mod_const = tvm.IRModule.from_expr(func_const)
mod_const = relay.transform.FoldConstant()(mod_const)
const_data_out = mod_const["main"].body[0].data
const_moving_mean_out = mod_const["main"].body[1].data
const_moving_var_out = mod_const["main"].body[2].data
    # Run the Relay func without constants. This will use SimplifyInference instead.
vm_data_out, vm_moving_mean_out, vm_moving_var_out = relay.create_executor(
"vm", device=tvm.device("llvm"), target="llvm"
).evaluate(func)(data_np, gamma_np, beta_np, moving_mean_np, moving_var_np)
tvm.testing.assert_allclose(const_data_out.numpy(), vm_data_out.numpy())
tvm.testing.assert_allclose(const_moving_mean_out.numpy(), vm_moving_mean_out.numpy())
tvm.testing.assert_allclose(const_moving_var_out.numpy(), vm_moving_var_out.numpy())
@pytest.mark.xfail
def test_matmul_type_check():
dtype = "float16"
n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
# it should fail since it does not match with m(2)
mismatch_w = 3
w = relay.var("w", relay.TensorType((mismatch_w, 2), dtype))
y = relay.nn.matmul(x, w)
yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_matmul(executor_kind):
for dtype in ["float16", "float32"]:
# Matmul accuracy for float16 is poor
if dtype == "float16":
continue
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.matmul(x, w, units=2, transpose_b=True)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, w, h), dtype))
wh, ww = te.size_var("wh"), te.size_var("ww")
w = relay.var("w", relay.TensorType((wh, ww), dtype))
y = relay.nn.matmul(x, w, transpose_a=True)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.matmul(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(5, 10), dtype=dtype)
w = relay.var("w", shape=(5, 2), dtype=dtype)
z = relay.nn.matmul(x, w, transpose_a=True)
# Check result.
func = relay.Function([x, w], z)
x_data = np.random.rand(5, 10).astype(dtype)
w_data = np.random.rand(5, 2).astype(dtype)
ref_res = np.dot(x_data.transpose(), w_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, w_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@pytest.mark.xfail
def test_dense_type_check():
dtype = "float16"
n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
# it should fail since it does not match with m(2)
mismatch_w = 3
w = relay.var("w", relay.TensorType((2, mismatch_w), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_dense(executor_kind):
for dtype in ["float16", "float32"]:
# Dense accuracy for float16 is poor
if dtype == "float16":
continue
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.dense(x, w, units=2)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
wh, ww = te.size_var("wh"), te.size_var("ww")
w = relay.var("w", relay.TensorType((ww, wh), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
# test dynamic shape in inner
m, k = 4, 2
x = relay.var("x", relay.TensorType((m, k), dtype))
k, nw = relay.Any(), 6
w = relay.var("w", relay.TensorType((k, n), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.dense(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(10, 5), dtype=dtype)
w = relay.var("w", shape=(2, 5), dtype=dtype)
z = relay.nn.dense(x, w)
# Check result.
func = relay.Function([x, w], z)
x_data = np.random.rand(10, 5).astype(dtype)
w_data = np.random.rand(2, 5).astype(dtype)
ref_res = np.dot(x_data, w_data.T)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, w_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dense_same_args_compile():
for dtype in ["float32", "int8"]:
x = relay.var("x", shape=(32, 64), dtype=dtype)
out_dtype = "int32" if dtype == "int8" else "float32"
f = relay.Function([x], relay.nn.dense(x, x, out_dtype=out_dtype))
m = tvm.IRModule.from_expr(f)
for target, _ in tvm.testing.enabled_targets():
tvm.relay.build(m, target=target)
def test_dense_dtype():
data_dtype = "uint8"
weight_dtype = "int8"
out_dtype = "uint8"
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype))
w = relay.var("w", relay.TensorType((2, w), weight_dtype))
y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)
assert run_infer_type(yy.args[0]).checked_type.dtype == "uint8"
assert run_infer_type(yy.args[1]).checked_type.dtype == "int8"
def test_bitserial_dense():
m, k = te.size_var("m"), te.size_var("k")
x = relay.var("x", relay.TensorType((m, k), "int16"))
w = relay.var("w", relay.TensorType((k, 32), "int16"))
y = relay.nn.bitserial_dense(x, w, units=32)
"units=8" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((m, 32), "int16")
@tvm.testing.requires_cascadelake
def test_dense_vnni():
data_shape = (32, 96)
weight_shape = (128, 96)
for data_dtype in ["uint8", "int8"]:
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype="int8")
bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
dense = relay.nn.dense(data, weight, out_dtype="int32")
out = relay.nn.bias_add(dense, bias)
mod = tvm.IRModule.from_expr(out)
target = "llvm -mcpu=cascadelake"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.get_source("asm")
assert "vpdpbusd" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
runtime.set_input("data", a)
runtime.set_input("weight", b)
runtime.set_input("bias", c)
runtime.run()
out = runtime.get_output(0).numpy()
ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
np.testing.assert_equal(out, ref)
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_dense_rocm_sdot4():
data_shape = (32, 96)
weight_shape = (128, 96)
data_dtype = "int8"
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype="int8")
bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
dense = relay.nn.dense(data, weight, out_dtype="int32")
out = relay.nn.bias_add(dense, bias)
mod = tvm.IRModule.from_expr(out)
target = "rocm -mattr=+dotprod"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.imported_modules[0].get_source("asm")
assert "v_dot4_i32_i8" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
runtime.set_input("data", a)
runtime.set_input("weight", b)
runtime.set_input("bias", c)
runtime.run()
out = runtime.get_output(0).numpy()
ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
np.testing.assert_equal(out, ref)
def test_extern_concat_injective_fuse():
# This is a subgraph from MobileBERT, which crashes compilation if buffers created in te.extern(...)
# do not have their elem_offset explicitly set as a variable.
# fmt: off
mod = tvm.parser.fromtext(
"""
#[version = "0.0.5"]
def @main(%p0844: Tensor[(1, 384), int64], %p1652: Tensor[(2016, 128), float16]) {
%1331 = cast(%p0844, dtype="int32");
%1332 = take(%p1652, %1331, axis=0);
%1333 = strided_slice(%1332, begin=[0, 1, 0], end=[1, 384, 128], strides=[1, 1, 1], axes=None);
%1334 = strided_slice(%1332, begin=[0, 0, 0], end=[1, -1, 128], strides=[1, 1, 1], axes=None);
%1335 = nn.pad(%1333, 0, pad_width=[[0, 0], [0, 1], [0, 0]]);
%1336 = nn.pad(%1334, 0, pad_width=[[0, 0], [1, 0], [0, 0]]);
%1337 = (%1335, %1332, %1336);
%1338 = concatenate(%1337, axis=2);
reshape(%1338, newshape=[-1, 384])
}
"""
)
# fmt: on
relay.build(mod, params={}, target="llvm")
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/relay/test_op_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level10 operator test cases.
"""
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te, topi
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_checkpoint(executor_kind):
dtype = "float32"
xs = [relay.var("x{}".format(i), dtype) for i in range(4)]
f = relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
f_checkpoint = relay.annotation.checkpoint(f)
func, func_checkpoint = relay.Function(xs, f), relay.Function(xs, f_checkpoint)
f, f_checkpoint = run_infer_type(func), run_infer_type(func_checkpoint)
assert f.checked_type == f_checkpoint.checked_type
inputs = [np.random.uniform() for _ in range(len(xs))]
for target, dev in tvm.testing.enabled_targets():
f_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(*inputs)
f_checkpoint_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
f_checkpoint
)(*inputs)
tvm.testing.assert_allclose(f_res.numpy(), f_checkpoint_res.numpy(), 0, 0)
def test_checkpoint_alpha_equal():
xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
f = relay.Function(
xs,
relay.annotation.checkpoint(
relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
),
)
df = transform.gradient(run_infer_type(f))
# run PE and DCE
with tvm.transform.PassContext(opt_level=3):
# The expected output assumes DCE can elide 'dead writes' to references. At the time this unit test was
# written DCE would elide all writes, which though unsound in general happens to work for this case. Preserve
# that legacy behaviour here using 'ignore_impurity=True'.
# TODO(mbs): Revisit once DCE supports dead reference writes.
passes = [
transform.PartialEvaluate(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
]
mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
df = mod["main"]
df_parsed = tvm.parser.parse_expr(
"""
#[version = "0.0.5"]
fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
%z: Tensor[(1), float32], %w: Tensor[(1), float32])
-> (Tensor[(1), float32],
(Tensor[(1), float32], Tensor[(1), float32],
Tensor[(1), float32], Tensor[(1), float32])) {
%0 = add(%x, %y);
%1 = add(%z, %w);
let %x1: Tensor[(1), float32] = multiply(%0, %1);
let %x2: Tensor[(1), float32] = ones_like(%x1);
let %x3: Tensor[(1), float32] = add(%x, %y);
let %x4: Tensor[(1), float32] = add(%z, %w);
%2 = zeros_like(%x3);
%3 = multiply(%x2, %x4);
%4 = collapse_sum_like(%3, %x3);
let %x5: Tensor[(1), float32] = add(%2, %4);
%5 = zeros_like(%x4);
%6 = multiply(%x2, %x3);
%7 = collapse_sum_like(%6, %x4);
let %x6: Tensor[(1), float32] = add(%5, %7);
%8 = zeros_like(%x);
%9 = collapse_sum_like(%x5, %x);
%10 = add(%8, %9);
%11 = zeros_like(%y);
%12 = collapse_sum_like(%x5, %y);
%13 = add(%11, %12);
%14 = zeros_like(%z);
%15 = collapse_sum_like(%x6, %z);
%16 = add(%14, %15);
%17 = zeros_like(%w);
%18 = collapse_sum_like(%x6, %w);
%19 = add(%17, %18);
%20 = (%10, %13, %16, %19);
(%x1, %20)
}
"""
)
tvm.ir.assert_structural_equal(df, df_parsed)
def test_checkpoint_alpha_equal_tuple():
xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
f = relay.Function(
xs,
relay.annotation.checkpoint(
relay.Tuple([relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3])])
),
)
df = transform.gradient(run_infer_type(f))
# run PE and DCE
with tvm.transform.PassContext(opt_level=3):
# See comment in test_checkpoint_alpha_equal above.
# TODO(mbs): Revisit once DCE supports dead reference writes.
passes = [
transform.PartialEvaluate(),
transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
]
mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
df = mod["main"]
df_parsed = tvm.parser.parse_expr(
"""
#[version = "0.0.5"]
fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
%z: Tensor[(1), float32], %w: Tensor[(1), float32])
-> ((Tensor[(1), float32], Tensor[(1), float32]),
(Tensor[(1), float32], Tensor[(1), float32],
Tensor[(1), float32], Tensor[(1), float32])) {
let %x1: Tensor[(1), float32] = add(%x, %y) /* ty=Tensor[(1), float32] */;
let %x2: Tensor[(1), float32] = add(%z, %w) /* ty=Tensor[(1), float32] */;
let %x3: Tensor[(1), float32] = zeros_like(%x2) /* ty=Tensor[(1), float32] */;
let %x4: Tensor[(1), float32] = ones_like(%x1) /* ty=Tensor[(1), float32] */;
%0 = (%x1, %x2);
%1 = zeros_like(%x) /* ty=Tensor[(1), float32] */;
%2 = collapse_sum_like(%x4, %x) /* ty=Tensor[(1), float32] */;
%3 = add(%1, %2) /* ty=Tensor[(1), float32] */;
%4 = zeros_like(%y) /* ty=Tensor[(1), float32] */;
%5 = collapse_sum_like(%x4, %y) /* ty=Tensor[(1), float32] */;
%6 = add(%4, %5) /* ty=Tensor[(1), float32] */;
%7 = zeros_like(%z) /* ty=Tensor[(1), float32] */;
%8 = collapse_sum_like(%x3, %z) /* ty=Tensor[(1), float32] */;
%9 = add(%7, %8) /* ty=Tensor[(1), float32] */;
%10 = zeros_like(%w) /* ty=Tensor[(1), float32] */;
%11 = collapse_sum_like(%x3, %w) /* ty=Tensor[(1), float32] */;
%12 = add(%10, %11) /* ty=Tensor[(1), float32] */;
%13 = (%3, %6, %9, %12);
(%0, %13)
}
"""
)
tvm.ir.assert_structural_equal(df, df_parsed)
@tvm.testing.uses_gpu
def test_collapse_sum_like(executor_kind):
shape = (3, 4, 5, 6)
shape_like = (4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
z = relay.collapse_sum_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x, y], z)
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape_like).astype(dtype)
ref_res = np.sum(x, 0)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x, y
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_collapse_sum_to(executor_kind):
shape = (3, 4, 5, 6)
shape_to = (4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
z = relay.collapse_sum_to(x, shape_to)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_to, dtype)
func = relay.Function([x], z)
x = np.random.uniform(size=shape).astype(dtype)
ref_res = np.sum(x, 0)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
shape = (4, 1, 6)
shape_like = (3, 4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
z = relay.broadcast_to(x, shape=shape_like)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x], z)
x = np.random.uniform(size=shape).astype(dtype)
ref_res = np.broadcast_to(x, shape_like)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to_const_shape_int64(executor_kind):
shape_like = relay.const(np.array([1, 5]), dtype="int64")
x = relay.var("x", shape=(1,), dtype="int64")
z = relay.broadcast_to(x, shape=shape_like)
z = relay.sum(z, axis=0)
f = relay.Function([x], z)
x = np.random.randint(10, size=(1,), dtype="int64")
ref_res = np.broadcast_to(x, (5,))
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
def test_broadcast_concat_shape_int64(executor_kind):
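    """Test broadcast_to followed by concatenate with an int64 shape constant."""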
x_shape = (1, 2, 1, 1)
broadcast_shape = [1, 2, 2, 1]
x = relay.var("data", relay.TensorType(x_shape, "float32"))
broadcast_to = relay.op.broadcast_to(x, relay.const(broadcast_shape, dtype="int64"))
    concat = relay.op.concatenate((broadcast_to,), axis=0)
    f = relay.Function([x], concat)
x = np.zeros(x_shape).astype("float32")
ref_res = np.concatenate((np.broadcast_to(x, broadcast_shape),), axis=0)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
def test_broadcast_pool2d_shape_int64(executor_kind):
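    """Test broadcast_to followed by max_pool2d with an int64 shape constant."""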
x_shape = (1, 3, 32, 32)
out_shape = (2, 3, 32, 32)
x = relay.var("data", shape=x_shape, dtype="float32")
broadcast_to = relay.broadcast_to(x, shape=relay.const([2, 3, 32, 32], dtype="int64"))
pool2d = relay.nn.max_pool2d(broadcast_to, pool_size=(3, 3), padding=(1, 1, 1, 1))
sub = relay.subtract(broadcast_to, pool2d)
f = relay.Function([x], sub)
x = np.ones(x_shape).astype("float32")
ref_res = np.zeros(out_shape).astype("float32")
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(f)(x)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_broadcast_to_like(executor_kind):
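    """Test broadcast_to_like against numpy broadcast_to."""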
shape = (4, 1, 6)
shape_like = (3, 4, 5, 6)
dtype = "float32"
x = relay.Var("x", relay.ty.TensorType(shape, dtype))
y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
z = relay.broadcast_to_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
func = relay.Function([x, y], z)
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape_like).astype(dtype)
ref_res = np.broadcast_to(x, shape_like)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x, y
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def np_slice_like(np_data, np_shape_like, axis=None):
begin_idx = [0 for _ in np_data.shape]
end_idx = list(np_data.shape)
if axis:
for i in axis:
if i < 0:
i = len(np_data.shape) + i
end_idx[i] = np_shape_like.shape[i]
else:
for i in range(len(np_data.shape)):
if i < len(np_shape_like.shape):
end_idx[i] = np_shape_like.shape[i]
slice_idx = []
for b, e in zip(begin_idx, end_idx):
slice_idx.append(slice(b, e))
np_result = np_data[tuple(slice_idx)]
return np_result
def verify_slice_like(executor_kind, data, slice_like, axes, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
y = relay.var("slice_like", relay.TensorType(slice_like, dtype))
z = relay.slice_like(x, y, axes)
zz = run_infer_type(z)
if axes:
assert "axes" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
    # Skip the numeric check when either shape is fully symbolic.
    if all(not isinstance(v, int) for v in data) or all(
        not isinstance(v, int) for v in slice_like
    ):
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=data).astype(dtype)
y_data = np.random.uniform(size=slice_like).astype(dtype)
ref_res = np_slice_like(x_data, y_data, axes)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_slice_like(executor_kind):
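    """Test slice_like with static and symbolic shapes."""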
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
verify_slice_like(
executor_kind, data=(d1, d2, d3), slice_like=(1, 2, 3), axes=None, output=(1, 2, 3)
)
verify_slice_like(
executor_kind, data=(1, 2, 3), slice_like=(d1, d2, d3), axes=None, output=(d1, d2, d3)
)
verify_slice_like(
executor_kind, data=(d2, d3, d4), slice_like=(d1, d2, d3), axes=(1, 2), output=(d2, d2, d3)
)
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=None, output=(1, 2, 3)
)
verify_slice_like(executor_kind, data=(3, 4, 5), slice_like=(1, 2), axes=None, output=(1, 2, 5))
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=(1, 2), output=(3, 2, 3)
)
verify_slice_like(
executor_kind, data=(3, 4, 5), slice_like=(1, 2, 3), axes=(-1, -3), output=(1, 4, 3)
)
verify_slice_like(
executor_kind,
data=(1, 3, 224, 224),
slice_like=(1, 3, 112, 112),
axes=(2, 3),
output=(1, 3, 112, 112),
)
@tvm.testing.uses_gpu
def test_reverse_reshape(executor_kind):
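    """Test reverse_reshape type inference and numeric results."""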
def verify_reverse_reshape(executor_kind, shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reverse_reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_reverse_reshape(executor_kind, (2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reverse_reshape(executor_kind, (2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reverse_reshape(executor_kind, (2, 3, 4), (0, -1), (3, 8))
verify_reverse_reshape(executor_kind, (2, 3, 4), (-1, 0), (6, 4))
verify_reverse_reshape(executor_kind, (2, 3, 4), (0, -3), (2, 12))
def verify_batch_matmul_with_inputs(
executor_kind, x, y, x_np, y_np, out_shape, dtype="float32", trans_x=False, trans_y=True
):
z = relay.nn.batch_matmul(x, y, transpose_a=trans_x, transpose_b=trans_y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
input_vars = relay.analysis.free_vars(z)
func = relay.Function(input_vars, z)
z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=trans_x, trans_y=trans_y)
for target, dev in tvm.testing.enabled_targets():
if len(input_vars) == 2:
z = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_np, y_np
)
else:
z = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_np)
tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5, atol=1e-5)
def verify_batch_matmul(
executor_kind, x_shape, y_shape, out_shape, dtype="float32", trans_x=False, trans_y=True
):
x = relay.var("x", relay.TensorType(x_shape, dtype))
y = relay.var("y", relay.TensorType(y_shape, dtype))
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
verify_batch_matmul_with_inputs(
executor_kind, x, y, x_np, y_np, out_shape, dtype, trans_x, trans_y
)
@tvm.testing.uses_gpu
def test_batch_matmul(executor_kind):
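    """Test batch_matmul with various shapes and transpose options."""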
b, m, n, k = te.size_var("b"), te.size_var("m"), te.size_var("n"), te.size_var("k")
x = relay.var("x", relay.TensorType((b, m, k), "float32"))
y = relay.var("y", relay.TensorType((b, n, k), "float32"))
z = relay.nn.batch_matmul(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((b, m, n), "float32")
verify_batch_matmul(
executor_kind, (1, 16, 32), (1, 16, 32), (1, 16, 16), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 16, 32), (5, 16, 16), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 20, 32), (5, 16, 20), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (30, 16, 32), (30, 20, 32), (30, 16, 20), trans_x=False, trans_y=True
)
verify_batch_matmul(
executor_kind, (1, 32, 16), (1, 16, 32), (1, 16, 16), trans_x=True, trans_y=True
)
verify_batch_matmul(
executor_kind, (5, 16, 32), (5, 32, 16), (5, 16, 16), trans_x=False, trans_y=False
)
verify_batch_matmul(
executor_kind, (5, 32, 16), (5, 32, 20), (5, 16, 20), trans_x=True, trans_y=False
)
x_np = np.random.randn(10, 27, 64).astype("float32")
x = relay.var("x", shape=x_np.shape)
verify_batch_matmul_with_inputs(executor_kind, x, x, x_np, x_np, (10, 27, 27))
@tvm.testing.requires_cascadelake
def test_batch_matmul_vnni():
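    """Test int8 batch_matmul lowering to VNNI (vpdpbusd) on cascadelake."""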
x_shape = (16, 32, 96)
y_shape = (16, 128, 96)
z_shape = (16, 32, 128)
for lhs_dtype in ["uint8", "int8"]:
x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
y = relay.var("y", shape=y_shape, dtype="int8")
z = relay.var("z", shape=z_shape, dtype="int32")
bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
out = bmm + z
mod = tvm.IRModule.from_expr(out)
target = "llvm -mcpu=cascadelake"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.get_source("asm")
assert "vpdpbusd" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
z_np = np.random.uniform(1, 10, size=z_shape).astype("int32")
runtime.set_input("x", x_np)
runtime.set_input("y", y_np)
runtime.set_input("z", z_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32") + z_np
np.testing.assert_equal(out, ref)
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_batch_matmul_rocm_sdot4():
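    """Test int8 batch_matmul lowering to v_dot4_i32_i8 on ROCm."""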
x_shape = (16, 32, 96)
y_shape = (16, 128, 96)
lhs_dtype = "int8"
x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
y = relay.var("y", shape=y_shape, dtype="int8")
bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
mod = tvm.IRModule.from_expr(bmm)
target = "rocm -mattr=+dotprod"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target)
asm = lib.lib.imported_modules[0].get_source("asm")
assert "v_dot4_i32_i8" in asm
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
runtime.set_input("x", x_np)
runtime.set_input("y", y_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32")
np.testing.assert_equal(out, ref)
@tvm.testing.uses_gpu
def test_shape_of():
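    """Test shape_of using the VM executor."""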
shape = (10, 5, 12)
x = relay.var("x", shape=shape)
func = relay.Function([x], relay.op.shape_of(x))
func = run_infer_type(func)
x_data = np.random.rand(*shape).astype("float32")
for target, dev in tvm.testing.enabled_targets():
        # The graph executor would constant-fold shape_of away, so only the
        # VM executor is exercised here.
for kind in ["vm"]:
op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), np.array(shape).astype("int32"))
@tvm.testing.uses_gpu
def test_ndarray_size(executor_kind):
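    """Test ndarray_size against numpy size."""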
def verify_ndarray_size(shape):
x = relay.var("x", shape=shape)
func = relay.Function([x], relay.op.ndarray_size(x))
func = run_infer_type(func)
x_data = np.random.uniform(size=shape).astype("float32")
ref_res = np.size(x_data)
for target, dev in tvm.testing.enabled_targets():
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify_ndarray_size((2, 3, 5))
verify_ndarray_size((2, 3, 5, 7))
def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc):
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
y = opfunc(x, out_size, layout)
func = relay.Function([x], y)
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
for target, dev in tvm.testing.enabled_targets():
relay_out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
np_data
)
tvm.testing.assert_allclose(relay_out.numpy(), np_out, rtol=1e-5, atol=1e-5)
def verify_adaptive_pool1d(dshape, out_size, pool_type, layout="NCW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool1d if pool_type == "avg" else relay.nn.adaptive_max_pool1d
verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool3d(dshape, out_size, pool_type, layout="NCDHW", dtype="float32"):
opfunc = relay.nn.adaptive_avg_pool3d if pool_type == "avg" else relay.nn.adaptive_max_pool3d
verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
@tvm.testing.uses_gpu
def test_adaptive_pool():
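    """Test adaptive average/max pooling for 1D, 2D, and 3D layouts."""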
verify_adaptive_pool1d((1, 9, 224), (1), "max")
verify_adaptive_pool1d((1, 3, 224), (3), "avg")
verify_adaptive_pool1d((1, 3, 224), (3), "avg", dtype="int32")
verify_adaptive_pool1d((1, 14, 78), (13), "max")
verify_adaptive_pool1d((1, 5, 97), (96), "avg")
verify_adaptive_pool1d((1, 224, 3), (1), "max", layout="NWC")
verify_adaptive_pool1d((1, 3, 224), (3), "avg", layout="NWC")
verify_adaptive_pool2d((1, 9, 224, 224), (1, 1), "max")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", dtype="int32")
verify_adaptive_pool2d((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool2d((1, 5, 46, 97), (4, 96), "avg")
verify_adaptive_pool2d((1, 224, 224, 3), (1, 1), "max", layout="NHWC")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", layout="NHWC")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "max", layout="NCDHW")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW", dtype="int32")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC", dtype="int32")
verify_adaptive_pool3d((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC")
@tvm.testing.uses_gpu
def test_sequence_mask(executor_kind):
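    """Test sequence_mask with different axes, dtypes, and mask values."""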
def _verify(data_shape, mask_value, axis, dtype, itype):
max_length = data_shape[axis]
nbatch = data_shape[1 - axis]
data = relay.var("data", relay.TensorType(data_shape, dtype))
valid_length = relay.var("valid_length", relay.TensorType((nbatch,), itype))
out = relay.sequence_mask(data, valid_length, mask_value, axis)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(data_shape, dtype)
func = relay.Function([data, valid_length], out)
data_np = np.random.uniform(size=data_shape).astype(dtype)
valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
gt_out_np = tvm.topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(data_np, valid_length_np)
tvm.testing.assert_allclose(out_relay.numpy(), gt_out_np)
_verify((5, 10), 0.0, 1, "float32", "int32")
_verify((2, 3, 5, 3), 0.0, 0, "float32", "int64")
_verify((5, 8, 3), 0.1, 1, "float64", "float32")
@tvm.testing.uses_gpu
def test_one_hot(executor_kind):
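    """Test one_hot over several depths, axes, and dtypes."""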
def _get_oshape(indices_shape, depth, axis):
oshape = []
true_axis = len(indices_shape) if axis == -1 else axis
ndim = len(indices_shape) + 1
indices_index = 0
for i in range(0, ndim):
if i == true_axis:
oshape.append(depth)
else:
oshape.append(indices_shape[indices_index])
indices_index += 1
return oshape
def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
on_value_const = relay.const(on_value)
off_value_const = relay.const(off_value)
out = relay.one_hot(indices, on_value_const, off_value_const, depth, axis, dtype)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(
_get_oshape(indices_shape, depth, axis), dtype
)
func = relay.Function([indices], out)
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(indices_np)
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((3,), 3, 1, 0, -1, "int32")
_verify((3,), 3, 1.0, 0.0, -1, "float32")
_verify((2, 2), 5, 2, -2, 0, "int32")
_verify((2, 2), 5, 0.5, -0.5, 1, "float32")
_verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_matrix_set_diag(executor_kind):
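    """Test matrix_set_diag with various diagonal offsets and alignments."""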
def _verify(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
input = relay.var("input", relay.TensorType(input_shape, dtype))
diagonal = relay.var("diagonal", relay.TensorType(diagonal_shape, dtype))
out = relay.matrix_set_diag(input, diagonal, k, align)
in_type = run_infer_type(input)
out_type = run_infer_type(out)
assert in_type.checked_type == out_type.checked_type
func = relay.Function([input, diagonal], out)
input_np = np.random.randint(-100, 100, size=input_shape).astype(dtype)
diagonal_np = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
out_np = tvm.topi.testing.matrix_set_diag(input_np, diagonal_np, k, align)
for target, dev in tvm.testing.enabled_targets():
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
func
)(input_np, diagonal_np)
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((2, 2), (2,), "float32")
_verify((4, 3, 3), (4, 3), "int32")
_verify((2, 3, 4), (2, 3), "float32", 1)
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_RIGHT")
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_LEFT")
_verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "RIGHT_RIGHT")
@tvm.testing.parametrize_targets
def test_nll_loss(executor_kind, dev, target):
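    """Test nn.nll_loss with different reductions, ignore_index values, and dtypes."""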
def _get_oshape(target_shape, reduction):
if reduction == "none":
return target_shape
else:
return []
def _verify(prediction_shape, reduction="mean", ignore_index=-100, dtype="float32"):
C = prediction_shape[1]
target_shape = prediction_shape[:1] + prediction_shape[2:]
predictions = relay.var("predictions", relay.TensorType(prediction_shape, dtype))
targets = relay.var("targets", relay.TensorType(target_shape, "int32"))
weights = relay.var("weights", relay.TensorType((C,), dtype))
out = relay.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
checked = run_infer_type(out)
assert checked.checked_type == relay.ty.TensorType(
_get_oshape(target_shape, reduction), dtype
)
func = relay.Function([predictions, targets, weights], out)
predictions_np = np.random.uniform(size=prediction_shape).astype(dtype)
targets_np = np.random.randint(0, C, target_shape).astype("int32")
weights_np = np.random.uniform(size=(C,)).astype(dtype)
out_np = tvm.topi.testing.nll_loss(
predictions_np, targets_np, weights_np, reduction, ignore_index
)
out_relay = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
predictions_np, targets_np, weights_np
)
tvm.testing.assert_allclose(out_relay.numpy(), out_np, rtol=1e-6, atol=1e-6)
_verify((10, 5))
_verify((10, 5, 2, 2))
_verify((10, 5), reduction="sum")
_verify((10, 5), reduction="none")
_verify((10, 5), ignore_index=3)
_verify((10, 5), dtype="float64")
if __name__ == "__main__":
tvm.testing.main()