file_path (string, 7 to 180 chars) | content (string, 0 to 811k chars) | repo (11 classes) |
---|---|---|
tests/python/contrib/test_ethosu/test_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tflite.Model
import tvm
import tensorflow as tf
from tvm import relay
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.testing.aot import generate_ref_data
from . import infra
ACCEL_TYPES = ["ethos-u55-256", "ethos-u55-128", "ethos-u55-64", "ethos-u55-32", "ethos-u65-256"]
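# Several tests below only enable the cascader for U55 targets; this helper encodes that check.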
def is_u55_accel_type(accel_type):
return "u55" in accel_type
@pytest.mark.parametrize("accel_type", ACCEL_TYPES + ["ethos-u65-512"])
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 2), (1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_ethosu_conv2d_single(
ifm_shape,
kernel_shape,
strides,
dilation,
padding,
accel_type,
activation,
):
np.random.seed(0)
@tf.function
def conv2d(x):
# Use the tf.nn API to create the model
tf_strides = [1, strides[0], strides[1], 1]
op = tf.nn.conv2d(
x,
filters=tf.constant(
np.random.uniform(size=[kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]),
dtype=tf.float32,
),
strides=tf_strides,
padding=padding,
dilations=dilation,
)
if activation == "RELU":
op = tf.nn.relu(op)
return op
infra.compare_tvm_with_tflite(conv2d, [ifm_shape], accel_type)
def test_tflite_conv2d_with_separate_pad():
np.random.seed(0)
ifm_shape = (1, 55, 34, 3)
kernel_shape = (3, 2)
strides = (1, 1)
dilation = (2, 1)
padding = (0, 0, 1, 1)
@tf.function
def conv2d(x):
tf_strides = [1, strides[0], strides[1], 1]
op = tf.pad(
x,
[[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
"CONSTANT",
)
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
return tf.nn.conv2d(
op,
weight,
strides=tf_strides,
padding="VALID",
dilations=dilation,
)
infra.compare_tvm_with_tflite(conv2d, [ifm_shape], "ethos-u55-256")
@pytest.mark.parametrize("ifm_shape", [(1, 214, 227, 2), (1, 27, 42, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("accel_type", ACCEL_TYPES + ["ethos-u65-512"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_ethosu_conv2d_double(
ifm_shape,
kernel_shape,
strides,
dilation,
padding,
accel_type,
activation,
):
np.random.seed(0)
@tf.function
def conv2d_double(x):
# Use the tf.nn API to create a model with two convolutions
op = tf.nn.conv2d(
x,
filters=tf.constant(
np.random.uniform(size=[kernel_shape[0], kernel_shape[1], ifm_shape[3], 5]),
dtype=tf.float32,
),
strides=strides,
padding=padding,
dilations=dilation,
)
# Second convolution
op2 = tf.nn.conv2d(
op,
filters=tf.constant(
np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 5, 3)),
dtype=tf.float32,
),
strides=strides,
padding=padding,
dilations=dilation,
)
if activation == "RELU":
op2 = tf.nn.relu(op2)
return op2
infra.compare_tvm_with_tflite(conv2d_double, [ifm_shape], accel_type)
@pytest.mark.parametrize("weight_min, weight_max", [(0.0, 1e-11), (-1e10, 1e10)])
def test_out_of_range_scaling(weight_min, weight_max):
np.random.seed(0)
ifm_shape = (1, 6, 6, 2)
strides = (1, 1)
kernel_shape = (1, 1)
dilation = (1, 1)
padding = "SAME"
activation = "RELU"
accel_type = "ethos-u55-128"
@tf.function
def conv_invalid_scale(x):
# Use the tf.nn API to create the model
tf_strides = [1, strides[0], strides[1], 1]
weights = np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 2, 2])
# Overwrite to force quantization that produces out of range shift values
weights[0][0][0][0] = weight_min
weights[0][0][1][0] = weight_max
op = tf.nn.conv2d(
x,
filters=tf.constant(
weights,
dtype=tf.float32,
),
strides=tf_strides,
padding=padding,
dilations=dilation,
)
if activation == "RELU":
op = tf.nn.relu(op)
return op
infra.compare_tvm_with_tflite(conv_invalid_scale, [ifm_shape], accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize(
"kernel_shape, activation_function",
[((3, 3), "RELU"), ((1, 2), "NONE")],
)
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 2)), ((3, 2), (1, 1))])
def test_tflite_depthwise_conv2d(
accel_type,
ifm_shape,
kernel_shape,
padding,
strides,
dilation,
activation_function,
):
np.random.seed(0)
@tf.function
def depthwise_conv2d(x):
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
# The strides argument to the TensorFlow API needs to be of shape 1x4
tf_strides = [1, strides[0], strides[1], 1]
op = tf.nn.depthwise_conv2d(
x, weight, strides=tf_strides, padding=padding, dilations=dilation
)
if activation_function == "RELU":
op = tf.nn.relu(op)
return op
infra.compare_tvm_with_tflite(depthwise_conv2d, [ifm_shape], accel_type)
def test_tflite_depthwise_conv2d_with_separate_pad():
np.random.seed(0)
ifm_shape = (1, 23, 32, 7)
kernel_shape = (1, 2)
strides = (3, 2)
dilation = (1, 1)
padding = (0, 0, 1, 1)
@tf.function
def depthwise_conv2d(x):
tf_strides = [1, strides[0], strides[1], 1]
op = tf.pad(
x,
[[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
"CONSTANT",
)
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
return tf.nn.depthwise_conv2d(
op,
weight,
strides=tf_strides,
padding="VALID",
dilations=dilation,
)
infra.compare_tvm_with_tflite(depthwise_conv2d, [ifm_shape], "ethos-u55-256")
@pytest.mark.parametrize(
"accel_type",
ACCEL_TYPES,
)
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(
"pool_shape, strides, activation_function, padding",
[([1, 2], [1, 2], "NONE", "SAME"), ([2, 3], [2, 3], "RELU", "VALID")],
)
def test_ethosu_pooling(
accel_type,
ifm_shape,
pooling_type,
strides,
pool_shape,
activation_function,
padding,
):
np.random.seed(0)
@tf.function
def pooling(x):
if pooling_type == "MAX":
op = tf.nn.max_pool(x, pool_shape, strides, padding)
elif pooling_type == "AVG":
op = tf.nn.avg_pool(x, pool_shape, strides, padding)
if activation_function == "RELU":
op = tf.nn.relu(op)
return op
infra.compare_tvm_with_tflite(pooling, [ifm_shape], accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX"])
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape",
[
([1, 2, 3, 4], [1, 2, 3, 4]),
([1, 2, 3, 4], [1, 1, 1, 1]),
([1, 1, 1, 1], [1, 2, 3, 4]),
([1, 4, 4], [4, 1]),
],
)
@pytest.mark.parametrize("activation_function", ["NONE", "RELU"])
def test_ethosu_binary_elementwise(
accel_type,
operator_type,
ifm_shape,
ifm2_shape,
activation_function,
):
np.random.seed(0)
@tf.function
def binary_elementwise(lhs, rhs):
if operator_type == "ADD":
op = tf.math.add(lhs, rhs)
elif operator_type == "SUB":
op = tf.math.subtract(lhs, rhs)
elif operator_type == "MUL":
op = tf.math.multiply(lhs, rhs)
elif operator_type == "MIN":
op = tf.math.minimum(lhs, rhs)
elif operator_type == "MAX":
op = tf.math.maximum(lhs, rhs)
if activation_function == "RELU":
op = tf.nn.relu(op)
return op
infra.compare_tvm_with_tflite(
binary_elementwise,
shapes=[ifm_shape, ifm2_shape],
ranges=[(0, 1), (0, 2)],
accel_type=accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape",
[
([4], [4]),
([4], [1, 2, 3, 4]),
([1, 4, 4], [4, 1]),
],
)
def test_binary_add_with_non_4d_shapes(
request,
accel_type,
ifm_shape,
ifm2_shape,
):
np.random.seed(0)
@tf.function
def binary_elementwise(lhs, rhs):
return tf.math.add(lhs, rhs)
infra.compare_tvm_with_tflite(
binary_elementwise,
shapes=[ifm_shape, ifm2_shape],
ranges=[(0, 1), (0, 2)],
accel_type=accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/12634")
@pytest.mark.parametrize(
"accel_type",
ACCEL_TYPES,
)
@pytest.mark.parametrize(
"ifm_shape, axis, keep_dims, use_same_quantization",
[
# mean to depthwise + multiply
[(1, 8, 16, 16), (1, 2), True, False],
[(1, 3, 4), (0, 1), True, False],
[(1, 65, 2, 1), (1, 2), True, False], # special case when h > 64
# mean to average pool
[(1, 8, 16, 16), (2,), False, True],
[(3, 3, 4), (0,), True, True],
[(8, 5), (0,), False, True],
# mean to depthwise
[(1, 8, 16, 16), (2,), True, False],
[(1, 8, 16, 16), (2, 1), False, False],
[(8, 4), (0,), False, False],
],
)
def test_mean(accel_type, ifm_shape, axis, keep_dims, use_same_quantization):
np.random.seed(0)
dtype = "int8"
def create_mod_from_tflite():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
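# The representative dataset drives post-training calibration of the int8 scales and zero points.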
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_graph = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"ifm": ifm_shape},
dtype_dict={"ifm": dtype},
)
input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
return mod, input_data, output_data
def create_mod_from_relay():
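# Build an equivalent Relay mean whose input and output use the same quantization (scale 1.0, zero point 0).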
ifm = relay.var("input", shape=ifm_shape, dtype=dtype)
cast = relay.cast(ifm, dtype="int32")
mean = relay.mean(cast, axis=axis, keepdims=keep_dims)
requantize = relay.qnn.op.requantize(
mean,
input_scale=relay.const(1.0, dtype="float32"),
input_zero_point=relay.const(0, dtype="int32"),
output_scale=relay.const(1.0, dtype="float32"),
output_zero_point=relay.const(0, dtype="int32"),
)
func = relay.Function(relay.analysis.free_vars(requantize), requantize)
mod = tvm.IRModule.from_expr(func)
input_data = {"input": np.random.randint(low=-127, high=128, size=ifm_shape, dtype=dtype)}
output_data = generate_ref_data(mod, input_data)
return mod, input_data, output_data
mod, input_data, output_data = (
create_mod_from_relay() if use_same_quantization else create_mod_from_tflite()
)
mod = partition_for_ethosu(mod)
test_runner = infra.create_test_runner(accel_type)
compiled_models = infra.build_source(mod, input_data, output_data, test_runner)
# Assumes only two runtime.Modules are created, i.e. a single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
compilation_artifacts = get_artifacts(ethosu_module)
cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
infra.print_payload(cmms)
infra.verify_source(compiled_models, test_runner)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
@pytest.mark.parametrize("constant", [np.ones((1, 1, 1, 1)), np.array(1)])
def test_elementwise_add_from_constant_scalar(accel_type, dtype, constant):
np.random.seed(0)
ifm_shape = (1, 4, 4, 8)
def create_relay_graph():
inp = relay.var("input", shape=ifm_shape, dtype=dtype)
scalar = relay.const(constant, dtype=dtype)
add = relay.qnn.op.add(
inp,
scalar,
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
)
return tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(add), add))
cpu_mod = create_relay_graph()
ethosu_mod = partition_for_ethosu(cpu_mod)
# Generate reference data
input_data = {
"input": np.random.randint(
low=np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=ifm_shape, dtype=dtype
),
}
output_data = generate_ref_data(cpu_mod, input_data)
# Scalar constants are not supported by the cascader
infra.compare_ethosu_with_reference(
ethosu_mod, input_data, output_data, accel_type, enable_cascader=False
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape",
[
([1, 2, 3, 4], [1, 2, 3, 4]),
([1, 2, 3, 4], [1, 1, 3, 1]),
([1, 1, 3, 1], [1, 2, 3, 4]),
],
)
def test_ethosu_left_shift_binary_elemwise(
accel_type,
ifm_shape,
ifm2_shape,
):
np.random.seed(0)
dtype = "int32"
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
c1 = relay.left_shift(ifm, ifm2)
return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], c1))
cpu_mod = create_model()
# Generate reference data
in_min, in_max = util.get_range_for_dtype_str(dtype)
input_data = {
"ifm": np.random.randint(in_min, high=in_max, size=ifm_shape, dtype=dtype),
"ifm2": np.random.randint(0, high=32, size=ifm2_shape, dtype=dtype),
}
output_data = generate_ref_data(cpu_mod, input_data)
ethosu_mod = partition_for_ethosu(cpu_mod)
infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape, reversed_operands, ofm_dtype",
[
([1, 2, 3, 4], [1, 2, 3, 4], False, "int8"),
([1, 2, 3, 1], [1, 1, 3, 1], False, "int32"),
([1, 1, 3, 1], [1, 2, 3, 1], True, "int32"),
],
)
def test_ethosu_right_shift_binary_elemwise(
ifm_shape, ifm2_shape, reversed_operands, accel_type, ofm_dtype
):
np.random.seed(0)
dtype = "int32"
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
shr_op = infra.make_ethosu_binary_elementwise(
ifm, ifm2, ifm_shape[3], ifm2_shape[3], "SHR", ofm_dtype, reversed_operands
)
return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], shr_op))
def generate_output_data(input_data):
lhs = input_data["ifm"]
rhs = input_data["ifm2"]
if reversed_operands:
lhs = np.broadcast_to(lhs, ifm2_shape)
lhs, rhs = rhs, lhs
else:
rhs = np.broadcast_to(rhs, ifm_shape)
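# The reference for SHR is a rounding right shift: adding 2**(rhs - 1) before shifting rounds to nearest.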
def rounding_right_shift(lhs, rhs):
r = 1 << (rhs - 1)
return (lhs + r) >> rhs
return [
np.array([rounding_right_shift(x[0], x[1]) for x in zip(lhs.flat, rhs.flat)]).astype(
ofm_dtype
)
]
cpu_mod = create_model()
# Generate reference data
in_min, in_max = 18, 19
lhs = np.random.randint(in_min, high=in_max, size=ifm_shape, dtype=dtype)
rhs = np.random.randint(1, high=2, size=ifm2_shape, dtype=dtype)
input_data = {
"ifm": lhs,
"ifm2": rhs,
}
output_data = {"output": generate_output_data(input_data)[0]}
ethosu_mod = infra.create_ethosu_partition(cpu_mod)
infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(3, 2), (1, 15, 11, 7), (3, 1, 12), (400,)])
@pytest.mark.parametrize("ifm_scale, ifm_zp, ofm_scale, ofm_zp", [(1, 0, 1, 0), (0.015, 3, 0.2, 5)])
def test_ethosu_identity_codegen(
request, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp, accel_type
):
np.random.seed(0)
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
identity = infra.make_ethosu_identity(
ifm,
ifm_scale=ifm_scale,
ifm_zero_point=ifm_zp,
ofm_scale=ofm_scale,
ofm_zero_point=ofm_zp,
)
return tvm.IRModule.from_expr(relay.Function([ifm], identity))
def generate_output_data(input_data):
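# Reference: dequantize with the input scale/zero point, requantize with the output parameters, then clip to int8.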
requant_data = (ifm_scale * (input_data["ifm"] - ifm_zp)) / ofm_scale + ofm_zp
return [np.round(np.clip(requant_data, -128, 127)).astype("int8")]
cpu_mod = create_model()
input_data = {"ifm": np.random.randint(-120, high=120, size=ifm_shape, dtype="int8")}
output_data = {"output": generate_output_data(input_data)[0]}
ethosu_mod = infra.create_ethosu_partition(cpu_mod)
infra.compare_ethosu_with_reference(
ethosu_mod,
input_data,
output_data,
accel_type,
output_tolerance=1,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, new_shape",
[
((1, 4, 1, 2), (1, 1, 1, 8)),
((12, 20), (1, 6, 4, 10)),
((12, 20), (6, 4, 10)),
((20,), (4, 5)),
((12, 2, 10), (0, -3)),
((11, 3, 25), (-1,)),
((8, 7, 3), (-4, 1, 8, -2)),
],
)
def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
np.random.seed(0)
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
reshape = relay.op.reshape(ifm, newshape=new_shape)
return tvm.IRModule.from_expr(relay.Function([ifm], reshape))
cpu_mod = create_model()
input_data = {"ifm": np.random.randint(-128, high=127, size=ifm_shape, dtype="int8")}
output_data = generate_ref_data(cpu_mod, input_data)
ethosu_mod = infra.create_ethosu_partition(cpu_mod)
infra.compare_ethosu_with_reference(
ethosu_mod,
input_data,
output_data,
accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, begin, size",
[
([1, 10, 50, 4], [0, 5, 11, 2], [1, 5, 11, 1]),
([15, 17, 3], [3, 0, 1], [8, 17, 2]),
([7, 6043], [0, 704], [1, 2860]),
([5000], [123], [2151]),
],
)
def test_tflite_slice(request, accel_type, ifm_shape, begin, size):
np.random.seed(0)
@tf.function
def slice_func(x):
return tf.slice(x, begin, size)
infra.compare_tvm_with_tflite(
slice_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, begin, end",
[([1, 1, 5, 8], [0, 0, 0, 0], [1, 1, 2, 3]), ([1, 3, 3], [0, 1, 2], [1, 2, 3])],
)
def test_tflite_strided_slice(accel_type, ifm_shape, begin, end):
np.random.seed(0)
@tf.function
def strided_slice_func(x):
return tf.strided_slice(x, begin, end)
infra.compare_tvm_with_tflite(
strided_slice_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("operator_type", ["ABS"])
@pytest.mark.parametrize(
"ifm_shape",
[[1, 5, 12, 4], [1, 1, 2], [4, 3, 2], [10, 20], [345]],
)
def test_ethosu_unary_elementwise(
request,
accel_type,
operator_type,
ifm_shape,
):
np.random.seed(0)
@tf.function
def abs_func(x):
if operator_type == "ABS":
op = tf.math.abs(x)
return op
infra.compare_tvm_with_tflite(
abs_func,
[ifm_shape],
accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
def test_ethosu_section_name():
np.random.seed(0)
@tf.function
def depthwise_conv2d(x):
weight_shape = [3, 3, 3, 1]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
tf_strides = [1, 1, 1, 1]
op = tf.nn.depthwise_conv2d(x, weight, strides=tf_strides, padding="SAME", dilations=(2, 2))
return op
mod, tflite_graph = infra.get_tflite_graph(depthwise_conv2d, [(1, 55, 55, 3)])
# Generate reference data
input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
test_runner = infra.create_test_runner()
compiled_models = infra.build_source(mod, input_data, output_data, test_runner)
# Assumes only two runtime.Modules are created, i.e. a single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
source = ethosu_module.get_source()
assert (
'__attribute__((section(".rodata.tvm"), aligned(16))) static int8_t tvmgen_default_ethos_u_main_0_cms_data_data'
in source
)
assert (
'__attribute__((section(".rodata.tvm"), aligned(16))) static int8_t tvmgen_default_ethos_u_main_0_weights'
in source
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_ethosu_clz(accel_type):
np.random.seed(0)
ifm_shape = (1, 42, 5, 4)
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype="int32")
clz = infra.make_ethosu_unary_elementwise(ifm, 4, "CLZ")
return tvm.IRModule.from_expr(relay.Function([ifm], clz))
def generate_output_data(input_data):
def clz_comp(n):
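# Negative values have the sign bit set, so their leading-zero count is 0;
# for non-negative values CLZ is 32 minus the length of the binary representation.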
n_bin = np.binary_repr(n)
if n_bin[0] == "-":
return 0
else:
return 32 - len(n_bin)
return [
np.array([clz_comp(i) for i in input_data["ifm"].ravel()])
.reshape(ifm_shape)
.astype("int32")
]
cpu_mod = create_model()
input_data = {"ifm": np.random.randint(-500000, high=500000, size=ifm_shape, dtype="int32")}
output_data = {"output": generate_output_data(input_data)[0]}
ethosu_mod = infra.create_ethosu_partition(cpu_mod)
infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_tflite_tanh(accel_type):
np.random.seed(0)
ifm_shape = [1, 115, 32, 7]
@tf.function
def tanh_func(x):
op = tf.nn.tanh(x)
return op
infra.compare_tvm_with_tflite(
tanh_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 5, 5, 3), (1, 12, 9, 1)])
def test_tflite_hard_swish(accel_type, ifm_shape):
np.random.seed(0)
@tf.function
def hard_swish_func(x):
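# hard-swish expressed as x * relu6(x + 3) / 6 (relu with max_value=6.0 is relu6)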
op = tf.keras.layers.Lambda(
lambda x: x * tf.keras.activations.relu(x + 3.0, max_value=6.0) / 6.0
)(x)
return op
infra.compare_tvm_with_tflite(hard_swish_func, [ifm_shape], accel_type, ranges=[(-1, 1)])
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"shapes, axis",
[
([(2, 3), (4, 3)], 0),
([(3, 2, 1), (3, 1, 1)], 1),
([(10,), (13,), (14,)], 0),
([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),
],
)
def test_tflite_concat(shapes, axis, accel_type):
np.random.seed(0)
@tf.function
def concat_func(*inputs):
op = tf.concat(list(inputs), axis)
return op
infra.compare_tvm_with_tflite(concat_func, shapes, accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_tflite_sigmoid(accel_type):
np.random.seed(0)
ifm_shape = [1, 135, 41, 6]
@tf.function
def sigmoid_function(x):
op = tf.nn.sigmoid(x)
return op
infra.compare_tvm_with_tflite(
sigmoid_function, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
# This codegen test checks both split and split_v
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape, num_or_size_splits, axis",
[
((1, 4, 6, 8), (1, 3, 4), 3),
((4, 6, 8), 2, 0),
((50,), 25, 0),
((5, 11), 1, 1),
((13,), (13,), 0),
((22, 7), (4, -1), 1),
],
)
def test_tflite_split(accel_type, ifm_shape, num_or_size_splits, axis):
np.random.seed(0)
@tf.function
def split_func(x):
op = tf.split(x, num_or_size_splits, axis=axis)
return op
infra.compare_tvm_with_tflite(split_func, [ifm_shape], accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp",
[
[(1, 8, 8, 3), 1.0, 0, 1.0, 0],
[(1, 20, 30, 3), 1.345, 34, 0.32, -23],
],
)
def test_ethosu_requantize(accel_type, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):
np.random.seed(0)
dtype = "int8"
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
requantize = relay.qnn.op.requantize(
ifm,
relay.const(ifm_scale, dtype="float32"),
relay.const(ifm_zp, dtype="int32"),
relay.const(ofm_scale, dtype="float32"),
relay.const(ofm_zp, dtype="int32"),
)
return tvm.IRModule.from_expr(relay.Function([ifm], requantize))
cpu_mod = create_model()
input_data = {"ifm": np.random.randint(-128, high=127, size=ifm_shape, dtype=dtype)}
output_data = generate_ref_data(cpu_mod, input_data)
ethosu_mod = partition_for_ethosu(cpu_mod)
infra.compare_ethosu_with_reference(
ethosu_mod,
input_data,
output_data,
accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape,axis", [((2,), 0), ((1, 3, 3), 2)])
def test_tflite_expand_dims(accel_type, ifm_shape, axis):
np.random.seed(0)
@tf.function
def expand_dims_func(x):
return tf.expand_dims(x, axis=axis)
infra.compare_tvm_with_tflite(
expand_dims_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,axis", [((1, 1, 2, 1), 0), ((1, 3, 3, 1), 3), ((1, 1, 2, 1), None)]
)
def test_tflite_squeeze(accel_type, ifm_shape, axis):
np.random.seed(0)
@tf.function
def squeeze_func(x):
return tf.squeeze(x, axis=axis)
infra.compare_tvm_with_tflite(
squeeze_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,size",
[[(1, 2, 2, 1), (4, 4)], [(1, 4, 7, 3), (8, 14)], [(1, 3, 5, 3), (3, 5)]],
)
def test_tflite_resize2d_nearest_neighbor(accel_type, ifm_shape, size):
np.random.seed(0)
align_corners = False
@tf.function
def resize_model(x):
return tf.compat.v1.image.resize_nearest_neighbor(
x, size, align_corners=align_corners, half_pixel_centers=False
)
infra.compare_tvm_with_tflite(
resize_model, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,size,align_corners",
[
[(1, 2, 2, 1), (4, 4), False],
[(1, 4, 7, 3), (8, 14), False],
[(1, 2, 2, 1), (3, 3), True],
[(1, 4, 7, 3), (7, 13), True],
[(1, 3, 5, 3), (3, 5), False],
],
)
def test_tflite_resize2d_bilinear(accel_type, ifm_shape, size, align_corners):
np.random.seed(0)
@tf.function
def resize_model(x):
return tf.compat.v1.image.resize_bilinear(
x, size, align_corners=align_corners, half_pixel_centers=False
)
infra.compare_tvm_with_tflite(
resize_model, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,ofm_shape,kernel_shape,padding",
[
[(1, 2, 2, 1), (1, 4, 4, 1), (3, 3), "SAME"],
[(1, 2, 2, 1), (1, 9, 9, 1), (7, 7), "VALID"],
[(1, 2, 4, 3), (1, 4, 8, 3), (5, 3), "SAME"],
[(1, 10, 5, 3), (1, 21, 13, 3), (3, 5), "VALID"],
],
)
@pytest.mark.parametrize("has_bias", [False, True])
def test_tflite_transpose_convolution(
accel_type, ifm_shape, ofm_shape, kernel_shape, padding, has_bias
):
np.random.seed(0)
dilations = (1, 1)
strides = (2, 2)
@tf.function
def conv2d_transpose(x):
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], ofm_shape[3]]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
bias_shape = ofm_shape[3]
bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
tf_strides = [1, strides[0], strides[1], 1]
op = tf.nn.conv2d_transpose(
x,
weight,
output_shape=ofm_shape,
strides=tf_strides,
padding=padding,
dilations=dilations,
)
if has_bias:
op = tf.nn.bias_add(op, bias)
return op
infra.compare_tvm_with_tflite(
conv2d_transpose,
[ifm_shape],
accel_type=accel_type,
enable_cascader=is_u55_accel_type(accel_type),
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shapes,axis",
[
([(1, 2, 2), (1, 2, 2), (1, 2, 2)], 2),
([(5, 4), (5, 4)], 1),
([(1,), (1,)], 0),
([(3, 1), (3, 1), (3, 1), (3, 1)], 0),
],
)
def test_tflite_pack(accel_type, ifm_shapes, axis):
np.random.seed(0)
@tf.function
def pack_func(*inputs):
return tf.stack(inputs, axis=axis)
infra.compare_tvm_with_tflite(pack_func, ifm_shapes, accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
"ifm_shape,axis",
[[(1, 2, 3, 4), 1], [(2, 3), 1], [(5, 6, 7), 2]],
)
def test_tflite_unpack(accel_type, ifm_shape, axis):
np.random.seed(0)
@tf.function
def unpack_func(x):
return tf.unstack(x, axis=axis)
infra.compare_tvm_with_tflite(unpack_func, [ifm_shape], accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 15, 15, 3), (1, 8, 9, 1)])
@pytest.mark.parametrize("alpha", [0.2, 0.634])
def test_tflite_leaky_relu(accel_type, ifm_shape, alpha):
np.random.seed(0)
@tf.function
def leaky_relu_func(x):
return tf.nn.leaky_relu(x, alpha=alpha)
infra.compare_tvm_with_tflite(
leaky_relu_func,
[ifm_shape],
accel_type,
enable_cascader=is_u55_accel_type(accel_type),
ranges=[(-1, 1)],
)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
@pytest.mark.parametrize("ofm_channels", [32, 64])
@pytest.mark.parametrize("use_bias", [True, False])
@pytest.mark.parametrize("activation_function", ["RELU", "NONE"])
def test_tflite_fully_connected(
accel_type,
ifm_shape,
ofm_channels,
use_bias,
activation_function,
):
np.random.seed(0)
@tf.function
def fully_connected(x):
bias_shape = ofm_channels
bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
w = tf.constant(
np.random.uniform(size=[ifm_shape[1], ofm_channels]),
dtype=tf.float32,
)
x = tf.matmul(x, w)
if use_bias:
x = tf.nn.bias_add(x, bias)
if activation_function == "RELU":
x = tf.nn.relu(x)
return x
infra.compare_tvm_with_tflite(
fully_connected, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main([__file__] + sys.argv[1:]))
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from . import infra
def _create_single_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv1), conv1)
return func
def _create_double_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = infra.make_ethosu_conv2d(conv1, 4, 7, (2, 2), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
return func
def _create_non_linear_conv2d():
shape = (1, 8, 8, 4)
ifm1 = relay.var("x", shape=shape, dtype="int8")
ifm2 = relay.var("y", shape=shape, dtype="int8")
conv1 = infra.make_ethosu_conv2d(ifm1, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = infra.make_ethosu_conv2d(ifm2, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
add = infra.make_ethosu_binary_elementwise(conv1, conv2, shape[3], shape[3], "ADD", "int8")
func = relay.Function(relay.analysis.free_vars(add), add)
return func
@pytest.mark.parametrize(
"relay_function, arg_count",
[(_create_single_conv2d, 2), (_create_double_conv2d, 2), (_create_non_linear_conv2d, 3)],
)
def test_lower_to_tir_arg_count(relay_function, arg_count):
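# The lowered PrimFunc is expected to take one parameter per network input plus one for the output buffer.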
mod = tvm.IRModule()
mod["main"] = relay_function()
mod = relay.transform.InferType()(mod)
tir_mod = _lower_to_tir(mod["main"])[0]
primfunc = tir_mod["main"]
assert len(primfunc.params) == arg_count
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_copy_compute_reordering.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm.relay.backend.contrib.ethosu.tir.passes import CopyComputeReordering
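# CopyComputeReordering(max_copy_movements) is expected to hoist ethosu_copy (weight/scale DMA) calls
# ahead of at most max_copy_movements compute calls; the reference modules below encode that ordering.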
# fmt: off
@tvm.script.ir_module
class AllOperatorsWithWeights:
@T.prim_func
def main() -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer2 = T.buffer_decl([128], "uint8")
buffer3 = T.buffer_decl([32], "uint8")
buffer4 = T.buffer_decl([112], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
buffer6 = T.buffer_decl([112], "uint8")
buffer7 = T.buffer_decl([32], "uint8")
buffer8 = T.buffer_decl([112], "uint8")
buffer9 = T.buffer_decl([32], "uint8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1 = T.decl_buffer([128], "uint8")
p2 = T.decl_buffer([112], "uint8")
p3 = T.decl_buffer([112], "uint8")
p4 = T.decl_buffer([32], "uint8")
p5 = T.decl_buffer([32], "uint8")
p6 = T.decl_buffer([32], "uint8")
p7 = T.decl_buffer([112], "uint8")
p8 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, 12, p5[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 112, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 112, 12, p6[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 112, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer9[0], 32, p8[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
def test_all_operators_with_weights_max_copy_movements_0():
test_mod = CopyComputeReordering(0)(AllOperatorsWithWeights)
reference_mod = AllOperatorsWithWeights
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_all_operators_with_weights_max_copy_movements_1():
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer2 = T.buffer_decl([128], "uint8")
buffer3 = T.buffer_decl([32], "uint8")
buffer4 = T.buffer_decl([112], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
buffer6 = T.buffer_decl([112], "uint8")
buffer7 = T.buffer_decl([32], "uint8")
buffer8 = T.buffer_decl([112], "uint8")
buffer9 = T.buffer_decl([32], "uint8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1 = T.decl_buffer([128], "uint8")
p2 = T.decl_buffer([112], "uint8")
p3 = T.decl_buffer([112], "uint8")
p4 = T.decl_buffer([32], "uint8")
p5 = T.decl_buffer([32], "uint8")
p6 = T.decl_buffer([32], "uint8")
p7 = T.decl_buffer([112], "uint8")
p8 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 112, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, 12, p5[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 112, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer9[0], 32, p8[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 112, 12, p6[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(1)(AllOperatorsWithWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_all_operators_with_weights_max_copy_movements_2():
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer2 = T.buffer_decl([128], "uint8")
buffer3 = T.buffer_decl([32], "uint8")
buffer4 = T.buffer_decl([112], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
buffer6 = T.buffer_decl([112], "uint8")
buffer7 = T.buffer_decl([32], "uint8")
buffer8 = T.buffer_decl([112], "uint8")
buffer9 = T.buffer_decl([32], "uint8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1 = T.decl_buffer([128], "uint8")
p2 = T.decl_buffer([112], "uint8")
p3 = T.decl_buffer([112], "uint8")
p4 = T.decl_buffer([32], "uint8")
p5 = T.decl_buffer([32], "uint8")
p6 = T.decl_buffer([32], "uint8")
p7 = T.decl_buffer([112], "uint8")
p8 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 112, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 112, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer9[0], 32, p8[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, 12, p5[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 112, 12, p6[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(2)(AllOperatorsWithWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
# fmt: off
@tvm.script.ir_module
class AllOperatorsWithoutWeights:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([36], "int8")
buffer2 = T.buffer_decl([9], "int8")
# body
p1 = T.decl_buffer([96], "int8")
T.evaluate(T.call_extern("ethosu_pooling", "int8", 3, 4, 3, 3, 0, 4, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 12, 3, 1, "int8", 3, 2, 3, 3, 0, 2, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 32, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_pooling", "int8", 3, 2, 3, 3, 0, 2, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 32, 16, 1, "int8", 3, 1, 3, 3, 0, 1, buffer2[0], 0, 0, 0, T.float32(1), 0, "NHWC", 3, 1, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
@pytest.mark.parametrize("max_copy_movements", [0, 1, 2])
def test_all_operators_without_weights(max_copy_movements):
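# With no ethosu_copy operations present, the pass should leave the module unchanged for any max_copy_movements.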
test_mod = CopyComputeReordering(max_copy_movements)(AllOperatorsWithoutWeights)
reference_mod = AllOperatorsWithoutWeights
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
# fmt: off
@tvm.script.ir_module
class OperatorsWithAndWithoutWeights:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([97156], "int8")
buffer2 = T.buffer_decl([80], "uint8")
buffer3 = T.buffer_decl([64], "uint8")
buffer4 = T.buffer_decl([96], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
# body
p1 = T.decl_buffer([390336], "int8")
p2 = T.decl_buffer([80], "uint8")
p3 = T.decl_buffer([64], "uint8")
p4 = T.decl_buffer([390336], "int8")
p5 = T.decl_buffer([96], "uint8")
p6 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 3, 214, 0, 114, buffer3[0], 0, 0, 0, T.float32(0.104816), -128, "NHWC", 342, 3, 1, 3, 1, 1, 1, 1, 2, p5[0], 96, 0, p6[0], 32, 0, 1, 0, 1, "CLIP", -128, 127, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
def test_operators_with_and_without_weights_max_copy_movements_0():
test_mod = CopyComputeReordering(0)(OperatorsWithAndWithoutWeights)
reference_mod = OperatorsWithAndWithoutWeights
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_operators_with_and_without_weights_max_copy_movements_1():
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([97156], "int8")
buffer2 = T.buffer_decl([80], "uint8")
buffer3 = T.buffer_decl([64], "uint8")
buffer4 = T.buffer_decl([96], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
# body
p1 = T.decl_buffer([390336], "int8")
p2 = T.decl_buffer([80], "uint8")
p3 = T.decl_buffer([64], "uint8")
p4 = T.decl_buffer([390336], "int8")
p5 = T.decl_buffer([96], "uint8")
p6 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 3, 214, 0, 114, buffer3[0], 0, 0, 0, T.float32(0.104816), -128, "NHWC", 342, 3, 1, 3, 1, 1, 1, 1, 2, p5[0], 96, 0, p6[0], 32, 0, 1, 0, 1, "CLIP", -128, 127, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(1)(OperatorsWithAndWithoutWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_operators_with_and_without_weights_max_copy_movements_2():
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([97156], "int8")
buffer2 = T.buffer_decl([80], "uint8")
buffer3 = T.buffer_decl([64], "uint8")
buffer4 = T.buffer_decl([96], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
# body
p1 = T.decl_buffer([390336], "int8")
p2 = T.decl_buffer([80], "uint8")
p3 = T.decl_buffer([64], "uint8")
p4 = T.decl_buffer([390336], "int8")
p5 = T.decl_buffer([96], "uint8")
p6 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 3, 214, 0, 114, buffer3[0], 0, 0, 0, T.float32(0.104816), -128, "NHWC", 342, 3, 1, 3, 1, 1, 1, 1, 2, p5[0], 96, 0, p6[0], 32, 0, 1, 0, 1, "CLIP", -128, 127, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(2)(OperatorsWithAndWithoutWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
# fmt: off
@tvm.script.ir_module
class CopyToBufferWithLocalScope:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([64], "uint8")
buffer2 = T.buffer_decl([48], "uint8")
buffer3 = T.buffer_decl([48], "uint8")
buffer4 = T.buffer_decl([256], "uint8")
buffer5 = T.buffer_decl([16], "uint8")
buffer6 = T.buffer_decl([48], "uint8")
buffer7 = T.buffer_decl([256], "uint8")
buffer8 = T.buffer_decl([64], "uint8")
# body
p1 = T.decl_buffer([48], "uint8")
p2 = T.decl_buffer([48], "uint8")
p3 = T.decl_buffer([256], "int8", scope="local")
p4 = T.decl_buffer([256], "int8")
p5 = T.decl_buffer([16], "uint8")
p6 = T.decl_buffer([48], "uint8")
p7 = T.decl_buffer([256], "int8", scope="local")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 48, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 48, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 256, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer1[0], 0, 0, 0, T.float32(0.00392081), -128, "NHWC", 16, 4, 1, "int8", 4, 4, 4, 4, 0, 4, p4[0], 0, 0, 0, T.float32(0.00839574), -128, "NHCWB16", 64, 16, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, 0, p2[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 16, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 48, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 256, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 4, 4, 4, 4, 0, 4, p4[0], 0, 0, 0, T.float32(0.0078125), 0, "NHCWB16", 64, 16, 1, "int8", 4, 4, 4, 4, 0, 4, buffer8[0], 0, 0, 0, T.float32(0.00372155), -128, "NHWC", 16, 4, 1, 1, 1, 1, 1, 1, 1, p5[0], 16, 0, p6[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
def test_copy_to_buffer_with_local_scope_max_copy_movements_0():
test_mod = CopyComputeReordering(0)(CopyToBufferWithLocalScope)
reference_mod = CopyToBufferWithLocalScope
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
@pytest.mark.parametrize("max_copy_movements", [1, 2])
def test_copy_to_buffer_with_local_scope_max_copy_movements_n(max_copy_movements):
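# Copies into buffers with "local" scope are expected to stay in place; only the global-scope copies are hoisted ahead of the compute calls.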
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([64], "uint8")
buffer2 = T.buffer_decl([48], "uint8")
buffer3 = T.buffer_decl([48], "uint8")
buffer4 = T.buffer_decl([256], "uint8")
buffer5 = T.buffer_decl([16], "uint8")
buffer6 = T.buffer_decl([48], "uint8")
buffer7 = T.buffer_decl([256], "uint8")
buffer8 = T.buffer_decl([64], "uint8")
# body
p1 = T.decl_buffer([48], "uint8")
p2 = T.decl_buffer([48], "uint8")
p3 = T.decl_buffer([256], "int8", scope="local")
p4 = T.decl_buffer([256], "int8")
p5 = T.decl_buffer([16], "uint8")
p6 = T.decl_buffer([48], "uint8")
p7 = T.decl_buffer([256], "int8", scope="local")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 48, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 48, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 256, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 16, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 48, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer1[0], 0, 0, 0, T.float32(0.00392081), -128, "NHWC", 16, 4, 1, "int8", 4, 4, 4, 4, 0, 4, p4[0], 0, 0, 0, T.float32(0.00839574), -128, "NHCWB16", 64, 16, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, 0, p2[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 256, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 4, 4, 4, 4, 0, 4, p4[0], 0, 0, 0, T.float32(0.0078125), 0, "NHCWB16", 64, 16, 1, "int8", 4, 4, 4, 4, 0, 4, buffer8[0], 0, 0, 0, T.float32(0.00372155), -128, "NHWC", 16, 4, 1, 1, 1, 1, 1, 1, 1, p5[0], 16, 0, p6[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(max_copy_movements)(CopyToBufferWithLocalScope)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_multiple_prim_funcs():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main():
T.evaluate(0)
@T.prim_func
def abc():
T.evaluate(0)
# fmt: on
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the CopyComputeReordering pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
CopyComputeReordering(1)(InputModule)
def test_no_main_prim_func():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def abs():
T.evaluate(0)
# fmt: on
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the CopyComputeReordering pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
CopyComputeReordering(1)(InputModule)
def test_default_max_copy_movements():
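# With no argument, the pass should default to max_copy_movements=1: the expected ordering matches the max_copy_movements=1 reference above.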
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([97156], "int8")
buffer2 = T.buffer_decl([80], "uint8")
buffer3 = T.buffer_decl([64], "uint8")
buffer4 = T.buffer_decl([96], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
# body
p1 = T.decl_buffer([390336], "int8")
p2 = T.decl_buffer([80], "uint8")
p3 = T.decl_buffer([64], "uint8")
p4 = T.decl_buffer([390336], "int8")
p5 = T.decl_buffer([96], "uint8")
p6 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 3, 214, 0, 114, buffer3[0], 0, 0, 0, T.float32(0.104816), -128, "NHWC", 342, 3, 1, 3, 1, 1, 1, 1, 2, p5[0], 96, 0, p6[0], 32, 0, 1, 0, 1, "CLIP", -128, 127, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering()(OperatorsWithAndWithoutWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_pass_context_option_max_copy_movements():
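    # The movement limit is supplied through the PassContext config key
    # "tir.contrib.ethos-u.copy_compute_reordering_max_copy_movements" instead of a constructor argument.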
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([97156], "int8")
buffer2 = T.buffer_decl([80], "uint8")
buffer3 = T.buffer_decl([64], "uint8")
buffer4 = T.buffer_decl([96], "uint8")
buffer5 = T.buffer_decl([32], "uint8")
# body
p1 = T.decl_buffer([390336], "int8")
p2 = T.decl_buffer([80], "uint8")
p3 = T.decl_buffer([64], "uint8")
p4 = T.decl_buffer([390336], "int8")
p5 = T.decl_buffer([96], "uint8")
p6 = T.decl_buffer([32], "uint8")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, p1[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 5, 214, 0, 114, p4[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 3, 214, 0, 114, buffer3[0], 0, 0, 0, T.float32(0.104816), -128, "NHWC", 342, 3, 1, 3, 1, 1, 1, 1, 2, p5[0], 96, 0, p6[0], 32, 0, 1, 0, 1, "CLIP", -128, 127, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
with tvm.transform.PassContext(
config={"tir.contrib.ethos-u.copy_compute_reordering_max_copy_movements": 2}
):
test_mod = CopyComputeReordering()(OperatorsWithAndWithoutWeights)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_reordering_based_on_cycles():
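    # With reorder_by_cycles=True the pass uses the pragma_compute_cycles_hint
    # annotations below to decide how copies are reordered.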
# fmt: off
@tvm.script.ir_module
class ModuleBefore:
@T.prim_func
def main(placeholder: T.Buffer[97156, "int8"], placeholder_encoded: T.Buffer[208, "uint8"], placeholder_encoded_1: T.Buffer[112, "uint8"], placeholder_encoded_2: T.Buffer[96, "uint8"], placeholder_encoded_3: T.Buffer[112, "uint8"], ethosu_write: T.Buffer[43672, "int8"]) -> None:
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main", "from_legacy_te_schedule": True})
ax0_ax1_fused_ax2_fused_ax3_fused = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_1 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_2 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_3 = T.var("int32")
nn = T.var("int32")
nn_1 = T.var("int32")
nn_2 = T.var("int32")
nn_3 = T.var("int32")
nn_4 = T.var("int32")
nn_5 = T.var("int32")
# body
placeholder_d_global = T.decl_buffer([208], "uint8")
placeholder_d_global_1 = T.decl_buffer([112], "uint8")
placeholder_d_global_2 = T.decl_buffer([96], "uint8")
placeholder_d_global_3 = T.decl_buffer([112], "uint8")
ethosu_write_1 = T.decl_buffer([195168], "int8")
ethosu_write_2 = T.decl_buffer([184800], "int8")
ethosu_write_3 = T.decl_buffer([174688], "int8")
ethosu_write_4 = T.decl_buffer([174688], "int8")
ethosu_write_5 = T.decl_buffer([174688], "int8")
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused, None, "DataPar", ""), "pragma_compute_cycles_hint", 1792):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded[0], 208, placeholder_d_global[0], dtype="handle"))
with T.attr(T.iter_var(nn, None, "DataPar", ""), "pragma_compute_cycles_hint", 250):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 227, 2, 214, 0, 227, placeholder[0], 0, 0, 0, T.float32(0.0039215679280459881), -128, "NHWC", 454, 2, 1, "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, 3, 3, 2, 2, 1, 1, placeholder_d_global[0], 160, T.int8(-1), T.int8(-1), 0, placeholder_d_global[160], 48, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 12, 10, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 112, placeholder_d_global_1[0], dtype="handle"))
with T.attr(T.iter_var(nn_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 467):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_1[0], 64, 0, placeholder_d_global_1[64], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_2[0], 96, placeholder_d_global_2[0], dtype="handle"))
with T.attr(T.iter_var(nn_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 441):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_3[0], 0, 0, 0, T.float32(0.0057637207210063934), -128, "NHCWB16", 1696, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_2[0], 48, 0, placeholder_d_global_2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(nn_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 439):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
with T.attr(T.iter_var(nn_4, None, "DataPar", ""), "pragma_compute_cycles_hint", 439):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_5[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_3[0], 112, placeholder_d_global_3[0], dtype="handle"))
T.attr(T.iter_var(nn_5, None, "DataPar", ""), "pragma_compute_cycles_hint", 22340)
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_5[0], 0, 0, 0, T.float32(0.0057637207210063934), -128, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write[0], 0, 0, 0, T.float32(0.0057619437575340271), -128, "NHWC", 424, 4, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_3[0], 64, 0, placeholder_d_global_3[64], 48, 1, 2, 1, 2, "NONE", 0, 0, "TFL", "NONE", 14, 18, 8, dtype="handle"))
@tvm.script.ir_module
class ModuleAfter:
@T.prim_func
def main(placeholder: T.Buffer[97156, "int8"], placeholder_encoded: T.Buffer[208, "uint8"], placeholder_encoded_1: T.Buffer[112, "uint8"], placeholder_encoded_2: T.Buffer[96, "uint8"], placeholder_encoded_3: T.Buffer[112, "uint8"], ethosu_write: T.Buffer[43672, "int8"]) -> None:
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main", "from_legacy_te_schedule": True})
ax0_ax1_fused_ax2_fused_ax3_fused = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_1 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_2 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_3 = T.var("int32")
nn = T.var("int32")
nn_1 = T.var("int32")
nn_2 = T.var("int32")
nn_3 = T.var("int32")
nn_4 = T.var("int32")
nn_5 = T.var("int32")
# body
placeholder_d_global = T.decl_buffer([208], "uint8")
placeholder_d_global_1 = T.decl_buffer([112], "uint8")
placeholder_d_global_2 = T.decl_buffer([96], "uint8")
placeholder_d_global_3 = T.decl_buffer([112], "uint8")
ethosu_write_1 = T.decl_buffer([195168], "int8")
ethosu_write_2 = T.decl_buffer([184800], "int8")
ethosu_write_3 = T.decl_buffer([174688], "int8")
ethosu_write_4 = T.decl_buffer([174688], "int8")
ethosu_write_5 = T.decl_buffer([174688], "int8")
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused, None, "DataPar", ""), "pragma_compute_cycles_hint", 1792):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded[0], 208, placeholder_d_global[0], dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 112, placeholder_d_global_1[0], dtype="handle"))
with T.attr(T.iter_var(nn, None, "DataPar", ""), "pragma_compute_cycles_hint", 250):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 227, 2, 214, 0, 227, placeholder[0], 0, 0, 0, T.float32(0.0039215679280459881), -128, "NHWC", 454, 2, 1, "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, 3, 3, 2, 2, 1, 1, placeholder_d_global[0], 160, T.int8(-1), T.int8(-1), 0, placeholder_d_global[160], 48, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 12, 10, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_2[0], 96, placeholder_d_global_2[0], dtype="handle"))
with T.attr(T.iter_var(nn_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 467):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_1[0], 64, 0, placeholder_d_global_1[64], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 1024):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_3[0], 112, placeholder_d_global_3[0], dtype="handle"))
with T.attr(T.iter_var(nn_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 441):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_3[0], 0, 0, 0, T.float32(0.0057637207210063934), -128, "NHCWB16", 1696, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_2[0], 48, 0, placeholder_d_global_2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(nn_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 439):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
with T.attr(T.iter_var(nn_4, None, "DataPar", ""), "pragma_compute_cycles_hint", 439):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write_5[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1696, 16, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
T.attr(T.iter_var(nn_5, None, "DataPar", ""), "pragma_compute_cycles_hint", 22340)
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 103, 106, 4, 103, 0, 106, ethosu_write_5[0], 0, 0, 0, T.float32(0.0057637207210063934), -128, "NHCWB16", 1696, 16, 1, "int8", 103, 106, 4, 103, 0, 106, ethosu_write[0], 0, 0, 0, T.float32(0.0057619437575340271), -128, "NHWC", 424, 4, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global_3[0], 64, 0, placeholder_d_global_3[64], 48, 1, 2, 1, 2, "NONE", 0, 0, "TFL", "NONE", 14, 18, 8, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(reorder_by_cycles=True)(ModuleBefore)
reference_mod = ModuleAfter
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
def test_reordering_based_on_cycles_luts_present():
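    # As above, but the module also contains copies into local-scope (LUT) buffers,
    # which remain next to the operators that consume them in the expected output.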
# fmt: off
@tvm.script.ir_module
class ModuleBefore:
@T.prim_func
def main(placeholder: T.Buffer[97156, "int8"], placeholder_encoded: T.Buffer[208, "uint8"], placeholder_encoded_1: T.Buffer[112, "uint8"], placeholder_1: T.Buffer[256, "int8"], placeholder_encoded_2: T.Buffer[96, "uint8"], placeholder_2: T.Buffer[256, "int8"], placeholder_3: T.Buffer[256, "int8"], ethosu_write: T.Buffer[46200, "int8"]) -> None:
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main", "from_legacy_te_schedule": True})
ax0_ax1_fused_ax2_fused_ax3_fused = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_1 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_2 = T.var("int32")
nn = T.var("int32")
nn_1 = T.var("int32")
nn_2 = T.var("int32")
nn_3 = T.var("int32")
nn_4 = T.var("int32")
nn_5 = T.var("int32")
# body
placeholder_d_d_global = T.decl_buffer([208], "uint8")
placeholder_d_d_global_1 = T.decl_buffer([112], "uint8")
placeholder_d_global = T.decl_buffer([96], "uint8")
ethosu_write_1 = T.decl_buffer([195168], "int8")
placeholder_local = T.decl_buffer([256], "int8", scope="local")
ethosu_write_2 = T.decl_buffer([184800], "int8")
ethosu_write_3 = T.decl_buffer([184800], "int8")
ethosu_write_4 = T.decl_buffer([184800], "int8")
placeholder_d_local = T.decl_buffer([256], "int8", scope="local")
ethosu_write_5 = T.decl_buffer([184800], "int8")
placeholder_d_d_local = T.decl_buffer([256], "int8", scope="local")
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused, None, "DataPar", ""), "pragma_compute_cycles_hint", 1792):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded[0], 208, placeholder_d_d_global[0], dtype="handle"))
with T.attr(T.iter_var(nn, None, "DataPar", ""), "pragma_compute_cycles_hint", 73668):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 227, 2, 214, 0, 227, placeholder[0], 0, 0, 0, T.float32(0.0039215679280459881), -128, "NHWC", 454, 2, 1, "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, 3, 3, 2, 2, 1, 1, placeholder_d_d_global[0], 160, T.int8(-1), T.int8(-1), 0, placeholder_d_d_global[160], 48, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 12, 10, 16, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_1[0], 256, placeholder_local[0], dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 384):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 112, placeholder_d_d_global_1[0], dtype="handle"))
with T.attr(T.iter_var(nn_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 330):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_d_global_1[0], 64, 0, placeholder_d_d_global_1[64], 48, 0, 0, 0, 0, "SIGMOID", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(nn_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 411):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
with T.attr(T.iter_var(nn_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 458):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_2[0], 256, placeholder_d_local[0], dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 1500):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_2[0], 96, placeholder_d_global[0], dtype="handle"))
with T.attr(T.iter_var(nn_4, None, "DataPar", ""), "pragma_compute_cycles_hint", 10464):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_4[0], 0, 0, 0, T.float32(0.00390625), -128, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_5[0], 0, 0, 0, T.float32(0.00381289585493505), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global[0], 48, 0, placeholder_d_global[48], 48, 1, 2, 1, 2, "TANH", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_3[0], 256, placeholder_d_d_local[0], dtype="handle"))
T.attr(T.iter_var(nn_5, None, "DataPar", ""), "pragma_compute_cycles_hint", 5253)
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_5[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 440, 4, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 4, 64, 8, dtype="handle"))
@tvm.script.ir_module
class ModuleAfter:
@T.prim_func
def main(placeholder: T.Buffer[97156, "int8"], placeholder_encoded: T.Buffer[208, "uint8"], placeholder_encoded_1: T.Buffer[112, "uint8"], placeholder_1: T.Buffer[256, "int8"], placeholder_encoded_2: T.Buffer[96, "uint8"], placeholder_2: T.Buffer[256, "int8"], placeholder_3: T.Buffer[256, "int8"], ethosu_write: T.Buffer[46200, "int8"]) -> None:
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main", "from_legacy_te_schedule": True})
ax0_ax1_fused_ax2_fused_ax3_fused = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_1 = T.var("int32")
ax0_ax1_fused_ax2_fused_ax3_fused_2 = T.var("int32")
nn = T.var("int32")
nn_1 = T.var("int32")
nn_2 = T.var("int32")
nn_3 = T.var("int32")
nn_4 = T.var("int32")
nn_5 = T.var("int32")
# body
placeholder_d_d_global = T.decl_buffer([208], "uint8")
placeholder_d_d_global_1 = T.decl_buffer([112], "uint8")
placeholder_d_global = T.decl_buffer([96], "uint8")
ethosu_write_1 = T.decl_buffer([195168], "int8")
placeholder_local = T.decl_buffer([256], "int8", scope="local")
ethosu_write_2 = T.decl_buffer([184800], "int8")
ethosu_write_3 = T.decl_buffer([184800], "int8")
ethosu_write_4 = T.decl_buffer([184800], "int8")
placeholder_d_local = T.decl_buffer([256], "int8", scope="local")
ethosu_write_5 = T.decl_buffer([184800], "int8")
placeholder_d_d_local = T.decl_buffer([256], "int8", scope="local")
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused, None, "DataPar", ""), "pragma_compute_cycles_hint", 1792):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded[0], 208, placeholder_d_d_global[0], dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 384):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 112, placeholder_d_d_global_1[0], dtype="handle"))
with T.attr(T.iter_var(nn, None, "DataPar", ""), "pragma_compute_cycles_hint", 73668):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 227, 2, 214, 0, 227, placeholder[0], 0, 0, 0, T.float32(0.0039215679280459881), -128, "NHWC", 454, 2, 1, "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, 3, 3, 2, 2, 1, 1, placeholder_d_d_global[0], 160, T.int8(-1), T.int8(-1), 0, placeholder_d_d_global[160], 48, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 12, 10, 16, dtype="handle"))
with T.attr(T.iter_var(ax0_ax1_fused_ax2_fused_ax3_fused_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 1500):
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_2[0], 96, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_1[0], 256, placeholder_local[0], dtype="handle"))
with T.attr(T.iter_var(nn_1, None, "DataPar", ""), "pragma_compute_cycles_hint", 330):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 107, 114, 4, 107, 0, 114, ethosu_write_1[0], 0, 0, 0, T.float32(0.009109782986342907), -128, "NHCWB16", 1824, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(0.0066184266470372677), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_d_global_1[0], 64, 0, placeholder_d_d_global_1[64], 48, 0, 0, 0, 0, "SIGMOID", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
with T.attr(T.iter_var(nn_2, None, "DataPar", ""), "pragma_compute_cycles_hint", 411):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_2[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
with T.attr(T.iter_var(nn_3, None, "DataPar", ""), "pragma_compute_cycles_hint", 458):
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 2, 64, 16, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_2[0], 256, placeholder_d_local[0], dtype="handle"))
with T.attr(T.iter_var(nn_4, None, "DataPar", ""), "pragma_compute_cycles_hint", 10464):
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_4[0], 0, 0, 0, T.float32(0.00390625), -128, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write_5[0], 0, 0, 0, T.float32(0.00381289585493505), -128, "NHCWB16", 1760, 16, 1, 3, 2, 1, 1, 2, 2, placeholder_d_global[0], 48, 0, placeholder_d_global[48], 48, 1, 2, 1, 2, "TANH", 0, 0, "TFL", "NONE", 8, 16, 16, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_3[0], 256, placeholder_d_d_local[0], dtype="handle"))
T.attr(T.iter_var(nn_5, None, "DataPar", ""), "pragma_compute_cycles_hint", 5253)
T.evaluate(T.call_extern("ethosu_pooling", "int8", 105, 110, 4, 105, 0, 110, ethosu_write_5[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1760, 16, 1, "int8", 105, 110, 4, 105, 0, 110, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 440, 4, 1, "MAX", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 4, 64, 8, dtype="handle"))
# fmt: on
test_mod = CopyComputeReordering(reorder_by_cycles=True)(ModuleBefore)
reference_mod = ModuleAfter
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_create_tiles.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.script
from tvm.relay.backend.contrib.ethosu.tir.dma import Tiles, create_tiles
from tvm.script import tir as T
def check_tiles_equal(tiles, expected):
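    """Compare two Tiles field by field. Addresses are either plain integers,
    compared directly, or BufferLoads, compared by buffer and index."""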
assert tiles.height_0 == expected.height_0
assert tiles.height_1 == expected.height_1
assert tiles.width_0 == expected.width_0
if isinstance(tiles.address_0, int):
assert tiles.address_0 == expected.address_0
else:
assert tiles.address_0.buffer == expected.address_0.buffer
assert tiles.address_0.indices[0] == expected.address_0.indices[0]
if isinstance(tiles.address_1, int):
assert tiles.address_1 == expected.address_1
else:
assert tiles.address_1.buffer == expected.address_1.buffer
assert tiles.address_1.indices[0] == expected.address_1.indices[0]
if isinstance(tiles.address_2, int):
assert tiles.address_2 == expected.address_2
else:
assert tiles.address_2.buffer == expected.address_2.buffer
assert tiles.address_2.indices[0] == expected.address_2.indices[0]
def test_create_tiles_h():
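    # The source is read with a floormod over the height iterator (i1), so the
    # expected result is two tiles split along the height dimension.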
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder1: T.Buffer[(100,), "int8"], placeholder2: T.Buffer[(100,), "int8"]) -> None:
T.attr("i0", "pragma_layout", "NHCWB16")
for i0 in T.serial(0, 1):
for i1 in T.serial(0, 6):
for i2 in T.serial(0, 1):
for i3 in T.serial(0, 1):
for i4 in T.serial(0, 16):
placeholder1[((i1*16) + i4)] = placeholder2[((T.floormod((i1 + 4), 6)*16) + i4)]
__tvm_meta__ = None
# fmt: on
stmt = Module["main"].body
tiles = create_tiles(stmt)
buffer = stmt.body.body.body.body.body.body.value.buffer
expected = Tiles(
height_0=tvm.tir.expr.IntImm("int32", 2),
height_1=tvm.tir.expr.IntImm("int32", 0),
width_0=tvm.tir.expr.IntImm("int32", 1),
address_0=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 64)]),
address_1=tvm.tir.expr.IntImm("int32", 0),
address_2=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 0)]),
)
check_tiles_equal(tiles, expected)
def test_create_tiles_w():
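    # Same as above, but the floormod is over the width iterator (i3), so the
    # split is expected along the width dimension instead.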
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder1: T.Buffer[(100,), "int8"], placeholder2: T.Buffer[(100,), "int8"]) -> None:
T.attr("i0", "pragma_layout", "NHCWB16")
for i0 in T.serial(0, 1):
for i1 in T.serial(0, 1):
for i2 in T.serial(0, 1):
for i3 in T.serial(0, 6):
for i4 in T.serial(0, 16):
placeholder1[((i3*16) + i4)] = placeholder2[((T.floormod((i3 + 4), 6)*16) + i4)]
__tvm_meta__ = None
# fmt: on
stmt = Module["main"].body
tiles = create_tiles(stmt)
buffer = stmt.body.body.body.body.body.body.value.buffer
expected = Tiles(
height_0=tvm.tir.expr.IntImm("int32", 1),
height_1=tvm.tir.expr.IntImm("int32", 1),
width_0=tvm.tir.expr.IntImm("int32", 2),
address_0=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 64)]),
address_1=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 0)]),
address_2=tvm.tir.expr.IntImm("int32", 0),
)
check_tiles_equal(tiles, expected)
def test_create_tiles_wrong_var_stride():
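    # The wrapped iterator is multiplied by 8 rather than the NHCWB16 stride of 16,
    # so a single full-height tile is expected instead of a wrap-around split.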
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder1: T.Buffer[(100,), "int8"], placeholder2: T.Buffer[(100,), "int8"]) -> None:
T.attr("i0", "pragma_layout", "NHCWB16")
for i0 in T.serial(0, 1):
for i1 in T.serial(0, 6):
for i2 in T.serial(0, 1):
for i3 in T.serial(0, 1):
for i4 in T.serial(0, 16):
placeholder1[((i1*16) + i4)] = placeholder2[((T.floormod((i1 + 4), 6)*8) + i4)]
__tvm_meta__ = None
# fmt: on
stmt = Module["main"].body
tiles = create_tiles(stmt)
buffer = stmt.body.body.body.body.body.body.value.buffer
expected = Tiles(
height_0=tvm.tir.expr.IntImm("int32", 6),
height_1=tvm.tir.expr.IntImm("int32", 0),
width_0=tvm.tir.expr.IntImm("int32", 1),
address_0=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 32)]),
address_1=tvm.tir.expr.IntImm("int32", 0),
address_2=tvm.tir.expr.IntImm("int32", 0),
)
check_tiles_equal(tiles, expected)
def test_create_tiles_multiple_var_occurrences():
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder1: T.Buffer[(100,), "int8"], placeholder2: T.Buffer[(100,), "int8"]) -> None:
T.attr("i0", "pragma_layout", "NHWC")
for i0 in T.serial(0, 1):
for i1 in T.serial(0, 5):
for i2 in T.serial(0, 6):
for i3 in T.serial(0, 4):
placeholder1[(((i1*24) + (i2*4)) + i3)] = placeholder2[(((((T.floordiv((i1 - 1), 2)*48) + (T.floormod((i1 + 1), 2)*24)) + (i2*4)) + i3) + 96)]
__tvm_meta__ = None
# fmt: on
stmt = Module["main"].body
tiles = create_tiles(stmt)
buffer = stmt.body.body.body.body.body.value.buffer
expected = Tiles(
height_0=tvm.tir.expr.IntImm("int32", 5),
height_1=tvm.tir.expr.IntImm("int32", 0),
width_0=tvm.tir.expr.IntImm("int32", 6),
address_0=tvm.tir.BufferLoad(buffer, [tvm.tir.expr.IntImm("int32", 72)]),
address_1=tvm.tir.expr.IntImm("int32", 0),
address_2=tvm.tir.expr.IntImm("int32", 0),
)
check_tiles_equal(tiles, expected)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_encode_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.script import tir as T
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import OperatorCompute
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator
from .infra import make_ethosu_conv2d, make_ethosu_binary_elementwise
# fmt: off
@tvm.script.ir_module
class WeightStreamOnlyU55:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([160], "uint8")
buffer3 = T.buffer_decl([144], "uint8")
buffer5 = T.buffer_decl([144], "uint8")
buffer7 = T.buffer_decl([144], "uint8")
buffer8 = T.buffer_decl([32], "uint8")
# body
p1_data = T.allocate([160], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([160], "uint8", data=p1_data)
p2_data = T.allocate([144], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([144], "uint8", data=p2_data)
buffer9 = T.buffer_decl([144], "uint8", data=p1.data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 160, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 144, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, T.int8(-1), T.int8(-1), 12, p1[128], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 144, buffer9[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, T.int8(-1), T.int8(-1), 12, p2[112], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 144, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, buffer9[0], 112, T.int8(-1), T.int8(-1), 12, buffer9[112], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, T.int8(-1), T.int8(-1), 12, p2[112], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class WeightStreamOnlyU65:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
buffer_encoded_1 = T.buffer_decl([192], dtype="uint8")
buffer_encoded_2_1 = T.buffer_decl([192], dtype="uint8")
buffer_encoded_4_1 = T.buffer_decl([208], dtype="uint8")
buffer_encoded_6_1 = T.buffer_decl([192], dtype="uint8")
# body
p1_data = T.allocate([208], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([208], "uint8", data=p1_data)
p2_data = T.allocate([192], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([192], "uint8", data=p2_data)
p3 = T.buffer_decl([192], dtype="uint8", data=p1.data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_1[0], 192, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_2_1[0], 192, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 80, p3[80], 80, 12, p3[160], 16, p3[176], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_4_1[0], 208, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 80, p2[80], 80, 12, p2[160], 16, p2[176], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_6_1[0], 192, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 96, p1[96], 80, 12, p1[176], 16, p1[192], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 80, p2[80], 80, 12, p2[160], 16, p2[176], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
"accelerator, reference_mod, reference_const_sizes",
[
(
"ethos-u55-128",
WeightStreamOnlyU55,
[160, 144, 144, 144],
),
(
"ethos-u65-512",
WeightStreamOnlyU65,
[192, 192, 208, 192],
),
],
)
def test_weight_stream_only(accelerator, reference_mod, reference_const_sizes):
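    # Split the output channels and stage the weights and biases through global
    # buffers so that each slice of weights is streamed in before it is used.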
def _planner(cached_func, const_dict, sch):
weights = cached_func.inputs[1]
bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = OperatorCompute.from_output(out)
co = conv_compute.split(sch, 3, 2)
cache_weights = sch.cache_read(weights, "global", [conv_compute.op])
cache_bias = sch.cache_read(bias, "global", [conv_compute.op])
sch[cache_weights].compute_at(sch[out], co)
sch[cache_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
config = {
"accelerator_config": accelerator,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
func = _get_func()
mod, consts = _lower_to_tir(func, cascader=_planner)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
test_const_size = [value.size for value in list(consts.values())]
    assert sorted(reference_const_sizes) == sorted(test_const_size)
# fmt: off
@tvm.script.ir_module
class RereadWeightsU55:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([384], "uint8")
# body
p1_data = T.allocate([384], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([384], "uint8", data=p1_data)
p2_data = T.allocate([384], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([384], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 384, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 384, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 304, T.int8(-1), T.int8(-1), 12, p1[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[64], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 304, T.int8(-1), T.int8(-1), 12, p2[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class RereadWeightsU65:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
placeholder_encoded_1 = T.buffer_decl([464], "uint8")
# body
p1_data = T.allocate([464], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([464], "uint8", data=p1_data)
p2_data = T.allocate([464], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([464], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 464, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 464, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[64], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
"accelerator, reference_mod, reference_const_sizes",
[
(
"ethos-u55-128",
RereadWeightsU55,
[384],
),
(
"ethos-u65-512",
RereadWeightsU65,
[464],
),
],
)
def test_re_read_weights(accelerator, reference_mod, reference_const_sizes):
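    # The output is split along a spatial axis, so the same weights and biases
    # are copied (re-read) once for each slice of the output.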
def _cascader(cached_func, const_dict, sch):
weights = cached_func.inputs[1]
bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = OperatorCompute.from_output(out)
co = conv_compute.split(sch, 2, 8)
cache_weights = sch.cache_read(weights, "global", [conv_compute.op])
cache_bias = sch.cache_read(bias, "global", [conv_compute.op])
sch[cache_weights].compute_at(sch[out], co)
sch[cache_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
config = {
"accelerator_config": accelerator,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
func = _get_func()
mod, consts = _lower_to_tir(func, cascader=_cascader)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
test_const_size = [value.size for value in list(consts.values())]
    assert sorted(reference_const_sizes) == sorted(test_const_size)
# fmt: off
@tvm.script.ir_module
class DirectReadOnlyU55:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([592], "uint8")
buffer_1 = T.buffer_decl([160], "uint8")
buffer_2 = T.buffer_decl([160], "uint8")
buffer_3 = T.buffer_decl([80], "uint8")
# body
ethosu_write_1_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
ethosu_write_1 = T.buffer_decl([4096], "int8", data=ethosu_write_1_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, buffer[0], 592, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 160, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class DirectReadOnlyU65:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
placeholder_encoded = T.buffer_decl([608], dtype="uint8")
placeholder_encoded_1 = T.buffer_decl([160], dtype="uint8")
placeholder_encoded_2 = T.buffer_decl([208], dtype="uint8")
placeholder_encoded_3 = T.buffer_decl([96], dtype="uint8")
# body
ethosu_write_2_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
ethosu_write_2 = T.buffer_decl([4096], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_encoded[0], 304, placeholder_encoded[304], 304, 12, placeholder_encoded_1[0], 80, placeholder_encoded_1[80], 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_encoded_2[0], 112, placeholder_encoded_2[112], 96, 12, placeholder_encoded_3[0], 48, placeholder_encoded_3[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
"accelerator, reference_mod, reference_const_sizes",
[
(
"ethos-u55-128",
DirectReadOnlyU55,
[592, 160, 160, 80],
),
(
"ethos-u65-512",
DirectReadOnlyU65,
[608, 160, 208, 96],
),
],
)
def test_direct_read_only(accelerator, reference_mod, reference_const_sizes):
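    # No cascader is used, so the expected TIR reads the encoded constants
    # directly rather than copying them into intermediate buffers first.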
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
conv2 = make_ethosu_conv2d(
conv1,
16,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
config = {
"accelerator_config": accelerator,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
func = _get_func()
mod, consts = _lower_to_tir(func)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
test_const_size = [value.size for value in list(consts.values())]
    assert sorted(reference_const_sizes) == sorted(test_const_size)
# fmt: off
@tvm.script.ir_module
class MixedReadU55:
@T.prim_func
def main(ifm: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([112], "uint8")
buffer3 = T.buffer_decl([112], "uint8")
buffer5 = T.buffer_decl([112], "uint8")
buffer7 = T.buffer_decl([112], "uint8")
buffer9 = T.buffer_decl([592], "uint8")
buffer10 = T.buffer_decl([160], "uint8")
# body
p1_data = T.allocate([112], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([112], "uint8", data=p1_data)
p3_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
p3 = T.buffer_decl([4096], "int8", data=p3_data)
p2_data = T.allocate([112], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([112], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 112, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, ifm[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, buffer9[0], 592, T.int8(-1), T.int8(-1), 12, buffer10[0], 160, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 80, T.int8(-1), T.int8(-1), 12, p1[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 112, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 80, T.int8(-1), T.int8(-1), 12, p2[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 80, T.int8(-1), T.int8(-1), 12, p1[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 80, T.int8(-1), T.int8(-1), 12, p2[80], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class MixedReadU65:
@T.prim_func
def main(ifm: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
buffer1 = T.buffer_decl([128], dtype="uint8")
buffer2 = T.buffer_decl([128], dtype="uint8")
buffer3 = T.buffer_decl([128], dtype="uint8")
buffer4 = T.buffer_decl([608], dtype="uint8")
buffer5 = T.buffer_decl([160], dtype="uint8")
buffer6 = T.buffer_decl([128], dtype="uint8")
p1_data = T.allocate([128], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([128], "uint8", data=p1_data)
p2_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([4096], "int8", data=p2_data)
p3_data = T.allocate([128], "uint8", "global", annotations={"disable_lower_builtin":True})
p3 = T.buffer_decl([128], "uint8", data=p3_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, ifm[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, p2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, buffer4[0], 304, buffer4[304], 304, 12, buffer5[0], 80, buffer5[80], 80, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, p1[48], 48, 12, p1[96], 16, p1[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 48, p3[48], 48, 12, p3[96], 16, p3[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 128, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, p1[48], 48, 12, p1[96], 16, p1[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, p2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 48, p3[48], 48, 12, p3[96], 16, p3[112], 16, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
"accelerator, reference_mod, reference_const_sizes",
[
(
"ethos-u55-128",
MixedReadU55,
[592, 160, 112, 112, 112, 112],
),
(
"ethos-u65-512",
MixedReadU65,
[608, 160, 128, 128, 128, 128],
),
],
)
def test_mixed_read(accelerator, reference_mod, reference_const_sizes):
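    # The second convolution streams its weights via copies while the first reads
    # its constants directly, giving a mix of copied and direct reads.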
def _planner(cached_func, const_dict, sch):
weight = cached_func.inputs[4]
scale_bias = cached_func.inputs[5]
out = cached_func.outputs[0]
conv_compute = OperatorCompute.from_output(out)
co = conv_compute.split(sch, 3, 2)
cache_weight = sch.cache_read(weight, "global", [conv_compute.op])
cache_scale_bias = sch.cache_read(scale_bias, "global", [conv_compute.op])
sch[cache_weight].compute_at(sch[out], co)
sch[cache_scale_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
conv2 = make_ethosu_conv2d(
conv1,
16,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
config = {
"accelerator_config": accelerator,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
func = _get_func()
mod, consts = _lower_to_tir(func, cascader=_planner)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
test_const_size = [value.size for value in list(consts.values())]
    assert sorted(reference_const_sizes) == sorted(test_const_size)
def test_constant_as_input():
"""Test to check that constants specified as inputs aren't
interpreted as an encoded constant."""
def get_graph():
dtype = "uint8"
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype=dtype)
conv1 = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add1 = make_ethosu_binary_elementwise(
conv1, scalar, ifm_channels=32, ifm2_channels=1, operator_type="ADD", ofm_dtype=dtype
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
tir_mod, params = _lower_to_tir(get_graph(), copy_constants())
# Check tile address for the scalar constant input hasn't been
# overwritten.
extern_calls = tir_mod["main"].body.body.body.body
binary_elementwise = extern_calls[-1].value
args = binary_elementwise.args
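# Indices 26-28 are assumed here to be the tile address fields for the
# scalar constant input; they should remain zero.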
reason = "Tile address overwritten"
assert args[26] == 0, reason
assert args[27] == 0, reason
assert args[28] == 0, reason
# More generally, check that it compiles successfully to make sure
# nothing else was overwritten.
# With Target Hooks the TIR module needs a target attached
# and to be lowered via the MakeUnpackedAPI pass.
tir_mod["main"] = tir_mod["main"].with_attr("target", tvm.target.Target("ethos-u"))
tir_mod = tvm.tir.transform.MakeUnpackedAPI()(tir_mod)
tir_to_cs_translator.translate(tir_mod, params)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_extract_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import extract_constants
import numpy as np
def test_extract_constants_single():
def _get_func():
var_input = relay.var("data", shape=(10, 10), dtype="uint8")
const_data = np.random.uniform(0, 255, (10, 10)).astype("uint8")
const_input = relay.const(const_data, dtype="uint8")
out = relay.add(var_input, const_input)
func = relay.Function(relay.analysis.free_vars(out), out)
func = run_opt_pass(func, relay.transform.InferType())
return func, const_input
def _expected():
var_input1 = relay.var("data", shape=(10, 10), dtype="uint8")
var_input2 = relay.var("p1", shape=(10, 10), dtype="uint8")
out = relay.add(var_input1, var_input2)
func = relay.Function(relay.analysis.free_vars(out), out)
func = run_opt_pass(func, relay.transform.InferType())
return func
func, const = _get_func()
new_func, const_dict = extract_constants(func)
assert tvm.ir.structural_equal(new_func, _expected())
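# The extracted constant becomes the function's second parameter, so it is
# keyed by argument index 1 in the constant dictionary.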
assert 1 in const_dict
assert (const_dict[1] == const.data.asnumpy()).all()
def test_extract_constants_multi():
def _get_func():
var_input1 = relay.var("data1", shape=(10, 10), dtype="uint8")
var_input2 = relay.var("data2", shape=(10, 10), dtype="uint8")
const_data_1 = np.random.uniform(0, 255, (10, 10)).astype("uint8")
const_data_2 = np.random.uniform(0, 255, (10, 10)).astype("uint8")
const_data_3 = np.random.uniform(0, 255, (10, 10)).astype("uint8")
const_data_4 = np.random.uniform(0, 255, (10, 10)).astype("uint8")
const_input_1 = relay.const(const_data_1, dtype="uint8")
const_input_2 = relay.const(const_data_2, dtype="uint8")
const_input_3 = relay.const(const_data_3, dtype="uint8")
const_input_4 = relay.const(const_data_4, dtype="uint8")
out = relay.add(var_input1, var_input2)
out = relay.add(out, const_input_1)
out = relay.add(out, const_input_2)
out = relay.add(out, const_input_3)
out = relay.add(out, const_input_4)
func = relay.Function(relay.analysis.free_vars(out), out)
func = run_opt_pass(func, relay.transform.InferType())
return func, [const_input_1, const_input_2, const_input_3, const_input_4]
def _expected():
var_input1 = relay.var("data1", shape=(10, 10), dtype="uint8")
var_input2 = relay.var("data2", shape=(10, 10), dtype="uint8")
var_input3 = relay.var("p1", shape=(10, 10), dtype="uint8")
var_input4 = relay.var("p2", shape=(10, 10), dtype="uint8")
var_input5 = relay.var("p3", shape=(10, 10), dtype="uint8")
var_input6 = relay.var("p4", shape=(10, 10), dtype="uint8")
out = relay.add(var_input1, var_input2)
out = relay.add(out, var_input3)
out = relay.add(out, var_input4)
out = relay.add(out, var_input5)
out = relay.add(out, var_input6)
func = relay.Function(relay.analysis.free_vars(out), out)
func = run_opt_pass(func, relay.transform.InferType())
return func
func, consts = _get_func()
new_func, const_dict = extract_constants(func)
assert tvm.ir.structural_equal(new_func, _expected())
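# The two variable inputs occupy argument indices 0 and 1, so the extracted
# constants are keyed from index 2 onwards.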
for i, const in enumerate(consts):
assert i + 2 in const_dict
assert (const_dict[i + 2] == consts[i].data.asnumpy()).all()
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_hoist_allocates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Testing the pass that hoists allocate nodes to the top of the function body.
"""
# pylint: disable=wrong-import-position
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm.relay.backend.contrib.ethosu.tir.passes import HoistAllocates
class ExtractAllocateInfo:
"""
Extracts information from allocate nodes, which we use as a sanity check on the
allocates after mutation.
"""
def __init__(self):
self.allocates_info = []
def __call__(self, mod):
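# Walk the main PrimFunc and record the extents, dtype and condition of
# every allocate node, in the order they are visited.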
tvm.tir.stmt_functor.ir_transform(mod["main"].body, self._pre_visit, None, ["tir.Allocate"])
return self.allocates_info
def _pre_visit(self, stmt):
self.allocates_info.append(
{"extents": stmt.extents, "dtype": stmt.dtype, "condition": stmt.condition}
)
def CheckAllocates(allocate_info): # pylint: disable=invalid-name
"""
Checks that all allocates are visited before any external call and that the information
for each allocate matches what is expected. Additionally, checks that the body of the TIR
after the final allocate statement is flat (i.e. it contains no sequence statements).
"""
allocate_idx = 0
expected_num_allocates = len(allocate_info)
num_seq_stmts = 0
def _pre_visit(stmt):
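# All allocates must be visited (and verified) before any call node, and the
# SeqStmt count is bounded by the number of allocates so that the body
# following the hoisted allocates remains flat.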
nonlocal allocate_idx, expected_num_allocates, num_seq_stmts
if isinstance(stmt, tvm.tir.Allocate):
expected = allocate_info[allocate_idx]
assert (
stmt.extents == expected["extents"]
), f"Allocate extents {stmt.extents} did not match expected {expected['extents']}"
assert (
stmt.dtype == expected["dtype"]
), f"Allocate dtype {stmt.dtype} did not match expected {expected['dtype']}"
assert (
stmt.condition == expected["condition"]
), f"Allocate condition {stmt.condition} did not match expected {expected['condition']}"
allocate_idx += 1
elif isinstance(stmt, tvm.tir.SeqStmt):
num_seq_stmts += 1
assert num_seq_stmts <= expected_num_allocates, (
"Encountered a SeqStmt after all allocates have been visited, was the "
"body flattened correctly?"
)
else:
assert (
allocate_idx == expected_num_allocates
), "A call node was visited before all allocates"
def _ftransform(f, mod, ctx):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(
f.body, _pre_visit, None, ["tir.Allocate", "tir.Call", "tir.SeqStmt"]
)
)
return tvm.tir.transform.prim_func_pass(_ftransform, opt_level=0)
def test_double_convolution():
"""
Test to check the HoistAllocates pass works on a function with two convolutions.
"""
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(3402,), "int8"], placeholder_encoded: T.Buffer[(128,), "uint8"], placeholder_encoded_1: T.Buffer[(32,), "uint8"], placeholder_encoded_2: T.Buffer[(128,), "uint8"], placeholder_encoded_3: T.Buffer[(32,), "uint8"], ethosu_write: T.Buffer[(3402,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(placeholder, [1, 27, 42, 3], dtype="int8", data=placeholder.data)
T.preflattened_buffer(placeholder_encoded, [3, 3, 2, 3], dtype="int8")
T.preflattened_buffer(placeholder_encoded_1, [3, 10], dtype="uint8")
T.preflattened_buffer(placeholder_encoded_2, [3, 3, 2, 3], dtype="int8")
T.preflattened_buffer(placeholder_encoded_3, [3, 10], dtype="uint8")
T.preflattened_buffer(ethosu_write, [1, 27, 42, 3], dtype="int8", data=ethosu_write.data)
# body
placeholder_global_data = T.allocate([128], "uint8", "global")
placeholder_global = T.buffer_decl([128], "uint8", data=placeholder_global_data)
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded[0], 128, placeholder_global[0], dtype="handle"))
placeholder_d_global_data = T.allocate([32], "uint8", "global")
placeholder_d_global = T.buffer_decl([32], "uint8", data=placeholder_d_global_data)
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_1[0], 32, placeholder_d_global[0], dtype="handle"))
ethosu_write_2_data = T.allocate([18144], "int8", "global")
ethosu_write_2 = T.buffer_decl([18144], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 27, 42, 3, 27, 0, 42, placeholder[0], 0, 0, 0, T.float32(0.0039215646684169769), -128, "NHWC", 126, 3, 1, "int8", 27, 42, 3, 27, 0, 42, ethosu_write_2[0], 0, 0, 0, T.float32(0.031308155506849289), -128, "NHCWB16", 672, 16, 1, 2, 3, 1, 1, 1, 2, placeholder_global[0], 128, 0, placeholder_d_global[0], 32, 2, 0, 2, 1, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
placeholder_d_global_1_data = T.allocate([128], "uint8", "global")
placeholder_d_global_1 = T.buffer_decl([128], "uint8", data=placeholder_d_global_1_data)
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_2[0], 128, placeholder_d_global_1[0], dtype="handle"))
placeholder_d_global_2_data = T.allocate([32], "uint8", "global")
placeholder_d_global_2 = T.buffer_decl([32], "uint8", data=placeholder_d_global_2_data)
T.evaluate(T.call_extern("ethosu_copy", placeholder_encoded_3[0], 32, placeholder_d_global_2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 27, 42, 3, 27, 0, 42, ethosu_write_2[0], 0, 0, 0, T.float32(0.031308155506849289), -128, "NHCWB16", 672, 16, 1, "int8", 27, 42, 3, 27, 0, 42, ethosu_write[0], 0, 0, 0, T.float32(0.23604340851306915), -128, "NHWC", 126, 3, 1, 2, 3, 1, 1, 1, 2, placeholder_d_global_1[0], 128, 0, placeholder_d_global_2[0], 32, 2, 0, 2, 1, "CLIP", -128, 127, "TFL", "NONE", dtype="handle"))
# fmt: on
mod = Module
allocate_info = ExtractAllocateInfo()(mod)
mod = HoistAllocates()(mod)
CheckAllocates(allocate_info)(mod)
def test_identities():
"""
Test to check the HoistAllocates pass works on a function with multiple identity
operations, with no copy operations.
"""
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(24,), "int8"], T_concat: T.Buffer[(24,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(placeholder, [1, 2, 3, 4], dtype="int8", data=placeholder.data)
T.preflattened_buffer(T_concat, [24], dtype="int8", data=T_concat.data)
# body
ethosu_write_data = T.allocate([12], "int8", "global")
ethosu_write = T.buffer_decl([12], "int8", data=ethosu_write_data)
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 3, 4, 1, 0, 3, placeholder[12], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 3, 4, 1, 0, 3, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
ethosu_write_1_data = T.allocate([12], "int8", "global")
ethosu_write_1 = T.buffer_decl([12], "int8", data=ethosu_write_1_data)
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 3, 4, 1, 0, 3, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 3, 4, 1, 0, 3, ethosu_write_1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_identity", "int8", 12, 1, 1, 12, 0, 1, ethosu_write_1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 1, 1, "int8", 12, 1, 1, 12, 0, 1, T_concat[12], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 1, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
ethosu_write_2_data = T.allocate([12], "int8", "global")
ethosu_write_2 = T.buffer_decl([12], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 3, 4, 1, 0, 3, placeholder[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 3, 4, 1, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
ethosu_write_3_data = T.allocate([12], "int8", "global")
ethosu_write_3 = T.buffer_decl([12], "int8", data=ethosu_write_3_data)
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 3, 4, 1, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 3, 4, 1, 0, 3, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
T.evaluate(T.call_extern("ethosu_identity", "int8", 12, 1, 1, 12, 0, 1, ethosu_write_3[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 1, 1, "int8", 12, 1, 1, 12, 0, 1, T_concat[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 1, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", dtype="handle"))
# fmt: on
mod = Module
allocate_info = ExtractAllocateInfo()(mod)
mod = HoistAllocates()(mod)
CheckAllocates(allocate_info)(mod)
def test_outer_seq_stmt():
"""
Test to check the HoistAllocates pass works on a function where the outer-most statement is
a sequence statement, rather than the usual allocate.
"""
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"], buffer_encoded: T.Buffer[(128,), "uint8"], buffer_encoded_1: T.Buffer[(32,), "uint8"], buffer_encoded_2: T.Buffer[(112,), "uint8"], buffer_encoded_3: T.Buffer[(32,), "uint8"], buffer_encoded_4: T.Buffer[(112,), "uint8"], buffer_encoded_5: T.Buffer[(32,), "uint8"], buffer_encoded_6: T.Buffer[(112,), "uint8"], buffer_encoded_7: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
with T.allocate([128], "uint8", "global") as placeholder_global_data:
placeholder_global = T.buffer_decl([128], "uint8", data=placeholder_global_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded[0], 128, placeholder_global[0], dtype="handle"))
placeholder_d_global_data = T.allocate([32], "uint8", "global")
placeholder_d_global = T.buffer_decl([32], "uint8", data=placeholder_d_global_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_1[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 128, 12, placeholder_d_global[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.allocate([112], "uint8", "global") as placeholder_global_1_data:
placeholder_global_1 = T.buffer_decl([112], "uint8", data=placeholder_global_1_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_2[0], 112, placeholder_global_1[0], dtype="handle"))
placeholder_d_global_1_data = T.allocate([32], "uint8", "global")
placeholder_d_global_1 = T.buffer_decl([32], "uint8", data=placeholder_d_global_1_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_3[0], 32, placeholder_d_global_1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_1[0], 112, 12, placeholder_d_global_1[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.allocate([112], "uint8", "global") as placeholder_global_2_data:
placeholder_global_2 = T.buffer_decl([112], "uint8", data=placeholder_global_2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_4[0], 112, placeholder_global_2[0], dtype="handle"))
placeholder_d_global_2_data = T.allocate([32], "uint8", "global")
placeholder_d_global_2 = T.buffer_decl([32], "uint8", data=placeholder_d_global_2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_5[0], 32, placeholder_d_global_2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_2[0], 112, 12, placeholder_d_global_2[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
placeholder_global_3_data = T.allocate([112], "uint8", "global")
placeholder_global_3 = T.buffer_decl([112], "uint8", data=placeholder_global_3_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_6[0], 112, placeholder_global_3[0], dtype="handle"))
placeholder_d_global_3_data = T.allocate([32], "uint8", "global")
placeholder_d_global_3 = T.buffer_decl([32], "uint8", data=placeholder_d_global_3_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_7[0], 32, placeholder_d_global_3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_3[0], 112, 12, placeholder_d_global_3[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
mod = Module
allocate_info = ExtractAllocateInfo()(mod)
mod = HoistAllocates()(mod)
CheckAllocates(allocate_info)(mod)
def test_allocate_without_seq_stmt():
"""
Tests the case when an allocate statement does not have a sequence statement as its body.
"""
# fmt: off
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"], buffer_encoded: T.Buffer[(128,), "uint8"], buffer_encoded_1: T.Buffer[(32,), "uint8"], buffer_encoded_2: T.Buffer[(112,), "uint8"], buffer_encoded_3: T.Buffer[(32,), "uint8"], buffer_encoded_4: T.Buffer[(112,), "uint8"], buffer_encoded_5: T.Buffer[(32,), "uint8"], buffer_encoded_6: T.Buffer[(112,), "uint8"], buffer_encoded_7: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
placeholder_global_data = T.allocate([128], "uint8", "global")
placeholder_global = T.buffer_decl([128], "uint8", data=placeholder_global_data)
placeholder_global_1_data = T.allocate([112], "uint8", "global")
placeholder_global_1 = T.buffer_decl([112], "uint8", data=placeholder_global_1_data)
placeholder_global_2_data = T.allocate([112], "uint8", "global")
placeholder_global_2 = T.buffer_decl([112], "uint8", data=placeholder_global_2_data)
placeholder_d_global_data = T.allocate([32], "uint8", "global")
placeholder_d_global = T.buffer_decl([32], "uint8", data=placeholder_d_global_data)
placeholder_d_global_1_data = T.allocate([32], "uint8", "global")
placeholder_d_global_1 = T.buffer_decl([32], "uint8", data=placeholder_d_global_1_data)
placeholder_d_global_2_data = T.allocate([32], "uint8", "global")
placeholder_d_global_2 = T.buffer_decl([32], "uint8", data=placeholder_d_global_2_data)
placeholder_global_3_data = T.allocate([112], "uint8", "global")
placeholder_global_3 = T.buffer_decl([112], "uint8", data=placeholder_global_3_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded[0], 128, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_1[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 128, 12, placeholder_d_global[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_2[0], 112, placeholder_global_1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_3[0], 32, placeholder_d_global_1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_1[0], 112, 12, placeholder_d_global_1[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_4[0], 112, placeholder_global_2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_5[0], 32, placeholder_d_global_2[0], dtype="handle"))
placeholder_d_global_3_data = T.allocate([32], "uint8", "global")
placeholder_d_global_3 = T.buffer_decl([32], "uint8", data=placeholder_d_global_3_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_2[0], 112, 12, placeholder_d_global_2[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_6[0], 112, placeholder_global_3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_encoded_7[0], 32, placeholder_d_global_3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global_3[0], 112, 12, placeholder_d_global_3[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
mod = Module
allocate_info = ExtractAllocateInfo()(mod)
mod = HoistAllocates()(mod)
CheckAllocates(allocate_info)(mod)
def test_multiple_prim_funcs():
@tvm.script.ir_module
class Module:
@T.prim_func
def main():
T.evaluate(0)
@T.prim_func
def abc():
T.evaluate(0)
mod = Module
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the HoistAllocates pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
mod = HoistAllocates()(mod)
def test_no_main_prim_func():
@tvm.script.ir_module
class Module:
@T.prim_func
def abs():
T.evaluate(0)
mod = Module
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the HoistAllocates pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
mod = HoistAllocates()(mod)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_identity_optimizer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the identity optimizer pass that removes redundant identity
operations from the microNPU codegen.
"""
import pytest
pytest.importorskip("ethosu.vela")
import tensorflow as tf
import tvm
from tvm import relay
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.relay.backend.contrib.ethosu.codegen import relay_to_tir
from tvm.relay.backend.contrib.ethosu.codegen import IdentityOptimizer
from . import infra
def _optimize(func, optimize=True):
"""Create IRModule and run identity optimizer pass."""
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
if optimize:
mod = IdentityOptimizer()(mod)
entry = mod["main"]
return entry if isinstance(func, relay.Function) else entry.body
def _assert_structural_equal(a, b):
"""Check structural equality of two Relay expressions."""
reason = (
"Actual and expected relay functions are not equal. "
"IdentityOptimizer is not correctly removing redundant "
"identity operations."
)
assert tvm.ir.structural_equal(a, b), reason
def test_simple_reshape_identity_removal():
"""Check identity is removed when there is a reshape in
the graph and a compute operation follows."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = infra.make_ethosu_conv2d(x, 4, 4, (1, 1), (0, 0), (1, 1), (1, 1))
x = relay.reshape(x, newshape=(1, 4, 4, 1))
if not get_expected:
x = infra.make_ethosu_identity(x)
x = infra.make_ethosu_unary_elementwise(x, 1, "ABS")
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_simple_strided_slice_identity_removal():
"""Check identity is removed when there is a strided slice
in the graph and a compute operation follows."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = infra.make_ethosu_pooling(x, "MAX", (1, 1), 4, (1, 1), (0, 0))
x = relay.strided_slice(x, begin=[0, 0, 0, 0], end=[1, 2, 2, 2])
if not get_expected:
x = infra.make_ethosu_identity(x)
x = infra.make_ethosu_pooling(x, "MAX", (1, 1), 2, (1, 1), (0, 0))
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_no_identity():
"""Check the graph is not affected when there is no identity in the graph."""
def get_graph():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = infra.make_ethosu_conv2d(x, 4, 4, (1, 1), (0, 0), (1, 1), (1, 1))
x = infra.make_ethosu_pooling(x, "MAX", (1, 1), 4, (1, 1), (0, 0))
x = infra.make_ethosu_depthwise_conv2d(x, 4, (1, 1), (0, 0), (1, 1), (1, 1))
x = infra.make_ethosu_unary_elementwise(x, 4, "ABS")
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_reshape_last():
"""Check that an identity as a leaf of the graph is not removed."""
def get_graph():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = infra.make_ethosu_conv2d(x, 4, 4, (1, 1), (0, 0), (1, 1), (1, 1))
x = relay.reshape(x, newshape=(1, 4, 4, 1))
x = infra.make_ethosu_identity(x)
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_requantize_identity_no_removal():
"""Check that an identity that actually performs a requantize isn't removed."""
def get_graph():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 1, 4, 4))
x = infra.make_ethosu_identity(
x, ifm_scale=0.5, ifm_zero_point=1, ofm_scale=0.3, ofm_zero_point=2
)
x = infra.make_ethosu_unary_elementwise(x, 4, "ABS")
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_activation_identity_no_removal():
"""Check thst an identity with an activation isn't removed."""
def get_graph():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 1, 4, 4))
x = infra.make_ethosu_identity(x, activation="LUT")
x = infra.make_ethosu_unary_elementwise(x, 4, "ABS")
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_multiple_output_identity():
"""Check that an identity is removed when it has multiple outputs."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
if not get_expected:
x = infra.make_ethosu_identity(x)
y = infra.make_ethosu_unary_elementwise(x, 4, "ABS")
z = infra.make_ethosu_unary_elementwise(x, 4, "ABS")
out = relay.concatenate((y, z), axis=0)
return relay.Function(relay.analysis.free_vars(x), out)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_many_output_identity():
"""Check an identity with many outputs. It cannot be removed due
to having a strided slice as output."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 1, 4, 4))
if not get_expected:
x = infra.make_ethosu_identity(x)
outputs = []
for _ in range(4):
outputs.append(infra.make_ethosu_unary_elementwise(x, 4, "ABS"))
ss = relay.strided_slice(x, begin=(0, 0, 0, 0), end=(1, 1, 4, 4))
identity_2 = infra.make_ethosu_identity(ss)
outputs.append(identity_2)
out = relay.concatenate(outputs, axis=0)
return relay.Function(relay.analysis.free_vars(out), out)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_identity_before_concatenate_no_removal():
"""Check that an identity isn't removed when the operator
following it is a concatenate operation."""
def get_graph():
x = relay.var("x", shape=(1, 1, 4, 4), dtype="int8")
y = relay.var("y", shape=(1, 2, 2, 4), dtype="int8")
z = relay.var("z", shape=(1, 2, 2, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 2, 2, 4))
y = relay.strided_slice(y, begin=(0, 0, 0, 0), end=(1, 2, 2, 4))
x = infra.make_ethosu_identity(x)
y = infra.make_ethosu_identity(y)
out = relay.concatenate([x, y, z], axis=0)
return relay.Function(relay.analysis.free_vars(out), out)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_identity_removal_with_multiple_transform_ops():
"""Check that only an identity directly parent to a compute
operation is removed."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.strided_slice(x, begin=[0, 0, 0, 0], end=[1, 2, 2, 2])
if not get_expected:
x = infra.make_ethosu_identity(x)
x = relay.reshape(x, newshape=(1, 1, 1, 8))
if not get_expected:
x = infra.make_ethosu_identity(x)
x = infra.make_ethosu_unary_elementwise(x, 8, "ABS")
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_identity_removal_on_binary_elementwise():
"""Check identities before binary elementwise are removed correctly."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
y = relay.var("y", shape=(1, 2, 2, 4), dtype="int8")
if not get_expected:
x = infra.make_ethosu_identity(x)
y = infra.make_ethosu_identity(y)
z = infra.make_ethosu_binary_elementwise(x, y, 4, 4, "ADD", "int8")
return relay.Function(relay.analysis.free_vars(z), z)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_identity_single_removal_on_binary_elementwise():
"""Check that identity on the second input of the binary elementwise
operation is removed while the other input has no identity."""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 4, 1, 4), dtype="int8")
y = relay.var("y", shape=(1, 2, 2, 4), dtype="int8")
y = relay.reshape(y, newshape=(1, 4, 1, 4))
if not get_expected:
y = infra.make_ethosu_identity(y)
z = infra.make_ethosu_binary_elementwise(x, y, 4, 4, "ADD", "int8")
return relay.Function(relay.analysis.free_vars(z), z)
actual = _optimize(get_graph())
expected = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(actual, expected)
def test_multiple_transform_ops_with_reduction_in_dimensionality():
"""Removal of an identity operation between two transform operations is usually okay.
However, if the dimensionality of the input is reduced by the second transformation
operation, it can lead to an output mismatch. Check that the pass doesn't remove
the identity in this case."""
def get_graph():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.strided_slice(x, begin=(0, 0, 0, 0), end=(1, 2, 2, 2))
x = infra.make_ethosu_identity(x)
x = relay.reshape(x, newshape=(1, 2, 4))
x = infra.make_ethosu_identity(x)
return relay.Function(relay.analysis.free_vars(x), x)
actual = _optimize(get_graph())
expected = _optimize(get_graph(), optimize=False)
_assert_structural_equal(actual, expected)
def test_identity_optimizer_runs_in_compilation_pipeline():
"""Checks that the identity optimization pass is run as part of the NPU compilation pipeline."""
def get_graph():
x = relay.var("x", shape=(1, 4, 4, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 1, 16, 4))
x = relay.nn.max_pool2d(x, layout="NHWC")
func = relay.Function(relay.analysis.free_vars(x), x)
return tvm.IRModule.from_expr(func)
mod = get_graph()
mod = partition_for_ethosu(mod)
mod = relay_to_tir(mod)
external_gv_name = mod["main"].body.op.name_hint
prim_func = mod[external_gv_name]
# Check for hints in the TIR prim func that the identity optimization pass
# has run. There should not be an identity in the prim func.
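# The first argument of the remaining extern call is the NPU op name.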
assert prim_func.body.value.args[0] == "ethosu_pooling"
def test_same_output():
"""Check that the output remains the same when the identity
optimizer pass removes some identities inserted during legalization."""
ifm_shapes = [(1, 1, 25, 8), (1, 5, 5, 8)]
@tf.function
def model(x, y):
x = tf.reshape(x, (1, 5, 5, 8))
z = tf.add(x, y)
z = tf.reshape(z, (1, 1, 25, 8))
return z
infra.compare_tvm_with_tflite(model, ifm_shapes, "ethos-u55-256", enable_cascader=False)
def test_multi_output_identity_has_same_output():
"""Check that the output remains the same with an identity with
multiple outputs."""
ifm_shape = (1, 1, 64, 16)
@tf.function
def model(x):
x = tf.reshape(x, (1, 8, 8, 16))
outputs = []
for _ in range(4):
outputs.append(tf.nn.max_pool2d(x, 1, 1, "VALID"))
outputs.append(tf.reshape(x, (1, 8, 8, 16)))
y = tf.concat(outputs, axis=0)
return y
infra.compare_tvm_with_tflite(model, [ifm_shape], "ethos-u55-256", enable_cascader=False)
def test_multiple_transform_ops_same_output():
"""Check case of identity removal between transform ops and
then without, making sure they have the same output."""
ifm_shape = (1, 2, 2, 4)
@tf.function
def model(x):
x = tf.reshape(x, (1, 1, 4, 4))
x = tf.slice(x, (0, 0, 0, 0), (1, 1, 4, 3))
x = tf.reshape(x, (12,))
return x
infra.compare_tvm_with_tflite(model, [ifm_shape], "ethos-u55-256", enable_cascader=False)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_layout_optimizer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test the layout optimization pass. This pass is used to
convert subgraphs to the preferred layout of NHCWB16.
"""
import pytest
pytest.importorskip("ethosu.vela")
import sys
import numpy as np
import tensorflow as tf
import tflite.Model
import tvm
from tvm import relay
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.relay.backend.contrib.ethosu.codegen import LayoutOptimizer
from tvm.relay.backend.contrib.ethosu.codegen import relay_to_tir
from . import infra
def _optimize(func, optimize=True):
"""Create IRModule and run layout optimizer pass."""
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
if optimize:
mod = LayoutOptimizer()(mod)
entry = mod["main"]
return entry if isinstance(func, relay.Function) else entry.body
def _assert_structural_equal(a, b):
"""Check structural equality of two Relay expressions."""
reason = (
"Actual and expected relay functions are not equal. "
"LayoutOptimizer is not correctly converting layouts."
)
assert tvm.ir.structural_equal(a, b), reason
def _compile_and_compare_model(tflite_graph, ifm_shape, dtype):
"""Compare running result of compilation against TFLite."""
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={
"ifm": ifm_shape,
},
dtype_dict={
"ifm": dtype,
},
)
mod = partition_for_ethosu(mod, params)
# Generate reference data
input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
test_runner = infra.create_test_runner("ethos-u55-256")
compiled_models = infra.build_source(
mod,
input_data,
output_data,
test_runner,
output_tolerance=0,
)
# Assumes only two runtime.Modules are created -- i.e. single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
compilation_artifacts = get_artifacts(ethosu_module)
cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
infra.print_payload(cmms)
infra.verify_source(compiled_models, test_runner)
def test_single_convolution():
"""Test a single convolution to make sure the layouts remain
unaltered.
"""
def get_graph():
x = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
x = infra.make_ethosu_conv2d(
ifm=x,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(x), x)
a = _optimize(get_graph())
b = _optimize(get_graph(), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_convolution():
"""Test layout optimization pass on linear chain of convolutions. I.e,
conv_1
|
conv_2
|
conv_3
"""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
for i in range(3):
ifm_layout = "NHCWB16" if get_expected and i != 0 else "NHWC"
ofm_layout = "NHCWB16" if get_expected and i != 2 else "NHWC"
x = infra.make_ethosu_conv2d(
ifm=x,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return relay.Function(relay.analysis.free_vars(x), x)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_depthwise_convolution():
"""Test layout optimization pass on multiple depthwise convolutions.
depthwise_conv_1
|
depthwise_conv_2
|
depthwise_conv_3
"""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
for i in range(3):
ifm_layout = "NHCWB16" if get_expected and i != 0 else "NHWC"
ofm_layout = "NHCWB16" if get_expected and i != 2 else "NHWC"
x = infra.make_ethosu_depthwise_conv2d(
ifm=x,
channels=4,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return relay.Function(relay.analysis.free_vars(x), x)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_ignore_transform_operations():
"""Test layout optimization pass ignores transform operations
such as reshape and strided slice.
conv_1
|
reshape
|
strided_slice
|
conv_2
"""
def get_graph():
in_1 = relay.var("x", shape=(1, 16, 16, 8), dtype="int8")
conv_1 = infra.make_ethosu_conv2d(
ifm=in_1,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
reshape = relay.reshape(conv_1, (1, 16, 16, 8))
strided_slice = relay.strided_slice(reshape, (0, 0, 0, 0), (1, 16, 16, 8))
conv_2 = infra.make_ethosu_conv2d(
ifm=strided_slice,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(conv_2), conv_2)
a = _optimize(get_graph())
b = _optimize(get_graph(), optimize=False)
_assert_structural_equal(a, b)
def test_ignore_concatenate():
"""Test layout optimization pass ignores the concatenate operation,
when layout transformation cannot occur.
in_1 in_2
\ /
\ conv_1
\ /
concat
|
conv_2
"""
def get_graph():
in_1 = relay.var("x", shape=(1, 16, 16, 8), dtype="int8")
in_2 = relay.var("y", shape=(1, 16, 16, 8), dtype="int8")
conv_1 = infra.make_ethosu_conv2d(
ifm=in_2,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
concat = relay.concatenate([in_1, conv_1], axis=1)
conv_2 = infra.make_ethosu_conv2d(
ifm=concat,
ifm_channels=8,
ofm_channels=4,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(conv_2), conv_2)
a = _optimize(get_graph())
b = _optimize(get_graph(), optimize=False)
_assert_structural_equal(a, b)
def test_ignore_concatnate_with_layout_transform():
"""Test the layout optimization pass ignores the concatenate
operation and performs a layout transformation.
in_1 in_2
\ /
pool_1 pool_2
\ /
concat
|
pool_3
"""
def get_graph():
in_1 = relay.var("x", shape=(1, 16, 16, 8), dtype="int8")
in_2 = relay.var("y", shape=(1, 16, 16, 8), dtype="int8")
pool_1 = infra.make_ethosu_pooling(
in_1,
"MAX",
(1, 1),
ofm_channels=8,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
pool_2 = infra.make_ethosu_pooling(
in_2,
"MAX",
(1, 1),
ofm_channels=8,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
concat = relay.concatenate([pool_1, pool_2], axis=1)
pool_3 = infra.make_ethosu_pooling(
concat,
"MAX",
(1, 1),
ofm_channels=8,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(pool_3), pool_3)
a = _optimize(get_graph())
b = _optimize(get_graph(), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_inputs():
"""Test the layout optimization pass works as expected when there
are multiple inputs in the graph.
pool_1 pool_2 pool_3
\ | /
\ | /
concat
|
conv
"""
def get_graph():
poolings = []
for _ in range(3):
inp = relay.var("x", shape=(1, 3, 3, 4), dtype="int8")
pool = infra.make_ethosu_pooling(
inp,
"MAX",
(1, 1),
ofm_channels=4,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
poolings.append(pool)
concat = relay.concatenate(poolings, axis=0)
conv = infra.make_ethosu_conv2d(
ifm=concat,
ifm_channels=8,
ofm_channels=4,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
ifm_layout="NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(conv), conv)
a = _optimize(get_graph())
b = _optimize(get_graph(), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_outputs():
"""Test the layout optimization pass works as expected when there
are multiple outputs in the graph.
pool_1
/ | \
pool_2 pool_3 pool_4
\ | /
concat
"""
def get_graph(get_expected=False):
in_1 = relay.var("x", shape=(1, 4, 4, 8), dtype="int8")
pool_1 = infra.make_ethosu_pooling(
in_1,
"MAX",
(1, 1),
ofm_channels=4,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHWC",
ofm_layout="NHCWB16" if get_expected else "NHWC",
)
poolings = []
for _ in range(3):
poolings.append(
infra.make_ethosu_pooling(
pool_1,
"MAX",
(1, 1),
ofm_channels=4,
strides=(1, 1),
padding=(0, 0),
ifm_layout="NHCWB16" if get_expected else "NHWC",
ofm_layout="NHWC",
)
)
concat = relay.concatenate(poolings, axis=0)
return relay.Function(relay.analysis.free_vars(concat), concat)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_binary_elementwise():
"""Test the layout optimization pass works as expected for
binary elementwise operations.
add_1 add_2
\ /
\ /
add_3
"""
def get_graph(get_expected=False):
in_1 = relay.var("x", shape=(1, 2, 2, 2), dtype="int8")
in_2 = relay.var("y", shape=(1, 2, 2, 2), dtype="int8")
in_3 = relay.var("z", shape=(1, 2, 2, 2), dtype="int8")
add_1 = infra.make_ethosu_binary_elementwise(
in_1,
in_2,
ifm_channels=2,
ifm2_channels=2,
operator_type="ADD",
ofm_dtype="int8",
ifm_layout="NHWC",
ifm2_layout="NHWC",
ofm_layout="NHCWB16" if get_expected else "NHWC",
)
add_2 = infra.make_ethosu_binary_elementwise(
in_2,
in_3,
ifm_channels=2,
ifm2_channels=2,
operator_type="ADD",
ofm_dtype="int8",
ifm_layout="NHWC",
ifm2_layout="NHWC",
ofm_layout="NHCWB16" if get_expected else "NHWC",
)
add_3 = infra.make_ethosu_binary_elementwise(
add_1,
add_2,
ifm_channels=2,
ifm2_channels=2,
operator_type="ADD",
ofm_dtype="int8",
ifm_layout="NHCWB16" if get_expected else "NHWC",
ifm2_layout="NHCWB16" if get_expected else "NHWC",
ofm_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(add_3), add_3)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_pooling():
"""Test the layout optimization pass works as expected for
multiple pooling operations.
pool_1
|
pool_2
|
pool_3
"""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
for i in range(3):
ifm_layout = "NHCWB16" if get_expected and i != 0 else "NHWC"
ofm_layout = "NHCWB16" if get_expected and i != 2 else "NHWC"
x = infra.make_ethosu_pooling(
x,
"MAX",
(1, 1),
ofm_channels=4,
strides=(1, 1),
padding=(0, 0),
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return relay.Function(relay.analysis.free_vars(x), x)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_multiple_unary_elementwise():
"""Test the layout optimization pass works as expected for multiple
unary elementwise operations.
abs_1
|
abs_2
|
abs_3
"""
def get_graph(get_expected=False):
x = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
for i in range(3):
ifm_layout = "NHCWB16" if get_expected and i != 0 else "NHWC"
ofm_layout = "NHCWB16" if get_expected and i != 2 else "NHWC"
x = infra.make_ethosu_unary_elementwise(
x,
ofm_channels=4,
operator_type="ABS",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return relay.Function(relay.analysis.free_vars(x), x)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_op_without_ethosu_consumer():
"""Test the layout optimization pass works as expected when
there is a case that the output layout should not be altered
since not all consumers are NPU operations (in this case conv).
depthwise
|
conv
/ \
| pool
\ /
(concat)
"""
def get_graph(get_expected=False):
exp_layout = "NHCWB16" if get_expected else "NHWC"
x = relay.var("x", shape=(1, 2, 2, 2), dtype="int8")
depthwise = infra.make_ethosu_depthwise_conv2d(
x, 2, (1, 1), (0, 0), (1, 1), (0, 0), ofm_layout=exp_layout
)
conv = infra.make_ethosu_conv2d(
depthwise,
2,
2,
(1, 1),
(0, 0),
(1, 1),
(0, 0),
ifm_layout=exp_layout,
)
pool = infra.make_ethosu_pooling(conv, "MAX", (1, 1), 2, (1, 1), (0, 0))
concat = relay.concatenate([conv, pool], axis=0)
return relay.Function(relay.analysis.free_vars(concat), concat)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_diamond_graph():
"""
Test the layout optimizer pass works as expected on a diamond graph
with a case where the operation dominating the output operation
cannot be altered, but operations within the diamond can.
pool_1
|
pool_2
/ \
| pool_3
| |
| pool_4
| |
| pool_5
\ /
(concat)
"""
def get_graph(get_expected=False):
exp_layout = "NHCWB16" if get_expected else "NHWC"
x = relay.var("x", shape=(1, 2, 2, 2), dtype="int8")
pool_1 = infra.make_ethosu_pooling(
x, "MAX", (1, 1), 2, (1, 1), (0, 0), ofm_layout=exp_layout
)
pool_2 = infra.make_ethosu_pooling(
pool_1, "MAX", (1, 1), 2, (1, 1), (0, 0), ifm_layout=exp_layout
)
pool_3 = infra.make_ethosu_pooling(
pool_2, "MAX", (1, 1), 2, (1, 1), (0, 0), ofm_layout=exp_layout
)
pool_4 = infra.make_ethosu_pooling(
pool_3, "MAX", (1, 1), 2, (1, 1), (0, 0), ifm_layout=exp_layout, ofm_layout=exp_layout
)
pool_5 = infra.make_ethosu_pooling(
pool_4, "MAX", (1, 1), 2, (1, 1), (0, 0), ifm_layout=exp_layout
)
concat = relay.concatenate([pool_2, pool_5], axis=0)
return relay.Function(relay.analysis.free_vars(concat), concat)
a = _optimize(get_graph())
b = _optimize(get_graph(get_expected=True), optimize=False)
_assert_structural_equal(a, b)
def test_same_output_multiple_convolutions():
"""Test running the layout optimization pass with multiple convolutions
gives same output as TFLite."""
np.random.seed(0)
dtype = "int8"
ifm_shape = (1, 8, 8, 32)
kernel_shape = (1, 1, 32, 32)
def create_model():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
for _ in range(3):
x = tf.nn.conv2d(
x,
filters=tf.constant(np.random.uniform(size=kernel_shape), dtype=tf.float32),
strides=(1, 1),
padding="SAME",
data_format="NHWC",
dilations=1,
)
return x
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
return converter.convert()
_compile_and_compare_model(create_model(), ifm_shape, dtype)
def test_same_output_multiple_pooling():
"""Test running the layout optimization pass with multiple pooling
operations gives same output as TFLite."""
np.random.seed(0)
dtype = "int8"
ifm_shape = (1, 4, 2, 7)
def create_model():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
for _ in range(2):
x = tf.nn.max_pool2d(x, (1, 1), (1, 1), "SAME", "NHWC")
return x
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
return converter.convert()
_compile_and_compare_model(create_model(), ifm_shape, dtype)
def test_layout_optimizer_runs_in_compilation_pipeline():
"""Checks that the layout optimization pass runs as part of the NPU compilation
pipeline."""
def get_graph():
x = relay.var("x", shape=(1, 4, 4, 4), dtype="int8")
for _ in range(2):
x = relay.nn.max_pool2d(x, layout="NHWC")
func = relay.Function(relay.analysis.free_vars(x), x)
return tvm.IRModule.from_expr(func)
mod = get_graph()
mod = partition_for_ethosu(mod)
mod = relay_to_tir(mod)
external_gv_name = mod["main"].body.op.name_hint
prim_func = mod[external_gv_name]
# Check for hints in the TIR prim func that the layout optimization pass has run
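# The checked argument positions correspond to the layout operands of the two
# pooling calls, which should now both be NHCWB16.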
ops = prim_func.body.body.seq
max_pool1, max_pool2 = ops
assert str(max_pool1.value.args[31]) == '"NHCWB16"'
assert str(max_pool2.value.args[14]) == '"NHCWB16"'
if __name__ == "__main__":
pytest.main([__file__] + sys.argv[1:])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import math
import numpy as np
import tensorflow as tf
import tflite.Model
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu import legalize, preprocess
from tvm.relay import dataflow_pattern
from tvm.relay.op.contrib import ethosu
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.frontend.tflite import get_pad_value
from . import infra
def partition_ethosu_by_table(mod, pattern_table):
"""In case only the legalization part is supported for an operator, we don't
want to add the operator's pattern to the pattern table so that the compiler
wouldn't attempt to offload an operator without full stack support."""
mod = relay.transform.InferType()(mod)
mod = relay.transform.MergeComposite(pattern_table)(mod)
mod = relay.transform.AnnotateTarget("ethos-u")(mod)
mod = relay.transform.MergeCompilerRegions()(mod)
mod = relay.transform.InferType()(mod)
mod = relay.transform.PartitionGraph()(mod)
mod = relay.transform.InferType()(mod)
mod = preprocess.preprocess_ext_io()(mod)
return mod
def test_split_indices_legalize():
def create_graph(axis):
x = relay.var("x", shape=(1, 50, 50, 3))
x_relu = relay.nn.relu(x)
split_output = relay.split(x_relu, [5, 20, 45], axis).tuple_value
return relay.Function([x], split_output)
def expected_mod_axis1():
expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 5, 50, 3), float32],\
Tensor[(1, 15, 50, 3), float32],\
Tensor[(1, 25, 50, 3), float32],\
Tensor[(1, 5, 50, 3), float32]) {
%0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 5, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 5, 50, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 5, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 15, 50, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 45, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 25, 50, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 45, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 5, 50, 3), float32] */;
(%1, %2, %3, %4)
}
"""
return tvm.parser.fromtext(expected_ir_string)
def expected_mod_axis2():
expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 5, 3), float32],\
Tensor[(1, 50, 15, 3), float32],\
Tensor[(1, 50, 25, 3), float32],\
Tensor[(1, 50, 5, 3), float32]) {
%0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 5, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 5, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 0, 5, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 15, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 45, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 25, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 0, 45, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 5, 3), float32] */;
(%1, %2, %3, %4)
}
"""
return tvm.parser.fromtext(expected_ir_string)
rewrite_split = [legalize.PartitionedSplitRewriter(), legalize.SplitRewriter()]
mod_axis1 = tvm.IRModule()
func = create_graph(1)
for r in rewrite_split:
func = dataflow_pattern.rewrite(r, func)
mod_axis1["tvmgen_default_ethos_u_main_0"] = func
expected_axis1 = expected_mod_axis1()
tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)
mod_axis2 = tvm.IRModule()
func = create_graph(2)
for r in rewrite_split:
func = dataflow_pattern.rewrite(r, func)
mod_axis2["tvmgen_default_ethos_u_main_0"] = func
expected_axis2 = expected_mod_axis2()
tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)
def test_split_sections_legalize():
def create_graph(axis, sections):
x = relay.var("x", shape=(1, 50, 50, 3))
x_abs = relay.abs(x)
split_output = relay.split(x_abs, sections, axis).tuple_value
outputs = list()
for section_idx in range(sections):
split_single_out = relay.TupleGetItem(split_output, section_idx)
tanh = relay.tanh(split_single_out)
outputs.append(tanh)
tuple_out = relay.Tuple(outputs)
return relay.Function([x], tuple_out)
def expected_mod_axis1():
expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32]) {
%0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 10, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 10, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 30, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 30, 0, 0], end=[1, 40, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%5 = strided_slice(%0, begin=[0, 40, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%6 = (%1, %2, %3, %4, %5);
%7 = %6.0;
%8 = tanh(%7) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%9 = %6.1;
%10 = tanh(%9) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%11 = %6.2;
%12 = tanh(%11) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%13 = %6.3;
%14 = tanh(%13) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%15 = %6.4;
%16 = tanh(%15) /* ty=Tensor[(1, 10, 50, 3), float32] */;
(%8, %10, %12, %14, %16)
}
"""
return tvm.parser.fromtext(expected_ir_string)
def expected_mod_axis2():
expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32]) {
%0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 10, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 0, 10, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 30, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 0, 30, 0], end=[1, 50, 40, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%5 = strided_slice(%0, begin=[0, 0, 40, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%6 = (%1, %2, %3, %4, %5);
%7 = %6.0;
%8 = tanh(%7) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%9 = %6.1;
%10 = tanh(%9) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%11 = %6.2;
%12 = tanh(%11) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%13 = %6.3;
%14 = tanh(%13) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%15 = %6.4;
%16 = tanh(%15) /* ty=Tensor[(1, 50, 10, 3), float32] */;
(%8, %10, %12, %14, %16)
}
"""
return tvm.parser.fromtext(expected_ir_string)
rewrite_split = [legalize.PartitionedSplitRewriter(), legalize.SplitRewriter()]
mod_axis1 = tvm.IRModule()
func = create_graph(1, 5)
for r in rewrite_split:
func = dataflow_pattern.rewrite(r, func)
mod_axis1["tvmgen_default_ethos_u_main_0"] = func
expected_axis1 = expected_mod_axis1()
tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)
mod_axis2 = tvm.IRModule()
func = create_graph(2, 5)
for r in rewrite_split:
func = dataflow_pattern.rewrite(r, func)
mod_axis2["tvmgen_default_ethos_u_main_0"] = func
expected_axis2 = expected_mod_axis2()
tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)
INVERSE_LAYOUT_TRANSFORM_OHWI_MAP = {
"HWIO": [1, 2, 3, 0],
"HWOI": [1, 2, 0, 3],
"OWHI": [0, 1, 2, 3],
}
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 3), (1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("activation", [None, "RELU"])
def test_tflite_conv2d_legalize(ifm_shape, kernel_shape, padding, strides, dilation, activation):
dtype = "int8"
def create_tflite_graph_single():
class Model(tf.Module):
@tf.function
def tf_function(self, input_shape):
op = tf.nn.conv2d(
input_shape,
filters=tf.constant(
np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 3, 3)),
dtype=tf.float32,
),
strides=strides,
padding=padding,
data_format="NHWC",
dilations=dilation,
)
if activation:
op = tf.nn.relu(op)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
ofm_channels = op.attrs.ofm_channels
# check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
assert ifm.shape[3] == ofm_channels
# check OFM
ofm = op.checked_type
expected_ofm_shape = infra.compute_ofm_shape(
ifm_shape, padding, kernel_shape, strides, dilation
)
assert list(ofm.shape) == list(expected_ofm_shape)
assert str(ofm.dtype) == dtype
assert ofm.shape[3] == ofm_channels
# check weights
weights_ohwi = op.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert weights_ohwi.shape[0] == ofm_channels
assert weights_ohwi.shape[1] == kernel_shape[0]
assert weights_ohwi.shape[2] == kernel_shape[1]
assert weights_ohwi.shape[3] == 3
# Check that scale_bias matches weight tensor
assert list(op.args[2].checked_type.shape)[0] == ofm_channels
expected_padding = infra.compute_padding_shape(
ifm_shape,
expected_ofm_shape,
padding,
(kernel_shape[0], kernel_shape[1]),
strides,
dilation,
)
assert list(op.attrs.padding) == list(expected_padding)
assert list(op.attrs.strides) == list(strides)
assert list(op.attrs.dilation) == list(dilation)
if activation == "RELU":
assert str(op.attrs.activation) == "CLIP"
conv2d_pattern_table = [
(
ethosu.QnnConv2DParams.composite_name,
ethosu.qnn_conv2d_pattern(),
lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph_single()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, conv_params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], conv_params)
mod = partition_ethosu_by_table(mod, conv2d_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.Conv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
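# The expected OFM shape and padding used by verify() above come from infra
# helpers. As a rough reference, a minimal sketch of the standard TFLite
# convolution output arithmetic is shown below; this is an assumption about
# what infra.compute_ofm_shape computes, not its actual implementation.
def _sketch_expected_ofm_hw(ifm_hw, kernel_hw, strides, dilation, padding):
    """Illustrative only: expected output H/W for SAME or VALID padding."""
    dilated = [(k - 1) * d + 1 for k, d in zip(kernel_hw, dilation)]
    if padding == "SAME":
        # SAME keeps ceil(input / stride) regardless of kernel size
        return [-(-i // s) for i, s in zip(ifm_hw, strides)]
    # VALID only emits positions where the dilated kernel fully fits
    return [(i - dk) // s + 1 for i, dk, s in zip(ifm_hw, dilated, strides)]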
def test_tflite_conv2d_with_separate_padding_legalize():
dtype = "int8"
ifm_shape = (1, 55, 34, 3)
kernel_shape = (3, 2)
strides = (1, 1)
dilation = (2, 1)
padding = (0, 0, 1, 1)
def create_tflite_graph_single():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
tf_strides = [1, strides[0], strides[1], 1]
op = tf.pad(
x,
[[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
"CONSTANT",
)
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
return tf.nn.conv2d(
op,
weight,
strides=tf_strides,
padding="VALID",
dilations=dilation,
)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
ofm_channels = op.attrs.ofm_channels
# check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
assert ifm.shape[3] == ofm_channels
# check OFM
ofm = op.checked_type
expected_ofm_shape = infra.compute_ofm_shape(
ifm_shape, padding, kernel_shape, strides, dilation
)
assert list(ofm.shape) == list(expected_ofm_shape)
assert str(ofm.dtype) == dtype
assert ofm.shape[3] == ofm_channels
# check weights
weights_ohwi = op.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert weights_ohwi.shape[0] == ofm_channels
assert weights_ohwi.shape[1] == kernel_shape[0]
assert weights_ohwi.shape[2] == kernel_shape[1]
assert weights_ohwi.shape[3] == 3
# Check that scale_bias matches weight tensor
assert list(op.args[2].checked_type.shape)[0] == ofm_channels
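# The explicit tf.pad in the source graph should have been folded into the NPU
# convolution, so the op's padding attribute equals the original pad values.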
assert list(op.attrs.padding) == list(padding)
assert list(op.attrs.strides) == list(strides)
assert list(op.attrs.dilation) == list(dilation)
conv2d_pattern_table = [
(
ethosu.QnnConv2DParams.composite_name,
ethosu.qnn_conv2d_pattern(),
lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph_single()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, conv_params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], conv_params)
mod = partition_ethosu_by_table(mod, conv2d_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.Conv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 3), (1, 123, 17, 7)])
@pytest.mark.parametrize("kernel_shape", [(7, 3), (22, 5)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("activation", ["RELU", None])
def test_tflite_depthwise_conv_2d_legalize(
ifm_shape, kernel_shape, padding, strides, dilation, activation
):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def depthwise_conv2d(self, x):
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
# The strides passed to the TensorFlow API need to be of shape 1x4
tf_strides = [1, strides[0], strides[1], 1]
op = tf.nn.depthwise_conv2d(
x, weight, strides=tf_strides, padding=padding, dilations=dilation
)
if activation:
op = tf.nn.relu(op)
return op
model = Model()
concrete_func = model.depthwise_conv2d.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
ofm_channels = op.attrs.ofm_channels
# check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
assert ifm.shape[3] == ofm_channels
# check OFM
ofm = op.checked_type
expected_ofm_shape = infra.compute_ofm_shape(
ifm_shape, padding, kernel_shape, strides, dilation
)
assert list(ofm.shape) == list(expected_ofm_shape)
assert str(ofm.dtype) == dtype
assert ofm.shape[3] == ofm_channels
# check weights
weights_ohwi = op.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert weights_ohwi.shape[0] == ofm_channels
assert weights_ohwi.shape[1] == kernel_shape[0]
assert weights_ohwi.shape[2] == kernel_shape[1]
assert weights_ohwi.shape[3] == 1 # only depth multiplier 1 is supported
# Check that scale_bias matches weight tensor
assert list(op.args[2].checked_type.shape)[0] == ofm_channels
expected_padding = infra.compute_padding_shape(
ifm_shape, expected_ofm_shape, padding, kernel_shape, strides, dilation
)
assert list(op.attrs.padding) == list(expected_padding)
assert op.attrs.ofm_channels == ofm_channels
assert list(op.attrs.strides) == list(strides)
assert list(op.attrs.dilation) == list(dilation)
if activation == "RELU":
assert str(op.attrs.activation) == "CLIP"
depthwise_pattern_table = [
(
ethosu.QnnDepthwiseConv2DParams.composite_name,
ethosu.qnn_depthwise_conv2d_pattern(),
lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = partition_ethosu_by_table(mod, depthwise_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.DepthwiseConv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_depthwise_conv2d_with_separate_padding_legalize():
dtype = "int8"
ifm_shape = (1, 23, 32, 7)
kernel_shape = (1, 2)
strides = (3, 2)
dilation = (1, 1)
padding = (0, 0, 1, 1)
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
tf_strides = [1, strides[0], strides[1], 1]
op = tf.pad(
x,
[[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
"CONSTANT",
)
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
return tf.nn.depthwise_conv2d(
op,
weight,
strides=tf_strides,
padding="VALID",
dilations=dilation,
)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
ofm_channels = op.attrs.ofm_channels
# check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
assert ifm.shape[3] == ofm_channels
# check OFM
ofm = op.checked_type
expected_ofm_shape = infra.compute_ofm_shape(
ifm_shape, padding, kernel_shape, strides, dilation
)
assert list(ofm.shape) == list(expected_ofm_shape)
assert str(ofm.dtype) == dtype
assert ofm.shape[3] == ofm_channels
# check weights
weights_ohwi = op.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert weights_ohwi.shape[0] == ofm_channels
assert weights_ohwi.shape[1] == kernel_shape[0]
assert weights_ohwi.shape[2] == kernel_shape[1]
assert weights_ohwi.shape[3] == 1 # only depth multiplier 1 is supported
# Check that scale_bias matches weight tensor
assert list(op.args[2].checked_type.shape)[0] == ofm_channels
assert list(op.attrs.padding) == list(padding)
assert op.attrs.ofm_channels == ofm_channels
assert list(op.attrs.strides) == list(strides)
assert list(op.attrs.dilation) == list(dilation)
depthwise_pattern_table = [
(
ethosu.QnnDepthwiseConv2DParams.composite_name,
ethosu.qnn_depthwise_conv2d_pattern(),
lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = partition_ethosu_by_table(mod, depthwise_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.DepthwiseConv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(
"pool_shape, strides, activation_function, padding",
[([1, 2], [1, 2], "NONE", "SAME"), ([2, 3], [2, 3], "RELU", "VALID")],
)
def test_tflite_pool2d_legalize(
ifm_shape, pooling_type, strides, pool_shape, activation_function, padding
):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
if pooling_type == "MAX":
op = tf.nn.max_pool(x, pool_shape, strides, padding)
elif pooling_type == "AVG":
op = tf.nn.avg_pool(x, pool_shape, strides, padding)
if activation_function == "RELU":
op = tf.nn.relu(op)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
ofm_shape = infra.compute_ofm_shape(ifm_shape, padding, pool_shape, strides)
op = ext_func.body
assert list(op.args[0].checked_type.shape) == ifm_shape
assert op.args[0].checked_type.dtype == dtype
assert list(op.checked_type.shape) == ofm_shape
assert op.checked_type.dtype == dtype
assert op.attrs.pooling_type == pooling_type
assert list(op.attrs.strides) == strides
assert list(op.attrs.padding) == infra.compute_padding_shape(
ifm_shape, ofm_shape, padding, pool_shape, strides
)
assert list(op.attrs.pool_shape) == pool_shape
assert op.attrs.ofm_channels == ifm_shape[3]
if activation_function == "RELU":
assert str(op.attrs.activation) == "CLIP"
if pooling_type == "MAX":
rewriter = legalize.MaxPoolingRewriter()
pattern_table = [
(
ethosu.MaxPool2DParams.composite_name,
ethosu.qnn_maxpool2d_pattern(),
lambda pat: ethosu.MaxPool2DParams(pat).is_valid(),
),
]
elif pooling_type == "AVG":
rewriter = legalize.AvgPoolingRewriter()
pattern_table = [
(
ethosu.AvgPool2DParams.composite_name,
ethosu.qnn_avgpool2d_pattern(),
lambda pat: ethosu.AvgPool2DParams(pat).is_valid(),
),
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"x": ifm_shape},
dtype_dict={"x": dtype},
)
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX"])
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape, reversed_operands",
[
([1, 2, 3, 4], [1, 2, 3, 4], False),
([1, 2, 3, 4], [1, 1, 3, 1], False),
([1, 1, 3, 1], [1, 2, 3, 4], True),
([1, 4, 4], [4, 1], False),
([4], [4], False),
([4], [1, 2, 3, 4], True),
([1, 4, 4], [4, 1], False),
],
)
@pytest.mark.parametrize("activation_function", ["NONE", "RELU"])
def test_tflite_binary_elemwise_legalize(
operator_type,
ifm_shape,
ifm2_shape,
reversed_operands,
activation_function,
):
np.random.seed(0)
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x, y):
if operator_type == "ADD":
op = tf.math.add(x, y)
elif operator_type == "SUB":
op = tf.math.subtract(x, y)
elif operator_type == "MUL":
op = tf.math.multiply(x, y)
elif operator_type == "MIN":
op = tf.math.minimum(x, y)
elif operator_type == "MAX":
op = tf.math.maximum(x, y)
if activation_function == "RELU":
op = tf.nn.relu(op)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32), tf.TensorSpec(ifm2_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
data2 = np.random.rand(*tuple(ifm2_shape)) * 2
yield [data.astype(np.float32), data2.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
out_shape = ifm2_shape if reversed_operands else ifm_shape
shapes = [ifm_shape, ifm2_shape]
ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)
op = ext_func.body
has_reshaped_output = False
shapes_padded = [[1] * (4 - len(s)) + s for s in shapes]
out_padded = [1] * (4 - len(out_shape)) + out_shape
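# e.g. a (4, 1) operand is padded with leading 1s to rank 4, i.e. (1, 1, 4, 1)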
if op.op.name != "contrib.ethosu.binary_elementwise":
has_reshaped_output = True
op = op.args[0]
assert list(op.args[0].checked_type.shape) == shapes_padded[ifm_index]
assert list(op.args[1].checked_type.shape) == shapes_padded[ifm2_index]
assert op.args[0].checked_type.dtype == dtype
assert list(op.checked_type.shape) == out_padded
assert op.checked_type.dtype == dtype
assert op.attrs.operator_type == operator_type
assert op.attrs.reversed_operands == reversed_operands
if activation_function == "RELU":
assert str(op.attrs.activation) == "CLIP"
if operator_type in ["MIN", "MAX"]:
# MIN and MAX with an activation must have a requantize operation
# baked into the output. To check the extra requantize node was
# picked up by the pattern, we can make sure the quantization
# information is not default.
assert float(op.attrs.ifm_scale) != 1.0
assert int(op.attrs.ifm_zero_point) != 0
assert float(op.attrs.ifm2_scale) != 1.0
assert int(op.attrs.ifm2_zero_point) != 0
assert float(op.attrs.ofm_scale) != 1.0
assert int(op.attrs.ofm_zero_point) != 0
if has_reshaped_output:
assert list(ext_func.body.checked_type.shape) == out_shape
if operator_type == "ADD":
rewriter = legalize.AddRewriter()
pattern_table = [
(
ethosu.AddParams.composite_name,
ethosu.qnn_add_pattern(),
lambda pat: ethosu.AddParams(pat).is_valid(),
),
]
elif operator_type == "SUB":
rewriter = legalize.SubRewriter()
pattern_table = [
(
ethosu.SubParams.composite_name,
ethosu.qnn_subtract_pattern(),
lambda pat: ethosu.SubParams(pat).is_valid(),
),
]
elif operator_type == "MUL":
rewriter = legalize.MulRewriter()
pattern_table = [
(
ethosu.MulParams.composite_name,
ethosu.qnn_mul_pattern(),
lambda pat: ethosu.MulParams(pat).is_valid(),
),
]
elif operator_type == "MIN":
rewriter = legalize.MinRewriter()
pattern_table = [
(
ethosu.MinParams.composite_name,
ethosu.minimum_pattern(),
lambda pat: ethosu.MinParams(pat).is_valid(),
),
]
elif operator_type == "MAX":
rewriter = legalize.MaxRewriter()
pattern_table = [
(
ethosu.MaxParams.composite_name,
ethosu.maximum_pattern(),
lambda pat: ethosu.MaxParams(pat).is_valid(),
),
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"x": ifm_shape, "y": ifm2_shape},
dtype_dict={"x": dtype, "y": dtype},
)
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
def test_binary_add_from_constant_scalar():
dtype = "uint8"
ifm_shape = (1, 4, 4, 8)
def create_graph():
inp = relay.var("input", shape=ifm_shape, dtype=dtype)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add = relay.qnn.op.add(
inp,
scalar,
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
)
func = relay.Function(relay.analysis.free_vars(add), add)
return tvm.IRModule.from_expr(func)
def verify(ext_func):
op = ext_func.body
assert list(op.args[0].checked_type.shape) == [1, 4, 4, 8]
assert list(op.args[1].checked_type.shape) == [1, 1, 1, 1]
assert op.args[0].checked_type.dtype == "uint8"
assert list(op.checked_type.shape) == [1, 4, 4, 8]
assert op.checked_type.dtype == "uint8"
assert op.attrs.operator_type == "ADD"
rewriter = legalize.AddRewriter()
pattern_table = [
(
ethosu.AddParams.composite_name,
ethosu.qnn_add_pattern(),
lambda pat: ethosu.AddParams(pat).is_valid(),
),
]
mod = create_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape, reversed_operands",
[
([1, 2, 3, 4], [1, 2, 3, 4], False),
([1, 2, 3, 4], [1, 1, 3, 1], False),
([1, 1, 3, 1], [1, 2, 3, 4], True),
],
)
def test_ethosu_left_shift_binary_elemwise_legalize(ifm_shape, ifm2_shape, reversed_operands):
dtype = "int32"
operator_type = "SHL"
def create_graph():
input1 = relay.var("x1", shape=ifm_shape, dtype=dtype)
input2 = relay.var("x2", shape=ifm2_shape, dtype=dtype)
c1 = relay.left_shift(input1, input2)
f = relay.Function([input1, input2], c1)
mod = tvm.IRModule()
mod["main"] = f
return mod
def verify(ext_func):
out_shape = ifm2_shape if reversed_operands else ifm_shape
shapes = [ifm_shape, ifm2_shape]
ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)
op = ext_func.body
assert list(op.args[0].checked_type.shape) == shapes[ifm_index]
assert list(op.args[1].checked_type.shape) == shapes[ifm2_index]
assert op.args[0].checked_type.dtype == dtype
assert list(op.checked_type.shape) == out_shape
assert op.checked_type.dtype == dtype
assert op.attrs.operator_type == operator_type
assert op.attrs.reversed_operands == reversed_operands
assert str(op.attrs.activation) == "NONE"
rewriter = legalize.ShlRewriter()
pattern_table = [
(
ethosu.ShlParams.composite_name,
ethosu.shl_pattern(),
lambda pat: ethosu.ShlParams(pat).is_valid(),
),
]
mod = create_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape, new_shape",
[
((1, 4, 1, 2), (4, 2)),
((1, 5, 1, 20), (100,)),
((12, 20), (1, 6, 4, 10)),
((30,), (10, 1, 3)),
],
)
def test_relay_reshape_legalize(ifm_shape, new_shape):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
reshape = relay.op.reshape(ifm, new_shape)
func = relay.Function([ifm], reshape)
mod = tvm.IRModule()
mod["main"] = func
mod = relay.transform.InferType()(mod)
reshape_pattern_table = [
(
ethosu.ReshapeParams.composite_name,
ethosu.reshape_pattern(),
lambda pat: ethosu.ReshapeParams(pat).is_valid(),
),
]
mod = partition_ethosu_by_table(mod, reshape_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.ReshapeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
ext_func = mod["tvmgen_default_ethos_u_main_0"]
identity = ext_func.body
assert identity.op.name == "contrib.ethosu.identity"
# check that the reshape is still there
reshape = identity.args[0]
assert reshape.op.name == "reshape"
# check that identity's output shape matches reshape's output shape
assert tuple(identity.checked_type.shape) == new_shape
@pytest.mark.parametrize(
"ifm_shape, begin, size",
[
([1, 10, 50, 4], [0, 5, 11, 2], [1, 5, 11, 1]),
([15, 17, 3], [3, 0, 1], [8, 17, 2]),
([7, 6043], [0, 704], [1, 2860]),
([5000], [123], [2151]),
],
)
def test_tflite_slice(ifm_shape, begin, size):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def slice_func(self, x):
return tf.slice(x, begin, size)
model = Model()
# Save the model
concrete_func = model.slice_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
identity = ext_func.body
assert identity.op.name == "contrib.ethosu.identity"
# check that the strided_slice is still there
strided_slice = identity.args[0]
assert strided_slice.op.name == "strided_slice"
# check that identity's output shape matches strided slice's output shape
assert list(identity.checked_type.shape) == size
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
strided_slice_pattern_table = [
(
ethosu.StridedSliceParams.composite_name,
ethosu.strided_slice_pattern(),
lambda pat: ethosu.StridedSliceParams(pat).is_valid(),
),
]
mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.StridedSliceRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape, begin, end",
[([1, 1, 5, 8], [0, 0, 0, 0], [1, 1, 2, 3]), ([1, 3, 3], [0, 1, 2], [1, 2, 3])],
)
def test_tflite_strided_slice(ifm_shape, begin, end):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def strided_slice_func(self, x):
return tf.strided_slice(x, begin, end)
model = Model()
# Save the model
concrete_func = model.strided_slice_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
identity = ext_func.body
assert identity.op.name == "contrib.ethosu.identity"
# check that the strided_slice is still there
strided_slice = identity.args[0]
assert strided_slice.op.name == "strided_slice"
# check that identity's output shape matches strided slice's output shape
size = list(np.array(end) - np.array(begin))
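# e.g. begin [0, 1, 2] and end [1, 2, 3] give an output shape of [1, 1, 1]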
assert list(identity.checked_type.shape) == size
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
strided_slice_pattern_table = [
(
ethosu.StridedSliceParams.composite_name,
ethosu.strided_slice_pattern(),
lambda pat: ethosu.StridedSliceParams(pat).is_valid(),
),
]
mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.StridedSliceRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("operator_type", ["ABS"])
@pytest.mark.parametrize(
"ifm_shape",
[[1, 2, 3, 4], [1, 7, 3], [8, 3, 1], [11, 22], [300]],
)
def test_tflite_unary_elemwise_legalize(
operator_type,
ifm_shape,
):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def abs_func(self, x):
if operator_type == "ABS":
op = tf.math.abs(x)
return op
model = Model()
# Save the model
concrete_func = model.abs_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
out_shape = ifm_shape
func_body = ext_func.body
# If we legalized the unary elementwise op into 4D
if func_body.op.name == "reshape":
reshape = func_body
unary = func_body.args[0]
reshape2 = unary.args[0]
# Check the input to the reshape
reshape2_in_shape = [i for i in reshape2.args[0].checked_type.shape]
assert reshape2_in_shape == ifm_shape
# Check that the unary elementwise operator is 4D after reshape
assert len(unary.checked_type.shape) == 4
assert unary.args[0].checked_type.dtype == dtype
# Check that the output of the graph has the same shape as input
reshape_out_shape = [i for i in reshape.checked_type.shape]
assert reshape_out_shape == ifm_shape
assert unary.attrs.operator_type == operator_type
else:
unary = func_body
# Check the IFM
assert list(unary.args[0].checked_type.shape) == ifm_shape
assert unary.args[0].checked_type.dtype == dtype
# Check the OFM
assert list(unary.checked_type.shape) == out_shape
assert unary.checked_type.dtype == dtype
# operator type check
assert unary.attrs.operator_type == operator_type
if operator_type == "ABS":
rewriter = legalize.AbsRewriter()
pattern_table = [
(
ethosu.AbsParams.composite_name,
ethosu.abs_pattern(),
lambda pat: ethosu.AbsParams(pat).is_valid(),
),
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_tanh_legalize():
dtype = "int8"
ifm_shape = (1, 241, 132, 7)
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tanh_func(self, x):
op = tf.math.tanh(x)
return op
model = Model()
concrete_func = model.tanh_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod, params)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.TanhRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
func_body = mod["tvmgen_default_ethos_u_main_0"].body
assert func_body.op.name == "contrib.ethosu.identity"
assert func_body.attrs.activation == "TANH"
assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)
assert tuple(func_body.args[1].checked_type.shape) == (256,)
@pytest.mark.parametrize(
"ifm_shape, axis, keep_dims, use_same_quantization",
[
# mean to depthwise + multiply
[(1, 8, 16, 16), (1, 2), True, False],
[(1, 8, 16, 16), (2, 1), True, False],
[(1, 3, 4), (0, 1), True, False],
[(8, 5), (1, 0), True, False],
[(1, 65, 2, 1), (1, 2), True, False], # special case when h > 64
# mean to average pool
[(1, 8, 16, 16), (1,), True, True],
[(1, 8, 16, 16), (2,), False, True],
[(1, 8, 16, 16), (1, 2), False, True],
[(3, 3, 4), (0,), True, True],
[(3, 3, 4), (1,), False, True],
[(8, 5), (0,), False, True],
[(8, 5), (1,), True, True],
# mean to depthwise
[(1, 8, 16, 16), (1,), True, False],
[(1, 8, 16, 16), (2,), True, False],
[(1, 8, 16, 16), (1, 2), False, False],
[(8, 4), (0,), False, False],
],
)
def test_mean(ifm_shape, axis, keep_dims, use_same_quantization):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
return mod
def create_relay_graph_with_same_quantization():
ifm = relay.var("input", shape=ifm_shape, dtype=dtype)
cast = relay.cast(ifm, dtype="int32")
mean = relay.mean(cast, axis=axis, keepdims=keep_dims)
requantize = relay.qnn.op.requantize(
mean,
input_scale=relay.const(1.0, dtype="float32"),
input_zero_point=relay.const(0, dtype="int32"),
output_scale=relay.const(1.0, dtype="float32"),
output_zero_point=relay.const(0, dtype="int32"),
)
func = relay.Function(relay.analysis.free_vars(requantize), requantize)
mod = tvm.IRModule.from_expr(func)
return mod
def verify(ext_func):
out_var = ext_func.body
next_op = out_var
mul_op = None
pooling_op = None
depthwise_op = None
if (
isinstance(next_op, relay.expr.Call)
and isinstance(next_op.op, tvm.ir.op.Op)
and next_op.op.name == "reshape"
):
next_op = next_op.args[0]
if util.is_named_ethosu_op(next_op, "binary_elementwise"):
mul_op = next_op
next_op = next_op.args[0]
if util.is_named_ethosu_op(next_op, "pooling"):
pooling_op = next_op
next_op = next_op.args[0]
if util.is_named_ethosu_op(next_op, "depthwise_conv2d"):
depthwise_op = next_op
next_op = next_op.args[0]
while (
isinstance(next_op, relay.expr.Call)
and isinstance(next_op.op, tvm.ir.op.Op)
and next_op.op.name == "reshape"
):
next_op = next_op.args[0]
in_var = next_op
def calculate_expected_output_shape():
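# Axes listed in `axis` are reduced: kept as size 1 when keep_dims is set,
# dropped otherwise; all remaining axes keep their original extent.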
for i in range(len(ifm_shape)):
if i in axis:
if keep_dims:
yield 1
else:
yield ifm_shape[i]
out_shape = tuple(calculate_expected_output_shape())
# check IFM
assert tuple(in_var.checked_type.shape) == ifm_shape
assert in_var.checked_type.dtype == dtype
# check OFM
assert tuple(out_var.checked_type.shape) == out_shape
assert out_var.checked_type.dtype == dtype
# check expected legalization case
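# e.g. ifm_shape (1, 8, 16, 16) with axis (1, 2) and keep_dims=True legalizes
# to a depthwise_conv2d followed by a binary MUL (see the parametrize list above)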
if axis in [(1, 2), (2, 1), (0, 1), (1, 0)] and keep_dims and dtype == "int8":
assert depthwise_op and mul_op
assert mul_op.attrs.operator_type == "MUL"
elif pooling_op:
attrs = pooling_op.attrs
assert (
attrs.ifm_scale == attrs.ofm_scale and attrs.ifm_zero_point == attrs.ofm_zero_point
)
else:
assert depthwise_op
assert not mul_op
rewriter = legalize.MeanRewriter()
pattern_table = [
(
ethosu.MeanParams.composite_name,
ethosu.mean_pattern(),
lambda pat: ethosu.MeanParams(pat).is_valid(),
),
]
mod = (
create_relay_graph_with_same_quantization()
if use_same_quantization
else create_tflite_graph()
)
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"shapes, axis",
[
([(2, 3), (4, 3)], 0),
([(10, 2, 1), (10, 14, 1)], 1),
([(10,), (13,), (14,)], 0),
([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),
],
)
def test_tflite_concat_legalize(shapes, axis):
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, shapes, axis):
op = tf.concat(shapes, axis)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
[tf.TensorSpec(shape, tf.float32) for shape in shapes], axis
)
def representative_dataset():
for _ in range(100):
datas = [np.random.rand(*shape) for shape in shapes]
yield [data.astype(np.float32) for data in datas]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
new_concat_axis = sum(shape[axis] for shape in shapes)
out_shape = list(shapes[0])
out_shape[axis] = new_concat_axis
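# e.g. concatenating (2, 3) and (4, 3) along axis 0 is expected to yield (6, 3)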
op = ext_func.body
for i, _ in enumerate(shapes):
assert list(op.args[0][i].checked_type.shape) == list(shapes[i])
assert list(op.checked_type.shape) == out_shape
assert op.checked_type.dtype == "int8"
concat_pattern_table = [
(
ethosu.ConcatParams.composite_name,
ethosu.concat_pattern(),
lambda pat: ethosu.ConcatParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={("ifm" + str(i)): shape for i, shape in enumerate(shapes)},
dtype_dict={("ifm" + str(i)): "int8" for i, _ in enumerate(shapes)},
)
mod = partition_ethosu_by_table(relay_module, concat_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.ConcatRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_sigmoid_legalize():
dtype = "int8"
ifm_shape = (1, 237, 91, 7)
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def sigmoid_func(self, x):
op = tf.math.sigmoid(x)
return op
model = Model()
concrete_func = model.sigmoid_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_output_type = tf.int8
converter.inference_input_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod, params)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.SigmoidRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
func_body = mod["tvmgen_default_ethos_u_main_0"].body
assert func_body.op.name == "contrib.ethosu.identity"
assert func_body.attrs.activation == "SIGMOID"
assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)
assert tuple(func_body.args[1].checked_type.shape) == (256,)
@pytest.mark.parametrize(
"ifm_shape, num_or_size_splits, axis",
[
((1, 4, 6, 8), 3, 2),
((4, 6, 8), 2, 0),
((5, 15), 3, 1),
((3, 7), 1, 1),
((100,), 25, 0),
],
)
def test_tflite_split_legalize(ifm_shape, num_or_size_splits, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x, num_or_size_splits, axis):
op = tf.split(x, num_or_size_splits, axis=axis)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
# dig out the split
single_output_split = num_or_size_splits == 1
split = (
ext_func.body.tuple_value
if single_output_split
else ext_func.body.args[0][0].args[0].tuple_value
)
assert split.op.name == "split"
# Split is specified by number of equal chunks
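# e.g. splitting a (1, 4, 6, 8) tensor into 3 chunks along axis 2 keeps
# indices_or_sections == 3 and axis == 2 on the relay split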
assert split.attrs.indices_or_sections == num_or_size_splits
assert split.attrs.axis == axis
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.PartitionedSplitRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape, num_or_size_splits, axis",
[
((1, 4, 6, 8), (1, 3, 4), 3),
((10, 18, 4), (1, 4, 3, 2), 0),
((22, 7), (4, -1), 1),
((25,), (25,), 0),
],
)
def test_tflite_split_v_legalize(ifm_shape, num_or_size_splits, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x, num_or_size_splits, axis):
# TF split gets converted into TFLite's split_v
op = tf.split(x, num_or_size_splits, axis=axis)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
# dig out the split
single_output_split = len(num_or_size_splits) == 1
split = (
ext_func.body.tuple_value
if single_output_split
else ext_func.body.args[0][0].args[0].tuple_value
)
assert split.op.name == "split"
# Split is specified by the size of each section, so convert num_or_size_splits
# into the indices at which the tensor is split, since this is how split is
# represented in Relay
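# e.g. num_or_size_splits == (1, 3, 4) becomes split indices [1, 4]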
split_sections = [] if single_output_split else [num_or_size_splits[0]]
for split_size in num_or_size_splits[1:-1]:
sec = split_sections[-1] + split_size
split_sections.append(sec)
assert list(split.attrs.indices_or_sections) == split_sections
assert split.attrs.axis == axis
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.PartitionedSplitRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp",
[[(1, 8, 8, 3), 1.0, 0, 1.0, 0], [(1, 20, 30, 3), 1.345, 34, 0.32, -23]],
)
def test_ethosu_requantize(ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):
dtype = "int8"
def create_model():
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
requantize = relay.qnn.op.requantize(
ifm,
relay.const(ifm_scale, dtype="float32"),
relay.const(ifm_zp, dtype="int32"),
relay.const(ofm_scale, dtype="float32"),
relay.const(ofm_zp, dtype="int32"),
)
return tvm.IRModule.from_expr(relay.Function([ifm], requantize))
def verify(ext_func):
op = ext_func.body
# Check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
# Check OFM
ofm = op.checked_type
assert list(ofm.shape) == list(ifm_shape)
assert str(ofm.dtype) == dtype
# Check quantization params
assert math.isclose(op.attrs.ifm_scale, ifm_scale, abs_tol=1e-7)
assert op.attrs.ifm_zero_point == ifm_zp
assert math.isclose(op.attrs.ofm_scale, ofm_scale, abs_tol=1e-7)
assert op.attrs.ofm_zero_point == ofm_zp
rewriter = legalize.RequantizeRewriter()
pattern_table = [
(
ethosu.RequantizeParams.composite_name,
ethosu.requantize_pattern(),
lambda pat: ethosu.RequantizeParams(pat).is_valid(),
),
]
mod = create_model()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
def test_multiple_requantize_offload():
"""
Testing requantize offload in the case where one requantize operation is part
of an existing pattern (in this case Mean: cast->mean->requantize) and the
other is a stand-alone requantize.
"""
def create_model():
ifm = relay.var("input", shape=(1, 3, 3, 4), dtype="int8")
cast = relay.cast(ifm, dtype="int32")
mean = relay.mean(cast, axis=1, keepdims=True)
requantize = relay.qnn.op.requantize(
mean,
input_scale=relay.const(1.0, dtype="float32"),
input_zero_point=relay.const(0, dtype="int32"),
output_scale=relay.const(1.0, dtype="float32"),
output_zero_point=relay.const(0, dtype="int32"),
)
requantize = relay.qnn.op.requantize(
requantize,
input_scale=relay.const(1.0, dtype="float32"),
input_zero_point=relay.const(0, dtype="int32"),
output_scale=relay.const(1.0, dtype="float32"),
output_zero_point=relay.const(0, dtype="int32"),
)
return tvm.IRModule.from_expr(relay.Function([ifm], requantize))
def verify(ext_func):
# If the mean operation and the separate requantize were offloaded correctly,
# only a pooling operation followed by an identity operation should have been
# legalized.
op = ext_func.body
assert op.op.name == "contrib.ethosu.identity"
op = op.args[0]
assert op.op.name == "contrib.ethosu.pooling"
op = op.args[0]
assert isinstance(op, relay.Var)
mod = create_model()
mod = ethosu.partition_for_ethosu(mod)
mod = legalize.LegalizeEthosU()(mod)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape,axis", [((2,), 0), ((1, 3, 3), 2)])
def test_tflite_expand_dims(ifm_shape, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
return tf.expand_dims(x, axis=axis)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
expected_shape = list(ifm_shape)
expected_shape.insert(axis, 1)
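# e.g. expanding (1, 3, 3) at axis 2 is expected to give (1, 3, 1, 3)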
# Check IFM
assert list(op.args[0].checked_type.shape) == list(ifm_shape)
assert op.args[0].checked_type.dtype == dtype
# Check OFM
assert list(op.checked_type.shape) == expected_shape
assert op.checked_type.dtype == dtype
# Check op
assert op.op.name == "reshape"
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.ExpandDimsRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.ReshapeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,axis", [((1, 1, 2, 1), 0), ((1, 3, 3, 1), 3), ((1, 1, 2, 1), None)]
)
def test_tflite_squeeze(ifm_shape, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
return tf.squeeze(x, axis=axis)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
op = ext_func.body
expected_shape = list(ifm_shape)
if isinstance(axis, int):
expected_shape = ifm_shape[:axis] + ifm_shape[axis + 1 :]
else:
expected_shape = list(filter(lambda a: a != 1, expected_shape))
# Check IFM
assert list(op.args[0].checked_type.shape) == list(ifm_shape)
assert op.args[0].checked_type.dtype == dtype
# Check OFM
assert list(op.checked_type.shape) == list(expected_shape)
assert op.checked_type.dtype == dtype
# Check op
assert op.op.name == "reshape"
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.SqueezeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.ReshapeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,size",
[
[(1, 2, 2, 1), (4, 4)],
[(1, 4, 7, 3), (8, 14)],
[(1, 3, 5, 3), (3, 5)],
],
)
def test_tflite_resize2d_nearest_neighbor(ifm_shape, size):
align_corners = False
dtype = "int8"
def create_tflite_graph():
@tf.function
def resize_model(x):
return tf.compat.v1.image.resize_nearest_neighbor(
x, size, align_corners=align_corners, half_pixel_centers=False
)
concrete_func = resize_model.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
return mod
def verify(ext_func):
op = ext_func.body
in_var = op.args[0]
# check IFM
assert tuple(in_var.checked_type.shape) == ifm_shape
assert in_var.checked_type.dtype == dtype
# check OFM
attrs = dict(op.attrs)
out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])
assert tuple(op.checked_type.shape) == out_shape
assert op.checked_type.dtype == dtype
# Check Op attributes
if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:
assert op.op.name == "contrib.ethosu.identity"
else:
assert attrs["pooling_type"] == "AVG"
assert attrs["upscale"] == "NEAREST"
rewriter = legalize.Resize2dRewriter()
pattern_table = [
(
ethosu.Resize2dParams.composite_name,
ethosu.resize2d_pattern(),
lambda pat: ethosu.Resize2dParams(pat).is_valid(),
),
]
mod = create_tflite_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,size,align_corners",
[
[(1, 2, 2, 1), (4, 4), False],
[(1, 4, 7, 3), (8, 14), False],
[(1, 2, 2, 1), (3, 3), True],
[(1, 4, 7, 3), (7, 13), True],
[(1, 3, 5, 3), (3, 5), False],
],
)
def test_tflite_resize2d_bilinear(ifm_shape, size, align_corners):
dtype = "int8"
def create_tflite_graph():
@tf.function
def resize_model(x):
return tf.compat.v1.image.resize_bilinear(
x, size, align_corners=align_corners, half_pixel_centers=False
)
concrete_func = resize_model.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
return mod
def verify(ext_func):
op = ext_func.body
in_var = op.args[0]
# check IFM
assert tuple(in_var.checked_type.shape) == ifm_shape
assert in_var.checked_type.dtype == dtype
# check OFM
attrs = dict(op.attrs)
out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])
assert tuple(op.checked_type.shape) == out_shape
assert op.checked_type.dtype == dtype
# Check Op attributes
if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:
assert op.op.name == "contrib.ethosu.identity"
else:
assert attrs["pooling_type"] == "AVG"
assert attrs["upscale"] == "NEAREST"
# Check padding
if align_corners:
assert list(attrs["padding"]) == [0, 0, 0, 0]
else:
assert list(attrs["padding"]) == [0, 0, 1, 1]
rewriter = legalize.Resize2dRewriter()
pattern_table = [
(
ethosu.Resize2dParams.composite_name,
ethosu.resize2d_pattern(),
lambda pat: ethosu.Resize2dParams(pat).is_valid(),
),
]
mod = create_tflite_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,ofm_shape,kernel_shape,padding",
[
[(1, 2, 2, 1), (1, 4, 4, 1), (3, 3), "SAME"],
[(1, 2, 2, 1), (1, 9, 9, 1), (7, 7), "VALID"],
[(1, 2, 4, 3), (1, 4, 8, 3), (3, 3), "SAME"],
[(1, 10, 5, 3), (1, 21, 13, 3), (3, 5), "VALID"],
],
)
@pytest.mark.parametrize("has_bias", [False, True])
def test_tflite_transpose_convolution(ifm_shape, ofm_shape, kernel_shape, padding, has_bias):
dtype = "int8"
dilations = (1, 1)
strides = (2, 2)
def create_tflite_graph():
@tf.function
def conv2d_transpose(x):
bias_shape = ofm_shape[3]
bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], ofm_shape[3]]
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
tf_strides = [1, strides[0], strides[1], 1]
op = tf.nn.conv2d_transpose(
x,
weight,
output_shape=ofm_shape,
strides=tf_strides,
padding=padding,
dilations=dilations,
)
if has_bias:
op = tf.nn.bias_add(op, bias)
return op
concrete_func = conv2d_transpose.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
return mod, params
def verify(ext_func):
strided_slice = ext_func.body
conv = strided_slice.args[0]
ofm_channels = conv.attrs.ofm_channels
# Check IFM
ifm = conv.args[0].checked_type
assert list(ifm.shape) == list(ifm_shape)
assert str(ifm.dtype) == dtype
assert ifm.shape[3] == ofm_channels
# Check OFM
ofm = strided_slice.checked_type
assert list(ofm.shape) == list(ofm_shape)
assert str(ofm.dtype) == dtype
assert ofm.shape[3] == ofm_channels
# Check weights
weights_ohwi = conv.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert list(weights_ohwi.shape) == [
ofm_channels,
kernel_shape[0],
kernel_shape[1],
ifm_shape[3],
]
# Check that scale_bias matches weight tensor
assert list(conv.args[2].checked_type.shape)[0] == ofm_channels
# Calculate expected padding for conv2d op
if padding == "VALID":
expected_padding = [0, 0, 0, 0]
elif padding == "SAME":
pad_top, pad_bottom = get_pad_value(ofm_shape[1], kernel_shape[0], strides[0])
pad_left, pad_right = get_pad_value(ofm_shape[2], kernel_shape[1], strides[1])
expected_padding = [pad_top, pad_left, pad_bottom, pad_right]
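        # The transpose convolution is expected to be legalized to a regular conv2d on an
        # upscaled IFM, so the equivalent conv2d padding mirrors the transpose padding:
        # kernel_size - 1 - transpose_padding. For stride 2 one unit of bottom/right padding
        # is dropped, presumably because the NPU upscaling already doubles the IFM size.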
pad_top = kernel_shape[0] - 1 - expected_padding[0]
pad_left = kernel_shape[1] - 1 - expected_padding[1]
pad_bottom = kernel_shape[0] - 1 - expected_padding[2]
pad_right = kernel_shape[1] - 1 - expected_padding[3]
        if strides == (2, 2):
pad_bottom -= 1
pad_right -= 1
expected_padding = [pad_top, pad_left, pad_bottom, pad_right]
assert list(conv.attrs.padding) == list(expected_padding)
assert list(conv.attrs.strides) == [1, 1]
rewriter = legalize.Conv2DTransposeRewriter()
pattern_table = [
(
ethosu.QnnConv2DTransposeParams.composite_name,
ethosu.qnn_conv2d_transpose_pattern(),
lambda pat: ethosu.QnnConv2DTransposeParams(pat).is_valid(),
),
]
mod, params = create_tflite_graph()
mod["main"] = bind_params_by_name(mod["main"], params)
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shapes,axis",
[
([(1, 2, 2), (1, 2, 2), (1, 2, 2)], 2),
([(5, 4), (5, 4)], 1),
([(1,), (1,)], 0),
([(3, 1), (3, 1), (3, 1), (3, 1)], 0),
],
)
def test_tflite_pack(ifm_shapes, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, inputs, axis):
return tf.stack(inputs, axis=axis)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
[tf.TensorSpec(shape, tf.float32) for shape in ifm_shapes], axis
)
def representative_dataset():
for _ in range(100):
datas = [np.random.rand(*shape) for shape in ifm_shapes]
yield [data.astype(np.float32) for data in datas]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
new_pack_axis = len(ifm_shapes)
ifm_shape = list(ifm_shapes[0])
op = ext_func.body
after_reshape = ifm_shape[:axis] + [1] + ifm_shape[axis:]
out_shape = ifm_shape[:axis] + [new_pack_axis] + ifm_shape[axis:]
assert op.op.name == "concatenate"
# Check shapes after expand_dims (legalized as reshape)
for i in range(len(ifm_shapes)):
assert list(op.args[0][i].checked_type.shape) == after_reshape
assert op.args[0][i].checked_type.dtype == dtype
# Check output
assert list(op.checked_type.shape) == out_shape
assert op.checked_type.dtype == dtype
pack_pattern_table = [
(
ethosu.ConcatParams.composite_name,
ethosu.concat_pattern(),
lambda pat: ethosu.ConcatParams(pat).is_valid(),
),
(
ethosu.ExpandDimsParams.composite_name,
ethosu.expand_dims_pattern(),
lambda pat: ethosu.ExpandDimsParams(pat).is_valid(),
),
(
ethosu.ReshapeParams.composite_name,
ethosu.reshape_pattern(),
lambda pat: ethosu.ReshapeParams(pat).is_valid(),
),
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={("ifm" + str(i)): shape for i, shape in enumerate(ifm_shapes)},
dtype_dict={("ifm" + str(i)): dtype for i, _ in enumerate(ifm_shapes)},
)
mod = partition_ethosu_by_table(relay_module, pack_pattern_table)
seq = [
legalize.ConcatRewriter(),
legalize.ExpandDimsRewriter(),
legalize.ReshapeRewriter(),
legalize.NoOpRewriter(),
]
for legalizer in seq:
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalizer, mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
"ifm_shape,axis",
[[(1, 2, 3, 4), 1], [(2, 3), 1], [(5, 6, 7), 2]],
)
def test_tflite_unpack(ifm_shape, axis):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x, axis):
return tf.unstack(x, axis=axis)
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32), axis
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
outputs = ext_func.body.args[0].fields
shape = list(ifm_shape)
unpacked_shape = shape[:axis] + shape[axis + 1 :]
split_shape = shape[:axis] + [1] + shape[axis + 1 :]
assert len(outputs) == shape[axis]
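        # Each unpacked output is expected to be legalized to a strided_slice over the input
        # followed by a squeeze (itself legalized to a reshape), so walk back through the
        # expression from each output and check both operations.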
for i, output in enumerate(outputs):
expr = output.args[0].args[0]
expr = expr.tuple_value[expr.index]
expr = expr.args[0]
# Checking expected unpacked output shape.
# Squeeze is legalized to a reshape.
assert expr.op.name == "reshape"
assert list(expr.checked_type.shape) == unpacked_shape
assert output.checked_type.dtype == dtype
expr = expr.args[0]
expr = expr.tuple_value[expr.index]
expr = expr.args[0]
# Check input is split correctly
assert list(expr.args[0].checked_type.shape) == shape
assert list(expr.checked_type.shape) == split_shape
assert expr.checked_type.dtype == dtype
# Check split attrs
begin_shape = [0] * len(ifm_shape)
begin_shape[axis] = i
assert list(expr.attrs.begin) == begin_shape
end_shape = shape[:axis] + [i + 1] + shape[axis + 1 :]
assert list(expr.attrs.end) == end_shape
assert list(expr.attrs.strides) == [1]
pack_pattern_table = [
(
ethosu.SplitParams.composite_name,
ethosu.split_pattern(),
lambda pat: ethosu.SplitParams(pat).is_valid(),
),
(
ethosu.SqueezeParams.composite_name,
ethosu.squeeze_pattern(),
lambda pat: ethosu.SqueezeParams(pat).is_valid(),
),
(
ethosu.ReshapeParams.composite_name,
ethosu.reshape_pattern(),
lambda pat: ethosu.ReshapeParams(pat).is_valid(),
),
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = partition_ethosu_by_table(mod, pack_pattern_table)
seq = [
legalize.PartitionedSplitRewriter(),
legalize.SplitRewriter(),
legalize.SqueezeRewriter(),
legalize.ReshapeRewriter(),
legalize.NoOpRewriter(),
]
for legalizer in seq:
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalizer, mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 15, 15, 3), (1, 8, 9, 1)])
@pytest.mark.parametrize("alpha", [0.2, 0.634])
def test_tflite_leaky_relu(ifm_shape, alpha):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def leaky_relu_func(self, x):
return tf.nn.leaky_relu(x, alpha=alpha)
model = Model()
concrete_func = model.leaky_relu_func.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32),
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
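        # Leaky ReLU is expected to be legalized to an ethosu identity operation that
        # applies a 256-entry LUT.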
func_body = ext_func.body
assert func_body.op.name == "contrib.ethosu.identity"
assert func_body.attrs.activation == "LUT"
        assert tuple(func_body.args[0].checked_type.shape) == ifm_shape
assert tuple(func_body.args[1].checked_type.shape) == (256,)
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.LeakyReLURewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
"tvmgen_default_ethos_u_main_0"
]
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
@pytest.mark.parametrize("ofm_channels", [32, 64])
@pytest.mark.parametrize("use_bias", [True, False])
@pytest.mark.parametrize("activation_function", ["RELU", "NONE"])
def test_tflite_fully_connected(
ifm_shape,
ofm_channels,
use_bias,
activation_function,
):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def fully_connected(self, x):
bias_shape = ofm_channels
bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
w = tf.constant(
np.random.uniform(size=[ifm_shape[1], ofm_channels]),
dtype=tf.float32,
)
x = tf.matmul(x, w)
if use_bias:
x = tf.nn.bias_add(x, bias)
                if activation_function == "RELU":
x = tf.nn.relu(x)
return x
model = Model()
concrete_func = model.fully_connected.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(ext_func):
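        # The fully connected layer is expected to be legalized to an ethosu_conv2d with a
        # 1x1 kernel acting on an IFM reshaped to [1, 1, N, C].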
op = ext_func.body.args[0]
ofm_channels = op.attrs.ofm_channels
# check IFM
ifm = op.args[0].checked_type
assert list(ifm.shape) == [1, 1] + list(ifm_shape)
assert str(ifm.dtype) == dtype
# check OFM
ofm = op.checked_type
assert list(ofm.shape) == [1, 1, 1, ofm_channels]
assert str(ofm.dtype) == dtype
# check weights
weights_ohwi = op.args[1].data.asnumpy()
assert str(weights_ohwi.dtype) == dtype
assert list(weights_ohwi.shape) == [ofm_channels, 1, 1, ifm_shape[1]]
# Check that scale_bias matches weight tensor
assert list(op.args[2].checked_type.shape)[0] == ofm_channels
assert list(op.attrs.padding) == [0, 0, 0, 0]
assert list(op.attrs.strides) == [1, 1]
assert list(op.attrs.dilation) == [1, 1]
if activation_function == "RELU":
assert str(op.attrs.activation) == "CLIP"
fc_pattern_table = [
(
ethosu.FullyConnectedParams.composite_name,
ethosu.qnn_fc_pattern(),
lambda pat: ethosu.FullyConnectedParams(pat).is_valid(),
)
]
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, fc_params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], fc_params)
mod = partition_ethosu_by_table(mod, fc_pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.FullyConnectedRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 5, 5, 3), (1, 12, 9, 1)])
def test_tflite_hard_swish(ifm_shape):
dtype = "int8"
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_function(self, x):
op = tf.keras.layers.Lambda(
lambda x: x * tf.keras.activations.relu(x + 3.0, max_value=6.0) / 6.0
)(x)
return op
model = Model()
concrete_func = model.tf_function.get_concrete_function(
tf.TensorSpec(ifm_shape, tf.float32)
)
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = ethosu.partition_for_ethosu(mod, params)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
legalize.HardSwishRewriter(), mod["tvmgen_default_ethos_u_main_0"]
)
mod = relay.transform.InferType()(mod)
func_body = mod["tvmgen_default_ethos_u_main_0"].body
assert func_body.op.name == "contrib.ethosu.identity"
assert func_body.attrs.activation == "LUT"
    assert tuple(func_body.args[0].checked_type.shape) == ifm_shape
assert tuple(func_body.args[1].checked_type.shape) == (256,)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_legalize_no_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tensorflow as tf
import tflite.Model
from tvm import relay
from tvm.relay.backend.contrib.ethosu import legalize
from tvm.relay.op.contrib import ethosu
from tvm.relay.build_module import bind_params_by_name
# There's a bug in the TFLite converter which doesn't allow us to create single-operator
# reshape and strided_slice graphs, so in order to have some test coverage for these
# operators starting from TFLite, we test them alongside other operators.
def test_tflite_reshape_and_strided_slice():
dtype = "int8"
ifm_shape = [1, 8, 3, 6]
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def model_func(self, x):
weight_shape = [3, 3, 6, 1] # HWO1
weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
op = tf.nn.depthwise_conv2d(x, weight, strides=[1, 1, 1, 1], padding="SAME")
op = tf.nn.relu(op)
op = tf.reshape(op, [1, 8, 6, 3])
op = tf.nn.pool(op, [2, 2], "MAX")
op = tf.strided_slice(op, [0, 2, 3, 1], [1, 6, 5, 2])
return op
model = Model()
concrete_func = model.model_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
def verify(func):
# This TFLite graph gets lowered into
        # depthwise_conv2d -> clip -> reshape -> max_pool -> strided_slice -> reshape
        # which gets legalized into ethosu_depthwise_conv2d -> reshape -> ethosu_identity
        # -> ethosu_pooling -> strided_slice -> identity -> reshape -> identity
identity3 = func.body
reshape2 = identity3.args[0]
identity2 = reshape2.args[0]
strided_slice = identity2.args[0]
max_pool = strided_slice.args[0]
identity1 = max_pool.args[0]
reshape1 = identity1.args[0]
depthwise_conv2d = reshape1.args[0]
assert identity3.op.name == "contrib.ethosu.identity"
assert reshape2.op.name == "reshape"
assert identity2.op.name == "contrib.ethosu.identity"
assert strided_slice.op.name == "strided_slice"
assert max_pool.op.name == "contrib.ethosu.pooling"
assert identity1.op.name == "contrib.ethosu.identity"
assert reshape1.op.name == "reshape"
assert depthwise_conv2d.op.name == "contrib.ethosu.depthwise_conv2d"
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
mod, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = ethosu.partition_for_ethosu(mod)
mod = legalize.LegalizeEthosU()(mod)
verify(mod["tvmgen_default_ethos_u_main_0"])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_lookup_table.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tflite.Model
import tvm
import tensorflow as tf
from tvm import relay
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.relay.build_module import bind_params_by_name # type: ignore
from . import infra
ACCEL_TYPES = ["ethos-u55-256", "ethos-u55-128", "ethos-u55-64", "ethos-u55-32"]
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_tflite_lut_activations(accel_type):
dtype = "int8"
ifm_shape = (1, 55, 55, 3)
def create_tflite_graph():
class Model(tf.Module):
@tf.function
def tf_func(self, x):
weight_shape = (3, 3, ifm_shape[3], 4)
weight = tf.constant(
np.random.uniform(low=0, high=0.3, size=weight_shape), dtype=tf.float32
)
                # The input strides to the TensorFlow API need to be of shape 1x4
op = tf.nn.conv2d(x, weight, strides=(1, 2, 2, 1), padding="SAME", dilations=(1, 1))
op = tf.nn.tanh(op)
op = tf.nn.tanh(op)
weight_shape2 = (2, 3, 4, 1)
weight2 = tf.constant(
np.random.uniform(low=0, high=0.3, size=weight_shape2), dtype=tf.float32
)
op = tf.nn.depthwise_conv2d(
op, weight2, strides=(1, 1, 1, 1), padding="VALID", dilations=(2, 2)
)
op = tf.nn.sigmoid(op)
op = tf.nn.max_pool(op, (1, 1), strides=(1, 1, 1, 1), padding="SAME")
op = tf.nn.tanh(op)
return op
model = Model()
concrete_func = model.tf_func.get_concrete_function(
tf.TensorSpec(ifm_shape, dtype=tf.float32)
)
# Convert the model
def representative_dataset():
for _ in range(100):
data = 0.7 * np.random.rand(*tuple(ifm_shape))
yield [data.astype(np.float32)]
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()
return tflite_model
tflite_graph = create_tflite_graph()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": ifm_shape},
dtype_dict={"input": dtype},
)
mod = partition_for_ethosu(relay_module, params)
# Generate reference data
input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
test_runner = infra.create_test_runner(accel_type)
compiled_models = infra.build_source(
mod,
input_data,
output_data,
test_runner,
)
# Assumes only two runtime.Modules are created -- i.e. single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
compilation_artifacts = get_artifacts(ethosu_module)
cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
infra.print_payload(cmms)
infra.verify_source(compiled_models, test_runner)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_random_lut(accel_type):
dtype = "int8"
ifm_shape = (1, 55, 55, 3)
lut_data = np.random.randint(-128, high=127, size=[256])
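    # Map each possible int8 input value to its LUT entry: input value i reads entry i + 128.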
lut_data_map = {idx: lut_data[idx + 128] for idx in range(-128, 128)}
in_data = np.random.randint(-128, high=127, size=ifm_shape, dtype=dtype)
out_data = np.array([lut_data_map[i] for i in in_data.ravel()]).reshape(ifm_shape).astype(dtype)
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm0 = relay.var("ifm0", shape=ifm_shape, dtype=dtype)
lut1 = relay.var("lut1", shape=(256,), dtype="uint8")
identity = infra.make_ethosu_identity(ifm0, lut=lut1, activation="LUT")
glb_ethosu = relay.GlobalVar("tvmgen_default_ethos_u_main_0")
func = (
relay.Function([ifm0, lut1], identity)
.with_attr("Inline", 1)
.with_attr("Compiler", "ethos-u")
.with_attr("global_symbol", "tvmgen_default_ethos_u_main_0")
.with_attr("Primitive", 1)
)
params = {"lut1": tvm.nd.array(lut_data.astype("uint8"))}
func = bind_params_by_name(func, params)
mod = tvm.IRModule()
mod[glb_ethosu] = func
mod = relay.transform.InferType()(mod)
call = relay.Call(glb_ethosu, [ifm])
mod["main"] = relay.Function([ifm], call)
mod = relay.transform.InferType()(mod)
test_runner = infra.create_test_runner(accel_type)
compiled_models = infra.build_source(
mod,
{"ifm": in_data},
{"output": out_data},
test_runner,
)
# Assumes only two runtime.Modules are created -- i.e. single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
compilation_artifacts = get_artifacts(ethosu_module)
cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
infra.print_payload(cmms)
infra.verify_source(compiled_models, test_runner)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_lower_to_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_te
from tvm.relay.backend.contrib.ethosu.tir.scheduler import OperatorCompute
import tvm.relay.backend.contrib.ethosu.op as ethosu_ops
def test_ethosu_conv2d():
ifm = relay.var("ifm", shape=(1, 10, 20, 30), dtype="uint8")
weight = relay.var("weight", shape=(40, 3, 3, 30), dtype="uint8")
scale_bias = relay.var("scale_bias", shape=(40, 10), dtype="uint8")
lut = relay.var("lut", shape=(), dtype="uint8")
conv = ethosu_ops.ethosu_conv2d(
ifm,
weight,
scale_bias,
lut,
ifm_scale=0.5,
ifm_zero_point=10,
weight_zero_point=12,
ofm_scale=0.25,
ofm_zero_point=14,
ofm_channels=40,
padding=(1, 1, 1, 1),
kernel_shape=(3, 3),
strides=(1, 1),
dilation=(1, 1),
)
expr = relay.Function(relay.analysis.free_vars(conv), conv)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
lowered = lower_to_te(mod["main"])
assert len(lowered.outputs) == 1
assert len(lowered.inputs) == 4
conv2d_compute = OperatorCompute.from_output(lowered.outputs[0])
assert conv2d_compute.op.name == "ethosu_conv2d"
input_shapes = set()
for inp in lowered.inputs:
input_shapes.add(tuple([x.value for x in inp.shape]))
assert input_shapes == {(40, 10), (1, 10, 20, 30), (40, 3, 3, 30), ()}
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_lut_optimizer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test the pass that removes unnecssary identity operation if the identity
uses LUT and the preceding operator is LUT capable and doesn't already have a LUT.
"""
import pytest
pytest.importorskip("ethosu.vela")
import tensorflow as tf
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import LUTsOptimizer
from tvm.relay.backend.contrib.ethosu.codegen import relay_to_tir
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from . import infra
def test_merge_lut_into_conv():
"""If an operator that has a LUT attribute is followed by an identity operator
with LUT, we can merge the two operataors."""
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
lut1 = relay.const([i for i in range(256)], dtype="int8")
lut2 = relay.const([i for i in reversed(range(256))], dtype="int8")
def before():
conv1 = infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
id1 = infra.make_ethosu_identity(conv1, lut=lut1, activation="TANH")
conv2 = infra.make_ethosu_conv2d(id1, 4, 7, (2, 2), (1, 1), (1, 1), (1, 1))
id2 = infra.make_ethosu_identity(conv2, lut=lut2, activation="SIGMOID")
func = relay.Function(relay.analysis.free_vars(id2), id2)
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
return mod
def after():
conv1 = infra.make_ethosu_conv2d(
ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1), lut=lut1, activation="TANH"
)
conv2 = infra.make_ethosu_conv2d(
conv1, 4, 7, (2, 2), (1, 1), (1, 1), (1, 1), lut=lut2, activation="SIGMOID"
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
return mod
mod = LUTsOptimizer()(before())
mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(mod, after())
def test_multiple_luts():
"""Test that when an operation already has a LUT, we don't overwrite that LUT"""
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
lut1 = relay.const([i for i in range(256)], dtype="int8")
lut2 = relay.const([i for i in reversed(range(256))], dtype="int8")
def before():
conv1 = infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
id1 = infra.make_ethosu_identity(conv1, lut=lut1, activation="TANH")
id2 = infra.make_ethosu_identity(id1, lut=lut2, activation="TANH")
func = relay.Function(relay.analysis.free_vars(id2), id2)
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
return mod
def after():
conv1 = infra.make_ethosu_conv2d(
ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1), lut=lut1, activation="TANH"
)
id2 = infra.make_ethosu_identity(conv1, lut=lut2, activation="TANH")
func = relay.Function(relay.analysis.free_vars(id2), id2)
func = func.with_attr("Compiler", "ethos-u")
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
return mod
mod = LUTsOptimizer()(before())
mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(mod, after())
def test_lut_optimizer_runs_in_compilation_pipeline():
"""Test that the LUT optimization pass runs as part of the NPU compilation pipeline."""
ifm_shape = (1, 4, 4, 4)
@tf.function
def get_graph(x):
weight1 = tf.constant(np.random.uniform(size=(1, 1, 4, 4)), dtype=tf.float32)
op = tf.nn.conv2d(x, weight1, (1, 1), "VALID")
op = tf.nn.tanh(op)
weight2 = tf.constant(np.random.uniform(size=(1, 1, 4, 1)), dtype=tf.float32)
op = tf.nn.depthwise_conv2d(op, weight2, (1, 1, 1, 1), "VALID")
return tf.nn.tanh(op)
mod, _ = infra.get_tflite_graph(get_graph, [ifm_shape])
mod = partition_for_ethosu(mod)
mod = relay_to_tir(mod)
external_gv_name = mod["main"].body.op.name_hint
prim_func = mod[external_gv_name]
    # Check for hints in the TIR prim func that the LUT optimization pass has run.
# If the module was optimized, there should be no identity operations.
def check_identity(stmt):
if isinstance(stmt, tvm.tir.expr.Call):
assert stmt.args[0] != "ethosu_identity"
tvm.tir.stmt_functor.post_order_visit(prim_func.body, check_identity)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_merge_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm.relay.backend.contrib.ethosu.tir.passes import MergeConstants
import numpy as np
def check_const_dictionaries(const_dict, new_const_dict):
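    """Check that two constant dictionaries have the same keys and element-wise equal values."""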
assert list(const_dict) == list(new_const_dict)
for key, value in const_dict.items():
new_value = new_const_dict[key]
assert len(value) == len(new_value)
for i in range(len(value)):
assert value[i] == new_value[i]
def test_only_one_operator():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer2: T.Buffer[(128,), "uint8"], buffer3: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1_data = T.allocate([128], "uint8", "global")
p1 = T.buffer_decl([128], "uint8", data=p1_data)
p4_data = T.allocate([32], "uint8", "global")
p4 = T.buffer_decl([32], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer2: T.Buffer[(160,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p4_data = T.allocate([160], "uint8", "global")
p4 = T.buffer_decl([160], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 160, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p4[0], 128, 12, p4[128], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
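    # const_dict maps a prim_func argument index to its constant data; after merging, the two
    # separate buffers (weights and scale bias) are expected to become one concatenated buffer.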
const_dict = {
0: np.array([0, 10], dtype=np.uint8),
1: np.array([1, 11], dtype=np.uint8),
}
new_const_dict = {0: np.concatenate((const_dict[0], const_dict[1]))}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_all_operators_with_weights():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer2: T.Buffer[(128,), "uint8"], buffer3: T.Buffer[(32,), "uint8"], buffer4: T.Buffer[(112,), "uint8"], buffer5: T.Buffer[(32,), "uint8"], buffer6: T.Buffer[(112,), "uint8"], buffer7: T.Buffer[(32,), "uint8"], buffer8: T.Buffer[(112,), "uint8"], buffer9: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1_data = T.allocate([128], "uint8", "global")
p1 = T.buffer_decl([128], "uint8", data=p1_data)
p2_data = T.allocate([112], "uint8", "global")
p2 = T.buffer_decl([112], "uint8", data=p2_data)
p3_data = T.allocate([112], "uint8", "global")
p3 = T.buffer_decl([112], "uint8", data=p3_data)
p4_data = T.allocate([32], "uint8", "global")
p4 = T.buffer_decl([32], "uint8", data=p4_data)
p5_data = T.allocate([32], "uint8", "global")
p5 = T.buffer_decl([32], "uint8", data=p5_data)
p6_data = T.allocate([32], "uint8", "global")
p6 = T.buffer_decl([32], "uint8", data=p6_data)
p7_data = T.allocate([112], "uint8", "global")
p7 = T.buffer_decl([112], "uint8", data=p7_data)
p8_data = T.allocate([3], "uint8", "global")
p8 = T.buffer_decl([3], "uint8", data=p8_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 112, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 112, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 32, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, 12, p5[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 112, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer9[0], 32, p8[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 112, 12, p6[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer2: T.Buffer[(160,), "uint8"], buffer4: T.Buffer[(144,), "uint8"], buffer6: T.Buffer[(144,), "uint8"], buffer8: T.Buffer[(144,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p4_data = T.allocate([160], "uint8", "global")
p4 = T.buffer_decl([160], "uint8", data=p4_data)
p7_data = T.allocate([144], "uint8", "global")
p7 = T.buffer_decl([144], "uint8", data=p7_data)
p10_data = T.allocate([144], "uint8", "global")
p10 = T.buffer_decl([144], "uint8", data=p10_data)
p11_data = T.allocate([144], "uint8", "global")
p11 = T.buffer_decl([144], "uint8", data=p11_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 160, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 144, p7[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p4[0], 128, 12, p4[128], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 144, p10[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p7[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 144, p11[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p10[0], 112, 12, p10[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p11[0], 112, 12, p11[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {
0: np.array([0], dtype=np.uint8),
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
3: np.array([3], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
6: np.array([6], dtype=np.uint8),
7: np.array([7], dtype=np.uint8),
}
new_const_dict = {
0: np.concatenate((const_dict[0], const_dict[1])),
1: np.concatenate((const_dict[2], const_dict[3])),
2: np.concatenate((const_dict[4], const_dict[5])),
3: np.concatenate((const_dict[6], const_dict[7])),
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_operators_with_and_without_weights():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer2: T.Buffer[(80,), "uint8"], buffer3: T.Buffer[(64,), "uint8"]) -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer0 = T.buffer_decl([390336], "int8")
buffer1 = T.buffer_decl([97156], "int8")
buffer6 = T.buffer_decl([390336], "int8")
# body
p2_data = T.allocate([80], "uint8", "global")
p2 = T.buffer_decl([80], "uint8", data=p2_data)
p3_data = T.allocate([64], "uint8", "global")
p3 = T.buffer_decl([64], "uint8", data=p3_data)
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, buffer0[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 80, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 64, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, buffer0[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, buffer6[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p2[0], 80, 0, p3[0], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer2: T.Buffer[(144,), "uint8"]) -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer0 = T.buffer_decl([390336], "int8")
buffer1 = T.buffer_decl([97156], "int8")
buffer6 = T.buffer_decl([390336], "int8")
# body
p3_data = T.allocate([144], "uint8", "global")
p3 = T.buffer_decl([144], "uint8", data=p3_data)
T.evaluate(T.call_extern("ethosu_pooling", "int8", 214, 227, 2, 214, 0, 227, buffer1[0], 0, 0, 0, T.float32(1), 0, "NHWC", 454, 2, 1, "int8", 214, 114, 2, 214, 0, 114, buffer0[0], 0, 0, 0, T.float32(1), 0, "NHCWB16", 1824, 16, 1, "MAX", 2, 1, 2, 1, 1, 1, 0, 0, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 144, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 214, 114, 2, 214, 0, 114, buffer0[0], 0, 0, 0, T.float32(0.00392157), -128, "NHCWB16", 1824, 16, 1, "int8", 214, 114, 5, 214, 0, 114, buffer6[0], 0, 0, 0, T.float32(0.0174839), -128, "NHCWB16", 1824, 16, 1, 3, 1, 1, 1, 1, 2, p3[0], 80, 0, p3[80], 64, 0, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {
0: np.array([0], dtype=np.uint8),
1: np.array([1], dtype=np.uint8),
}
new_const_dict = {0: np.concatenate((const_dict[0], const_dict[1]))}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_copy_to_buffer_with_local_scope():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer1: T.Buffer[(64,), "uint8"],
buffer2: T.Buffer[(48,), "uint8"],
buffer3: T.Buffer[(256,), "uint8"],
buffer4: T.Buffer[(256,), "uint8"],
buffer5: T.Buffer[(16,), "uint8"],
buffer6: T.Buffer[(48,), "uint8"],
buffer7: T.Buffer[(256,), "uint8"],
buffer8: T.Buffer[(64,), "uint8"],
buffer9: T.Buffer[(256,), "int8"],
) -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([48], "uint8", "global")
p1 = T.buffer_decl([48], "uint8", data=p1_data)
p2_data = T.allocate([48], "uint8", "global")
p2 = T.buffer_decl([48], "uint8", data=p2_data)
p3_data = T.allocate([256], "int8", "local")
p3 = T.buffer_decl([256], "int8", data=p3_data, scope="local")
p5_data = T.allocate([16], "uint8", "global")
p5 = T.buffer_decl([16], "uint8", data=p5_data)
p6_data = T.allocate([48], "uint8", "global")
p6 = T.buffer_decl([48], "uint8", data=p6_data)
p7_data = T.allocate([256], "int8", "local")
p7 = T.buffer_decl([256], "int8", data=p7_data, scope="local")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 48, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 48, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 256, p3[0], dtype="handle")) # Local
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 16, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 48, p6[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer1[0], 0, 0, 0, T.float32(0.00392081), -128, "NHWC", 16, 4, 1, "int8", 4, 4, 4, 4, 0, 4, buffer9[0], 0, 0, 0, T.float32(0.00839574), -128, "NHCWB16", 64, 16, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, 0, p2[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 256, p7[0], dtype="handle")) # Local
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer9[0], 0, 0, 0, T.float32(0.0078125), 0, "NHCWB16", 64, 16, 1, "int8", 4, 4, 4, 4, 0, 4, buffer8[0], 0, 0, 0, T.float32(0.00372155), -128, "NHWC", 16, 4, 1, 1, 1, 1, 1, 1, 1, p5[0], 16, 0, p6[0], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer1: T.Buffer[(64,), "uint8"],
buffer2: T.Buffer[(96,), "uint8"],
buffer4: T.Buffer[(256,), "uint8"],
buffer5: T.Buffer[(64,), "uint8"],
buffer7: T.Buffer[(256,), "uint8"],
buffer8: T.Buffer[(64,), "uint8"],
buffer9: T.Buffer[(256,), "int8"],
) -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([96], "uint8", "global")
p1 = T.buffer_decl([96], "uint8", data=p1_data)
p2_data = T.allocate([64], "uint8", "global")
p2 = T.buffer_decl([64], "uint8", data=p2_data)
p3_data = T.allocate([256], "int8", "local")
p3 = T.buffer_decl([256], "int8", data=p3_data, scope="local")
p7_data = T.allocate([256], "int8", "local")
p7 = T.buffer_decl([256], "int8", data=p7_data, scope="local")
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 96, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 256, p3[0], dtype="handle")) # Local
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 64, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer1[0], 0, 0, 0, T.float32(0.00392081), -128, "NHWC", 16, 4, 1, "int8", 4, 4, 4, 4, 0, 4, buffer9[0], 0, 0, 0, T.float32(0.00839574), -128, "NHCWB16", 64, 16, 1, 1, 1, 1, 1, 1, 1, p1[0], 48, 0, p1[48], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 256, p7[0], dtype="handle")) # Local
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 4, 4, 4, 4, 0, 4, buffer9[0], 0, 0, 0, T.float32(0.0078125), 0, "NHCWB16", 64, 16, 1, "int8", 4, 4, 4, 4, 0, 4, buffer8[0], 0, 0, 0, T.float32(0.00372155), -128, "NHWC", 16, 4, 1, 1, 1, 1, 1, 1, 1, p2[0], 16, 0, p2[16], 48, 0, 0, 0, 0, "TANH", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
3: np.array([3], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
6: np.array([6], dtype=np.uint8),
}
new_const_dict = {
1: np.concatenate((const_dict[1], const_dict[2])),
2: const_dict[3],
3: np.concatenate((const_dict[4], const_dict[5])),
4: const_dict[6],
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_no_copies():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main() -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
placeholder = T.buffer_decl([20], "int8")
ethosu_write = T.buffer_decl([16], "int8")
# body
ethosu_write_4_data = T.allocate([16], "int8", "global")
ethosu_write_4 = T.buffer_decl([16], "int8", data=ethosu_write_4_data)
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 1, 4, 4, 1, 0, 4, placeholder[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "int8", 1, 4, 1, 1, 0, 4, placeholder[16], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 1, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "MAX", 0, "CLIP", -128, 127, "TFL", 1, 4, 4, dtype="handle"))
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main() -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
placeholder = T.buffer_decl([20], "int8")
ethosu_write = T.buffer_decl([16], "int8")
# body
ethosu_write_4_data = T.allocate([16], "int8", "global")
ethosu_write_4 = T.buffer_decl([16], "int8", data=ethosu_write_4_data)
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 1, 4, 4, 1, 0, 4, placeholder[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "int8", 1, 4, 1, 1, 0, 4, placeholder[16], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 1, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(0.00783747), -128, "NHWC", 1, 4, 1, "MAX", 0, "CLIP", -128, 127, "TFL", 1, 4, 4, dtype="handle"))
T.evaluate(T.call_extern("ethosu_identity", "int8", 1, 4, 4, 1, 0, 4, ethosu_write_4[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "int8", 1, 4, 4, 1, 0, 4, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1, 4, 1, "AVG", 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {}
new_const_dict = {}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_copies_to_the_same_buffer():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer2: T.Buffer[(128,), "uint8"], buffer3: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1_data = T.allocate([128], "uint8", "global")
p1 = T.buffer_decl([128], "uint8", data=p1_data)
p4_data = T.allocate([32], "uint8", "global")
p4 = T.buffer_decl([32], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer2: T.Buffer[(160,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p5_data = T.allocate([160], "uint8", "global")
p5 = T.buffer_decl([160], "uint8", data=p5_data)
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 160, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p5[0], 128, 12, p5[128], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 160, p5[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p5[0], 128, 12, p5[128], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {
0: np.array([0], dtype=np.uint8),
1: np.array([1], dtype=np.uint8),
}
new_const_dict = {0: np.concatenate((const_dict[0], const_dict[1]))}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_read_from_the_same_buffer():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(368,), "uint8"], buffer2: T.Buffer[(96,), "uint8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
p1_data = T.allocate([368], "uint8", "global")
p1 = T.buffer_decl([368], "uint8", data=p1_data)
p2_data = T.allocate([96], "uint8", "global")
p2 = T.buffer_decl([96], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 368, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 96, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p2[0], 48, p2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(464,), "uint8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([464], "uint8", "global")
p1 = T.buffer_decl([464], "uint8", data=p1_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 464, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
const_dict = {
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
}
new_const_dict = {1: np.concatenate((const_dict[1], const_dict[2]))}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_arbitrary_argument_order():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(368,), "uint8"], buffer2: T.Buffer[(96,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"], buffer3: T.Buffer[(368,), "uint8"], buffer4: T.Buffer[(96,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
p1_data = T.allocate([368], "uint8", "global")
p1 = T.buffer_decl([368], "uint8", data=p1_data)
p2_data = T.allocate([96], "uint8", "global")
p2 = T.buffer_decl([96], "uint8", data=p2_data)
p3_data = T.allocate([368], "uint8", "global")
p3 = T.buffer_decl([368], "uint8", data=p3_data)
p4_data = T.allocate([96], "uint8", "global")
p4 = T.buffer_decl([96], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 368, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 96, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p2[0], 48, p2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 368, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 192, p3[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(464,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"], buffer2: T.Buffer[(464,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([464], "uint8", "global")
p1 = T.buffer_decl([464], "uint8", data=p1_data)
p2_data = T.allocate([464], "uint8", "global")
p2 = T.buffer_decl([464], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 464, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
const_dict = {
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
}
new_const_dict = {
1: np.concatenate((const_dict[1], const_dict[2])),
3: np.concatenate((const_dict[4], const_dict[5])),
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, False)
check_const_dictionaries(const_dict, new_const_dict)
def test_arbitrary_argument_order_const_split():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(368,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"], buffer2: T.Buffer[(96,), "uint8"], buffer3: T.Buffer[(368,), "uint8"], buffer4: T.Buffer[(96,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
p1_data = T.allocate([368], "uint8", "global")
p1 = T.buffer_decl([368], "uint8", data=p1_data)
p2_data = T.allocate([96], "uint8", "global")
p2 = T.buffer_decl([96], "uint8", data=p2_data)
p3_data = T.allocate([368], "uint8", "global")
p3 = T.buffer_decl([368], "uint8", data=p3_data)
p4_data = T.allocate([96], "uint8", "global")
p4 = T.buffer_decl([96], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 368, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 96, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p2[0], 48, p2[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 368, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 192, p3[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(464,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"], buffer2: T.Buffer[(464,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([464], "uint8", "global")
p1 = T.buffer_decl([464], "uint8", data=p1_data)
p2_data = T.allocate([464], "uint8", "global")
p2 = T.buffer_decl([464], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 464, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
const_dict = {
1: np.array([1], dtype=np.uint8),
3: np.array([3], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
}
new_const_dict = {
1: np.concatenate((const_dict[1], const_dict[3])),
3: np.concatenate((const_dict[4], const_dict[5])),
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_arbitrary_argument_order_const_split_mixed():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(368,), "uint8"], buffer2: T.Buffer[(368,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"], buffer3: T.Buffer[(96,), "uint8"], buffer4: T.Buffer[(96,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# buffer definition
T.preflattened_buffer(placeholder, [1, 16, 16, 32], dtype="int8", data=placeholder.data)
T.preflattened_buffer(ethosu_write, [1, 16, 16, 8], dtype="int8", data=ethosu_write.data)
# body
p1_data = T.allocate([368], "uint8", "global")
p1 = T.buffer_decl([368], "uint8", data=p1_data)
p2_data = T.allocate([368], "uint8", "global")
p2 = T.buffer_decl([368], "uint8", data=p2_data)
p3_data = T.allocate([96], "uint8", "global")
p3 = T.buffer_decl([96], "uint8", data=p3_data)
p4_data = T.allocate([96], "uint8", "global")
p4 = T.buffer_decl([96], "uint8", data=p4_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 368, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 96, p3[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p3[0], 48, p3[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 368, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 96, p4[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p4[0], 48, p4[48], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], buffer1: T.Buffer[(464,), "uint8"], buffer2: T.Buffer[(464,), "uint8"], ethosu_write: T.Buffer[(4096,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
# body
p1_data = T.allocate([464], "uint8", "global")
p1 = T.buffer_decl([464], "uint8", data=p1_data)
p2_data = T.allocate([464], "uint8", "global")
p2 = T.buffer_decl([464], "uint8", data=p2_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 464, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 192, p1[192], 176, 12, p1[368], 48, p1[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 464, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 8, 32, 16, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 8, 8, 16, 0, 8, ethosu_write[2048], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 192, p2[192], 176, 12, p2[368], 48, p2[416], 48, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
const_dict = {
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
}
new_const_dict = {
1: np.concatenate((const_dict[1], const_dict[4])),
2: np.concatenate((const_dict[2], const_dict[5])),
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_cycle_count():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main(buffer2: T.Buffer[(128,), "uint8"], buffer3: T.Buffer[(32,), "uint8"], buffer4: T.Buffer[(112,), "uint8"], buffer5: T.Buffer[(32,), "uint8"], buffer6: T.Buffer[(112,), "uint8"], buffer7: T.Buffer[(32,), "uint8"], buffer8: T.Buffer[(112,), "uint8"], buffer9: T.Buffer[(32,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
v1a = T.var("int32")
v1b = T.var("int32")
v1c = T.var("int32")
v2a = T.var("int32")
v2b = T.var("int32")
v2c = T.var("int32")
v3a = T.var("int32")
v3b = T.var("int32")
v3c = T.var("int32")
v4a = T.var("int32")
v4b = T.var("int32")
v4c = T.var("int32")
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p1_data = T.allocate([128], "uint8", "global")
p1 = T.buffer_decl([128], "uint8", data=p1_data)
p2_data = T.allocate([112], "uint8", "global")
p2 = T.buffer_decl([112], "uint8", data=p2_data)
p3_data = T.allocate([112], "uint8", "global")
p3 = T.buffer_decl([112], "uint8", data=p3_data)
p4_data = T.allocate([32], "uint8", "global")
p4 = T.buffer_decl([32], "uint8", data=p4_data)
p5_data = T.allocate([32], "uint8", "global")
p5 = T.buffer_decl([32], "uint8", data=p5_data)
p6_data = T.allocate([32], "uint8", "global")
p6 = T.buffer_decl([32], "uint8", data=p6_data)
p7_data = T.allocate([112], "uint8", "global")
p7 = T.buffer_decl([112], "uint8", data=p7_data)
p8_data = T.allocate([3], "uint8", "global")
p8 = T.buffer_decl([3], "uint8", data=p8_data)
with T.attr(T.iter_var(v1a, None, "DataPar", ""), "pragma_compute_cycles_hint", 100):
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 128, p1[0], dtype="handle"))
with T.attr(T.iter_var(v1b, None, "DataPar", ""), "pragma_compute_cycles_hint", 101):
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 32, p4[0], dtype="handle"))
with T.attr(T.iter_var(v2a, None, "DataPar", ""), "pragma_compute_cycles_hint", 102):
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 112, p2[0], dtype="handle"))
with T.attr(T.iter_var(v2b, None, "DataPar", ""), "pragma_compute_cycles_hint", 103):
T.evaluate(T.call_extern("ethosu_copy", buffer5[0], 32, p5[0], dtype="handle"))
with T.attr(T.iter_var(v1c, None, "DataPar", ""), "pragma_compute_cycles_hint", 300):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p1[0], 128, 12, p4[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v3a, None, "DataPar", ""), "pragma_compute_cycles_hint", 104):
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 112, p3[0], dtype="handle"))
with T.attr(T.iter_var(v3b, None, "DataPar", ""), "pragma_compute_cycles_hint", 105):
T.evaluate(T.call_extern("ethosu_copy", buffer7[0], 32, p6[0], dtype="handle"))
with T.attr(T.iter_var(v2c, None, "DataPar", ""), "pragma_compute_cycles_hint", 301):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p2[0], 112, 12, p5[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v4a, None, "DataPar", ""), "pragma_compute_cycles_hint", 106):
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 112, p7[0], dtype="handle"))
with T.attr(T.iter_var(v4b, None, "DataPar", ""), "pragma_compute_cycles_hint", 107):
T.evaluate(T.call_extern("ethosu_copy", buffer9[0], 32, p8[0], dtype="handle"))
with T.attr(T.iter_var(v3c, None, "DataPar", ""), "pragma_compute_cycles_hint", 302):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p3[0], 112, 12, p6[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v4c, None, "DataPar", ""), "pragma_compute_cycles_hint", 303):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p8[0], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(buffer2: T.Buffer[(160,), "uint8"], buffer4: T.Buffer[(144,), "uint8"], buffer6: T.Buffer[(144,), "uint8"], buffer8: T.Buffer[(144,), "uint8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
v1a = T.var("int32")
v1c = T.var("int32")
v2a = T.var("int32")
v2c = T.var("int32")
v3a = T.var("int32")
v3c = T.var("int32")
v4a = T.var("int32")
v4c = T.var("int32")
buffer1 = T.buffer_decl([8192], "int8")
buffer10 = T.buffer_decl([2048], "int8")
# body
p4_data = T.allocate([160], "uint8", "global")
p4 = T.buffer_decl([160], "uint8", data=p4_data)
p7_data = T.allocate([144], "uint8", "global")
p7 = T.buffer_decl([144], "uint8", data=p7_data)
p10_data = T.allocate([144], "uint8", "global")
p10 = T.buffer_decl([144], "uint8", data=p10_data)
p11_data = T.allocate([144], "uint8", "global")
p11 = T.buffer_decl([144], "uint8", data=p11_data)
with T.attr(T.iter_var(v1a, None, "DataPar", ""), "pragma_compute_cycles_hint", 201):
T.evaluate(T.call_extern("ethosu_copy", buffer2[0], 160, p4[0], dtype="handle"))
with T.attr(T.iter_var(v2a, None, "DataPar", ""), "pragma_compute_cycles_hint", 205):
T.evaluate(T.call_extern("ethosu_copy", buffer4[0], 144, p7[0], dtype="handle"))
with T.attr(T.iter_var(v1c, None, "DataPar", ""), "pragma_compute_cycles_hint", 300):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p4[0], 128, 12, p4[128], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v3a, None, "DataPar", ""), "pragma_compute_cycles_hint", 209):
T.evaluate(T.call_extern("ethosu_copy", buffer6[0], 144, p10[0], dtype="handle"))
with T.attr(T.iter_var(v2c, None, "DataPar", ""), "pragma_compute_cycles_hint", 301):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p7[0], 112, 12, p7[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v4a, None, "DataPar", ""), "pragma_compute_cycles_hint", 213):
T.evaluate(T.call_extern("ethosu_copy", buffer8[0], 144, p11[0], dtype="handle"))
with T.attr(T.iter_var(v3c, None, "DataPar", ""), "pragma_compute_cycles_hint", 302):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p10[0], 112, 12, p10[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
with T.attr(T.iter_var(v4c, None, "DataPar", ""), "pragma_compute_cycles_hint", 303):
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, buffer1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, buffer10[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, p11[0], 112, 12, p11[112], 32, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
const_dict = {
0: np.array([0], dtype=np.uint8),
1: np.array([1], dtype=np.uint8),
2: np.array([2], dtype=np.uint8),
3: np.array([3], dtype=np.uint8),
4: np.array([4], dtype=np.uint8),
5: np.array([5], dtype=np.uint8),
6: np.array([6], dtype=np.uint8),
7: np.array([7], dtype=np.uint8),
}
new_const_dict = {
0: np.concatenate((const_dict[0], const_dict[1])),
1: np.concatenate((const_dict[2], const_dict[3])),
2: np.concatenate((const_dict[4], const_dict[5])),
3: np.concatenate((const_dict[6], const_dict[7])),
}
test_mod, const_dict = MergeConstants(const_dict)(InputModule)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod, reference_mod, True)
check_const_dictionaries(const_dict, new_const_dict)
def test_multiple_prim_funcs():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def main():
T.evaluate(0)
@T.prim_func
def abc():
T.evaluate(0)
# fmt: on
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the MergeConstants pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
MergeConstants({})(InputModule)
def test_no_main_prim_func():
# fmt: off
@tvm.script.ir_module
class InputModule:
@T.prim_func
def abs():
T.evaluate(0)
# fmt: on
err_rgx = (
r"Expected a single primitive function called 'main'. "
r"Please run the MergeConstants pass in conjunction with the LowerToTIR\(\) pass."
)
with pytest.raises(tvm.TVMError, match=err_rgx):
MergeConstants({})(InputModule)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, wrong-import-position
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.micro import model_library_format as mlf
from tvm import WorkspaceMemoryPools, WorkspacePoolInfo, PoolInfoProperties
import tvm
from tvm.testing.aot import convert_to_relay
from . import infra
MOBILENET_V1_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
MOBILENET_V2_URL = (
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
"mobilenet_v2_1.0_224_quant.tflite",
)
@pytest.mark.parametrize(
"accel_type, model_url, workspace_size",
[
("ethos-u65-256", MOBILENET_V1_URL, 1793376),
("ethos-u65-256", MOBILENET_V2_URL, 2217152),
("ethos-u55-256", MOBILENET_V1_URL, 1793376),
("ethos-u55-256", MOBILENET_V2_URL, 2217152),
("ethos-u55-128", MOBILENET_V2_URL, 2217152),
("ethos-u55-64", MOBILENET_V2_URL, 2217152),
("ethos-u55-32", MOBILENET_V2_URL, 2217152),
],
)
def test_networks_without_usmp(accel_type, model_url, workspace_size):
np.random.seed(23)
tflite_model_buf = infra.get_tflite_model(model_url)
input_data, output_data = infra.generate_ref_data_tflite(tflite_model_buf)
mod, params = convert_to_relay(tflite_model_buf)
mod = partition_for_ethosu(mod, params)
test_runner = infra.create_test_runner(accel_type, enable_usmp=False)
compiled_models = infra.build_source(mod, input_data, output_data, test_runner)
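# Without USMP, the workspace requirement is reported through the Model Library Format function memory map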
mlf_memory_map = mlf._build_function_memory_map(
compiled_models[0].executor_factory.function_metadata
)
assert mlf_memory_map["main"][0]["workspace_size_bytes"] == workspace_size
infra.verify_source(compiled_models, test_runner)
@pytest.mark.parametrize(
"accel_type, model_url, workspace_size",
[
("ethos-u65-256", MOBILENET_V1_URL, 1206880),
("ethos-u55-256", MOBILENET_V2_URL, 1509408),
],
)
def test_networks_with_usmp(accel_type, model_url, workspace_size):
np.random.seed(23)
tflite_model_buf = infra.get_tflite_model(model_url)
input_data, output_data = infra.generate_ref_data_tflite(tflite_model_buf)
mod, params = convert_to_relay(tflite_model_buf)
mod = partition_for_ethosu(mod, params)
test_runner = infra.create_test_runner(accel_type, enable_usmp=True)
compiled_models = infra.build_source(mod, input_data, output_data, test_runner)
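# With USMP enabled, the workspace is allocated as a single pool whose size is recorded in the codegen metadata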
allocated_pool_info = list(
dict(compiled_models[0].executor_factory.executor_codegen_metadata.pool_inputs).values()
)[0]
assert allocated_pool_info.allocated_size == workspace_size
infra.verify_source(compiled_models, test_runner)
@pytest.mark.parametrize(
"accel_type, model_url, workspace_size",
[
("ethos-u55-256", MOBILENET_V1_URL, 1205872),
("ethos-u55-256", MOBILENET_V2_URL, 1509408),
],
)
def test_networks_with_usmp_and_cascader_wo_striping(accel_type, model_url, workspace_size):
np.random.seed(23)
pool_name = "my_memory_pool"
host_target = tvm.target.Target("c")
ethosu_target = tvm.target.Target("ethos-u")
workspace_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
pool_name,
[host_target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=2400000,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
)
]
)
tflite_model_buf = infra.get_tflite_model(model_url)
input_data, output_data = infra.generate_ref_data_tflite(tflite_model_buf)
mod, params = convert_to_relay(tflite_model_buf)
mod = partition_for_ethosu(mod, params)
test_runner = infra.create_test_runner(
accel_type,
enable_usmp=True,
enable_cascader=True,
enable_striping=False,
workspace_pools=workspace_pools,
)
compiled_models = infra.build_source(
mod, input_data, output_data, test_runner, workspace_pools=workspace_pools
)
infra.verify_source(compiled_models, test_runner)
allocated_pool_info = list(
dict(compiled_models[0].executor_factory.executor_codegen_metadata.pool_inputs).values()
)[0]
assert allocated_pool_info.allocated_size == workspace_size
@pytest.mark.parametrize(
"accel_type, model_url, workspace_size",
[
# Checks the same test case multiple times to make sure it's not flaky
("ethos-u55-256", MOBILENET_V1_URL, 1010000),
("ethos-u55-256", MOBILENET_V1_URL, 1010000),
("ethos-u55-256", MOBILENET_V1_URL, 1010000),
("ethos-u55-256", MOBILENET_V1_URL, 1010000),
("ethos-u55-256", MOBILENET_V1_URL, 1010000),
# Checks the same test case multiple times to make sure it's not flaky
("ethos-u55-256", MOBILENET_V2_URL, 1400000),
("ethos-u55-256", MOBILENET_V2_URL, 1400000),
("ethos-u55-256", MOBILENET_V2_URL, 1400000),
("ethos-u55-256", MOBILENET_V2_URL, 1400000),
("ethos-u55-256", MOBILENET_V2_URL, 1400000),
],
)
def test_networks_with_usmp_and_cascader_with_striping(accel_type, model_url, workspace_size):
np.random.seed(23)
pool_name = "my_memory_pool"
host_target = tvm.target.Target("c")
ethosu_target = tvm.target.Target("ethos-u")
workspace_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
pool_name,
[host_target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=workspace_size,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
)
]
)
tflite_model_buf = infra.get_tflite_model(model_url)
input_data, output_data = infra.generate_ref_data_tflite(tflite_model_buf)
mod, params = convert_to_relay(tflite_model_buf)
mod = partition_for_ethosu(mod, params)
test_runner = infra.create_test_runner(
accel_type,
enable_usmp=True,
enable_cascader=True,
enable_striping=True,
workspace_pools=workspace_pools,
)
compiled_models = infra.build_source(
mod, input_data, output_data, test_runner, workspace_pools=workspace_pools
)
infra.verify_source(compiled_models, test_runner)
allocated_pool_info = list(
dict(compiled_models[0].executor_factory.executor_codegen_metadata.pool_inputs).values()
)[0]
assert allocated_pool_info.allocated_size <= workspace_size
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_outline_compiler_functions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the outline compiler functions pass.
"""
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import OutlineCompilerFunctions
def test_outline_compiler_functions():
compiler_name = "my-compiler"
wrong_compiler_name = "wrong-compiler"
def before():
inp = relay.var("input")
# Inlined function for "my-compiler"
x = relay.var("x", shape=(1, 2, 2, 4))
x = relay.reshape(x, newshape=(1, 4, 4))
x = relay.Function(relay.analysis.free_vars(x), x)
x = x.with_attr("Compiler", compiler_name)
x = x.with_attr("global_symbol", "ext_func")
# Inlined function for "wrong-compiler"
y = relay.var("y", shape=(1, 4, 4))
y = relay.reshape(y, newshape=(1, 16))
y = relay.Function(relay.analysis.free_vars(y), y)
y = y.with_attr("Compiler", wrong_compiler_name)
y = y.with_attr("global_symbol", "ext_func_2")
out = relay.Call(x, [inp])
out = relay.Call(y, [out])
out = relay.Function([inp], out)
return tvm.ir.IRModule.from_expr(out)
def expected():
mod = tvm.ir.IRModule()
inp = relay.var("input")
x = relay.var("x", shape=(1, 2, 2, 4))
x = relay.reshape(x, newshape=(1, 4, 4))
x = relay.Function(relay.analysis.free_vars(x), x)
x = x.with_attr("Compiler", compiler_name)
x = x.with_attr("global_symbol", "ext_func")
mod["ext_func"] = x
y = relay.var("y", shape=(1, 4, 4))
y = relay.reshape(y, newshape=(1, 16))
y = relay.Function(relay.analysis.free_vars(y), y)
y = y.with_attr("Compiler", wrong_compiler_name)
y = y.with_attr("global_symbol", "ext_func_2")
out = relay.Call(mod.get_global_var("ext_func"), [inp])
out = relay.Call(y, [out])
mod["main"] = relay.Function([inp], out)
return mod
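# Only the function tagged with the matching "Compiler" attribute should be outlined into a global function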
after = OutlineCompilerFunctions(compiler_name)(before())
exp = expected()
global_vars = [str(gv) for gv in after.get_global_vars()]
assert "@ext_func" in global_vars
assert "@ext_func_2" not in global_vars
assert tvm.ir.structural_equal(after["ext_func"], exp["ext_func"])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position
"""
Tests to check that the NPU partitioning frontend partitions
only supported operations.
"""
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.op.contrib import ethosu
@pytest.mark.parametrize(
"count_include_pad,pool_shape,padding",
[
(True, [2, 2], [0, 0, 0, 0]),
(False, [2, 2], [4, 4, 5, 5]),
(False, [9, 9], [1, 1, 1, 1]),
],
)
def test_invalid_avg_pool2d(count_include_pad, pool_shape, padding):
"""
Test that unsupported variants of avg_pool2d don't get partitioned.
"""
ifm_shape = [1, 4, 4, 3]
strides = [2, 2]
def get_graph():
x = relay.var("x", shape=ifm_shape, dtype="int8")
x = relay.cast(x, dtype="int32")
x = relay.nn.avg_pool2d(
x,
pool_shape,
strides,
padding=padding,
layout="NHWC",
count_include_pad=count_include_pad,
)
x = relay.cast(x, dtype="int8")
func = relay.Function(relay.analysis.free_vars(x), x)
return tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(get_graph())
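# The unsupported pooling variant must not be offloaded, so partitioning should leave the module unchanged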
partitioned_mod = ethosu.partition_for_ethosu(mod)
assert tvm.ir.structural_equal(mod, partitioned_mod)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_placeholder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This file contains a placeholder test that always run"""
def test_placeholder():
"""
This test always runs on every docker image.
Otherwise, pytest would return exit code 5
and break CI in the docker images where
microNPU tests are not run.
"""
pass
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_preprocess.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu import preprocess
def set_func_attr(func, compile_name, symbol_name):
"""
Helper function to attach attributes to the external function.
"""
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compile_name)
func = func.with_attr("global_symbol", symbol_name)
return func
def test_single_io():
"""
This test checks that the pass won't touch external functions that
have a single input and a single output.
"""
def create_graph():
def create_external_func1(mod_, compiler_name, symbol_name):
x_int = relay.var("x_int", shape=(10, 10))
z0 = relay.nn.relu(x_int)
f1 = relay.Function([x_int], z0)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
r = relay.Call(glb_symbol_f1, [x])
main = relay.Function([x], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
mod = create_graph()
exp = create_graph()
mod = preprocess.preprocess_ext_io()(mod)
assert tvm.ir.structural_equal(mod, exp, map_free_vars=True)
def test_2ins_single_out():
"""
This test checks an external function with two inputs and a single output.
"""
def create_graph():
def create_external_func1(mod_, compiler_name, symbol_name):
x_int = relay.var("x_int", shape=(10, 10))
w0_int = relay.var("w0_int", shape=(10, 10))
z0 = relay.add(x_int, w0_int)
f1 = relay.Function([x_int, w0_int], z0)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
r = relay.Call(glb_symbol_f1, [x, w0])
main = relay.Function([x, w0], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
def expected():
def create_external_func1(mod_, compiler_name, symbol_name):
ifms_int = relay.var("ifms_int", shape=[200])
# splits
(x_int_flat, w0_int_flat) = relay.split(ifms_int, [100])
# reshapes
x_int = relay.reshape(x_int_flat, newshape=(10, 10))
w0_int = relay.reshape(w0_int_flat, newshape=(10, 10))
z0 = relay.add(x_int, w0_int)
f1 = relay.Function([ifms_int], z0)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
# reshapes
x_reshaped = relay.reshape(x, newshape=100)
w0_reshaped = relay.reshape(w0, newshape=100)
# concat
ifms = relay.concatenate((x_reshaped, w0_reshaped), 0)
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
r = relay.Call(glb_symbol_f1, [ifms])
main = relay.Function([x, w0], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
mod = create_graph()
exp = expected()
mod = preprocess.preprocess_ext_io()(mod)
assert tvm.ir.structural_equal(mod, exp, map_free_vars=True)
def test_single_in_2outs():
"""
This test checks an external function with a single input and two outputs.
"""
def create_graph():
def create_external_func1(mod_, compiler_name, symbol_name):
x_int = relay.var("x_int", shape=(10, 10))
p0 = relay.nn.relu(x_int)
q0 = relay.tanh(x_int)
f1_o_tuple = relay.Tuple([p0, q0])
f1 = relay.Function([x_int], f1_o_tuple)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
pq_tuple = relay.Call(glb_symbol_f1, [x])
p0 = relay.TupleGetItem(pq_tuple, 0)
q0 = relay.TupleGetItem(pq_tuple, 1)
r = relay.concatenate((p0, q0), axis=0)
main = relay.Function([x], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
def expected():
def create_external_func1(mod_, compiler_name, symbol_name):
x_int = relay.var("x_int", shape=(10, 10))
p0 = relay.nn.relu(x_int)
q0 = relay.tanh(x_int)
# reshapes
p0_reshaped = relay.reshape(p0, newshape=100)
q0_reshaped = relay.reshape(q0, newshape=100)
ofms = relay.concatenate((p0_reshaped, q0_reshaped), 0)
f1 = relay.Function([x_int], ofms)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
ofms = relay.Call(glb_symbol_f1, [x])
# splits
(p0_flat, q0_flat) = relay.split(ofms, [100])
# reshapes
p0_flat_reshaped = relay.reshape(p0_flat, newshape=(10, 10))
q0_flat_reshaped = relay.reshape(q0_flat, newshape=(10, 10))
# original output
tuple_out = relay.Tuple([p0_flat_reshaped, q0_flat_reshaped])
p0 = relay.TupleGetItem(tuple_out, 0)
q0 = relay.TupleGetItem(tuple_out, 1)
r = relay.concatenate((p0, q0), axis=0)
main = relay.Function([x], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
mod = create_graph()
exp = expected()
mod = relay.transform.InferType()(mod)
mod = preprocess.preprocess_ext_io()(mod)
assert tvm.ir.structural_equal(mod, exp, map_free_vars=True)
def test_4ins_2outs():
"""
This test checks an external function with four inputs and two outputs.
It stands as a general test for multiple inputs and outputs.
"""
def create_graph():
def create_external_func1(mod_, compiler_name, symbol_name):
x_int = relay.var("x_int", shape=(10, 10))
w0_int = relay.var("w0_int", shape=(10, 10))
w1_int = relay.var("w1_int", shape=(10, 10))
w2_int = relay.var("w2_int", shape=(10, 10))
z0 = relay.add(x_int, w0_int)
p0 = relay.subtract(z0, w1_int)
q0 = relay.multiply(z0, w2_int)
f1_o_tuple = relay.Tuple([p0, q0])
f1 = relay.Function([x_int, w0_int, w1_int, w2_int], f1_o_tuple)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
glb_symbol_f1, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
pq_tuple = relay.Call(glb_symbol_f1, [x, w0, w1, w2])
p0 = relay.TupleGetItem(pq_tuple, 0)
q0 = relay.TupleGetItem(pq_tuple, 1)
r = relay.concatenate((p0, q0), axis=0)
main = relay.Function([x, w0, w1, w2], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
def expected():
def create_external_func1(mod_, compiler_name, symbol_name):
ifms_int = relay.var("ifms_int", shape=[400])
# splits
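# Each 10x10 input flattens to 100 elements, so the cumulative split points are 100, 200 and 300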
(x_int_flat, w0_int_flat, w1_int_flat, w2_int_flat) = relay.split(
ifms_int, [100, 200, 300]
)
# reshapes
x_int = relay.reshape(x_int_flat, newshape=(10, 10))
w0_int = relay.reshape(w0_int_flat, newshape=(10, 10))
w1_int = relay.reshape(w1_int_flat, newshape=(10, 10))
w2_int = relay.reshape(w2_int_flat, newshape=(10, 10))
z0 = relay.add(x_int, w0_int)
p0 = relay.subtract(z0, w1_int)
q0 = relay.multiply(z0, w2_int)
# f1_o_tuple = relay.Tuple([p0, q0])
# reshapes
p0_reshaped = relay.reshape(p0, newshape=100)
q0_reshaped = relay.reshape(q0, newshape=100)
ofms = relay.concatenate((p0_reshaped, q0_reshaped), 0)
f1 = relay.Function([ifms_int], ofms)
f1 = set_func_attr(f1, compiler_name, symbol_name)
glb_f1 = relay.GlobalVar(symbol_name)
mod_[glb_f1] = f1
mod_ = relay.transform.InferType()(mod_)
return glb_f1, mod_
mod = tvm.IRModule()
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
# reshapes
x_reshaped = relay.reshape(x, newshape=100)
w0_reshaped = relay.reshape(w0, newshape=100)
w1_reshaped = relay.reshape(w1, newshape=100)
w2_reshaped = relay.reshape(w2, newshape=100)
# concat
ifms = relay.concatenate((x_reshaped, w0_reshaped, w1_reshaped, w2_reshaped), 0)
# call
glb_func, mod = create_external_func1(mod, "ethos-u", "ethosu_0")
ofms = relay.Call(glb_func, [ifms])
# splits
(p0_flat, q0_flat) = relay.split(ofms, [100])
# reshapes
p0_flat_reshaped = relay.reshape(p0_flat, newshape=(10, 10))
q0_flat_reshaped = relay.reshape(q0_flat, newshape=(10, 10))
# original output
tuple_out = relay.Tuple([p0_flat_reshaped, q0_flat_reshaped])
p0 = relay.TupleGetItem(tuple_out, 0)
q0 = relay.TupleGetItem(tuple_out, 1)
r = relay.concatenate((p0, q0), axis=0)
main = relay.Function([x, w0, w1, w2], r)
mod["main"] = main
mod = relay.transform.InferType()(mod)
return mod
mod = create_graph()
exp = expected()
mod = preprocess.preprocess_ext_io()(mod)
assert tvm.ir.structural_equal(mod, exp, map_free_vars=True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_remove_concatenates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
import tvm.script
from tvm.script import tir as T
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_conv2d
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder: T.Buffer[(1536,), "int8"], placeholder_1: T.Buffer[(1280,), "int8"], T_concat: T.Buffer[(4096,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([2992], "uint8")
buffer_1 = T.buffer_decl([160], "uint8")
buffer_2 = T.buffer_decl([2992], "uint8")
buffer_3 = T.buffer_decl([160], "uint8")
buffer_4 = T.buffer_decl([2992], "uint8")
buffer_5 = T.buffer_decl([160], "uint8")
buffer_6 = T.buffer_decl([2992], "uint8")
buffer_7 = T.buffer_decl([160], "uint8")
# body
T_concat_1_data = T.allocate([2816], "int8", "global", annotations={"disable_lower_builtin":True})
T_concat_1 = T.buffer_decl([2816], "int8", data=T_concat_1_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 10, 16, 8, 0, 10, placeholder_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 160, 16, 1, "int8", 8, 10, 16, 8, 0, 10, T_concat_1[192], 0, 0, 0, T.float32(0.25), 14, "NHWC", 352, 16, 1, 3, 3, 1, 1, 1, 1, buffer[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 10, 16, 8, 0, 10, T_concat_1[192], 0, 0, 0, T.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 10, 16, 8, 0, 10, T_concat[352], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, buffer_2[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_3[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 12, 16, 8, 0, 12, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 192, 16, 1, "int8", 8, 12, 16, 8, 0, 12, T_concat_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 352, 16, 1, 3, 3, 1, 1, 1, 1, buffer_4[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_5[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 22, 16, 8, 0, 22, T_concat_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 352, 16, 1, "int8", 8, 22, 16, 8, 0, 22, T_concat[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 16, 1, 3, 3, 1, 1, 1, 1, buffer_6[0], 2992, T.int8(-1), T.int8(-1), 12, buffer_7[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_concat():
def _get_func():
ifm1 = relay.var("ifm1", shape=(1, 8, 12, 16), dtype="int8")
ifm2 = relay.var("ifm2", shape=(1, 8, 10, 16), dtype="int8")
conv1 = make_ethosu_conv2d(ifm1, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = make_ethosu_conv2d(ifm2, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conc1 = relay.concatenate((conv1, conv2), axis=2)
conv3 = make_ethosu_conv2d(conc1, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conv4 = make_ethosu_conv2d(conv2, 16, 16, (3, 3), (1, 1), (1, 1), (1, 1))
conc2 = relay.concatenate((conv3, conv4), axis=2)
func = relay.Function(relay.analysis.free_vars(conc2), conc2)
func = run_opt_pass(func, relay.transform.InferType())
return func
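    # After lowering there are no concatenate operations left: each convolution
    # writes straight into the concatenation result buffer at the correct byte
    # offset (T_concat_1[192], T_concat[352], ... in the ReferenceModule above).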
func = _get_func()
mod, _ = _lower_to_tir(func)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_binary_elementwise, get_binary_elementwise_args
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape, ifm_channels, ifm2_channels, ifm_layout, ofm_layout, rounding_mode",
[
((1, 5, 9, 3), (1, 5, 9, 3), 3, 3, "NHWC", "NHWC", "TFL"),
((1, 8, 3, 9, 16), (1, 8, 3, 9, 16), 40, 40, "NHCWB16", "NHCWB16", "NATURAL"),
((1, 8, 3, 9, 16), (1, 8, 3, 9, 16), 40, 40, "NHCWB16", "NHWC", "TRUNCATE"),
((1, 8, 9, 40), (1, 8, 9, 40), 40, 40, "NHWC", "NHCWB16", "TFL"),
# Broadcast
((1, 5, 9, 3), (1, 1, 9, 1), 3, 1, "NHWC", "NHWC", "NATURAL"),
((1, 8, 9, 40), (1, 1, 1, 1), 40, 1, "NHWC", "NHCWB16", "TRUNCATE"),
],
)
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX"])
@pytest.mark.parametrize("activation", ["NONE", "CLIP"])
def test_binary_elementwise_single(
ifm_shape,
ifm2_shape,
ifm_channels,
ifm2_channels,
ifm_layout,
ofm_layout,
rounding_mode,
operator_type,
activation,
):
dtype = "int8"
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
dtype,
False,
activation,
ifm_layout,
ifm_layout,
ofm_layout,
rounding_mode,
)
func = relay.Function(relay.analysis.free_vars(binary_elementwise), binary_elementwise)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_binary_elementwise_args(stmt))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
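    # Derive the expected feature map strides for each layout: NHWC uses the usual
    # row-major element strides (collapsed to 1 on unit dimensions), while NHCWB16
    # packs channels into bricks of 16, giving a fixed width stride of 16 and a
    # channel-block stride of 16 * W.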
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3] if ifm_shape[2] != 1 else 1
ifm_stride_h = ifm_shape[2] * ifm_shape[3] if ifm_shape[1] != 1 else 1
ifm2_stride_c = 1
ifm2_stride_w = ifm2_shape[3] if ifm2_shape[2] != 1 else 1
ifm2_stride_h = ifm2_shape[2] * ifm2_shape[3] if ifm2_shape[1] != 1 else 1
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[2]
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ifm2_stride_w = 16
ifm2_stride_c = 16 * ifm2_shape[3]
ifm2_stride_h = 16 * ifm2_shape[2] * ifm2_shape[3]
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[3]
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ifm_channels if ofm_width > 1 else 1
ofm_stride_h = ifm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((ifm_channels - 1) // 16 + 1)
serial_binary_elementwise = spec.SerialBinaryElementwise(
ifm=spec.SerialFeatureMap(
data_type=dtype,
height=ifm_shape[1],
width=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels=ifm_channels,
tile_height_0=ifm_shape[1],
tile_height_1=0,
tile_width_0=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ifm2=spec.SerialFeatureMap(
data_type=dtype,
height=ifm2_shape[1],
width=ifm2_shape[2] if ifm_layout == "NHWC" else ifm2_shape[3],
channels=ifm2_channels,
tile_height_0=ifm2_shape[1],
tile_height_1=0,
tile_width_0=ifm2_shape[2] if ifm_layout == "NHWC" else ifm2_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm2_stride_h,
stride_w=ifm2_stride_w,
stride_c=ifm2_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type=dtype,
height=ofm_height,
width=ofm_width,
channels=ifm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ofm_layout,
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
operator_type=operator_type,
reversed_operands=False,
activation=spec.SerialActivation(
op=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
),
rounding_mode=rounding_mode,
block_config=spec.SerialBlockConfig(0, 0, 0),
)
assert data[0] == ["ethosu_binary_elementwise"] + list(serial_binary_elementwise)
@pytest.mark.parametrize(
"ifm_shape, ifm2_shape, ifm_channels, ifm2_channels, ifm_layout, ofm_layout",
[
((1, 5, 9, 3), (1, 5, 9, 3), 3, 3, "NHWC", "NHWC"),
((1, 8, 3, 9, 16), (1, 8, 3, 9, 16), 40, 40, "NHCWB16", "NHCWB16"),
((1, 8, 3, 9, 16), (1, 8, 3, 9, 16), 40, 40, "NHCWB16", "NHWC"),
((1, 8, 9, 40), (1, 8, 9, 40), 40, 40, "NHWC", "NHCWB16"),
# Broadcast
((1, 5, 9, 3), (1, 1, 9, 1), 3, 1, "NHWC", "NHWC"),
((1, 8, 9, 40), (1, 1, 1, 1), 40, 1, "NHWC", "NHCWB16"),
],
)
@pytest.mark.parametrize("operator_type", ["SHR", "SHL"])
@pytest.mark.parametrize("rounding_mode", ["TFL", "NATURAL", "TRUNCATE"])
def test_shift_binary_elementwise_single(
ifm_shape,
ifm2_shape,
ifm_channels,
ifm2_channels,
ifm_layout,
ofm_layout,
operator_type,
rounding_mode,
):
dtype = "int32"
activation = "NONE" # Only NONE is available if the activation type is int32
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
dtype,
False,
"NONE",
ifm_layout,
ifm_layout,
ofm_layout,
rounding_mode,
)
func = relay.Function(relay.analysis.free_vars(binary_elementwise), binary_elementwise)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_binary_elementwise_args(stmt))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3] if ifm_shape[2] != 1 else 1
ifm_stride_h = ifm_shape[2] * ifm_shape[3] if ifm_shape[1] != 1 else 1
ifm2_stride_c = 1
ifm2_stride_w = ifm2_shape[3] if ifm2_shape[2] != 1 else 1
ifm2_stride_h = ifm2_shape[2] * ifm2_shape[3] if ifm2_shape[1] != 1 else 1
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[2]
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ifm2_stride_w = 16
ifm2_stride_c = 16 * ifm2_shape[3]
ifm2_stride_h = 16 * ifm2_shape[2] * ifm2_shape[3]
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[3]
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ifm_channels if ofm_width > 1 else 1
ofm_stride_h = ifm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((ifm_channels - 1) // 16 + 1)
serial_binary_elementwise = spec.SerialBinaryElementwise(
ifm=spec.SerialFeatureMap(
data_type=dtype,
height=ifm_shape[1],
width=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels=ifm_channels,
tile_height_0=ifm_shape[1],
tile_height_1=0,
tile_width_0=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ifm2=spec.SerialFeatureMap(
data_type=dtype,
height=ifm2_shape[1],
width=ifm2_shape[2] if ifm_layout == "NHWC" else ifm2_shape[3],
channels=ifm2_channels,
tile_height_0=ifm2_shape[1],
tile_height_1=0,
tile_width_0=ifm2_shape[2] if ifm_layout == "NHWC" else ifm2_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm2_stride_h,
stride_w=ifm2_stride_w,
stride_c=ifm2_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type=dtype,
height=ofm_height,
width=ofm_width,
channels=ifm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ofm_layout,
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
operator_type=operator_type,
reversed_operands=False,
activation=spec.SerialActivation(
op=activation,
clip_min=0,
clip_max=0,
),
rounding_mode=rounding_mode,
block_config=spec.SerialBlockConfig(0, 0, 0),
)
assert data[0] == ["ethosu_binary_elementwise"] + list(serial_binary_elementwise)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import total_cascader
from .infra import make_ethosu_conv2d
def _create_serial_conv2d_params(
ifm_shape,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
upscale="NONE",
):
dtype = "int8"
dilated_kernel_h = (kernel_shape[0] - 1) * dilation[0] + 1
dilated_kernel_w = (kernel_shape[1] - 1) * dilation[1] + 1
upscale_factor = 2 if upscale != "NONE" else 1
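    # Expected OFM spatial size follows the usual convolution output formula:
    #   ofm_dim = (ifm_dim * upscale_factor - dilated_kernel + pad_before + pad_after) // stride + 1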
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3]
ifm_stride_h = ifm_shape[2] * ifm_shape[3]
ofm_height = (
ifm_shape[1] * upscale_factor - dilated_kernel_h + padding[0] + padding[2]
) // strides[0] + 1
ofm_width = (
ifm_shape[2] * upscale_factor - dilated_kernel_w + padding[1] + padding[3]
) // strides[1] + 1
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ofm_height = (
ifm_shape[1] * upscale_factor - dilated_kernel_h + padding[0] + padding[2]
) // strides[0] + 1
ofm_width = (
ifm_shape[3] * upscale_factor - dilated_kernel_w + padding[1] + padding[3]
) // strides[1] + 1
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ofm_channels if ofm_width > 1 else 1
ofm_stride_h = ofm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((ofm_channels - 1) // 16 + 1)
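    # The flat list below mirrors the argument order of the "ethosu_conv2d" extern
    # call (IFM feature map, OFM feature map, kernel/strides/dilation, padding,
    # activation and clip range, rounding mode, upscale, block config), which is
    # what get_conv2d_args extracts from the lowered TIR.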
return [
dtype,
ifm_shape[1],
ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
ifm_channels,
ifm_shape[1],
0,
ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
0,
0,
0,
0,
0.5,
10,
ifm_layout,
ifm_stride_h,
ifm_stride_w,
ifm_stride_c,
dtype,
ofm_height,
ofm_width,
ofm_channels,
ofm_height,
0,
ofm_width,
0,
0,
0,
0,
0.25,
14,
ofm_layout,
ofm_stride_h,
ofm_stride_w,
ofm_stride_c,
kernel_shape[1],
kernel_shape[0],
strides[1],
strides[0],
dilation[1],
dilation[0],
12,
padding[0],
padding[1],
padding[2],
padding[3],
activation,
10 if activation == "CLIP" else 0,
100 if activation == "CLIP" else 0,
rounding_mode,
upscale,
0,
0,
0,
]
def get_conv2d_args(call, include_buffers=False, remove_constants=False):
"""A method to extract the arguments from conv2d extern call."""
args = call.args
conv_args = []
remove_indices = [0]
# call.args[41]: BufferLoad for the first half of the weights
# call.args[42]: length of the load of the first half of the weights
# call.args[43]: BufferLoad for the second half of the weights
# call.args[44]: length of the load of the second half of the weights
# call.args[46]: BufferLoad for the first half of the bias
# call.args[47]: length of the load of the first half of the bias
# call.args[48]: BufferLoad for the second half of the bias
# call.args[49]: length of the load of the second half of the bias
if remove_constants:
remove_indices += [41, 42, 43, 44, 46, 47, 48, 49]
for i, arg in enumerate(args):
if i in remove_indices:
continue
elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
conv_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.BufferLoad) and not include_buffers:
conv_args.append(arg.indices[0])
else:
conv_args.append(arg)
return conv_args
@pytest.mark.parametrize(
"trial",
[
[
(1, 8, 8, 3),
3,
16,
(1, 1),
(2, 1, 2, 1),
(1, 1),
(1, 1),
"CLIP",
"NHWC",
"NHWC",
"TFL",
"NONE",
],
[
(1, 8, 8, 3),
3,
16,
(1, 1),
(0, 0, 0, 0),
(1, 1),
(1, 1),
"NONE",
"NHWC",
"NHWC",
"NATURAL",
"NONE",
],
[
(1, 1, 1, 1),
1,
16,
(1, 1),
(0, 0, 0, 0),
(1, 1),
(1, 1),
"CLIP",
"NHWC",
"NHWC",
"TRUNCATE",
"NONE",
],
[
(1, 7, 9, 4),
4,
13,
(3, 2),
(1, 2, 1, 2),
(2, 1),
(1, 2),
"NONE",
"NHWC",
"NHWC",
"TFL",
"NONE",
],
[
(1, 8, 2, 8, 16),
18,
12,
(1, 1),
(2, 1, 2, 1),
(1, 1),
(1, 1),
"CLIP",
"NHCWB16",
"NHWC",
"NATURAL",
"ZEROS",
],
[
(1, 7, 9, 4),
4,
71,
(3, 2),
(1, 2, 0, 2),
(2, 1),
(1, 2),
"CLIP",
"NHWC",
"NHCWB16",
"TRUNCATE",
"ZEROS",
],
[
(1, 4, 12, 9, 16),
182,
67,
(2, 3),
(6, 3, 6, 2),
(2, 2),
(1, 1),
"CLIP",
"NHCWB16",
"NHCWB16",
"TFL",
"ZEROS",
],
[
(1, 7, 9, 4),
4,
13,
(3, 2),
(1, 2, 0, 3),
(2, 1),
(2, 2),
"CLIP",
"NHWC",
"NHWC",
"NATURAL",
"NEAREST",
],
[
(1, 7, 9, 4),
4,
71,
(3, 2),
(1, 2, 0, 2),
(2, 1),
(2, 2),
"CLIP",
"NHWC",
"NHCWB16",
"TRUNCATE",
"NEAREST",
],
[
(1, 13, 12, 19, 16),
182,
67,
(1, 3),
(5, 3, 2, 3),
(2, 1),
(2, 1),
"CLIP",
"NHCWB16",
"NHCWB16",
"TFL",
"NEAREST",
],
],
)
def test_conv2d_single(trial):
def _get_func(
ifm_shape,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
activation,
ifm_layout,
ofm_layout,
rounding_mode,
upscale,
):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
conv = make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
activation=activation,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
rounding_mode=rounding_mode,
upscale=upscale,
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
# TODO(@mbaret) Fix the tests for these known failures
    # These are anticipated to actually be correct; the failures are a testing
    # issue related to equivalent convolutions.
known_failures = [
[(1, 3, 12, 9, 16), 182, 67, (2, 3), (1, 3), (2, 2), (1, 1), "CLIP", "NHCWB16", "NHCWB16"],
[(1, 2, 12, 9, 16), 182, 67, (1, 3), (6, 3), (2, 2), (1, 1), "CLIP", "NHCWB16", "NHCWB16"],
]
func = _get_func(*trial)
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_conv2d_args(stmt, remove_constants=True))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
answer = _create_serial_conv2d_params(*trial)
assert data[0] == answer, data[0]
# fmt: off
@tvm.script.ir_module
class Conv2dDoubleCascade1:
@T.prim_func
def main(placeholder_5: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(512,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([304], "uint8")
buffer_1 = T.buffer_decl([80], "uint8")
buffer_2 = T.buffer_decl([320], "uint8")
buffer_3 = T.buffer_decl([160], "uint8")
# body
ethosu_write_2_data = T.allocate([1024], "int8", "global", annotations={"disable_lower_builtin": True})
ethosu_write_2 = T.buffer_decl([1024], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 3, 8, 0, 4, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 32, 1, 1, 1, 1, 1, 1, 1, buffer_3[0], 160, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 128, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, buffer[0], 304, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 3, 8, 0, 4, placeholder_5[12], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 32, 1, 1, 1, 1, 1, 1, 1, buffer_3[0], 160, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 4, 32, 8, 0, 4, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 128, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[32], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, buffer[0], 304, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dDoubleCascade2:
@T.prim_func
def main(placeholder_5: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(512,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([80], "uint8")
buffer_1 = T.buffer_decl([320], "uint8")
buffer_2 = T.buffer_decl([1312], "uint8")
buffer_3 = T.buffer_decl([2608], "uint8")
# body
ethosu_write_2_data = T.allocate([1536], "int8", "global", annotations={"disable_lower_builtin": True})
ethosu_write_2 = T.buffer_decl([1536], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 3, 3, 1, 1, 1, 1, buffer_2[0], 1312, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[256], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 8, 8, 4, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 3, 3, 1, 1, 1, 1, buffer_3[0], 2608, T.int8(-1), T.int8(-1), 12, buffer[0], 80, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[48], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 3, 3, 1, 1, 1, 1, buffer_2[0], 1312, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 32, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 8, 8, 4, 0, 8, ethosu_write_1[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 3, 3, 1, 1, 1, 1, buffer_3[0], 2608, T.int8(-1), T.int8(-1), 12, buffer[0], 80, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dDoubleCascade3:
@T.prim_func
def main(placeholder_5: T.Buffer[(768,), "int8"], ethosu_write_1: T.Buffer[(640,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([1744], "uint8")
buffer_1 = T.buffer_decl([80], "uint8")
buffer_2 = T.buffer_decl([320], "uint8")
buffer_3 = T.buffer_decl([880], "uint8")
# body
ethosu_write_2_data = T.allocate([2560], "int8", "global", annotations={"disable_lower_builtin": True})
ethosu_write_2 = T.buffer_decl([2560], "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 16, 3, 8, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 48, 3, 1, "int8", 8, 8, 32, 8, 0, 8, ethosu_write_2[512], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 2, 3, 2, 1, 2, 1, buffer_3[0], 880, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 2, 1, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 8, 32, 8, 0, 8, ethosu_write_2[512], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 32, 8, 1, 2, 3, 2, 1, 2, 1, buffer[0], 1744, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 2, 1, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 12, 16, 3, 12, 0, 16, placeholder_5[192], 0, 0, 0, T.float32(0.5), 10, "NHWC", 48, 3, 1, "int8", 10, 8, 32, 10, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 2, 3, 2, 1, 2, 1, buffer_3[0], 880, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 1, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 10, 8, 32, 10, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 8, 4, 8, 8, 0, 4, ethosu_write_1[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 32, 8, 1, 2, 3, 2, 1, 2, 1, buffer[0], 1744, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 1, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 16, 3, 4, 0, 16, placeholder_5[576], 0, 0, 0, T.float32(0.5), 10, "NHWC", 48, 3, 1, "int8", 4, 8, 32, 4, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 2, 3, 2, 1, 2, 1, buffer_3[0], 880, T.int8(-1), T.int8(-1), 12, buffer_2[0], 320, T.int8(-1), T.int8(-1), 0, 1, 2, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 8, 32, 4, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "int8", 4, 4, 8, 4, 0, 4, ethosu_write_1[512], 0, 0, 0, T.float32(0.25), 14, "NHWC", 32, 8, 1, 2, 3, 2, 1, 2, 1, buffer[0], 1744, T.int8(-1), T.int8(-1), 12, buffer_1[0], 80, T.int8(-1), T.int8(-1), 0, 1, 2, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dDoubleCascade4:
@T.prim_func
def main(placeholder_5: T.Buffer[(1024,), "int8"], ethosu_write_1: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([1456], "uint8")
buffer_1 = T.buffer_decl([352], "uint8")
buffer_2 = T.buffer_decl([272], "uint8")
buffer_3 = T.buffer_decl([11040], "uint8")
# body
ethosu_write_2_data = T.allocate([2304], "int8", "global", annotations={"disable_lower_builtin": True})
ethosu_write_2 = T.buffer_decl((2304,), "int8", data=ethosu_write_2_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 128, 16, 1, "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[384], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 384, 16, 128, 3, 3, 1, 1, 1, 1, buffer[0], 1456, T.int8(-1), T.int8(-1), 12, buffer_1[0], 352, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[384], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 384, 16, 128, "int8", 4, 8, 26, 4, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 256, 16, 128, 3, 3, 1, 1, 1, 1, buffer_3[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_2[0], 272, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 6, 8, 3, 6, 0, 8, placeholder_5[256], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 128, 16, 1, "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 384, 16, 128, 3, 3, 1, 1, 1, 1, buffer[0], 1456, T.int8(-1), T.int8(-1), 12, buffer_1[0], 352, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 8, 35, 5, 0, 8, ethosu_write_2[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 384, 16, 128, "int8", 4, 8, 26, 4, 0, 8, ethosu_write_1[1024], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 256, 16, 128, 3, 3, 1, 1, 1, 1, buffer_3[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_2[0], 272, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dDoubleCascade5:
@T.prim_func
def main(placeholder: T.Buffer[(192,), "int8"], ethosu_write: T.Buffer[(8192,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([320], "uint8")
buffer_2 = T.buffer_decl([304], "uint8")
buffer_3 = T.buffer_decl([80], "uint8")
# body
ethosu_write_1_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
ethosu_write_1 = T.buffer_decl([4096], "int8", data=ethosu_write_1_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 8, 3, 4, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 32, 1, 1, 1, 1, 1, 1, 1, buffer[0], 160, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 32, 8, 16, 0, 32, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 304, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 4, 8, 3, 4, 0, 8, placeholder[96], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 512, 32, 1, 1, 1, 1, 1, 1, 1, buffer[0], 160, T.int8(-1), T.int8(-1), 12, buffer_1[0], 320, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 16, 32, 8, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 32, 8, 16, 0, 32, ethosu_write[4096], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 8, 1, 1, 1, 1, 1, 1, 1, buffer_2[0], 304, T.int8(-1), T.int8(-1), 12, buffer_3[0], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "ZEROS", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dDoubleCascade6:
@T.prim_func
def main(placeholder: T.Buffer[(1024,), "int8"], ethosu_write: T.Buffer[(32768,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([1456], "uint8")
buffer_1 = T.buffer_decl([352], "uint8")
buffer_2 = T.buffer_decl([11040], "uint8")
buffer_3 = T.buffer_decl([272], "uint8")
# body
ethosu_write_1_data = T.allocate([12288], "int8", "global", annotations={"disable_lower_builtin":True})
ethosu_write_1 = T.buffer_decl([12288], "int8", data=ethosu_write_1_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 8, 3, 8, 0, 8, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 128, 16, 1, "int8", 16, 16, 35, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 768, 16, 256, 3, 3, 1, 1, 1, 1, buffer[0], 1456, T.int8(-1), T.int8(-1), 12, buffer_1[0], 352, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NEAREST", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 35, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHCWB16", 768, 16, 256, "int8", 32, 32, 26, 32, 0, 32, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHCWB16", 1024, 16, 512, 3, 3, 1, 1, 1, 1, buffer_2[0], 11040, T.int8(-1), T.int8(-1), 12, buffer_3[0], 272, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NEAREST", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
"trial",
[
[
Conv2dDoubleCascade1,
(1, 8, 8, 3),
3,
32,
8,
(1, 1),
(0, 0, 0, 0),
(1, 1),
(1, 1),
"NHWC",
"NONE",
(1, 8, 4, 8),
],
[
Conv2dDoubleCascade2,
(1, 8, 8, 3),
3,
32,
8,
(3, 3),
(1, 1, 1, 1),
(1, 1),
(1, 1),
"NHWC",
"NONE",
(1, 4, 8, 8),
],
[
Conv2dDoubleCascade3,
(1, 16, 16, 3),
3,
32,
8,
(3, 2),
(2, 1, 2, 1),
(1, 2),
(1, 2),
"NHWC",
"NONE",
(1, 8, 4, 8),
],
[
Conv2dDoubleCascade4,
(1, 8, 1, 8, 16),
3,
35,
26,
(3, 3),
(1, 1, 1, 1),
(1, 1),
(1, 1),
"NHCWB16",
"NONE",
(1, 4, 2, 8, 16),
],
[
Conv2dDoubleCascade5,
(1, 8, 8, 3),
3,
32,
8,
(1, 1),
(0, 0, 0, 0),
(1, 1),
(1, 1),
"NHWC",
"ZEROS",
(1, 16, 32, 8),
],
[
Conv2dDoubleCascade6,
(1, 8, 1, 8, 16),
3,
35,
26,
(3, 3),
(1, 1, 1, 1),
(1, 1),
(1, 1),
"NHCWB16",
"NEAREST",
(1, 32, 2, 32, 16),
],
],
)
def test_conv2d_double_cascade(trial):
def _get_func(
ifm_shape,
ifm_channels,
mid_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
layout,
upscale,
):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
conv1 = make_ethosu_conv2d(
ifm,
ifm_channels,
mid_channels,
kernel_shape,
padding,
strides,
dilation,
activation="NONE",
ifm_layout=layout,
ofm_layout=layout,
upscale=upscale,
)
conv2 = make_ethosu_conv2d(
conv1,
mid_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
activation="NONE",
ifm_layout=layout,
ofm_layout=layout,
upscale=upscale,
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
reference_mod = trial[0]
params = trial[1:]
func = _get_func(*params[:-1])
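    # The final trial element is the output stripe shape handed to total_cascader,
    # which determines how the OFM is split and therefore how many pairs of
    # ethosu_conv2d calls appear in the reference module.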
mod, _ = _lower_to_tir(func, cascader=total_cascader(params[-1]))
script = mod.script(show_meta=True)
mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True)
# fmt: off
@tvm.script.ir_module
class Conv2dInlineCopy1:
@T.prim_func
def main(placeholder_3: T.Buffer[(960,), "int8"], ethosu_write_1: T.Buffer[(1024,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([848], "uint8")
buffer_1 = T.buffer_decl([160], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 8, 8, 4, 8, 0, 8, placeholder_3[120], 0, 0, 0, T.float32(0.5), 10, "NHWC", 96, 8, 1, "int8", 8, 8, 16, 8, 0, 8, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 16, 1, 3, 3, 1, 1, 1, 1, buffer[0], 848, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dInlineCopy2:
@T.prim_func
def main(placeholder_3: T.Buffer[(315,), "int8"], ethosu_write_1: T.Buffer[(240,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([656], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 3, 5, 3, 3, 0, 5, placeholder_3[146], 0, 0, 0, T.float32(0.5), 10, "NHWC", 45, 5, 1, "int8", 3, 5, 16, 3, 0, 5, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 80, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 656, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
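# The strided_slice on the IFM is not lowered to a separate copy: it becomes a plain
# element offset into the input buffer (placeholder_3[120] and placeholder_3[146] in
# the reference modules above).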
@pytest.mark.parametrize(
"trial",
[
[Conv2dInlineCopy1, (1, 10, 12, 8), (0, 1, 3, 0), (1, 9, 11, 4)],
[Conv2dInlineCopy2, (1, 7, 9, 5), (0, 3, 2, 1), (1, 6, 7, 4)],
],
)
def test_conv2d_inline_copy(trial):
def _get_func(ifm_shape, lower, upper, ofm_channels=16):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
sliced = relay.strided_slice(ifm, lower, upper)
conv = make_ethosu_conv2d(
sliced, upper[3] - lower[3], ofm_channels, (3, 3), (1, 1), (1, 1), (1, 1)
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
reference_mod = trial[0]
params = trial[1:]
func = _get_func(*params)
mod, _ = _lower_to_tir(func)
script = mod.script(show_meta=True)
mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True)
# fmt: off
@tvm.script.ir_module
class Conv2dInlineReshape1:
@T.prim_func
def main(placeholder_3: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(768,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([848], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dInlineReshape2:
@T.prim_func
def main(placeholder_3: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(768,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([848], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dInlineReshape3:
@T.prim_func
def main(placeholder_3: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(768,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([848], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
@tvm.script.ir_module
class Conv2dInlineReshape4:
@T.prim_func
def main(placeholder_3: T.Buffer[(192,), "int8"], ethosu_write_1: T.Buffer[(768,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([160], "uint8")
buffer_1 = T.buffer_decl([848], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 1, 1, 0, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 5, 6, 4, 5, 0, 6, placeholder_3[72], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 4, 1, "int8", 4, 6, 16, 4, 0, 6, ethosu_write_1[384], 0, 0, 0, T.float32(0.25), 14, "NHWC", 96, 16, 1, 3, 3, 1, 1, 1, 1, buffer_1[0], 848, T.int8(-1), T.int8(-1), 12, buffer[0], 160, T.int8(-1), T.int8(-1), 0, 1, 1, 1, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
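# All four reshape variants below flatten to the same NHWC IFM, so the reference
# modules Conv2dInlineReshape1-4 are identical: the reshape is inlined into the
# convolution's input read rather than materialised as a copy.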
@pytest.mark.parametrize(
"trial",
[
[Conv2dInlineReshape1, (4, 6, 8, 1), (1, 8, 6, 4), "NHWC"],
[Conv2dInlineReshape2, (1, 4 * 6, 8), (1, 8, 6, 4), "NHWC"],
[Conv2dInlineReshape3, (4 * 6 * 8, 1), (1, 8, 6, 4), "NHWC"],
[Conv2dInlineReshape4, (4 * 6 * 8,), (1, 8, 6, 4), "NHWC"],
],
)
def test_conv2d_inline_reshape(trial):
def _get_func(ifm_shape, reshaped, ifm_layout):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
ifm_reshaped = relay.reshape(ifm, reshaped)
conv = make_ethosu_conv2d(
ifm_reshaped,
reshaped[3],
16,
(3, 3),
(1, 1),
(1, 1),
(1, 1),
activation="NONE",
ifm_layout=ifm_layout,
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
reference_mod = trial[0]
params = trial[1:]
func = _get_func(*params)
mod, _ = _lower_to_tir(func, cascader=total_cascader((1, 4, 6, 16)))
script = mod.script(show_meta=True)
mod = tvm.script.from_source(script)
tvm.ir.assert_structural_equal(mod["main"], reference_mod["main"], True)
# TODO(@mbaret) Fix this case
@pytest.mark.xfail(raises=Exception, strict=True)
def test_conv2d_big_pad():
def _get_func():
ifm_shape = (1, 2, 2, 8)
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
conv = make_ethosu_conv2d(
ifm, ifm_shape[3], 16, (1, 1), (7, 7), (1, 1), (1, 1), ifm_layout="NHWC"
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, _ = _lower_to_tir(func, cascader=total_cascader((1, 4, 4, 16)))
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants, OperatorCompute
from .infra import make_ethosu_conv2d
# fmt: off
@tvm.script.ir_module
class ReferenceModule:
@T.prim_func
def main(placeholder_3: T.Buffer[(8192,), "int8"], ethosu_write_1: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer_1 = T.buffer_decl([384], "uint8")
# body
placeholder_global_data = T.allocate([384], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_global = T.buffer_decl([384], "uint8", data=placeholder_global_data)
T.evaluate(T.call_extern("ethosu_copy", buffer_1[0], 384, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 8, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 304, T.int8(-1), T.int8(-1), 12, placeholder_global[304], 80, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_copy():
def _get_func():
data = relay.var("data", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
data,
32,
8,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
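    # copy_constants() stages the compile-time constants (304 bytes of weights plus
    # 80 bytes of scale/bias packed into one 384-byte buffer) through an ethosu_copy
    # into a new global buffer before the convolution, as in the ReferenceModule above.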
mod, _ = _lower_to_tir(func, cascader=copy_constants())
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = ReferenceModule
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
# fmt: off
@tvm.script.ir_module
class WeightStream:
@T.prim_func
def main(placeholder_5: T.Buffer[(8192,), "int8"], ethosu_write_1: T.Buffer[(4096,), "int8"]) -> None:
# function attr dict
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer = T.buffer_decl([528], "uint8")
buffer_2 = T.buffer_decl([336], "uint8")
# body
placeholder_d_global_data = T.allocate([528], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global = T.buffer_decl([528], "uint8", data=placeholder_d_global_data)
placeholder_d_global_1_data = T.allocate([336], "uint8", "global", annotations={"disable_lower_builtin": True})
placeholder_d_global_1 = T.buffer_decl([336], "uint8", data=placeholder_d_global_1_data)
T.evaluate(T.call_extern("ethosu_copy", buffer[0], 528, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 336, placeholder_d_global_1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 10, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global[0], 416, T.int8(-1), T.int8(-1), 12, placeholder_d_global[416], 112, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder_5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 6, 16, 0, 16, ethosu_write_1[10], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_d_global_1[0], 272, T.int8(-1), T.int8(-1), 12, placeholder_d_global_1[272], 64, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_weight_stream():
def _cascader(cached_func, const_dict, sch):
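        # A hand-written scheduler callback: split the OFM channel axis (axis 3) into
        # slices of 10 and cache the weight and scale/bias reads into global buffers
        # computed per slice, which lowers to the two ethosu_copy calls in the
        # WeightStream reference module above.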
weight = cached_func.inputs[1]
scale_bias = cached_func.inputs[2]
out = cached_func.outputs[0]
conv_compute = OperatorCompute.from_output(out)
co = conv_compute.split(sch, 3, 10)
cache_weight = sch.cache_read(weight, "global", [conv_compute.op])
cache_scale_bias = sch.cache_read(scale_bias, "global", [conv_compute.op])
sch[cache_weight].compute_at(sch[out], co)
sch[cache_scale_bias].compute_at(sch[out], co)
def _get_func():
ifm = relay.var("ifm", shape=(1, 16, 16, 32), dtype="int8")
conv = make_ethosu_conv2d(
ifm,
32,
16,
(1, 1),
(0, 0),
(1, 1),
(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv), conv)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
mod, _ = _lower_to_tir(func, cascader=_cascader)
script = mod.script(show_meta=True)
test_mod = tvm.script.from_source(script)
reference_mod = WeightStream
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_depthwise_conv2d, get_convolutional_args
@pytest.mark.parametrize(
"trial",
[
[(1, 8, 8, 3), 3, (3, 2), (0, 0), (1, 1), (1, 1), "CLIP", "NHWC", "NHWC", "TFL"],
[(1, 8, 8, 3), 3, (1, 1), (2, 1), (1, 1), (1, 1), "NONE", "NHWC", "NHWC", "NATURAL"],
[(1, 8, 8, 3), 3, (1, 1), (0, 0), (1, 1), (1, 1), "NONE", "NHWC", "NHWC", "TRUNCATE"],
[(1, 1, 1, 1), 1, (1, 1), (0, 0), (1, 1), (1, 1), "CLIP", "NHWC", "NHWC", "TFL"],
[(1, 7, 9, 4), 4, (3, 2), (1, 2), (2, 1), (1, 2), "NONE", "NHWC", "NHWC", "NATURAL"],
[
(1, 8, 2, 8, 16),
18,
(1, 1),
(2, 1),
(1, 1),
(1, 1),
"CLIP",
"NHCWB16",
"NHWC",
"TRUNCATE",
],
[(1, 7, 9, 40), 40, (3, 2), (1, 2), (2, 1), (1, 2), "CLIP", "NHWC", "NHCWB16", "TFL"],
[
(1, 4, 12, 9, 16),
182,
(2, 3),
(6, 3),
(2, 2),
(1, 1),
"CLIP",
"NHCWB16",
"NHCWB16",
"NATURAL",
],
[(1, 7, 9, 4), 4, (3, 2), (1, 2), (2, 1), (2, 2), "CLIP", "NHWC", "NHWC", "TRUNCATE"],
[(1, 7, 9, 41), 41, (3, 2), (1, 2), (2, 1), (2, 2), "CLIP", "NHWC", "NHCWB16", "TFL"],
[
(1, 13, 12, 19, 16),
182,
(1, 3),
(5, 3),
(2, 1),
(2, 1),
"CLIP",
"NHCWB16",
"NHCWB16",
"NATURAL",
],
],
)
@tvm.testing.skip_parameterizations(
"trial3", reason="See https://github.com/apache/tvm/issues/12841"
)
def test_depthwise_conv2d_single(request, trial):
def _get_func(
ifm_shape,
channels,
kernel_shape,
padding,
strides,
dilation,
activation,
ifm_layout,
ofm_layout,
rounding_mode,
):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
depthwise = make_ethosu_depthwise_conv2d(
ifm,
channels,
kernel_shape,
padding,
strides,
dilation,
activation,
ifm_layout,
ofm_layout,
"int8",
"uint8",
rounding_mode,
)
func = relay.Function(relay.analysis.free_vars(depthwise), depthwise)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func(*trial)
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_convolutional_args(stmt, remove_constants=True))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
(
ifm_shape,
channels,
kernel_shape,
padding,
strides,
dilation,
activation,
ifm_layout,
ofm_layout,
rounding_mode,
) = trial
dilated_kernel_h = (kernel_shape[0] - 1) * dilation[0] + 1
dilated_kernel_w = (kernel_shape[1] - 1) * dilation[1] + 1
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3]
ifm_stride_h = ifm_shape[2] * ifm_shape[3]
ofm_height = (ifm_shape[1] - dilated_kernel_h + padding[0] + padding[0]) // strides[0] + 1
ofm_width = (ifm_shape[2] - dilated_kernel_w + padding[1] + padding[1]) // strides[1] + 1
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ofm_height = (ifm_shape[1] - dilated_kernel_h + padding[0] + padding[0]) // strides[0] + 1
ofm_width = (ifm_shape[3] - dilated_kernel_w + padding[1] + padding[1]) // strides[1] + 1
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = channels if ofm_width > 1 else 1
ofm_stride_h = channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((channels - 1) // 16 + 1)
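    # The expected argument list mirrors the ethosu_depthwise_conv2d extern call
    # layout (IFM feature map, OFM feature map, kernel, padding, activation and clip
    # range, rounding mode); the quantisation constants (0.6/11 and 0.26/15) are
    # presumably those fixed by the make_ethosu_depthwise_conv2d helper in infra.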
answer = [
"int8",
ifm_shape[1],
ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels,
ifm_shape[1],
0,
ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
0,
0,
0,
0,
0.6,
11,
ifm_layout,
ifm_stride_h,
ifm_stride_w,
ifm_stride_c,
"int8",
ofm_height,
ofm_width,
channels,
ofm_height,
0,
ofm_width,
0,
0,
0,
0,
0.26,
15,
ofm_layout,
ofm_stride_h,
ofm_stride_w,
ofm_stride_c,
kernel_shape[1],
kernel_shape[0],
strides[1],
strides[0],
dilation[1],
dilation[0],
13,
padding[0],
padding[1],
padding[0],
padding[1],
activation,
15 if activation == "CLIP" else 0,
105 if activation == "CLIP" else 0,
rounding_mode,
"NONE",
0,
0,
0,
]
assert data[0] == answer, data[0]
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_identity, get_pooling_args
@pytest.mark.parametrize("ifm_shape", [[1, 5, 9, 3], [20, 14, 7], [31, 40], [101]])
def test_identity(ifm_shape):
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
identity = make_ethosu_identity(ifm)
func = relay.Function(relay.analysis.free_vars(identity), identity)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_pooling_args(stmt))
    # Construct the 4D shape that the initial ifm shape gets legalized into
ref_ifm_shape = ifm_shape
if len(ref_ifm_shape) < 4:
ref_ifm_shape = [1] + ref_ifm_shape
while len(ref_ifm_shape) < 4:
ref_ifm_shape.append(1)
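    # e.g. [20, 14, 7] -> [1, 20, 14, 7], [31, 40] -> [1, 31, 40, 1], [101] -> [1, 101, 1, 1]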
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
ifm_stride_c = 1
ifm_stride_w = ref_ifm_shape[3]
ifm_stride_h = ref_ifm_shape[2] * ref_ifm_shape[3]
ofm_height = ref_ifm_shape[1]
ofm_width = ref_ifm_shape[2]
ofm_channels = ref_ifm_shape[3]
ofm_stride_c = 1
ofm_stride_w = ofm_channels if ofm_width > 1 else 1
ofm_stride_h = ofm_channels * ofm_width if ofm_height > 1 else 1
# The identity operator TIR gets converted into serial pooling
serial_pooling = spec.SerialPooling(
ifm=spec.SerialFeatureMap(
data_type="int8",
height=ref_ifm_shape[1],
width=ref_ifm_shape[2],
channels=ofm_channels,
tile_height_0=ref_ifm_shape[1],
tile_height_1=0,
tile_width_0=ref_ifm_shape[2],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout="NHWC",
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type="int8",
height=ofm_height,
width=ofm_width,
channels=ofm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout="NHWC",
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
pooling_type="AVG",
pool_shape=spec.SerialKernel(1, 1, 1, 1, 1, 1),
padding=spec.SerialPadding(0, 0, 0, 0),
activation=spec.SerialActivation(op="NONE", clip_min=0, clip_max=0),
upscale="NONE",
rounding_mode="TFL",
block_config=spec.SerialBlockConfig(0, 0, 0),
)
assert data[0] == ["ethosu_identity"] + list(serial_pooling)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_pooling, get_pooling_args
def _create_serial_pooling(
ifm_shape,
ofm_channels,
ifm_layout,
ofm_layout,
pool_shape,
pooling_type,
strides,
padding,
activation="NONE",
rounding_mode="TFL",
upscale="NONE",
):
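    """Build the expected spec.SerialPooling for the given parameters; used as
    the reference against which the lowered ethosu_pooling TIR call is
    compared."""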
upscale_factor = 2 if upscale != "NONE" else 1
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3]
ifm_stride_h = ifm_shape[2] * ifm_shape[3]
ofm_height = (
ifm_shape[1] * upscale_factor - pool_shape[0] + padding[0] + padding[2]
) // strides[0] + 1
ofm_width = (
ifm_shape[2] * upscale_factor - pool_shape[1] + padding[1] + padding[3]
) // strides[1] + 1
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3] if ofm_channels >= 16 else 1
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ofm_height = (
ifm_shape[1] * upscale_factor - pool_shape[0] + padding[0] + padding[2]
) // strides[0] + 1
ofm_width = (
ifm_shape[3] * upscale_factor - pool_shape[1] + padding[1] + padding[3]
) // strides[1] + 1
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ofm_channels if ofm_width > 1 else 1
ofm_stride_h = ofm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width if ofm_channels >= 16 else 1
ofm_stride_h = 16 * ofm_width * ((ofm_channels - 1) // 16 + 1)
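        # Worked example for the NHCWB16 branch: with ofm_width=9 and
        # ofm_channels=40, stride_c = 16 * 9 = 144 and
        # stride_h = 16 * 9 * ((40 - 1) // 16 + 1) = 432 elements, reflecting
        # the 16-channel bricks of the NHCWB16 layout.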
return spec.SerialPooling(
ifm=spec.SerialFeatureMap(
data_type="int8",
height=ifm_shape[1],
width=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels=ofm_channels,
tile_height_0=ifm_shape[1],
tile_height_1=0,
tile_width_0=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type="int8",
height=ofm_height,
width=ofm_width,
channels=ofm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ofm_layout,
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
pooling_type=pooling_type,
pool_shape=spec.SerialKernel(
width=pool_shape[1],
height=pool_shape[0],
stride_w=strides[1],
stride_h=strides[0],
dilation_w=1,
dilation_h=1,
),
padding=spec.SerialPadding(
top=padding[0], left=padding[1], bottom=padding[2], right=padding[3]
),
activation=spec.SerialActivation(
op=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
),
rounding_mode=rounding_mode,
upscale=upscale,
block_config=spec.SerialBlockConfig(0, 0, 0),
)
@pytest.mark.parametrize(
"ifm_shape, ofm_channels, ifm_layout, ofm_layout, rounding_mode, upscale",
[
((1, 5, 9, 3), 3, "NHWC", "NHWC", "TFL", "NONE"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHCWB16", "NATURAL", "NONE"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHWC", "TRUNCATE", "ZEROS"),
((1, 8, 9, 40), 40, "NHWC", "NHCWB16", "TFL", "ZEROS"),
((1, 8, 9, 8), 8, "NHWC", "NHCWB16", "TFL", "NEAREST"),
((1, 5, 9, 3), 3, "NHWC", "NHWC", "TFL", "NEAREST"),
],
)
@pytest.mark.parametrize("pooling_type", ["AVG", "MAX"])
@pytest.mark.parametrize("activation", ["NONE", "CLIP"])
def test_pooling_single(
ifm_shape,
ofm_channels,
ifm_layout,
ofm_layout,
pooling_type,
activation,
rounding_mode,
upscale,
):
pool_shape = (3, 2)
strides = (1, 2)
    # When strides are not (1, 1), it is possible to create invalid padding
    # configurations. A pooling operation can still be constructed with such
    # padding, but the compiler will adjust it accordingly, leading to a
    # mismatch between the expected and actual result. Therefore, hardcoded
    # padding values are used for each case.
padding = (1, 1, 1, 0) if upscale == "NONE" else (0, 0, 0, 0)
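    # The padding tuple is ordered (top, left, bottom, right), matching how
    # spec.SerialPadding is populated in _create_serial_pooling.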
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
pooling = make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
activation,
ifm_layout,
ofm_layout,
rounding_mode,
upscale,
)
func = relay.Function(relay.analysis.free_vars(pooling), pooling)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_pooling_args(stmt))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
serial_pooling = _create_serial_pooling(
ifm_shape,
ofm_channels,
ifm_layout,
ofm_layout,
pool_shape,
pooling_type,
strides,
padding,
activation,
rounding_mode,
upscale,
)
assert data[0] == ["ethosu_pooling"] + list(serial_pooling)
def test_correct_stride_with_multiple_pooling():
"""Testing a specific case of two pooling operations with NHWC inputs/outputs
    but an NHCWB16 intermediate tensor. This led to elements being accessed in the
wrong order by the NPU, due to incorrect stride values being calculated."""
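    # The (1, 4, 4, 8) NHWC input maps to a (1, 4, 1, 4, 16) NHCWB16
    # intermediate (channels grouped into bricks of 16), which is the shape
    # passed to _create_serial_pooling for the second pooling operation below.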
ifm_shape = (1, 4, 4, 8)
ofm_channels = 8
pooling_type = "MAX"
pool_shape = (1, 1)
strides = (1, 1)
padding = (0, 0, 0, 0)
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
op = make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
ifm_layout="NHWC",
ofm_layout="NHCWB16",
)
op = make_ethosu_pooling(
op,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
ifm_layout="NHCWB16",
ofm_layout="NHWC",
)
func = relay.Function(relay.analysis.free_vars(op), op)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_pooling_args(stmt))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
serial_pooling_1 = _create_serial_pooling(
[1, 4, 4, 8],
8,
"NHWC",
"NHCWB16",
pool_shape,
pooling_type,
strides,
padding,
)
serial_pooling_2 = _create_serial_pooling(
[1, 4, 1, 4, 16],
8,
"NHCWB16",
"NHWC",
pool_shape,
pooling_type,
strides,
padding,
)
assert data[0] == ["ethosu_pooling"] + list(serial_pooling_1)
assert data[1] == ["ethosu_pooling"] + list(serial_pooling_2)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_replace_unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
import tvm.script
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from .infra import make_ethosu_unary_elementwise
def _get_unary_elementwise_args(call, include_buffers=False, remove_constants=False):
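    """Collect the arguments of a unary elementwise call_extern, keeping
    immediate values and reducing buffer loads to their index expressions."""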
args = call.args
unary_elementwise_args = []
    for arg in args:
        if isinstance(arg, (tvm.tir.expr.IntImm, tvm.tir.expr.FloatImm)):
unary_elementwise_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.BufferLoad) and not include_buffers:
unary_elementwise_args.append(arg.indices[0])
else:
unary_elementwise_args.append(arg)
return unary_elementwise_args
@pytest.mark.parametrize(
"ifm_shape, ifm_channels, ifm_layout, ofm_layout, rounding_mode",
[
((1, 5, 9, 3), 3, "NHWC", "NHWC", "TFL"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHCWB16", "NATURAL"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHWC", "TRUNCATE"),
((1, 8, 9, 40), 40, "NHWC", "NHCWB16", "TFL"),
],
)
@pytest.mark.parametrize("operator_type, data_type", [("ABS", "int8"), ("CLZ", "int32")])
@pytest.mark.parametrize("activation", ["NONE"])
def test_unary_elementwise_single(
ifm_shape,
ifm_channels,
ifm_layout,
ofm_layout,
rounding_mode,
operator_type,
activation,
data_type,
):
ifm = relay.var("ifm", shape=ifm_shape, dtype=data_type)
unary_elementwise = make_ethosu_unary_elementwise(
ifm, ifm_channels, operator_type, activation, ifm_layout, ofm_layout, rounding_mode
)
func = relay.Function(relay.analysis.free_vars(unary_elementwise), unary_elementwise)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = _lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(_get_unary_elementwise_args(stmt, remove_constants=True))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3] if ifm_shape[2] != 1 else 1
ifm_stride_h = ifm_shape[2] * ifm_shape[3] if ifm_shape[1] != 1 else 1
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[2]
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ofm_height = ifm_shape[1]
ofm_width = ifm_shape[3]
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ifm_channels if ofm_width > 1 else 1
ofm_stride_h = ifm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((ifm_channels - 1) // 16 + 1)
serial_unary_elementwise = spec.SerialUnaryElementwise(
ifm=spec.SerialFeatureMap(
data_type=data_type,
height=ifm_shape[1],
width=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels=ifm_channels,
tile_height_0=ifm_shape[1],
tile_height_1=0,
tile_width_0=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type=data_type,
height=ofm_height,
width=ofm_width,
channels=ifm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ofm_layout,
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
operator_type=operator_type,
activation=spec.SerialActivation(
op=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
),
rounding_mode=rounding_mode,
block_config=spec.SerialBlockConfig(0, 0, 0),
)
assert data[0] == ["ethosu_unary_elementwise"] + list(serial_unary_elementwise)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_rolling_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.relay.backend.contrib.ethosu.tir.scheduler import OperatorCompute
import tvm.relay.backend.contrib.ethosu.codegen as codegen
import tensorflow as tf
from . import infra
@pytest.mark.parametrize(
"axis, ifm_shape, pool_shape",
[
(1, (1, 12, 1, 2), (3, 1)),
(1, (1, 12, 12, 2), (3, 3)),
(2, (1, 1, 12, 2), (1, 3)),
(2, (1, 12, 12, 2), (3, 3)),
],
)
def test_rolling_buffer_2_layers(axis, ifm_shape, pool_shape):
accel_type = "ethos-u55-256"
strides = (1, 1)
@tf.function
def tf_model(x):
padding = "VALID"
pool_0 = tf.nn.max_pool(x, pool_shape, strides, padding)
pool_1 = tf.nn.max_pool(pool_0, pool_shape, strides, padding)
return pool_1
def _cascader(cached_func, const_dict, sch):
pool_b_out = cached_func.outputs[0]
pool_b_compute = OperatorCompute.from_output(pool_b_out)
pool_a_out = pool_b_compute.read.op.input_tensors[0]
pool_a_compute = OperatorCompute.from_output(pool_a_out)
outer = pool_b_compute.split(sch, axis=axis, val=4)
pool_a_compute.compute_at(sch, stage=sch[pool_b_out], axis=outer)
pool_a_compute.rolling_buffer(sch)
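    # Point the codegen scheduler hook at the custom cascader so that it is
    # used when compare_tvm_with_tflite compiles the model below.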
codegen.SCHEDULER = lambda: _cascader
infra.compare_tvm_with_tflite(tf_model, [ifm_shape], accel_type)
@pytest.mark.parametrize(
"axis, ifm_shape, pool_shape",
[
(1, (1, 12, 1, 2), (3, 1)),
(1, (1, 12, 1, 17), (3, 1)),
(1, (1, 12, 12, 2), (3, 3)),
(1, (1, 12, 12, 17), (3, 3)),
(2, (1, 1, 12, 2), (1, 3)),
(2, (1, 1, 12, 17), (1, 3)),
(2, (1, 12, 12, 2), (3, 3)),
(2, (1, 12, 12, 17), (3, 3)),
],
)
def test_rolling_buffer_3_layers(axis, ifm_shape, pool_shape):
accel_type = "ethos-u55-256"
strides = (1, 1)
@tf.function
def tf_model(x):
padding = "VALID"
pool_0 = tf.nn.max_pool(x, pool_shape, strides, padding)
pool_1 = tf.nn.max_pool(pool_0, pool_shape, strides, padding)
pool_2 = tf.nn.max_pool(pool_1, pool_shape, strides, padding)
return pool_2
def _cascader(cached_func, const_dict, sch):
pool_b_out = cached_func.outputs[0]
pool_b_compute = OperatorCompute.from_output(pool_b_out)
pool_a_out = pool_b_compute.read.op.input_tensors[0]
pool_a_compute = OperatorCompute.from_output(pool_a_out)
outer = pool_b_compute.split(sch, axis=axis, val=4)
pool_a_compute.compute_at(sch, stage=sch[pool_b_out], axis=outer)
pool_a_compute.rolling_buffer(sch)
codegen.SCHEDULER = lambda: _cascader
infra.compare_tvm_with_tflite(tf_model, [ifm_shape], accel_type)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm.script import tir as T
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm import te, topi
from tvm.relay.backend.contrib.ethosu.tir.scheduler import (
tile_nd,
schedule_pragmas,
inline_no_ops,
total_cascader,
copy_constants,
schedule_cache_reads,
copy_luts,
)
from tvm.relay.backend.contrib.ethosu.tir.compiler import (
lower_to_te,
extract_constants,
_lower_to_tir,
)
from .infra import (
AttachType,
make_ethosu_conv2d,
make_ethosu_identity,
make_ethosu_binary_elementwise,
)
class TestTEGraph:
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
def test_tile_nd():
input = te.placeholder((12, 12), dtype="uint8", name="input")
out = topi.nn.relu(input)
sch = te.create_schedule([out.op])
outer_iters, inner_iters = tile_nd(sch, out, (3, 4))
assert tuple(sch[out].leaf_iter_vars) == (*outer_iters, *inner_iters)
def test_schedule_pragmas():
input = te.placeholder((12, 12), dtype="uint8", name="input")
out = te.compute(
(12, 12),
lambda i, j: input[i, j],
attrs={
"op": "unity",
"info": 1,
},
)
sch = te.create_schedule([out.op])
sch[out].split(out.op.axis[0], 3)
schedule_pragmas(sch)
iter_var = sch[out].leaf_iter_vars[1]
assert list(sch[out].iter_var_attrs[iter_var].pragma_keys) == ["op", "info"]
assert list(sch[out].iter_var_attrs[iter_var].pragma_values) == ["unity", 1]
def test_schedule_pragmas_for_const():
input = te.placeholder((12, 12), dtype="uint8", name="input")
const = te.compute((), lambda: 2)
add = topi.add(input, const)
sch = te.create_schedule([add.op])
schedule_pragmas(sch)
def test_inline_no_ops():
input = relay.var("input", shape=(12, 12), dtype="uint8")
slice = relay.strided_slice(input, [0, 0], [6, 6])
relu1 = relay.nn.relu(slice)
reshape = relay.reshape(relu1, (36,))
relu2 = relay.nn.relu(reshape)
func = relay.Function(relay.analysis.free_vars(relu2), relu2)
func = run_opt_pass(func, relay.transform.InferType())
cached_func = lower_to_te(func)
sch = te.create_schedule([cached_func.outputs[0].op])
inline_no_ops(cached_func, sch)
reshape_tensor = cached_func.outputs[0].op.input_tensors[0]
slice_tensor = reshape_tensor.op.input_tensors[0].op.input_tensors[0]
assert sch[reshape_tensor].attach_type == AttachType.kInline
assert sch[slice_tensor].attach_type == AttachType.kInline
def test_total_cascader():
input = te.placeholder((12, 12), dtype="uint8", name="input")
relu1 = topi.nn.relu(input)
relu2 = topi.nn.relu(relu1)
relu3 = topi.nn.relu(relu2)
sch = te.create_schedule([relu3.op])
cascader = total_cascader((4, 4))
cascader(TestTEGraph([input], [relu3]), {}, sch)
assert sch[relu1].attach_type == AttachType.kScope
assert sch[relu2].attach_type == AttachType.kScope
assert sch[relu3].attach_type == AttachType.kGroupRoot
# Check that the attaches are at the correct iter var
assert sch[relu1].attach_ivar == sch[relu3].leaf_iter_vars[1]
assert sch[relu2].attach_ivar == sch[relu3].leaf_iter_vars[1]
def test_copy_constants():
ifm_a = relay.var("IFM_A", shape=(1, 26, 26, 32), dtype="int8")
conv_a = make_ethosu_conv2d(ifm_a, 32, 8, (3, 3), (0, 0), (1, 1), (1, 1))
conv_b = make_ethosu_conv2d(conv_a, 8, 4, (1, 1), (0, 0), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv_b), conv_b)
func = run_opt_pass(func, relay.transform.InferType())
func, const_dict = extract_constants(func)
cached_func = lower_to_te(func)
sch = te.create_schedule([cached_func.outputs[0].op])
planner = copy_constants()
planner(cached_func, const_dict, sch)
assert len(sch.stages) == 23
assert ".global" in sch.stages[6].op.name
assert ".global" in sch.stages[8].op.name
assert ".global" in sch.stages[17].op.name
assert ".global" in sch.stages[19].op.name
# This test makes sure that constants and LUTs have a correct storage scope
def test_copy_luts():
ifm_shape = (1, 33, 33, 11)
ifm = relay.var("IFM", shape=ifm_shape, dtype="int8")
lut = relay.const([i for i in range(256)], dtype="int8")
conv = make_ethosu_conv2d(
ifm, ifm_shape[3], 8, (3, 3), (0, 0), (1, 1), (1, 1), lut=lut, activation="TANH"
)
identity = make_ethosu_identity(conv, lut=lut, activation="TANH")
func = relay.Function(relay.analysis.free_vars(identity), identity)
func = run_opt_pass(func, relay.transform.InferType())
func, const_dict = extract_constants(func)
te_graph = lower_to_te(func)
sch = te.create_schedule([te_graph.outputs[0].op])
copy_constants()(te_graph, const_dict, sch)
copy_luts()(te_graph, const_dict, sch)
assert len(sch.stages) == 17
assert ".global" in sch.stages[6].op.name
assert ".global" in sch.stages[8].op.name
assert ".local" in sch.stages[10].op.name
def test_schedule_cache_reads():
a = te.placeholder((12, 12), dtype="uint8", name="a")
b = te.placeholder((12, 12), dtype="uint8", name="b")
add = topi.add(a, b)
sch = te.create_schedule([add.op])
cr = sch.cache_read(b, "global", [add])
schedule_cache_reads(sch)
assert len(sch.stages) == 4
assert len(sch[cr].leaf_iter_vars) == 1
iv = sch[cr].leaf_iter_vars[0]
assert list(sch[cr].iter_var_attrs[iv].pragma_keys) == ["op"]
assert list(sch[cr].iter_var_attrs[iv].pragma_values) == ["ethosu_copy"]
# fmt: off
@tvm.script.ir_module
class DiamondGraphTir:
@T.prim_func
def main(placeholder: T.Buffer[(301056,), "int8"], ethosu_write: T.Buffer[(75264,), "int8"]) -> None:
T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
buffer1 = T.buffer_decl([2848], "uint8")
buffer3 = T.buffer_decl([976], "uint8")
p1_data = T.allocate([2848], "uint8", "global", annotations={"disable_lower_builtin":True})
p1 = T.buffer_decl([2848], "uint8", data=p1_data)
p2_data = T.allocate([976], "uint8", "global", annotations={"disable_lower_builtin":True})
p2 = T.buffer_decl([976], "uint8", data=p2_data)
p5_data = T.allocate([75264], "int8", "global", annotations={"disable_lower_builtin":True})
p5 = T.buffer_decl([75264], "int8", data=p5_data)
p6_data = T.allocate([75264], "int8", "global", annotations={"disable_lower_builtin":True})
p6 = T.buffer_decl([75264], "int8", data=p6_data)
T.evaluate(T.call_extern("ethosu_copy", buffer1[0], 2848, p1[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer3[0], 976, p2[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 56, 56, 96, 56, 0, 56, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 5376, 96, 1, "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 1344, 24, 1, 1, 1, 1, 1, 1, 1, p1[0], 2608, T.int8(-1), T.int8(-1), 12, p1[2608], 240, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, p6[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 1344, 24, 1, 1, 1, 1, 1, 1, 1, p2[0], 736, T.int8(-1), T.int8(-1), 12, p2[736], 240, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 56, 56, 24, 56, 0, 56, p5[0], 0, 0, 0,T.float32(1), 0, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, p6[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1344, 24, 1, "int8", 56, 56, 24, 56, 0, 56, ethosu_write[0], 0, 0, 0, T.float32(1), 0, "NHWC", 1344, 24, 1, "ADD", 0, "NONE", 0, 0, "TFL", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_schedule_diamond_graph():
ifm_a = relay.var("IFM_A", shape=(1, 56, 56, 96), dtype="int8")
conv_a = make_ethosu_conv2d(ifm_a, 96, 24, (1, 1), (0, 0), (1, 1), (1, 1))
conv_b = make_ethosu_conv2d(conv_a, 24, 24, (1, 1), (0, 0), (1, 1), (1, 1))
add = make_ethosu_binary_elementwise(conv_a, conv_b, 24, 24, "ADD", "int8")
func = relay.Function(relay.analysis.free_vars(add), add)
func = run_opt_pass(func, relay.transform.InferType())
test_mod, _ = _lower_to_tir(func, copy_constants())
reference_mod = DiamondGraphTir
tvm.ir.assert_structural_equal(test_mod["main"], reference_mod["main"], True)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_tir_to_cs_translator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm.tir import stmt_functor
from tvm.script import tir as T
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator
from tvm.relay.backend.contrib.ethosu import util
import ethosu.vela.api as vapi
# fmt: off
"""A sample tir test case for translator"""
@tvm.script.ir_module
class SingleEthosUConv2D:
@T.prim_func
def main(placeholder_3: T.Buffer[(8192,), "int8"], ethosu_conv2d_1: T.Buffer[(1024,), "int8"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_4 = T.buffer_decl([1], "uint8")
placeholder_5 = T.buffer_decl([1], "uint8")
# body
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 8, 8, 3, 8, 0, 8, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "uint8", 8, 8, 16, 8, 0, 8, ethosu_conv2d_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 16, 1, 1, 1, 1, 1, 1, 1, placeholder_4[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_5[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "CLIP", 0, 255, "TFL", "NONE", 0, 0, 0, dtype="uint8"))
# fmt: on
# fmt: off
"""A sample tir test case with multiple convolutions for translator"""
@tvm.script.ir_module
class MultiEthosUConv2D:
@T.prim_func
def main(placeholder_6: T.Buffer[(192,), "int8"], ethosu_conv2d_1: T.Buffer[(512,), "int8"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_9 = T.buffer_decl([1], "uint8")
placeholder_7 = T.buffer_decl([1], "uint8")
placeholder_8 = T.buffer_decl([1], "uint8")
placeholder_5 = T.buffer_decl([1], "uint8")
# body
ethosu_conv2d_2 = T.decl_buffer([1024], "uint8")
ethosu_conv2d_3 = T.decl_buffer([2048], "uint8")
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 4, 8, 3, 4, 0, 8, placeholder_6[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "uint8", 4, 8, 32, 4, 0, 8, ethosu_conv2d_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 1, 1, 1, 1, 1, 1, placeholder_7[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_8[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="uint8"))
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 4, 8, 32, 4, 0, 8, ethosu_conv2d_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "uint8", 4, 8, 8, 4, 0, 8, ethosu_conv2d_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_9[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_5[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "CLIP", 0, 255, "TFL", "NONE", 0, 0, 0, dtype="uint8"))
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 4, 8, 3, 4, 0, 8, placeholder_6[96], 0, 0, 0, T.float32(0.5), 10, "NHWC", 24, 3, 1, "uint8", 4, 8, 32, 4, 0, 8, ethosu_conv2d_2[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 32, 1, 1, 1, 1, 1, 1, 1, placeholder_7[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_8[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "CLIP", 0, 255, "TFL", "NONE", 0, 0, 0, dtype="uint8"))
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 4, 8, 32, 4, 0, 8, ethosu_conv2d_2[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 32, 1, "uint8", 4, 8, 8, 4, 0, 8, ethosu_conv2d_1[256], 0, 0, 0, T.float32(0.25), 14, "NHWC", 64, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_9[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_5[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "CLIP", 0, 255, "TFL", "NONE", 0, 0, 0, dtype="uint8"))
# fmt: on
# fmt: off
"""A sample tir test case with copy operations for translator"""
@tvm.script.ir_module
class MultiEthosUCopy:
@T.prim_func
def main(placeholder_3: T.Buffer[(8192,), "int8"], ethosu_conv2d_1: T.Buffer[(2048,), "int8"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_5 = T.buffer_decl([1], "int32")
placeholder_4 = T.buffer_decl([1], "uint8")
# body
placeholder_global = T.decl_buffer([256], "uint8")
placeholder_d_global = T.decl_buffer([8], "int32")
T.evaluate(T.call_extern("ethosu_copy", placeholder_4[0], 256, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", placeholder_5[0], 8, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "uint8", 16, 16, 32, 16, 0, 16, placeholder_3[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "uint8", 16, 16, 8, 16, 0, 16, ethosu_conv2d_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 0, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 0, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "CLIP", 0, 255, "TFL", "NONE", 0, 0, 0, dtype="handle"))
# fmt: on
# fmt: off
"""A TIR test module of weight streaming"""
@tvm.script.ir_module
class WeightStreamOnly:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
buffer = T.buffer_decl([1], "uint8")
buffer_1 = T.buffer_decl([1], "uint8")
buffer_2 = T.buffer_decl([1], "uint8")
buffer_3 = T.buffer_decl([1], "uint8")
buffer_4 = T.buffer_decl([1], "uint8")
buffer_5 = T.buffer_decl([1], "uint8")
buffer_6 = T.buffer_decl([1], "uint8")
buffer_7 = T.buffer_decl([1], "uint8")
# function attr dict
T.func_attr({"from_legacy_te_schedule": True,
"global_symbol": "main", "tir.noalias": True,
"constants": {buffer.name: buffer,
buffer_1.name: buffer_1,
buffer_2.name: buffer_2,
buffer_3.name: buffer_3,
buffer_4.name: buffer_4,
buffer_5.name: buffer_5,
buffer_6.name: buffer_6,
buffer_7.name: buffer_7}})
# body
placeholder_global_data = T.allocate([128], "uint8", "global", annotations={"disable_lower_builtin":True})
placeholder_global = T.decl_buffer([128], "uint8", data=placeholder_global_data)
placeholder_d_global_data = T.allocate([32], "uint8", "global", annotations={"disable_lower_builtin":True})
placeholder_d_global = T.decl_buffer([32], "uint8", data=placeholder_d_global_data)
T.evaluate(T.call_extern("ethosu_copy", buffer[0], 128, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_1[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 128, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 112, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_3[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 112, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_4[0], 112, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_5[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 112, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_6[0], 112, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_7[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 112, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A TIR test module of weight streaming and direct reading"""
@tvm.script.ir_module
class MixedRead:
@T.prim_func
def main(placeholder: T.Buffer[(8192,), "int8"], ethosu_write: T.Buffer[(2048,), "int8"]) -> None:
buffer = T.buffer_decl([1], "uint8")
buffer_1 = T.buffer_decl([1], "uint8")
buffer_2 = T.buffer_decl([1], "uint8")
buffer_3 = T.buffer_decl([1], "uint8")
buffer_4 = T.buffer_decl([1], "uint8")
buffer_5 = T.buffer_decl([1], "uint8")
buffer_6 = T.buffer_decl([1], "uint8")
buffer_7 = T.buffer_decl([1], "uint8")
buffer_8 = T.buffer_decl([1], "uint8")
buffer_9 = T.buffer_decl([1], "uint8")
# function attr dict
T.func_attr({"from_legacy_te_schedule": True,
"global_symbol": "main", "tir.noalias": True,
"constants": {buffer.name: buffer,
buffer_1.name: buffer_1,
buffer_2.name: buffer_2,
buffer_3.name: buffer_3,
buffer_4.name: buffer_4,
buffer_5.name: buffer_5,
buffer_6.name: buffer_6,
buffer_7.name: buffer_7,
buffer_8.name: buffer_8,
buffer_9.name: buffer_9}})
# body
ethosu_write_1_data = T.allocate([4096], "int8", "global", annotations={"disable_lower_builtin":True})
ethosu_write_1 = T.buffer_decl([4096], "int8", data=ethosu_write_1_data)
placeholder_global_data = T.allocate([80], "uint8", "global", annotations={"disable_lower_builtin":True})
placeholder_global = T.buffer_decl([80], "uint8", data=placeholder_global_data)
placeholder_d_global_data = T.allocate([32], "uint8", "global", annotations={"disable_lower_builtin":True})
placeholder_d_global = T.buffer_decl([32], "uint8", data=placeholder_d_global_data)
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 32, 16, 0, 16, placeholder[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 512, 32, 1, "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 256, 16, 1, 1, 1, 1, 1, 1, 1, buffer[0], 592, T.int8(-1), T.int8(-1), 12, buffer_1[0], 160, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 80, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_3[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[0], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 80, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_4[0], 80, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_5[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[2], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 80, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_6[0], 80, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_7[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[4], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 80, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_8[0], 80, placeholder_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_copy", buffer_9[0], 32, placeholder_d_global[0], dtype="handle"))
T.evaluate(T.call_extern("ethosu_conv2d", "int8", 16, 16, 16, 16, 0, 16, ethosu_write_1[0], 0, 0, 0, T.float32(0.5), 10, "NHWC", 256, 16, 1, "int8", 16, 16, 2, 16, 0, 16, ethosu_write[6], 0, 0, 0, T.float32(0.25), 14, "NHWC", 128, 8, 1, 1, 1, 1, 1, 1, 1, placeholder_global[0], 80, T.int8(-1), T.int8(-1), 12, placeholder_d_global[0], 32, T.int8(-1), T.int8(-1), 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="handle"))
__tvm_meta__ = None
# fmt: on
def test_buffer_info_extraction():
test_cases = [
{
# Stimulus
"tir_module": SingleEthosUConv2D,
"param_dict": {
tvm.tir.Var("placeholder_4", "uint8"): np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 3, 16], "uint8"
),
tvm.tir.Var("placeholder_5", "uint8"): np.random.randint(
np.iinfo("int32").min, np.iinfo("int32").max, [16], "int32"
),
},
# Reference Outputs
"data_buffers": {
"placeholder_3": (
[1, 8, 8, 3],
"uint8",
tir_to_cs_translator.BufferType.input_or_output,
),
"ethosu_conv2d_1": (
[1, 8, 8, 16],
"uint8",
tir_to_cs_translator.BufferType.input_or_output,
),
},
},
{
"tir_module": MultiEthosUConv2D,
"param_dict": {
tvm.tir.Var("placeholder_7", "uint8"): np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 3, 32], "uint8"
),
tvm.tir.Var("placeholder_8", "uint8"): np.random.randint(
np.iinfo("int32").min, np.iinfo("int32").max, [32], "int32"
),
tvm.tir.Var("placeholder_8", "uint8"): np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 32, 8], "uint8"
),
tvm.tir.Var("placeholder_5", "uint8"): np.random.randint(
np.iinfo("int32").min, np.iinfo("int32").max, [8], "int32"
),
},
# Reference Outputs
"data_buffers": {
"placeholder_6": (
[1, 8, 8, 3],
"uint8",
tir_to_cs_translator.BufferType.input_or_output,
),
"ethosu_conv2d_1": (
[1, 8, 8, 8],
"uint8",
tir_to_cs_translator.BufferType.input_or_output,
),
"ethosu_conv2d_2": (
[1024],
"uint8",
tir_to_cs_translator.BufferType.scratch,
),
"ethosu_conv2d_3": (
[2048],
"uint8",
tir_to_cs_translator.BufferType.scratch,
),
},
},
]
for test_case in test_cases:
# With Target Hooks the TIR module needs a target attached
# and lowered via make unpacked API.
tir_mod = test_case["tir_module"]
tir_mod["main"] = tir_mod["main"].with_attr("target", tvm.target.Target("ethos-u"))
tir_mod = tvm.tir.transform.MakeUnpackedAPI()(tir_mod)
buffer_info = tir_to_cs_translator.extract_buffer_info(tir_mod, test_case["param_dict"])
for buffer_var, info in buffer_info.items():
if buffer_var in test_case["param_dict"].keys():
assert (
info.values.flatten() == test_case["param_dict"][buffer_var].flatten()
).all()
assert info.dtype == test_case["param_dict"][buffer_var].dtype
                assert info.btype == tir_to_cs_translator.BufferType.constant
else:
buffer_name = buffer_var.name
assert info.btype == test_case["data_buffers"][buffer_name][2]
def test_translate_ethosu_conv2d():
test_cases = [
{
# Stimulus
"tir_module": SingleEthosUConv2D,
"param_dict": {
1: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 3, 16], "uint8"
),
2: np.random.randint(np.iinfo("int32").min, np.iinfo("int32").max, [16], "int32"),
},
# Reference outputs
"ref": [
{
"ifm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(8, 8, 3),
"tiles": vapi.NpuTileBox(8, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.5, 10),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(24, 3, 1),
},
"ofm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(8, 8, 16),
"tiles": vapi.NpuTileBox(8, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.25, 14),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(128, 16, 1),
},
"kernel": vapi.NpuKernel(
w=1, h=1, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1
),
"padding": vapi.NpuPadding(top=0, left=0, bottom=0, right=0),
"activation": {
"op": vapi.NpuActivationOp.NONE_OR_RELU,
"min": -3.5,
"max": 60.25,
},
"rounding_mode": vapi.NpuRoundingMode.TFL,
"ifm_upscale": vapi.NpuResamplingMode.NONE,
"w_zero_point": 12,
}
],
},
{
"tir_module": MultiEthosUConv2D,
"param_dict": {
1: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 3, 32], "uint8"
),
2: np.random.randint(np.iinfo("int32").min, np.iinfo("int32").max, [32], "int32"),
3: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [1, 1, 32, 8], "uint8"
),
4: np.random.randint(np.iinfo("int32").min, np.iinfo("int32").max, [8], "int32"),
},
# Reference Outputs
"ref": [
{
"ifm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 3),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.5, 10),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(24, 3, 1),
},
"ofm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 32),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.25, 14),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(256, 32, 1),
},
"kernel": vapi.NpuKernel(
w=1, h=1, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1
),
"padding": vapi.NpuPadding(top=0, left=0, bottom=0, right=0),
"activation": {"op": None},
"rounding_mode": vapi.NpuRoundingMode.TFL,
"ifm_upscale": vapi.NpuResamplingMode.NONE,
"w_zero_point": 12,
},
{
"ifm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 32),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.5, 10),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(256, 32, 1),
},
"ofm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 8),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.25, 14),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(64, 8, 1),
},
"kernel": vapi.NpuKernel(
w=1, h=1, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1
),
"padding": vapi.NpuPadding(top=0, left=0, bottom=0, right=0),
"activation": {
"op": vapi.NpuActivationOp.NONE_OR_RELU,
"min": -3.5,
"max": 60.25,
},
"rounding_mode": vapi.NpuRoundingMode.TFL,
"ifm_upscale": vapi.NpuResamplingMode.NONE,
"w_zero_point": 12,
},
{
"ifm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 3),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.5, 10),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(24, 3, 1),
},
"ofm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 32),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.25, 14),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(256, 32, 1),
},
"kernel": vapi.NpuKernel(
w=1, h=1, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1
),
"padding": vapi.NpuPadding(top=0, left=0, bottom=0, right=0),
"activation": {
"op": vapi.NpuActivationOp.NONE_OR_RELU,
"min": -3.5,
"max": 60.25,
},
"rounding_mode": vapi.NpuRoundingMode.TFL,
"ifm_upscale": vapi.NpuResamplingMode.NONE,
"w_zero_point": 12,
},
{
"ifm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 32),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.5, 10),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(256, 32, 1),
},
"ofm": {
"data_type": vapi.NpuDataType.UINT8,
"shape": vapi.NpuShape3D(4, 8, 8),
"tiles": vapi.NpuTileBox(4, 0, 8, [0, 0, 0, 0]),
"quantization": vapi.NpuQuantization(0.25, 14),
"layout": vapi.NpuLayout.NHWC,
"strides": vapi.NpuShape3D(64, 8, 1),
},
"kernel": vapi.NpuKernel(
w=1, h=1, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1
),
"padding": vapi.NpuPadding(top=0, left=0, bottom=0, right=0),
"activation": {
"op": vapi.NpuActivationOp.NONE_OR_RELU,
"min": -3.5,
"max": 60.25,
},
"rounding_mode": vapi.NpuRoundingMode.TFL,
"ifm_upscale": vapi.NpuResamplingMode.NONE,
"w_zero_point": 12,
},
],
},
]
def extract_ethosu_conv2d_extern_calls(mod):
"""This function will obtain all ethosu_conv2d
calls from a NPU TIR module
Parameters
----------
mod : tvm.IRModule
This is a NPU TIR Module
Returns
-------
list
of tvm.tir.Call objects
that are tir extern calls
for ethosu_conv2d
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_conv2d_calls = list()
def populate_ethosu_conv2d_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_conv2d"
):
ethosu_conv2d_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_conv2d_calls)
return ethosu_conv2d_calls
for test_case in test_cases:
ethosu_conv2d_calls = extract_ethosu_conv2d_extern_calls(test_case["tir_module"])
for idx, ethosu_conv2d_call in enumerate(ethosu_conv2d_calls):
ref = test_case["ref"][idx]
npu_op, w_zero_point = tir_to_cs_translator.translate_ethosu_conv2d(ethosu_conv2d_call)
# Compare IFM
assert npu_op.ifm.data_type == ref["ifm"]["data_type"]
assert npu_op.ifm.shape == ref["ifm"]["shape"]
assert npu_op.ifm.tiles.height_0 == ref["ifm"]["tiles"].height_0
assert npu_op.ifm.tiles.height_1 == ref["ifm"]["tiles"].height_1
assert npu_op.ifm.tiles.width_0 == ref["ifm"]["tiles"].width_0
assert npu_op.ifm.quantization == ref["ifm"]["quantization"]
assert npu_op.ifm.layout == ref["ifm"]["layout"]
assert npu_op.ifm.strides == ref["ifm"]["strides"]
# Compare OFM
assert npu_op.ofm.data_type == ref["ofm"]["data_type"]
assert npu_op.ofm.shape == ref["ofm"]["shape"]
assert npu_op.ofm.tiles.height_0 == ref["ofm"]["tiles"].height_0
assert npu_op.ofm.tiles.height_1 == ref["ofm"]["tiles"].height_1
assert npu_op.ofm.tiles.width_0 == ref["ofm"]["tiles"].width_0
assert npu_op.ofm.quantization == ref["ofm"]["quantization"]
assert npu_op.ofm.layout == ref["ofm"]["layout"]
assert npu_op.ofm.strides == ref["ofm"]["strides"]
# Compare kernel and padding
assert npu_op.kernel.__dict__ == ref["kernel"].__dict__
assert npu_op.padding == ref["padding"]
# Compare activation
if ref["activation"]["op"] is None:
assert npu_op.activation is None
else:
assert npu_op.activation.op_type == ref["activation"]["op"]
assert npu_op.activation.min == ref["activation"]["min"]
assert npu_op.activation.max == ref["activation"]["max"]
# Compare rounding mode
assert npu_op.rounding_mode == ref["rounding_mode"]
# Compare ifm upscaling
assert npu_op.ifm_upscale == ref["ifm_upscale"]
# Compare weight quantization parameters
assert w_zero_point == ref["w_zero_point"]
# fmt: off
"""A ethosu_depthwise_conv2d tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuDepthwiseConv2D:
@T.prim_func
def main(placeholder: T.handle, placeholder_1: T.handle, placeholder_2: T.handle, ethosu_depthwise_conv2d: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_4 = T.match_buffer(placeholder_1, [18], dtype="int8", elem_offset=0, align=64, offset_factor=1)
placeholder_5 = T.match_buffer(placeholder_2, [30], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
placeholder_3 = T.match_buffer(placeholder, [192], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_depthwise_conv2d_1 = T.match_buffer(ethosu_depthwise_conv2d, [126], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 8, 8, 3, 8, 0, 8, placeholder_3[0], 0, 0, 0, T.float32(0.6), 11, "NHWC", 24, 3, 1, "int8", 6, 7, 3, 6, 0, 7, ethosu_depthwise_conv2d_1[0], 0, 0, 0, T.float32(0.26), 15, "NHWC", 21, 3, 1, 2, 3, 1, 1, 1, 1, placeholder_4[0], 18, 13, placeholder_5[0], 30, 0, 0, 0, 0, "CLIP", 15, 105, "TFL", "NONE", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
def test_translate_ethosu_depthwise_conv2d():
def extract_ethosu_depthwise_conv2d_extern_call(mod):
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_depthwise_conv2d_calls = list()
def populate_ethosu_depthwise_conv2d_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_depthwise_conv2d"
):
ethosu_depthwise_conv2d_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_depthwise_conv2d_calls)
return ethosu_depthwise_conv2d_calls[0]
depthwise_conv2d_call = extract_ethosu_depthwise_conv2d_extern_call(SingleEthosuDepthwiseConv2D)
npu_op, w_zero_point = tir_to_cs_translator.translate_ethosu_depthwise_conv2d(
depthwise_conv2d_call
)
assert npu_op.ifm.data_type == vapi.NpuDataType.INT8
assert npu_op.ifm.shape == vapi.NpuShape3D(8, 8, 3)
assert npu_op.ifm.tiles.height_0 == vapi.NpuTileBox(8, 0, 8, [0, 0, 0, 0]).height_0
assert npu_op.ifm.tiles.height_1 == vapi.NpuTileBox(8, 0, 8, [0, 0, 0, 0]).height_1
assert npu_op.ifm.tiles.width_0 == vapi.NpuTileBox(8, 0, 8, [0, 0, 0, 0]).width_0
assert npu_op.ifm.quantization == pytest.approx(vapi.NpuQuantization(0.6, 11))
assert npu_op.ifm.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm.strides == vapi.NpuShape3D(24, 3, 1)
# Compare OFM
assert npu_op.ofm.data_type == vapi.NpuDataType.INT8
assert npu_op.ofm.shape == vapi.NpuShape3D(6, 7, 3)
assert npu_op.ofm.tiles.height_0 == vapi.NpuTileBox(6, 0, 8, [0, 0, 0, 0]).height_0
assert npu_op.ofm.tiles.height_1 == vapi.NpuTileBox(6, 0, 7, [0, 0, 0, 0]).height_1
assert npu_op.ofm.tiles.width_0 == vapi.NpuTileBox(6, 0, 7, [0, 0, 0, 0]).width_0
assert npu_op.ofm.quantization == pytest.approx(vapi.NpuQuantization(0.26, 15))
assert npu_op.ofm.layout == vapi.NpuLayout.NHWC
assert npu_op.ofm.strides == vapi.NpuShape3D(21, 3, 1)
# Compare kernel and padding
assert (
npu_op.kernel.__dict__
== vapi.NpuKernel(w=2, h=3, stride_x=1, stride_y=1, dilation_x=1, dilation_y=1).__dict__
)
assert npu_op.padding == vapi.NpuPadding(top=0, left=0, bottom=0, right=0)
# Compare activation
assert npu_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
assert npu_op.activation.min == 0
assert npu_op.activation.max == pytest.approx(23.4)
# Compare rounding mode
assert npu_op.rounding_mode == vapi.NpuRoundingMode.TFL
# Compare ifm upscaling
assert npu_op.ifm_upscale == vapi.NpuResamplingMode.NONE
# Compare weight quantization parameters
assert w_zero_point == 13
def test_translate_ethosu_copy():
def extract_ethosu_copy_extern_calls(mod):
"""This function will obtain all ethosu_conv2d
calls from a NPU TIR module
Parameters
----------
mod : tvm.IRModule
This is a NPU TIR Module
Returns
-------
list
of tvm.tir.Call objects
that are tir extern calls
            for ethosu_copy
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_copy_calls = list()
def populate_ethosu_copy_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_copy"
):
ethosu_copy_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_copy_calls)
return ethosu_copy_calls
test_cases = [
{
# Stimulus
"tir_module": MultiEthosUCopy,
"param_dict": {
1: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [8, 1, 1, 32], "uint8"
),
2: np.random.randint(np.iinfo("int32").min, np.iinfo("int32").max, [8], "int32"),
},
# Reference outputs
"ref": [
{
"src": "placeholder_4",
"dest": "placeholder_global",
"length": 256,
},
{
"src": "placeholder_5",
"dest": "placeholder_d_global",
"length": 32,
},
],
},
]
for test_case in test_cases:
ethosu_copy_calls = extract_ethosu_copy_extern_calls(test_case["tir_module"])
for idx, ethosu_copy_call in enumerate(ethosu_copy_calls):
npu_dma_op = tir_to_cs_translator.translate_ethosu_tir_call_extern(ethosu_copy_call)
assert npu_dma_op.src.address.buffer.name == test_case["ref"][idx]["src"]
assert npu_dma_op.dest.address.buffer.name == test_case["ref"][idx]["dest"]
assert npu_dma_op.src.length == test_case["ref"][idx]["length"]
assert npu_dma_op.dest.length == test_case["ref"][idx]["length"]
# fmt: off
@tvm.script.ir_module
class MixedConstantDatatypes:
@T.prim_func
def main(placeholder_4: T.Buffer[(2048,), "int8"], ethosu_write_1: T.Buffer[(16,), "int8"]) -> None:
buffer = T.buffer_decl([1], "uint8")
buffer_1 = T.buffer_decl([1], "uint8")
buffer_2 = T.buffer_decl([1], "int16")
# function attr dict
T.func_attr({"from_legacy_te_schedule": True,
"global_symbol": "main", "tir.noalias": True,
"constants": {buffer.name: buffer,
buffer_1.name: buffer_1,
buffer_2.name: buffer_2}})
# body
placeholder_global = T.decl_buffer([272], "uint8")
placeholder_d_global = T.decl_buffer([160], "uint8")
ethosu_write_2 = T.decl_buffer([16], "int16")
placeholder_d_global_1 = T.decl_buffer([1], "int16")
T.evaluate(T.call_extern("ethosu_copy", buffer_1[0], 272, placeholder_global[0], dtype="uint8"))
T.evaluate(T.call_extern("ethosu_copy", buffer[0], 160, placeholder_d_global[0], dtype="uint8"))
T.evaluate(T.call_extern("ethosu_depthwise_conv2d", "int8", 8, 16, 16, 8, 0, 16, placeholder_4[0], 0, 0, 0, T.float32(0.0039215548895299435), -128, "NHWC", 256, 16, 1, "int16", 1, 1, 16, 1, 0, 1, ethosu_write_2[0], 0, 0, 0, T.float32(0.0023205536417663097), -128, "NHWC", 1, 1, 1, 16, 8, 1, 1, 1, 1, placeholder_global[0], 272, 0, placeholder_d_global[0], 160, 0, 0, 0, 0, "NONE", 0, 0, "TFL", "NONE", 0, 0, 0, dtype="int16"))
T.evaluate(T.call_extern("ethosu_copy", buffer_2[0], 1, placeholder_d_global_1[0], dtype="int16"))
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int16", 1, 1, 16, 1, 0, 1, ethosu_write_2[0], 0, 0, 0, T.float32(0.0023205536417663097), -128, "NHWC", 1, 1, 1, "int16", 1, 1, 1, 1, 0, 1, placeholder_d_global_1[0], 0, 0, 0, T.float32(0.0078125018482064768), 0, "NHWC", 1, 1, 1, "int8", 1, 1, 16, 1, 0, 1, ethosu_write_1[0], 0, 0, 0, T.float32(0.0023205536417663097), -128, "NHWC", 1, 1, 1, "MUL", 0, "NONE", 0, 0, "NATURAL", 0, 0, 0, dtype="int8"))
# fmt: on
def test_assign_addresses():
test_cases = [
{
# Stimulus
"tir_module": WeightStreamOnly,
"param_dict": {
WeightStreamOnly["main"].attrs["constants"]["buffer"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [128], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_1"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_2"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [112], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_3"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_4"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [112], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_5"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_6"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [112], "uint8"
),
WeightStreamOnly["main"].attrs["constants"]["buffer_7"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
},
},
{
# Stimulus
"tir_module": MixedRead,
"param_dict": {
MixedRead["main"].attrs["constants"]["buffer"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [592], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_1"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [160], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_2"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [80], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_3"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_4"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [80], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_5"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_6"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [80], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_7"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_8"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [80], "uint8"
),
MixedRead["main"].attrs["constants"]["buffer_9"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [32], "uint8"
),
},
},
{
# Stimulus
"tir_module": MixedConstantDatatypes,
"param_dict": {
MixedConstantDatatypes["main"].attrs["constants"]["buffer"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [160], "uint8"
),
MixedConstantDatatypes["main"].attrs["constants"]["buffer_2"]: np.random.randint(
np.iinfo("int16").min, np.iinfo("int16").max, [1], "int16"
),
MixedConstantDatatypes["main"].attrs["constants"]["buffer_1"]: np.random.randint(
np.iinfo("uint8").min, np.iinfo("uint8").max, [272], "uint8"
),
},
},
]
def extract_call_extern_list(mod):
"""This function will obtain all ethosu_conv2d
calls from a NPU TIR module
Parameters
----------
mod : tvm.IRModule
This is a NPU TIR Module
Returns
-------
list
of tvm.tir.Call objects
that are tir extern calls
for ethosu_conv2d
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
extern_calls = list()
def populate_extern_calls(stmt):
if isinstance(stmt, tvm.tir.Call) and stmt.op.name == "tir.call_extern":
extern_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_extern_calls)
return extern_calls
def collect_tir_buffer_info(npu_ops):
"""This is run prior to address assigning to collect tir buffer information
for verification later on"""
_npu_op_tir_buffers = dict()
for npu_op in npu_ops:
if isinstance(npu_op, vapi.NpuDmaOperation):
_npu_op_tir_buffers[npu_op] = (npu_op.src.address, npu_op.dest.address)
elif issubclass(type(npu_op), vapi.NpuBlockOperation):
_npu_op_tir_buffers[npu_op] = (
npu_op.ifm.tiles.addresses[0],
npu_op.ofm.tiles.addresses[0],
npu_op.weights,
npu_op.biases,
)
return _npu_op_tir_buffers
def _check_buffer(address, region, length, buffer_var):
"""Checks whether the buffer information is valid with
original tir buffers.
- If its constant, this will check
the slice in the constant tensor has the values.
- If its scratch, this will check
the slice is within scratch and does not have conflicts
with other scratch tensors.
- If its input/output, this will check the
address is zero
"""
inverse_region_map = {
0: tir_to_cs_translator.BufferType.constant,
1: tir_to_cs_translator.BufferType.scratch,
3: tir_to_cs_translator.BufferType.input,
4: tir_to_cs_translator.BufferType.output,
}
buffer_type = inverse_region_map[region]
buffer_dtype = buffer_var.type_annotation.element_type.dtype
dtype_bytes = np.iinfo(np.dtype(buffer_dtype)).bits // 8
if buffer_type == tir_to_cs_translator.BufferType.constant:
ref = buffer_info[buffer_var].values
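            # Addresses and lengths are in elements of the buffer dtype; the constant
            # blob is a hex string, so each byte takes two characters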
hex_from = address * dtype_bytes * 2
hex_to = hex_from + length * dtype_bytes * 2
constant_hex = constant_hex_string[hex_from:hex_to]
constant_tensor = np.frombuffer(bytearray.fromhex(constant_hex), dtype=buffer_dtype)
            assert np.array_equal(constant_tensor, ref)
# Every buffer is adjusted to align to 16 bytes
length = util.round_up(length, 16)
            # Mark these constants as read at least once
constant_tensor_read_mask[address : address + length] = np.ones(
length, dtype=buffer_dtype
)
elif buffer_type == tir_to_cs_translator.BufferType.scratch:
assert address < tvmbaw_workspace_size
size_in_bytes = allocate_node_sizes[buffer_var]
# Every buffer is adjusted to align to 16 bytes
size_in_bytes = util.round_up(size_in_bytes, 16)
assert address + size_in_bytes <= tvmbaw_workspace_size
# The scratch area should not be used by any other buffer
assert not tvmbaw_workspace_mask[address : address + size_in_bytes].any()
# The scratch area is marked as used
tvmbaw_workspace_mask[address : address + size_in_bytes] = np.ones(
size_in_bytes, dtype="uint8"
)
elif buffer_type == tir_to_cs_translator.BufferType.input:
assert address == 0
else:
assert buffer_type == tir_to_cs_translator.BufferType.output
assert address == 0
def _get_allocate_node_sizes(mod):
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
_allocate_node_sizes = dict()
def analyze_remaining_allocates(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
allocate = stmt
pointer_type = allocate.buffer_var.type_annotation
storage_scope = pointer_type.storage_scope
if storage_scope == "global":
dtype_bytes = np.iinfo(np.dtype(allocate.dtype)).bits // 8
size_in_bytes = int(dtype_bytes * np.prod(list(allocate.extents)))
                    # Every memory address the NPU accesses has to be 16-byte aligned
size_in_bytes = util.round_up(size_in_bytes, 16)
_allocate_node_sizes[allocate.buffer_var] = size_in_bytes
tvm.tir.stmt_functor.post_order_visit(primfunc.body, analyze_remaining_allocates)
return _allocate_node_sizes
def verify(npu_ops):
"""This wrapper verifies the allocated addresses matches with original tir buffers"""
checked_buffers = set()
def check_buffer(address, region, length, buffer_var):
if buffer_var not in checked_buffers:
_check_buffer(address, region, length, buffer_var)
checked_buffers.add(buffer_var)
for npu_op in npu_ops:
if isinstance(npu_op, vapi.NpuDmaOperation):
src_tir_buffer_var = npu_op_tir_buffers[npu_op][0].buffer.data
check_buffer(
npu_op.src.address, npu_op.src.region, npu_op.src.length, src_tir_buffer_var
)
dest_tir_load = npu_op_tir_buffers[npu_op][1].buffer.data
check_buffer(
npu_op.dest.address,
npu_op.dest.region,
npu_op.dest.length,
dest_tir_load,
)
elif issubclass(type(npu_op), vapi.NpuBlockOperation):
ifm_tir_buffer_var = npu_op_tir_buffers[npu_op][0].buffer.data
ifm_length = (
npu_op.ifm.shape.height * npu_op.ifm.shape.width * npu_op.ifm.shape.depth
)
check_buffer(
npu_op.ifm.tiles.addresses[0],
npu_op.ifm.region,
ifm_length,
ifm_tir_buffer_var,
)
ofm_tir_buffer_var = npu_op_tir_buffers[npu_op][1].buffer.data
ofm_length = (
npu_op.ofm.shape.height * npu_op.ofm.shape.width * npu_op.ofm.shape.depth
)
check_buffer(
npu_op.ofm.tiles.addresses[0],
npu_op.ofm.region,
ofm_length,
ofm_tir_buffer_var,
)
for idx, weight in enumerate(npu_op_tir_buffers[npu_op][2]):
assert isinstance(weight, vapi.NpuAddressRange)
check_buffer(
npu_op.weights[idx].address,
npu_op.weights[idx].region,
npu_op.weights[idx].length,
weight.address.buffer.data,
)
for idx, bias in enumerate(npu_op_tir_buffers[npu_op][3]):
assert isinstance(bias, vapi.NpuAddressRange)
check_buffer(
npu_op.biases[idx].address,
npu_op.biases[idx].region,
npu_op.biases[idx].length,
bias.address.buffer.data,
)
for test_case in test_cases:
tir_mod = test_case["tir_module"]
tir_mod["main"] = tir_mod["main"].with_attr("target", tvm.target.Target("ethos-u"))
tir_mod = tvm.tir.transform.MakeUnpackedAPI()(tir_mod)
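        # Memory regions that the translator may use for scratch buffers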
candidate_regions_for_scratch = [5, 2, 1]
(
scratch_region_map,
tvmbaw_workspace_size,
_,
) = tir_to_cs_translator.analyze_scratch_memory_acesses(
tir_mod, candidate_regions_for_scratch
)
allocate_node_sizes = _get_allocate_node_sizes(tir_mod)
buffer_info = tir_to_cs_translator.extract_buffer_info(tir_mod, test_case["param_dict"])
extern_calls = extract_call_extern_list(tir_mod)
_npu_ops = list()
for extern_call in extern_calls:
_npu_ops.append(tir_to_cs_translator.translate_ethosu_tir_call_extern(extern_call))
npu_op_tir_buffers = collect_tir_buffer_info(_npu_ops)
(_npu_ops, constant_hex_string) = tir_to_cs_translator.assign_addresses(
buffer_info, _npu_ops, scratch_region_map
)
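        # Masks used to verify that all allocated scratch is used and every constant byte is read at least once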
tvmbaw_workspace_mask = np.zeros(tvmbaw_workspace_size, dtype="uint8")
constant_tensor_read_mask = np.zeros(len(constant_hex_string) // 2, dtype="uint8")
verify(_npu_ops)
# This will be only 1 if all allocated scratch is used.
assert np.prod(tvmbaw_workspace_mask) == 1
        # This will only be 1 if all constant tensors are read at least once.
assert np.prod(constant_tensor_read_mask) == 1
# fmt: off
"""A ethosu_pooling tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuPooling:
@T.prim_func
def main(placeholder: T.handle, placeholder_3: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_4 = T.match_buffer(placeholder, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [75], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_pooling", "int8", 5, 9, 3, 5, 0, 9, placeholder_4[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 5, 3, 5, 0, 5, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 15, 3, 1, "AVG", 2, 3, 2, 1, 1, 1, 1, 1, 1, 0, "CLIP", 10, 100, "TFL", "NONE", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
def test_translate_ethosu_pooling():
def extract_ethosu_pooling_extern_call(mod):
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_pooling_calls = list()
def populate_ethosu_pooling_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_pooling"
):
ethosu_pooling_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_pooling_calls)
return ethosu_pooling_calls[0]
pooling_call = extract_ethosu_pooling_extern_call(SingleEthosuPooling)
npu_op = tir_to_cs_translator.translate_ethosu_pooling(pooling_call)
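    # Compare IFM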
assert npu_op.ifm.data_type == vapi.NpuDataType.INT8
assert npu_op.ifm.shape == vapi.NpuShape3D(5, 9, 3)
assert npu_op.ifm.tiles.height_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_0
assert npu_op.ifm.tiles.height_1 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_1
assert npu_op.ifm.tiles.width_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).width_0
assert npu_op.ifm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ifm.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm.strides == vapi.NpuShape3D(27, 3, 1)
# Compare OFM
assert npu_op.ofm.data_type == vapi.NpuDataType.INT8
assert npu_op.ofm.shape == vapi.NpuShape3D(5, 5, 3)
assert npu_op.ofm.tiles.height_0 == vapi.NpuTileBox(5, 0, 5, [0, 0, 0, 0]).height_0
assert npu_op.ofm.tiles.height_1 == vapi.NpuTileBox(5, 0, 5, [0, 0, 0, 0]).height_1
assert npu_op.ofm.tiles.width_0 == vapi.NpuTileBox(5, 0, 5, [0, 0, 0, 0]).width_0
assert npu_op.ofm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ofm.layout == vapi.NpuLayout.NHWC
assert npu_op.ofm.strides == vapi.NpuShape3D(15, 3, 1)
# Compare pooling_type
assert npu_op.sub_op_type == vapi.NpuPoolingOp.AVERAGE
# Compare kernel and padding
assert (
npu_op.kernel.__dict__
== vapi.NpuKernel(w=2, h=3, stride_x=2, stride_y=1, dilation_x=1, dilation_y=1).__dict__
)
assert npu_op.padding == vapi.NpuPadding(top=1, left=1, bottom=1, right=0)
# Compare activation
assert npu_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
assert npu_op.activation.min == 10
assert npu_op.activation.max == 100
# Compare rounding mode
assert npu_op.rounding_mode == vapi.NpuRoundingMode.TFL
# Compare ifm upscaling
assert npu_op.ifm_upscale == vapi.NpuResamplingMode.NONE
# fmt: off
"""A ethosu_binary_elementwise ADD tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseAdd:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(
placeholder, [270], dtype="int8", elem_offset=0, align=64, offset_factor=1
)
ethosu_write_2 = T.match_buffer(
ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1
)
# body
T.evaluate(T.call_extern( "ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "ADD", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SUB tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseSub:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SUB", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise MUL tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMul:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MUL", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise MIN tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMin:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MIN", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise Max tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMax:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int8", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "MAX", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SHR tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseShr:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int32", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int32", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SHR", 0, "NONE", 0, 0, "TFL", 0, 0, 0, dtype="int32"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SHL tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseShl:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [270], dtype="int32", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [135], dtype="int32", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 5, 9, 3, 5, 0, 9, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, placeholder_2[135], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "int32", 5, 9, 3, 5, 0, 9, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 27, 3, 1, "SHL", 0, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int32"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX", "SHR", "SHL"])
def test_translate_ethosu_binary_elementwise(operator_type):
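    # The shift operators (SHR, SHL) operate on 32-bit data; the other operators use int8 here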
if operator_type == "SHR" or operator_type == "SHL":
data_type = vapi.NpuDataType.INT32
data_type_bytes = 4
else:
data_type = vapi.NpuDataType.INT8
data_type_bytes = 1
def extract_ethosu_binary_elementwise_call_extern(mod):
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_binary_elementwise_calls = list()
def populate_ethosu_binary_elementwise_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_binary_elementwise"
):
ethosu_binary_elementwise_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_binary_elementwise_calls)
return ethosu_binary_elementwise_calls[0]
if operator_type == "ADD":
binary_elementwise = SingleEthosuBinaryElementwiseAdd
elif operator_type == "SUB":
binary_elementwise = SingleEthosuBinaryElementwiseSub
elif operator_type == "MUL":
binary_elementwise = SingleEthosuBinaryElementwiseMul
elif operator_type == "MIN":
binary_elementwise = SingleEthosuBinaryElementwiseMin
elif operator_type == "MAX":
binary_elementwise = SingleEthosuBinaryElementwiseMax
elif operator_type == "SHR":
binary_elementwise = SingleEthosuBinaryElementwiseShr
elif operator_type == "SHL":
binary_elementwise = SingleEthosuBinaryElementwiseShl
binary_elementwise_call = extract_ethosu_binary_elementwise_call_extern(binary_elementwise)
npu_op = tir_to_cs_translator.translate_ethosu_binary_elementwise(binary_elementwise_call)
# Compare IFM
assert npu_op.ifm.data_type == data_type
assert npu_op.ifm.shape == vapi.NpuShape3D(5, 9, 3)
assert npu_op.ifm.tiles.height_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_0
assert npu_op.ifm.tiles.height_1 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_1
assert npu_op.ifm.tiles.width_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).width_0
assert npu_op.ifm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ifm.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm.strides == vapi.NpuShape3D(
27 * data_type_bytes, 3 * data_type_bytes, 1 * data_type_bytes
)
# Compare IFM2
assert npu_op.ifm2.data_type == data_type
assert npu_op.ifm2.shape == vapi.NpuShape3D(5, 9, 3)
assert npu_op.ifm2.tiles.height_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_0
assert npu_op.ifm2.tiles.height_1 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_1
assert npu_op.ifm2.tiles.width_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).width_0
assert npu_op.ifm2.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ifm2.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm2.strides == vapi.NpuShape3D(
27 * data_type_bytes, 3 * data_type_bytes, 1 * data_type_bytes
)
# Compare OFM
assert npu_op.ofm.data_type == data_type
assert npu_op.ofm.shape == vapi.NpuShape3D(5, 9, 3)
assert npu_op.ofm.tiles.height_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_0
assert npu_op.ofm.tiles.height_1 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).height_1
assert npu_op.ofm.tiles.width_0 == vapi.NpuTileBox(5, 0, 9, [0, 0, 0, 0]).width_0
assert npu_op.ofm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ofm.layout == vapi.NpuLayout.NHWC
assert npu_op.ofm.strides == vapi.NpuShape3D(
27 * data_type_bytes, 3 * data_type_bytes, 1 * data_type_bytes
)
# Compare op type
if operator_type == "ADD":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.ADD
elif operator_type == "SUB":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SUB
elif operator_type == "MUL":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MUL
elif operator_type == "MIN":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MIN
elif operator_type == "MAX":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MAX
elif operator_type == "SHR":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SHR
elif operator_type == "SHL":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SHL
# Compare reversed_operands
    assert not npu_op.reversed_operands
# Compare activation
if operator_type == "SHR":
assert npu_op.activation is None
else:
assert npu_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
assert npu_op.activation.min == 10
assert npu_op.activation.max == 100
# Compare rounding mode
assert npu_op.rounding_mode == vapi.NpuRoundingMode.TFL
# fmt: off
"""A ethosu_binary_elementwise ADD with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseAddBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "ADD", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SUB with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseSubBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SUB", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise MUL with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMulBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MUL", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise MIN with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMinBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MIN", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise MAX with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseMaxBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int8", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int8", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int8", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int8", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int8", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "MAX", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int8"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SHR with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseShrBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int32", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int32", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int32", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int32", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SHR", 1, "NONE", 0, 0, "TFL", 0, 0, 0, dtype="int32"))
__tvm_meta__ = None
# fmt: on
# fmt: off
"""A ethosu_binary_elementwise SHL with broadcasting tir testcase for the translator"""
@tvm.script.ir_module
class SingleEthosuBinaryElementwiseShlBroadcasting:
@T.prim_func
def main(placeholder: T.handle, ethosu_write: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [27], dtype="int32", elem_offset=0, align=64, offset_factor=1)
ethosu_write_2 = T.match_buffer(ethosu_write, [24], dtype="int32", elem_offset=0, align=64, offset_factor=1)
# body
T.evaluate(T.call_extern("ethosu_binary_elementwise", "int32", 2, 3, 4, 2, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "int32", 1, 3, 1, 1, 0, 3, placeholder_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 1, 1, 1, "int32", 2, 3, 4, 2, 0, 3, ethosu_write_2[0], 0, 0, 0, T.float32(1.0), 0, "NHWC", 12, 4, 1, "SHL", 1, "CLIP", 10, 100, "TFL", 0, 0, 0, dtype="int32"))
__tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX", "SHR", "SHL"])
def test_translate_ethosu_binary_elementwise_broadcasting(operator_type):
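    # Same checks as the non-broadcasting test above, but IFM2 is broadcast from shape
    # (1, 3, 1), so the translated operation is expected to have reversed_operands set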
if operator_type == "SHR" or operator_type == "SHL":
data_type = vapi.NpuDataType.INT32
data_type_bytes = 4
else:
data_type = vapi.NpuDataType.INT8
data_type_bytes = 1
def extract_ethosu_binary_elementwise_broadcasting_call_extern(mod):
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_binary_elementwise_calls = list()
def populate_ethosu_binary_elementwise_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "tir.call_extern"
and stmt.args[0] == "ethosu_binary_elementwise"
):
ethosu_binary_elementwise_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_binary_elementwise_calls)
return ethosu_binary_elementwise_calls[0]
if operator_type == "ADD":
binary_elementwise = SingleEthosuBinaryElementwiseAddBroadcasting
elif operator_type == "SUB":
binary_elementwise = SingleEthosuBinaryElementwiseSubBroadcasting
elif operator_type == "MUL":
binary_elementwise = SingleEthosuBinaryElementwiseMulBroadcasting
elif operator_type == "MIN":
binary_elementwise = SingleEthosuBinaryElementwiseMinBroadcasting
elif operator_type == "MAX":
binary_elementwise = SingleEthosuBinaryElementwiseMaxBroadcasting
elif operator_type == "SHR":
binary_elementwise = SingleEthosuBinaryElementwiseShrBroadcasting
elif operator_type == "SHL":
binary_elementwise = SingleEthosuBinaryElementwiseShlBroadcasting
binary_elementwise_call = extract_ethosu_binary_elementwise_broadcasting_call_extern(
binary_elementwise
)
npu_op = tir_to_cs_translator.translate_ethosu_binary_elementwise(binary_elementwise_call)
# Compare IFM
assert npu_op.ifm.data_type == data_type
assert npu_op.ifm.shape == vapi.NpuShape3D(2, 3, 4)
assert npu_op.ifm.tiles.height_0 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).height_0
assert npu_op.ifm.tiles.height_1 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).height_1
assert npu_op.ifm.tiles.width_0 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).width_0
assert npu_op.ifm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ifm.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm.strides == vapi.NpuShape3D(
12 * data_type_bytes, 4 * data_type_bytes, 1 * data_type_bytes
)
# Compare IFM2
assert npu_op.ifm2.data_type == data_type
assert npu_op.ifm2.shape == vapi.NpuShape3D(1, 3, 1)
assert npu_op.ifm2.tiles.height_0 == vapi.NpuTileBox(1, 0, 3, [0, 0, 0, 0]).height_0
assert npu_op.ifm2.tiles.height_1 == vapi.NpuTileBox(1, 0, 3, [0, 0, 0, 0]).height_1
assert npu_op.ifm2.tiles.width_0 == vapi.NpuTileBox(1, 0, 3, [0, 0, 0, 0]).width_0
assert npu_op.ifm2.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ifm2.layout == vapi.NpuLayout.NHWC
assert npu_op.ifm2.strides == vapi.NpuShape3D(
1 * data_type_bytes, 1 * data_type_bytes, 1 * data_type_bytes
)
# Compare OFM
assert npu_op.ofm.data_type == data_type
assert npu_op.ofm.shape == vapi.NpuShape3D(2, 3, 4)
assert npu_op.ofm.tiles.height_0 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).height_0
assert npu_op.ofm.tiles.height_1 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).height_1
assert npu_op.ofm.tiles.width_0 == vapi.NpuTileBox(2, 0, 3, [0, 0, 0, 0]).width_0
assert npu_op.ofm.quantization == vapi.NpuQuantization(1.0, 0)
assert npu_op.ofm.layout == vapi.NpuLayout.NHWC
assert npu_op.ofm.strides == vapi.NpuShape3D(
12 * data_type_bytes, 4 * data_type_bytes, 1 * data_type_bytes
)
# Compare op type
if operator_type == "ADD":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.ADD
elif operator_type == "SUB":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SUB
elif operator_type == "MUL":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MUL
elif operator_type == "MIN":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MIN
elif operator_type == "MAX":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.MAX
elif operator_type == "SHR":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SHR
elif operator_type == "SHL":
assert npu_op.sub_op_type == vapi.NpuElementWiseOp.SHL
# Compare reversed_operands
    assert npu_op.reversed_operands
# Compare activation
if operator_type == "SHR":
assert npu_op.activation is None
else:
assert npu_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
assert npu_op.activation.min == 10
assert npu_op.activation.max == 100
# Compare rounding mode
assert npu_op.rounding_mode == vapi.NpuRoundingMode.TFL
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_type_inference.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import relay, TVMError
from tvm.relay.testing import run_opt_pass
from .infra import make_ethosu_conv2d
from .infra import make_ethosu_depthwise_conv2d
from .infra import make_ethosu_pooling
from .infra import make_ethosu_binary_elementwise
from .infra import make_ethosu_identity
from .infra import make_ethosu_unary_elementwise
@pytest.mark.parametrize(
["ifm_shape", "ifm_layout"], [((1, 56, 72, 55), "NHWC"), ((1, 56, 4, 72, 16), "NHCWB16")]
)
@pytest.mark.parametrize(
"ofm_shape,ofm_layout", [((1, 54, 38, 122), "NHWC"), ((1, 54, 8, 38, 16), "NHCWB16")]
)
def test_ethosu_conv2d_type_inference(
ifm_shape,
ifm_layout,
ofm_shape,
ofm_layout,
):
ifm_channels = 55
ofm_channels = 122
kernel_shape = (3, 2)
padding = (0, 1, 2, 3)
strides = (1, 2)
dilation = (2, 1)
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
conv2d = make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
func = relay.Function([ifm], conv2d)
func = run_opt_pass(func, relay.transform.InferType())
assert tuple(func.body.checked_type.shape) == ofm_shape
@pytest.mark.parametrize(
"ifm_dtype,weight_dtype,scale_bias_dtype",
[("float32", "int8", "uint8"), ("int8", "float32", "uint8"), ("int8", "int8", "float32")],
)
def test_ethosu_conv2d_invalid_dtypes(ifm_dtype, weight_dtype, scale_bias_dtype):
ifm_channels = 55
ofm_channels = 122
kernel_shape = (3, 2)
padding = (0, 1, 2, 3)
strides = (1, 2)
dilation = (2, 1)
ifm = relay.var("ifm", shape=(1, 56, 72, 55), dtype=ifm_dtype)
conv2d = make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
weight_dtype=weight_dtype,
scale_bias_dtype=scale_bias_dtype,
)
func = relay.Function([ifm], conv2d)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_conv2d_invalid_upscale_method():
invalid_upscale_method = "FOO"
ifm_channels = 55
ofm_channels = 122
kernel_shape = (3, 2)
padding = (0, 1, 2, 3)
strides = (1, 2)
dilation = (2, 1)
ifm = relay.var("ifm", shape=(1, 56, 72, 55), dtype="int8")
conv2d = make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
weight_dtype="int8",
scale_bias_dtype="uint8",
upscale=invalid_upscale_method,
)
func = relay.Function([ifm], conv2d)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize(
"ifm_shape, ifm_layout", [((1, 46, 71, 55), "NHWC"), ((1, 46, 4, 71, 16), "NHCWB16")]
)
@pytest.mark.parametrize(
"ofm_shape, ofm_layout", [((1, 44, 37, 55), "NHWC"), ((1, 44, 4, 37, 16), "NHCWB16")]
)
def test_ethosu_depthwise_conv2d_type_inference(
ifm_shape,
ifm_layout,
ofm_shape,
ofm_layout,
):
channels = 55
kernel_shape = (3, 2)
padding = (0, 1, 2, 3)
strides = (1, 2)
dilation = (2, 1)
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
depthwise_conv2d = make_ethosu_depthwise_conv2d(
ifm,
channels,
kernel_shape,
padding,
strides,
dilation,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
func = relay.Function([ifm], depthwise_conv2d)
func = run_opt_pass(func, relay.transform.InferType())
assert tuple(func.body.checked_type.shape) == ofm_shape
@pytest.mark.parametrize(
"ifm_dtype,weight_dtype,scale_bias_dtype",
[("float32", "int8", "uint8"), ("int8", "float32", "uint8"), ("int8", "int8", "float32")],
)
def test_ethosu_depthwise_conv2d_invalid_dtypes(ifm_dtype, weight_dtype, scale_bias_dtype):
channels = 55
kernel_shape = (3, 2)
padding = (0, 1, 2, 3)
strides = (1, 2)
    dilation = (2, 1)
ifm = relay.var("ifm", shape=(1, 56, 72, 55), dtype=ifm_dtype)
depthwise_conv2d = make_ethosu_depthwise_conv2d(
ifm,
channels,
kernel_shape,
padding,
strides,
dilation,
weight_dtype=weight_dtype,
scale_bias_dtype=scale_bias_dtype,
)
func = relay.Function([ifm], depthwise_conv2d)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize(
"ifm_shape, ifm_layout", [((1, 56, 72, 55), "NHWC"), ((1, 56, 4, 72, 16), "NHCWB16")]
)
@pytest.mark.parametrize(
"ofm_shape, ofm_layout", [((1, 56, 38, 55), "NHWC"), ((1, 56, 4, 38, 16), "NHCWB16")]
)
def test_ethosu_pooling_type_inference(
ifm_shape,
ifm_layout,
ofm_shape,
ofm_layout,
):
dtype = "int8"
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
pooling_type = "AVG"
pool_shape = (3, 2)
ofm_channels = 55
strides = (1, 2)
padding = (0, 1, 2, 3)
pooling = make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
func = relay.Function([ifm], pooling)
func = run_opt_pass(func, relay.transform.InferType())
assert tuple(func.body.checked_type.shape) == ofm_shape
assert func.body.checked_type.dtype == dtype
def test_ethosu_pooling_invalid_pooling_type():
invalid_pooling_type = "A"
dtype = "int8"
ifm = relay.var("ifm", shape=[1, 56, 72, 55], dtype=dtype)
pool_shape = (3, 2)
ofm_channels = 55
strides = (1, 2)
padding = (0, 1, 2, 3)
pooling = make_ethosu_pooling(
ifm,
invalid_pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
)
func = relay.Function([ifm], pooling)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_pooling_invalid_dtype():
invalid_dtype = "int32"
ifm = relay.var("ifm", shape=[1, 56, 72, 55], dtype=invalid_dtype)
pooling_type = "MAX"
pool_shape = (3, 2)
ofm_channels = 55
strides = (1, 2)
padding = (0, 1, 2, 3)
pooling = make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
)
func = relay.Function([ifm], pooling)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_pooling_invalid_upscale_method():
invalid_upscale_method = "FOO"
ifm = relay.var("ifm", shape=[1, 56, 72, 55], dtype="int8")
pooling = make_ethosu_pooling(
ifm,
"MAX",
(3, 2),
55,
(1, 2),
(0, 1, 2, 3),
upscale=invalid_upscale_method,
)
func = relay.Function([ifm], pooling)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize(
"ifm_shape, ifm_layout", [((1, 4, 5, 33), "NHWC"), ((1, 4, 3, 5, 16), "NHCWB16")]
)
@pytest.mark.parametrize(
"ofm_shape, ofm_layout", [((1, 4, 5, 33), "NHWC"), ((1, 4, 3, 5, 16), "NHCWB16")]
)
def test_ethosu_binary_elementwise_type_inference(
ifm_shape,
ifm_layout,
ofm_shape,
ofm_layout,
):
dtype = "int8"
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=dtype)
operator_type = "ADD"
ifm_channels, ifm2_channels = 33, 33
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
dtype,
ifm_layout=ifm_layout,
ifm2_layout=ifm_layout,
ofm_layout=ofm_layout,
)
func = relay.Function([ifm, ifm2], binary_elementwise)
func = run_opt_pass(func, relay.transform.InferType())
assert tuple(func.body.checked_type.shape) == ofm_shape
assert func.body.checked_type.dtype == dtype
def test_ethosu_binary_elementwise_invalid_operator_type():
invalid_operator_type = "A"
ifm_shape = [1, 4, 5, 33]
dtype = "int8"
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=dtype)
ifm_channels, ifm2_channels = 33, 33
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
invalid_operator_type,
dtype,
)
func = relay.Function([ifm, ifm2], binary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_binary_elementwise_invalid_data_types():
dtype = "int8"
dtype2 = "int32"
operator_type = "ADD"
ifm_shape = [1, 4, 5, 33]
ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=dtype2)
ifm_channels, ifm2_channels = 33, 33
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
dtype,
)
func = relay.Function([ifm, ifm2], binary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize("operator_type", ["MIN", "MAX"])
def test_ethosu_binary_elementwise_min_max_invalid_data_type(operator_type):
invalid_dtype = "int32"
ifm_shape = [1, 4, 5, 33]
ifm = relay.var("ifm", shape=ifm_shape, dtype=invalid_dtype)
ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=invalid_dtype)
ifm_channels, ifm2_channels = 33, 33
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
invalid_dtype,
)
func = relay.Function([ifm, ifm2], binary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize("invalid_dtype", ["int8", "uint8"])
@pytest.mark.parametrize("operator_type", ["SHL", "SHR"])
def test_ethosu_binary_elementwise_shift_invalid_data_type(invalid_dtype, operator_type):
ifm_shape = [1, 4, 5, 33]
ifm = relay.var("ifm", shape=ifm_shape, dtype=invalid_dtype)
ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=invalid_dtype)
ifm_channels, ifm2_channels = 33, 33
binary_elementwise = make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
invalid_dtype,
)
func = relay.Function([ifm, ifm2], binary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize("shape", [(1, 56, 72, 55), (241, 7, 755), (28, 44), (5003,)])
def test_ethosu_identity_type_inference(shape):
dtype = "int8"
ifm = relay.var("ifm", shape=shape, dtype=dtype)
identity = make_ethosu_identity(ifm)
func = relay.Function([ifm], identity)
func = run_opt_pass(func, relay.transform.InferType())
assert tuple(func.body.checked_type.shape) == shape
assert func.body.checked_type.dtype == dtype
def test_ethosu_identity_invalid_shape():
invalid_shape = [1, 2, 3, 4, 5]
dtype = "int8"
ifm = relay.var("ifm", shape=invalid_shape, dtype=dtype)
identity = make_ethosu_identity(ifm)
func = relay.Function([ifm], identity)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_identity_invalid_dtype():
invalid_dtype = "int32"
ifm = relay.var("ifm", shape=[6000], dtype=invalid_dtype)
identity = make_ethosu_identity(ifm)
func = relay.Function([ifm], identity)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
@pytest.mark.parametrize(
"ifm_shape, ifm_layout", [((1, 4, 5, 33), "NHWC"), ((1, 4, 3, 5, 16), "NHCWB16")]
)
@pytest.mark.parametrize(
"ofm_shape, ofm_layout", [((1, 4, 5, 33), "NHWC"), ((1, 4, 3, 5, 16), "NHCWB16")]
)
@pytest.mark.parametrize("operator_type, data_type", [("ABS", "int8"), ("CLZ", "int32")])
def test_ethosu_unary_elementwise_type_inference(
ifm_shape,
ifm_layout,
ofm_shape,
ofm_layout,
operator_type,
data_type,
):
ifm = relay.var("ifm", shape=ifm_shape, dtype=data_type)
ofm_channels = 33
unary_elementwise = make_ethosu_unary_elementwise(
ifm,
ofm_channels,
operator_type,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
f = relay.Function([ifm], unary_elementwise)
f = run_opt_pass(f, relay.transform.InferType())
assert tuple(f.body.checked_type.shape) == ofm_shape
def test_ethosu_unary_elementwise_invalid_operator_type():
ifm = relay.var("ifm", shape=(1, 3, 7, 12), dtype="int8")
invalid_op_type = "ABBBS"
unary_elementwise = make_ethosu_unary_elementwise(
ifm,
12,
invalid_op_type,
)
func = relay.Function([ifm], unary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
def test_ethosu_unary_elementwise_invalid_dtype():
invalid_dtype = "int32"
ifm = relay.var("ifm", shape=(1, 5, 15, 25), dtype=invalid_dtype)
unary_elementwise = make_ethosu_unary_elementwise(
ifm,
25,
"ABS",
)
func = relay.Function([ifm], unary_elementwise)
with pytest.raises(TVMError):
run_opt_pass(func, relay.transform.InferType())
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_vela_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from ethosu.vela import api as vapi
from unittest.mock import patch
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.relay.backend.contrib.ethosu import vela_api
import tvm.relay.backend.contrib.ethosu.tir_to_cs_translator as tirtocs
ACCEL_TYPES = [
vapi.NpuAccelerator.Ethos_U55_256,
vapi.NpuAccelerator.Ethos_U55_128,
vapi.NpuAccelerator.Ethos_U55_64,
vapi.NpuAccelerator.Ethos_U55_32,
]
"""Test case 1"""
@tvm.script.ir_module
class Module1:
@T.prim_func
def main(
placeholder: T.handle,
placeholder_1: T.handle,
placeholder_2: T.handle,
ethosu_conv2d: T.handle,
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_3 = T.match_buffer(
placeholder, [192], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
placeholder_4 = T.match_buffer(
placeholder_1, [48], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
placeholder_5 = T.match_buffer(
placeholder_2, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1
)
ethosu_conv2d_1 = T.match_buffer(
ethosu_conv2d, [1024], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
# body
T.evaluate(
T.call_extern(
"ethosu_conv2d",
"uint8",
8,
8,
3,
8,
0,
8,
placeholder_3[0],
0,
0,
0,
T.float32(0.5),
10,
"NHWC",
24,
3,
1,
"uint8",
8,
8,
16,
8,
0,
8,
ethosu_conv2d_1[0],
0,
0,
0,
T.float32(0.25),
14,
"NHWC",
128,
16,
1,
1,
1,
1,
1,
1,
1,
placeholder_4[0],
0,
12,
placeholder_5[0],
0,
0,
0,
0,
0,
"CLIP",
0,
0,
"TFL",
"NONE",
dtype="uint8",
)
)
__tvm_meta__ = None
"""Test case 2 with per-channel quantization"""
@tvm.script.ir_module
class Module2:
@T.prim_func
def main(
placeholder: T.handle,
placeholder_1: T.handle,
placeholder_2: T.handle,
placeholder_6: T.handle,
ethosu_conv2d: T.handle,
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_3 = T.match_buffer(
placeholder, [192], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
placeholder_4 = T.match_buffer(
placeholder_1, [48], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
placeholder_5 = T.match_buffer(
placeholder_2, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1
)
# Per-channel weight scales
placeholder_7 = T.match_buffer(
placeholder_6, [16], dtype="float32", elem_offset=0, align=64, offset_factor=1
)
ethosu_conv2d_1 = T.match_buffer(
ethosu_conv2d, [1024], dtype="uint8", elem_offset=0, align=64, offset_factor=1
)
# body
T.evaluate(
T.call_extern(
"ethosu_conv2d",
"uint8",
8,
8,
3,
8,
0,
8,
placeholder_3[0],
0,
0,
0,
T.float32(0.5),
10,
"NHWC",
24,
3,
1,
"uint8",
8,
8,
16,
8,
0,
8,
ethosu_conv2d_1[0],
0,
0,
0,
T.float32(0.25),
14,
"NHWC",
128,
16,
1,
1,
1,
1,
1,
1,
1,
placeholder_4[0],
0,
12,
placeholder_5[0],
0,
0,
0,
0,
0,
"CLIP",
0,
0,
"TFL",
"NONE",
dtype="uint8",
)
)
__tvm_meta__ = None
def test_get_optimal_block_config():
block_configs_cases = [
{
"test": [
vapi.NpuShape3D(10, 20, 8),
vapi.NpuShape3D(10, 30, 16),
vapi.NpuShape3D(10, 40, 32),
],
"ref": vapi.NpuShape3D(10, 40, 32),
},
{
"test": [
vapi.NpuShape3D(10, 20, 8),
vapi.NpuShape3D(10, 50, 32),
vapi.NpuShape3D(10, 40, 32),
],
"ref": vapi.NpuShape3D(10, 50, 32),
},
{
"test": [
vapi.NpuShape3D(50, 50, 8),
vapi.NpuShape3D(10, 30, 32),
vapi.NpuShape3D(8, 8, 64),
],
"ref": vapi.NpuShape3D(8, 8, 64),
},
]
for test_case in block_configs_cases:
assert vela_api._get_optimal_block_config(test_case["test"]) == test_case["ref"]
@pytest.mark.parametrize(
"block_config_str, expected_block_config",
[("4x4x8", vapi.NpuShape3D(4, 4, 8)), ("3x7x16", vapi.NpuShape3D(3, 7, 16))],
)
def test_force_block_config(block_config_str, expected_block_config):
config = {
"dev_force_block_config": block_config_str,
}
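    # dev_force_block_config overrides the block config search, which is why passing
    # None in place of an NPU op is sufficient for this test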
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
block_config = vela_api.get_optimal_block_config(None, vapi.NpuAccelerator.Ethos_U55_128)
assert block_config == expected_block_config
def test_compress_weights():
test_vecs = [
{
# Stimulus
"accel": vapi.NpuAccelerator.Ethos_U55_256,
"block_depth": 8,
"ifm_dtype": np.uint8,
"shape": (3, 3, 16, 64),
"layout": "HWIO",
"zero_point": np.int64(134),
"dilation": (1, 1),
"is_depthwise": False,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.PART_KERNEL_FIRST,
},
{
# Stimulus
"accel": vapi.NpuAccelerator.Ethos_U55_256,
"block_depth": 8,
"ifm_dtype": np.uint8,
"shape": (3, 3, 32, 64),
"layout": "HWIO",
"zero_point": np.int64(134),
"dilation": (1, 1),
"is_depthwise": False,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.DEPTH_FIRST,
},
{
# Stimulus
"accel": vapi.NpuAccelerator.Ethos_U55_256,
"block_depth": 8,
"ifm_dtype": np.int16,
"shape": (3, 3, 16, 64),
"layout": "HWIO",
"zero_point": np.int64(134),
"dilation": (1, 1),
"is_depthwise": False,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.DEPTH_FIRST,
},
# Pass-through value check
{
# Stimulus
"accel": vapi.NpuAccelerator.Ethos_U55_128,
"block_depth": 16,
"ifm_dtype": np.uint8,
"shape": (243, 152, 7, 1),
"layout": "HWOI",
"zero_point": np.int64(110),
"dilation": (2, 2),
"is_depthwise": True,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.DEPTH_FIRST,
},
{
# Stimulus
"accel": vapi.NpuAccelerator.Ethos_U55_128,
"block_depth": 32,
"ifm_dtype": np.uint8,
"shape": (64, 67, 35, 8),
"layout": "OHWI",
"zero_point": np.int64(100),
"dilation": (1, 2),
"is_depthwise": False,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.PART_KERNEL_FIRST,
},
]
def verify(test_vec, mock_obj):
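        # The weight volume handed to npu_encode_weights is expected in OHWI order;
        # these index permutations map each source layout onto OHWI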
layout_transform_indices = {
"HWIO": (3, 0, 1, 2),
"HWOI": (2, 0, 1, 3),
"OHWI": (0, 1, 2, 3),
}
assert mock_obj
mock_obj.assert_called_once()
        assert mock_obj.call_args[1]["accelerator"] == test_vec["accel"]
ishape = test_vec["shape"]
shape_owhi = (
ishape[layout_transform_indices[test_vec["layout"]][0]],
ishape[layout_transform_indices[test_vec["layout"]][1]],
ishape[layout_transform_indices[test_vec["layout"]][2]],
ishape[layout_transform_indices[test_vec["layout"]][3]],
)
assert mock_obj.call_args[1]["weights_volume"].shape == shape_owhi
assert mock_obj.call_args[1]["dilation_xy"] == test_vec["dilation"]
assert mock_obj.call_args[1]["ifm_bitdepth"] == np.iinfo(test_vec["ifm_dtype"]).bits
assert mock_obj.call_args[1]["ofm_block_depth"] == test_vec["block_depth"]
assert mock_obj.call_args[1]["is_depthwise"] == test_vec["is_depthwise"]
assert mock_obj.call_args[1]["block_traversal"] == test_vec["block_traversal"]
def create_mock(test_vec):
with patch("ethosu.vela.api.npu_encode_weights") as mock_npu_encode_weights:
ifm_bitdepth = np.iinfo(test_vec["ifm_dtype"]).bits
ifm_dtype = test_vec["ifm_dtype"]
max = np.iinfo(ifm_dtype).max
min = np.iinfo(ifm_dtype).min
values = np.random.randint(min, max, test_vec["shape"], ifm_dtype)
vela_api.compress_weights(
weights=values,
weights_zp=test_vec["zero_point"],
weights_layout=test_vec["layout"],
ifm_bitdepth=ifm_bitdepth,
block_depth=test_vec["block_depth"],
dilation=test_vec["dilation"],
accel_config=test_vec["accel"],
is_depthwise=test_vec["is_depthwise"],
)
return mock_npu_encode_weights
for tv in test_vecs:
mock_obj = create_mock(tv)
verify(tv, mock_obj)
def test_pack_biases():
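    # Checks that pack_biases() passes each bias value along with the expected
    # hardware scale and shift to npu_encode_bias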
test_vecs = [
{
# Stimulus
"bias_length": 3,
"ifm_scale": np.single(1.11111111),
"ifm_dtype": np.uint8,
"weight_scales": np.array(
[np.single(0.91111111), np.single(1.01111111), np.single(1.11111111)]
),
"ofm_scale": np.single(1.2),
"is_activation_tanh_or_sigmoid": False,
# Reference outputs
"hw_scales": (1811663288, 2010504240, 1104672703),
"hw_shifts": (31, 31, 30),
},
{
# Stimulus
"bias_length": 3,
"ifm_scale": np.single(1.11111111),
"ifm_dtype": np.int8,
"weight_scales": np.array(
[np.single(0.91111111), np.single(1.01111111), np.single(1.11111111)]
),
"ofm_scale": np.single(1.2),
"is_activation_tanh_or_sigmoid": False,
# Reference outputs
"hw_scales": (1811663185, 2010504312, 1104672720),
"hw_shifts": (31, 31, 30),
},
{
# Stimulus
"bias_length": 3,
"ifm_scale": np.single(1.11111111),
"ifm_dtype": np.int16,
"weight_scales": np.array(
[np.single(0.91111111), np.single(1.01111111), np.single(1.11111111)]
),
"ofm_scale": np.single(1.2),
"is_activation_tanh_or_sigmoid": False,
# Reference outputs
"hw_scales": (27644, 30678, 16856),
"hw_shifts": (15, 15, 14),
},
]
def verify(test_vec, mock_obj, packed_biases):
assert mock_obj
for idx, val in enumerate(test_vec["bias_values"]):
assert val == mock_obj.call_args_list[idx][0][0]
assert test_vec["hw_scales"][idx] == mock_obj.call_args_list[idx][0][1]
assert test_vec["hw_shifts"][idx] == mock_obj.call_args_list[idx][0][2]
def create_mock(test_vec):
with patch("ethosu.vela.api.npu_encode_bias") as mock_npu_encode_bias:
mock_npu_encode_bias.return_value = bytearray(10)
ifm_dtype = test_vec["ifm_dtype"]
max = np.iinfo(ifm_dtype).max
min = np.iinfo(ifm_dtype).min
# tvm will always create biases in int32
biases = np.random.randint(min, max, test_vec["bias_length"], np.int32)
packed_biases = vela_api.pack_biases(
biases=biases,
ifm_scale=test_vec["ifm_scale"],
ifm_dtype=test_vec["ifm_dtype"],
weight_scales=test_vec["weight_scales"],
ofm_scale=test_vec["ofm_scale"],
is_activation_tanh_or_sigmoid=test_vec["is_activation_tanh_or_sigmoid"],
)
test_vec["bias_values"] = biases
return mock_npu_encode_bias, packed_biases
for _test_vec in test_vecs:
mock_obj, packed_biases = create_mock(_test_vec)
verify(_test_vec, mock_obj, packed_biases)
def extract_ethosu_conv2d_extern_calls(mod):
"""This function will obtain all ethosu_conv2d
    calls from an NPU TIR module
Parameters
----------
mod : tvm.IRModule
        This is an NPU TIR Module
Returns
-------
list
List of tvm.tir.Call objects
that are tir extern calls
for ethosu_conv2d
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
ethosu_conv2d_calls = list()
def populate_ethosu_conv2d_calls(stmt):
if (
isinstance(stmt, tvm.tir.Call)
and stmt.op.name == "T.call_extern"
and stmt.args[0] == "ethosu_conv2d"
):
ethosu_conv2d_calls.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_ethosu_conv2d_calls)
return ethosu_conv2d_calls
@pytest.mark.parametrize(
"accel",
ACCEL_TYPES,
)
def test_encode_weights(accel):
test_vecs = [
{
# Stimulus
"tir_module": Module1,
"param_dict": {
1: np.random.randint(np.iinfo("uint8").min, np.iinfo("uint8").max, [48], "uint8"),
2: np.random.randint(np.iinfo("int32").min, np.iinfo("int32").max, [16], "int32"),
},
"accel_type": accel,
# Reference outputs
"block_traversal": vapi.NpuBlockTraversal.PART_KERNEL_FIRST,
},
]
def create_mock(test_vec):
with patch("ethosu.vela.api.npu_encode_weights") as mock_enc_w:
with patch("ethosu.vela.api.npu_find_block_configs") as mock_blk_cfg:
mock_blk_cfg.return_value = [vapi.NpuShape3D(8, 8, 8)]
ethosu_conv2d_calls = extract_ethosu_conv2d_extern_calls(test_vec["tir_module"])
buffer_info = tirtocs.extract_buffer_info(
test_vec["tir_module"], test_vec["param_dict"]
)
for ethosu_conv2d_call in ethosu_conv2d_calls:
npu_op, _ = tirtocs.translate_ethosu_conv2d(ethosu_conv2d_call)
weights = buffer_info[npu_op.weights[0].address.buffer_var][0]
vela_api.encode_weights(ethosu_conv2d_call, weights, accel)
return mock_enc_w
def verify(test_vec, mock_enc_w):
ethosu_conv2d_calls = extract_ethosu_conv2d_extern_calls(test_vec["tir_module"])
buffer_info = tirtocs.extract_buffer_info(test_vec["tir_module"], test_vec["param_dict"])
for ethosu_conv2d_call in ethosu_conv2d_calls:
npu_op, w_zero_point = tirtocs.translate_ethosu_conv2d(ethosu_conv2d_call)
weights = buffer_info[npu_op.weights[0].address.buffer_var][0]
assert mock_enc_w.call_args[1]["accelerator"] == accel
assert (
mock_enc_w.call_args[1]["weights_volume"].flatten()
== weights.astype(np.int64) - w_zero_point
).all()
assert mock_enc_w.call_args[1]["dilation_xy"] == (
npu_op.kernel.dilation_x,
npu_op.kernel.dilation_y,
)
assert mock_enc_w.call_args[1]["dilation_xy"] == (
npu_op.kernel.dilation_x,
npu_op.kernel.dilation_y,
)
assert mock_enc_w.call_args[1]["ifm_bitdepth"] == npu_op.ifm.data_type.size_in_bits()
assert mock_enc_w.call_args[1]["block_traversal"] == test_vec["block_traversal"]
for _test_vec in test_vecs:
_mock_enc_w = create_mock(_test_vec)
verify(_test_vec, _mock_enc_w)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_gemm_acc16.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
from tvm import te
import numpy as np
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16
def benchmark_fc_int8_acc16():
m = 128
n = 128
k = 128
X = te.placeholder((m, k), name="X", dtype="uint8")
W = te.placeholder((n, k), name="W", dtype="int8")
peak = 512 / 16 * 2 * 2 * 2
gops_per_mm = 2 * n * m * k
print("Peak {} Gops/s \n".format(peak))
def verify(target="llvm -mcpu=skylake-avx512"):
if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
dev = tvm.device(target, 0)
X = te.placeholder((m, k), name="X", dtype="uint8")
W = te.placeholder((n, k), name="W", dtype="int8")
pc = dot_16x1x16_uint8_int8_int16()
ak = te.reduce_axis((0, k), name="k")
packedW = te.placeholder((n // 128, 128 * (k // 2), 2), name="packedW", dtype="int8")
t_fc = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int16")
* packedW[j // 128, (ak // 2) * 128 + j % 128, ak % 2].astype("int16"),
axis=ak,
),
name="F",
)
t_sch = te.create_schedule(t_fc.op)
a_x, a_y = t_fc.op.axis
(a_k,) = t_fc.op.reduce_axis
a_yo, a_yi = t_sch[t_fc].split(a_y, factor=128)
a_ko, a_ki = t_sch[t_fc].split(a_k, factor=2)
a_xo, a_xi = t_sch[t_fc].split(a_x, factor=128)
a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=32)
t_sch[t_fc].reorder(a_yo, a_xo, a_koo, a_xi, a_koi, a_yi, a_ki)
t_sch[t_fc].tensorize(a_yi, pc)
# print(tvm.lower(t_sch, [X, packedW, t_fc], simple_mode=True))
t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
t_evaluator = t_func.time_evaluator(t_func.entry_name, dev, number=10)
# generate the plain data
a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
packW = np.random.uniform(1, 10, size=(n // 128, 128 * (k // 2), 2)).astype("int8")
# This occurs in pre_compute stage
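        # Packing sketch: W has logical shape (n, k) in int8 and is repacked into
        # (n // 128, 128 * (k // 2), 2) so that, for each block of 128 output channels,
        # two adjacent k values per channel sit contiguously. This matches the
        # packedW[j // 128, (ak // 2) * 128 + j % 128, ak % 2] access in the compute above.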
for r_idx in range(n // 128):
for s_idx in range(128 * (k // 2)):
for t_idx in range(2):
packW[r_idx][s_idx][t_idx] = b_[r_idx * 128 + s_idx % 128][
s_idx // 128 * 2 + t_idx
]
x = tvm.nd.array(a_, dev)
w = tvm.nd.array(packW, dev)
y = tvm.nd.array(np.zeros((m, n), dtype="int16"), dev)
result = t_evaluator(x, w, y)
gops_per_sec = gops_per_mm / result.mean / 1e9
tvm.testing.assert_allclose(y.numpy(), np.dot(a_, b_.T), rtol=1e-5)
print(
"Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}.".format(
result.mean * 1000, gops_per_sec, gops_per_sec / peak
)
)
# t_func.export_library("gemm_tensorize.o")
verify()
if __name__ == "__main__":
benchmark_fc_int8_acc16()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_gemm_acc32_vnni.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
import pytest
@tvm.testing.requires_llvm
@pytest.mark.skip("skip because feature not enabled")
def test_fc_int8_acc32():
m = 1024
n = 1024
k = 1024
X = te.placeholder((m, k), name="X", dtype="uint8")
W = te.placeholder((n, k), name="W", dtype="int8")
peak = 280
print("Peak {} Gops/s".format(peak))
memory_ops = m * k + n * k + 2 * m * n
gops_per_mm = 2 * m * n * k
    # For LLVM < 8.0, the setting below triggers the error "'cascadelake' is not a recognized
    # processor for this target (ignoring processor)". Once LLVM 8.0 is enabled in the test,
    # we should use the cascadelake setting.
def verify(target="llvm -mcpu=cascadelake"):
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
dev = tvm.device(target, 0)
pc = dot_16x1x16_uint8_int8_int32_cascadelake()
ak = te.reduce_axis((0, k), name="k")
packedW = te.placeholder((n // 16, 16 * (k // 4), 4), name="packedW", dtype="int8")
t_fc = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* packedW[
tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4) * 16 + j % 16, ak % 4
].astype("int32"),
axis=ak,
),
name="F",
)
t_sch = te.create_schedule(t_fc.op)
a_x, a_y = t_fc.op.axis
(a_k,) = t_fc.op.reduce_axis
a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)
a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)
a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)
a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)
t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)
t_sch[t_fc].unroll(a_koi)
t_sch[t_fc].tensorize(a_yi, pc)
t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
t_evaluator = t_func.time_evaluator(t_func.entry_name, dev, number=10)
# generate the plain data
a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
packW = np.random.uniform(1, 10, size=(n // 16, 16 * (k // 4), 4)).astype("int8")
# This occurs in pre_compute stage
for r_idx in range(n // 16):
for s_idx in range(16 * (k // 4)):
for t_idx in range(4):
packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx % 16][
(s_idx // 16) * 4 + t_idx
]
x = tvm.nd.array(a_, dev)
w = tvm.nd.array(packW, dev)
y = tvm.nd.array(np.zeros((m, n), dtype="int32"), dev)
result = t_evaluator(x, w, y)
gops_per_sec = gops_per_mm / result.mean / 1e9
# verify the correctness
tvm.testing.assert_allclose(y.numpy(), np.dot(a_, b_.T), rtol=0)
print(
"Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}".format(
result.mean * 1000, gops_per_sec, gops_per_sec / peak
)
)
t_func.export_library("tensorize_acc32.o")
verify()
if __name__ == "__main__":
# The test requires Cascade Lake and newer Intel machines to generate the
# correct AVX512 VNNI instruction. So, disabling the test.
# test_fc_int8_acc32()
pass
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Testing infrastructure for Hexagon """
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/benchmark_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Utility functions used for benchmarks """
import csv
import os
import tempfile
import pytest
from tvm.contrib.hexagon.tools import HEXAGON_SIMULATOR_NAME
def skip_benchmarks_flag_and_reason():
"""
Returns one of these tuples:
(False, '') or
(True, (a string describing why the test should be skipped))
NOTE: This function is a temporary measure to prevent the TVM CI system
    from running benchmark scripts every time the CI pre-commit hook executes.
This should go away when a better system is in place to govern when various
tests / benchmarks are executed.
"""
asn = os.environ.get("ANDROID_SERIAL_NUMBER")
if asn == HEXAGON_SIMULATOR_NAME:
        return (True, "Skipping benchmarks when ANDROID_SERIAL_NUMBER='simulator'")
return (False, "")
class UnsupportedException(Exception):
"""
Indicates that the specified benchmarking configuration is known to
currently be unsupported. The Exception message may provide more detail.
"""
class NumericalAccuracyException(Exception):
"""
Indicates that the benchmarking configuration appeared to run successfully,
but the output data didn't have the expected accuracy.
"""
class BenchmarksTable:
"""
Stores/reports the result of benchmark runs.
Each line item has a status: success, fail, or skip.
Each 'success' line item must include benchmark data,
in the form provided by TVM's `time_evaluator` mechanism.
Each line item may also specify values for any subset of
    the columns provided to the table's constructor.
"""
BUILTIN_COLUMN_NAMES = set(
[
"row_status",
"timings_min_usecs",
"timings_max_usecs",
"timings_median_usecs",
"timings_mean_usecs",
"timings_stddev_usecs",
]
)
def __init__(self):
self._line_items = []
def validate_user_supplied_kwargs(self, kwarg_dict):
name_conflicts = set(kwarg_dict).intersection(self.BUILTIN_COLUMN_NAMES)
if name_conflicts:
name_list = ", ".join(name_conflicts)
raise Exception(f"Attempting to supply values for built-in column names: {name_list}")
def record_success(self, timings, **kwargs):
"""
`timings` : Assumed to have the structure and meaning of
the timing results provided by TVM's `time_evaluator`
mechanism.
`kwargs` : Optional values for any of the other columns
defined for this benchmark table.
"""
self.validate_user_supplied_kwargs(kwargs)
line_item = kwargs
line_item["row_status"] = "SUCCESS"
line_item["timings_min_usecs"] = timings.min * 1000000
line_item["timings_max_usecs"] = timings.max * 1000000
line_item["timings_median_usecs"] = timings.median * 1000000
line_item["timings_stddev_usecs"] = timings.std * 1000000
line_item["timings_mean_usecs"] = timings.mean * 1000000
self._line_items.append(line_item)
def record_skip(self, **kwargs):
self.validate_user_supplied_kwargs(kwargs)
line_item = dict(kwargs)
line_item["row_status"] = "SKIP"
self._line_items.append(line_item)
def record_fail(self, **kwargs):
self.validate_user_supplied_kwargs(kwargs)
line_item = dict(kwargs)
line_item["row_status"] = "FAIL"
self._line_items.append(line_item)
def has_fail(self):
"""
Returns True if the table contains at least one 'fail' line item,
otherwise returns False.
"""
return any(item["row_status"] == "FAIL" for item in self._line_items)
def print_csv(self, f, column_name_order, timing_decimal_places=3):
"""
Print the benchmark results as a csv.
`f` : The output stream.
`column_name_order`: an iterable sequence of column names, indicating the
left-to-right ordering of columns in the CSV output.
The CSV output will contain only those columns that are mentioned in
this list.
`timing_decimal_places`: for the numeric timing values, this is the
number of decimal places to provide in the printed output.
For example, a value of 3 is equivalent to the Python formatting string
`'{:.3f}'`
"""
writer = csv.DictWriter(
f, column_name_order, dialect="excel-tab", restval="", extrasaction="ignore"
)
writer.writeheader()
for line_item_dict in self._line_items:
# Use a copy of the line-item dictionary, because we might do some modifications
# for the sake of rendering...
csv_line_dict = dict(line_item_dict)
for col_name in [
"timings_min_usecs",
"timings_max_usecs",
"timings_median_usecs",
"timings_stddev_usecs",
"timings_mean_usecs",
]:
if col_name in csv_line_dict:
old_value = csv_line_dict[col_name]
assert isinstance(old_value, float), (
f"Formatting code assumes that column {col_name} is"
f" some col_nameind of float, but its actual type is {type(old_value)}"
)
str_value = f"{old_value:>0.{timing_decimal_places}f}"
csv_line_dict[col_name] = str_value
writer.writerow(csv_line_dict)
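# Illustrative usage sketch for BenchmarksTable (the column names and the fake `timings`
# object below are hypothetical; real timings come from TVM's `time_evaluator`).
def _example_benchmarks_table_usage(output_stream):
    """Minimal sketch: record a couple of line items and render them as CSV."""
    from types import SimpleNamespace

    table = BenchmarksTable()
    fake_timings = SimpleNamespace(min=1.0e-4, max=2.0e-4, median=1.5e-4, mean=1.5e-4, std=1.0e-5)
    table.record_success(fake_timings, kernel="conv2d", shape="1x64x64x64")
    table.record_skip(kernel="conv2d", shape="1x128x128x64", comments="unsupported config")
    table.print_csv(
        output_stream,
        ["kernel", "shape", "row_status", "timings_mean_usecs", "comments"],
    )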
def get_benchmark_id(keys_dict):
"""
Given a dictionary with the distinguishing characteristics of a particular benchmark
line item, compute a string that uniquely identifies the benchmark.
The returned string:
- is a valid directory name on the host's file systems, and
- should be easy for humans to parse
Note that the insertion order for `keys_dict` affects the computed name.
"""
    # Create a copy, because we might be modifying it.
keys_dict_copy = dict(keys_dict)
# Sniff for shape-like lists, because we want them in a form that's both
# readable and filesystem-friendly...
for k, v in keys_dict_copy.items():
if isinstance(v, (list, tuple)):
v_str = "_".join([str(x) for x in v])
keys_dict_copy[k] = v_str
return "-".join([f"{k}:{v}" for k, v in keys_dict_copy.items()])
def get_benchmark_decription(keys_dict):
"""
Similar to `get_benchmark_id`, but the focus is on human-readability.
The returned string contains no line-breaks, but may contain spaces and
other characters that make it unsuitable for use as a filename.
"""
return " ".join([f"{k}={v}" for k, v in keys_dict.items()])
@pytest.fixture(scope="class")
def benchmark_group(request):
"""This fixture provides some initialization / finalization logic for groups of related
benchmark runs.
See the fixture implementation below for details.
The fixture's mechanics are described here: https://stackoverflow.com/a/63047695
TODO: There may be cleaner ways to let each class that uses this fixture provide its
own value for `csv_column_order`.
TODO: In the future we may wish to break this fixture up in to several smaller ones.
The overall contract for a class (e.g. `MyTest`) using this fixture is as follows:
https://stackoverflow.com/a/63047695
@pytest.mark.usefixtures("benchmark_group")
class MyTest:
# The fixture requires that this class variable is defined before
# the fixture's finalizer-logic executes.
#
    # This is used as an argument to BenchmarksTable.print_csv(...) after
# all of MyTest's unit tests have executed.
csv_column_order = [
...
]
# Before the MyTest's first unit test executes, the fixture will populate the
# following class variables:
MyTest.working_dir : str
    MyTest.benchmark_table : BenchmarksTable"""
working_dir = tempfile.mkdtemp()
table = BenchmarksTable()
request.cls.working_dir = working_dir
request.cls.benchmark_table = table
yield
tabular_output_filename = os.path.join(working_dir, "benchmark-results.csv")
if not hasattr(request.cls, "csv_column_order"):
raise Exception('Classes using this fixture must have a member named "csv_column_order"')
with open(tabular_output_filename, "w", encoding="UTF-8") as csv_file:
table.print_csv(csv_file, request.cls.csv_column_order)
print()
print("*" * 80)
print(f"BENCHMARK RESULTS FILE: {tabular_output_filename}")
print("*" * 80)
print()
if table.has_fail() > 0:
pytest.fail("At least one benchmark configuration failed", pytrace=False)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon testing fixtures used to deduce testing argument
values from testing parameters """
# Disabling invalid-name check as the name is expected to be exactly this by pytest
# pylint: disable=invalid-name
pytest_plugins = [
"tvm.contrib.hexagon.pytest_plugin",
]
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/conv2d/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Testing infrastructure for Hexagon/TOPI/Conv2d """
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/conv2d/test_conv2d_blocked.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon contrib tests for blocked conv2d """
import numpy as np
import tvm
import tvm.testing
from tvm import te, topi
from tvm.topi import testing
from ..infrastructure import (
build_and_run,
conv2d_compute,
conv2d_verify,
get_block_shape,
get_packed_filter_shape,
get_packed_shape,
)
def conv2d_nhwc8h8w32c(
shape_input,
pad,
stride,
dilation,
shape_filter,
k_split_factor,
h_split_factor,
dtype,
storage_scope="global",
):
"""
Conv2d wherein the input activation is defined by its
logical NHWC layout. The filter is provided in its physical
packed layout (oihw8i32o4i). The input is padded and then packed
into its physical packed layout (nhwc8h8w32c). The resulting
computation is in the same physical packed layout (nhwc8h8w32c).
"""
# nhwc layout
logical_input = te.placeholder(shape_input, dtype=dtype, name="logical_input")
# oihw8i32o4i layout
filt_packed = te.placeholder(shape_filter, dtype=dtype, name="packed_filter")
block_h, block_w, block_c = get_block_shape()
# Calculate padded input
_, height, width, _ = shape_input
pad_h = (block_h - ((height + pad[1]) % block_h)) % block_h
pad_w = (block_w - ((width + pad[3]) % block_w)) % block_w
padded_input = topi.nn.pad(
logical_input,
[0, pad[0], pad[2], 0],
[0, pad_h, pad_w, 0],
pad_value=0,
name="padded_input",
)
# Calculate packed input
packed_shape = get_packed_shape(padded_input.shape)
packed_input = te.compute(
packed_shape,
lambda n, ho, wo, co, hi, wi, ci: padded_input[
n, ho * block_h + hi, wo * block_w + wi, co * block_c + ci
],
name="packed_input",
)
output_shape, compute = conv2d_compute(packed_input, filt_packed, pad, stride, dilation)
packed_output = te.compute(output_shape, compute, name="packed_output")
s = te.create_schedule(packed_output.op)
# Ensure the padding and array packing is performed inline
s[padded_input].compute_inline()
s[packed_input].compute_inline()
# cache reads and writes
cached_input = s.cache_read(packed_input, storage_scope, [packed_output])
cached_filt = s.cache_read(filt_packed, storage_scope, [packed_output])
cached_output = s.cache_write(packed_output, storage_scope)
# cache write schedule
batch, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[packed_output].op.axis
koo, koi = s[packed_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[packed_output].split(h_outer, factor=h_split_factor)
s[packed_output].reorder(batch, koo, hoo, koi, hoi, w_outer, h_inner, w_inner, k_inner)
s[cached_output].compute_at(s[packed_output], hoo)
# compute schedule
batch, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[cached_output].op.axis
_, _, reduce_c = s[cached_output].op.reduce_axis
rco, rci = s[cached_output].split(reduce_c, factor=block_c)
koo, koi = s[cached_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[cached_output].split(h_outer, factor=h_split_factor)
s[cached_output].reorder(
batch, koo, hoo, koi, hoi, w_outer, rco, h_inner, w_inner, k_inner, rci
)
s[cached_input].compute_at(s[cached_output], hoo)
s[cached_filt].compute_at(s[cached_output], hoo)
binds = {}
if storage_scope and storage_scope != "global":
with tvm.transform.PassContext():
input_buffer = tvm.tir.decl_buffer(
packed_shape, name="Xb", dtype=dtype, scope=storage_scope
)
output_buffer = tvm.tir.decl_buffer(
output_shape, name="Yb", dtype=dtype, scope=storage_scope
)
binds = {logical_input: input_buffer, packed_output: output_buffer}
return (s, [logical_input, filt_packed, packed_output], binds)
class BaseConv2d:
"""Base class for conv2d tests"""
# input
batch = tvm.testing.parameter(1)
in_size = tvm.testing.parameter(64)
in_channel = tvm.testing.parameter(64)
# conv2d
pad = tvm.testing.parameter(0)
stride = tvm.testing.parameter(1)
kernel_size = tvm.testing.parameter(1, 3)
out_channel = tvm.testing.parameter(128)
# schedule params
k_split_factor = tvm.testing.parameter(1, 2)
h_split_factor = tvm.testing.parameter(1, 2)
dtype = tvm.testing.parameter("float32")
class TestConv2dPackedFilter(BaseConv2d):
"""Conv2d packed filter test class"""
@tvm.testing.parametrize_targets("llvm")
@tvm.testing.skip_if_32bit(reason="Test known to be flaky on i386 machines")
def test_conv2d(
self,
batch,
in_size,
in_channel,
pad,
stride,
kernel_size,
out_channel,
k_split_factor,
h_split_factor,
dtype,
target,
):
"""conv2d test"""
# TODO: no support for dilation
dilation = 1
shape_input = [batch, in_size, in_size, in_channel]
shape_filter_oihw = [out_channel, in_channel, kernel_size, kernel_size]
shape_filter_oihw8i32o4i = get_packed_filter_shape(shape_filter_oihw)
inputs = [
np.random.uniform(0, 255, size=shape_input).astype(dtype),
np.random.uniform(0, 255, size=shape_filter_oihw8i32o4i).astype(dtype),
]
np_filter = (
inputs[1]
.transpose(0, 5, 1, 4, 6, 2, 3)
.reshape(shape_filter_oihw)
.transpose(2, 3, 1, 0)
)
ref_output = testing.conv2d_nhwc_python(inputs[0], np_filter, stride, pad)
output = build_and_run(
inputs,
conv2d_nhwc8h8w32c,
target,
target,
shape_input=shape_input,
pad=(pad, pad, pad, pad),
stride=(stride, stride),
dilation=(dilation, dilation),
shape_filter=shape_filter_oihw8i32o4i,
k_split_factor=k_split_factor,
h_split_factor=h_split_factor,
dtype=dtype,
)
conv2d_verify(output, ref_output, dtype)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" back-to-back conv2d Hexagon test for stripe scheduling """
import numpy as np
import tvm
import tvm.testing
from tvm import te, topi
from tvm.topi import testing
from ..infrastructure import (
build_and_run,
conv2d_compute,
conv2d_verify,
get_block_shape,
get_packed_filter_shape,
get_packed_shape,
)
def conv2dconv2d_nhwc8h8w32c(
shape_input,
pad1,
stride1,
dilation1,
shape_filter1,
pad2,
stride2,
dilation2,
shape_filter2,
k_split_factor,
h_split_factor,
dtype,
storage_scope="global",
):
"""
Conv2d -> Conv2d wherein the input activation is defined by its
logical NHWC layout. The filter is provided in its physical
packed layout (oihw8i32o4i). The input is padded and then packed
into its physical packed layout (nhwc8h8w32c). The resulting
computation is in the same physical packed layout (nhwc8h8w32c).
"""
# nhwc layout
logical_input = te.placeholder(shape_input, dtype=dtype, name="logical_input")
# oihw8i32o4i layout
filt_packed1 = te.placeholder(shape_filter1, dtype=dtype, name="packed_filter1")
filt_packed2 = te.placeholder(shape_filter2, dtype=dtype, name="packed_filter2")
block_h, block_w, block_c = get_block_shape()
# Calculate padded input
_, height, width, _ = shape_input
pad_h = (block_h - ((height + pad1[1]) % block_h)) % block_h
pad_w = (block_w - ((width + pad1[3]) % block_w)) % block_w
padded_input = topi.nn.pad(
logical_input,
[0, pad1[0], pad1[2], 0],
[0, pad_h, pad_w, 0],
pad_value=0,
name="padded_input",
)
# Calculate packed input
packed_shape = get_packed_shape(padded_input.shape)
packed_input = te.compute(
packed_shape,
lambda n, ho, wo, co, hi, wi, ci: padded_input[
n, ho * block_h + hi, wo * block_w + wi, co * block_c + ci
],
name="packed_input",
)
output_shape1, compute1 = conv2d_compute(packed_input, filt_packed1, pad1, stride1, dilation1)
temp_output = te.compute(output_shape1, compute1, name="temp_output")
output_shape2, compute2 = conv2d_compute(temp_output, filt_packed2, pad2, stride2, dilation2)
packed_output = te.compute(output_shape2, compute2, name="packed_output")
s = te.create_schedule(packed_output.op)
# Ensure the padding and array packing is performed inline
s[padded_input].compute_inline()
s[packed_input].compute_inline()
# cache reads and writes
packed_input_cached = s.cache_read(packed_input, storage_scope, [temp_output])
filt_packed1_cached = s.cache_read(filt_packed1, storage_scope, [temp_output])
filt_packed2_cached = s.cache_read(filt_packed2, storage_scope, [packed_output])
packed_output_cached = s.cache_write(packed_output, storage_scope)
# conv2d #1 schedule
n, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[temp_output].op.axis
_, _, reduce_channel = s[temp_output].op.reduce_axis
rco, rci = s[temp_output].split(reduce_channel, factor=block_c)
koo, koi = s[temp_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[temp_output].split(h_outer, factor=h_split_factor)
s[temp_output].reorder(n, koo, hoo, koi, hoi, w_outer, rco, h_inner, w_inner, k_inner, rci)
s[packed_input_cached].compute_at(s[temp_output], hoo)
s[filt_packed1_cached].compute_at(s[temp_output], hoo)
# cache write schedule
n, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[packed_output].op.axis
koo, koi = s[packed_output].split(k_outer, factor=k_split_factor)
hoo, hoi = s[packed_output].split(h_outer, factor=h_split_factor)
s[packed_output].reorder(n, koo, hoo, koi, hoi, w_outer, h_inner, w_inner, k_inner)
s[packed_output_cached].compute_at(s[packed_output], hoo)
# conv2d #2 schedule
n, h_outer, w_outer, k_outer, h_inner, w_inner, k_inner = s[packed_output_cached].op.axis
_, _, reduce_channel = s[packed_output_cached].op.reduce_axis
rco, rci = s[packed_output_cached].split(reduce_channel, factor=block_c)
koo, koi = s[packed_output_cached].split(k_outer, factor=k_split_factor)
hoo, hoi = s[packed_output_cached].split(h_outer, factor=h_split_factor)
s[packed_output_cached].reorder(
n, koo, hoo, koi, hoi, w_outer, rco, h_inner, w_inner, k_inner, rci
)
s[temp_output].compute_at(s[packed_output_cached], hoo)
s[filt_packed2_cached].compute_at(s[packed_output_cached], hoo)
binds = {}
if storage_scope and storage_scope != "global":
with tvm.transform.PassContext():
input_buffer = tvm.tir.decl_buffer(
packed_shape, name="Xb", dtype=dtype, scope=storage_scope
)
output_buffer = tvm.tir.decl_buffer(
output_shape2, name="Yb", dtype=dtype, scope=storage_scope
)
binds = {logical_input: input_buffer, packed_output: output_buffer}
return (s, [logical_input, filt_packed1, filt_packed2, packed_output], binds)
class BaseConv2dConv2d:
"""Base class for conv2d-conv2d tests"""
# input
batch = tvm.testing.parameter(1)
in_size = tvm.testing.parameter(64)
in_channel = tvm.testing.parameter(128)
# conv2d #1
pad1 = tvm.testing.parameter(0)
stride1 = tvm.testing.parameter(1)
kernel_size1 = tvm.testing.parameter(1, 3)
out_channel1 = tvm.testing.parameter(128)
# conv2d #2
stride2 = tvm.testing.parameter(1)
kernel_size2 = tvm.testing.parameter(1, 3)
out_channel2 = tvm.testing.parameter(128)
# schedule params
k_split_factor = tvm.testing.parameter(1, 2)
h_split_factor = tvm.testing.parameter(1, 2)
dtype = tvm.testing.parameter("float32")
class TestConv2dConv2dPackedFilter(BaseConv2dConv2d):
"""Conv2d-Conv2d packed filter test class"""
@tvm.testing.parametrize_targets("llvm")
@tvm.testing.skip_if_32bit(reason="Test known to be flaky on i386 machines")
def test_conv2d(
self,
batch,
in_size,
in_channel,
pad1,
stride1,
kernel_size1,
out_channel1,
stride2,
kernel_size2,
out_channel2,
k_split_factor,
h_split_factor,
dtype,
target,
):
"""conv2d-conv2d test"""
# TODO: no support for padding in conv2d #2
pad2 = 0
# TODO: no support for dilation
dilation1 = 1
dilation2 = 1
shape_input = [batch, in_size, in_size, in_channel]
shape_filter1_oihw = [out_channel1, in_channel, kernel_size1, kernel_size1]
shape_filter1_oihw8i32o4i = get_packed_filter_shape(shape_filter1_oihw)
shape_filter2_oihw = [out_channel2, out_channel1, kernel_size2, kernel_size2]
shape_filter2_oihw8i32o4i = get_packed_filter_shape(shape_filter2_oihw)
inputs = [
np.random.uniform(0, 255, size=shape_input).astype(dtype),
np.random.uniform(0, 255, size=shape_filter1_oihw8i32o4i).astype(dtype),
np.random.uniform(0, 255, size=shape_filter2_oihw8i32o4i).astype(dtype),
]
np_filter1 = (
inputs[1]
.transpose(0, 5, 1, 4, 6, 2, 3)
.reshape(shape_filter1_oihw)
.transpose(2, 3, 1, 0)
)
np_filter2 = (
inputs[2]
.transpose(0, 5, 1, 4, 6, 2, 3)
.reshape(shape_filter2_oihw)
.transpose(2, 3, 1, 0)
)
temp_output = testing.conv2d_nhwc_python(inputs[0], np_filter1, stride1, pad1)
ref_output = testing.conv2d_nhwc_python(temp_output, np_filter2, stride2, pad2)
output = build_and_run(
inputs,
conv2dconv2d_nhwc8h8w32c,
target,
target,
shape_input=shape_input,
pad1=(pad1, pad1, pad1, pad1),
stride1=(stride1, stride1),
dilation1=(dilation1, dilation1),
shape_filter1=shape_filter1_oihw8i32o4i,
            pad2=(pad2, pad2, pad2, pad2),
stride2=(stride2, stride2),
dilation2=(dilation2, dilation2),
shape_filter2=shape_filter2_oihw8i32o4i,
k_split_factor=k_split_factor,
h_split_factor=h_split_factor,
dtype=dtype,
)
conv2d_verify(output, ref_output, dtype)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Hexagon testing infrastructure """
import numpy
import tvm
from tvm import te
def ceildiv(o, d):
assert o >= 0
assert d >= 0
return tvm.tir.floordiv(o + d - 1, d)
# defines inner block shape: 8h8w32c
def get_block_shape():
return 8, 8, 32
# defines inner filter block shape: 8i32o4i
def get_filter_block_shape():
return 8, 32, 4
# input: logical shape in nhwc layout
# output: physical packed shape in nhwc8h8w32c layout
def get_packed_shape(logical_shape_nhwc):
assert len(logical_shape_nhwc) == 4
physical_shape_nhwc8h8w32c = [logical_shape_nhwc[0]]
block_shape = get_block_shape()
off_h, off_w, off_c = block_shape
physical_shape_nhwc8h8w32c.append(ceildiv(logical_shape_nhwc[1], off_h))
physical_shape_nhwc8h8w32c.append(ceildiv(logical_shape_nhwc[2], off_w))
physical_shape_nhwc8h8w32c.append(ceildiv(logical_shape_nhwc[3], off_c))
physical_shape_nhwc8h8w32c.extend(block_shape)
return physical_shape_nhwc8h8w32c
# input: physical packed shape in nhwc8h8w32c layout
# output: logical shape in nhwc layout
def get_logical_shape(physical_shape_nhwc8h8w32c):
assert len(physical_shape_nhwc8h8w32c) == 7
logical_shape_nhwc = [physical_shape_nhwc8h8w32c[0]]
logical_shape_nhwc.append(physical_shape_nhwc8h8w32c[1] * physical_shape_nhwc8h8w32c[4])
logical_shape_nhwc.append(physical_shape_nhwc8h8w32c[2] * physical_shape_nhwc8h8w32c[5])
logical_shape_nhwc.append(physical_shape_nhwc8h8w32c[3] * physical_shape_nhwc8h8w32c[6])
return logical_shape_nhwc
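# Illustrative sketch of the logical <-> packed mapping above, assuming the 8h8w32c block
# shape: a logical (1, 64, 64, 64) NHWC tensor has 64/8 = 8 height blocks, 64/8 = 8 width
# blocks and 64/32 = 2 channel blocks, so it packs to (1, 8, 8, 2, 8, 8, 32). For constant
# inputs the block counts come back as TVM IntImm expressions (ceildiv uses tir.floordiv).
def _example_packed_shape_roundtrip():
    packed = get_packed_shape([1, 64, 64, 64])  # -> [1, 8, 8, 2, 8, 8, 32]
    logical = get_logical_shape(packed)  # -> [1, 64, 64, 64] (as TVM expressions)
    return packed, logical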
def get_packed_filter_shape(logical_shape_oihw):
"""return packed filter shape
Parameters
----------
logical_shape_oihw :
logical shape in oihw layout
Returns
-------
physical_shape_oihw8i32o4i :
        physical packed shape in oihw8i32o4i layout
"""
assert len(logical_shape_oihw) == 4
filter_block_shape = get_filter_block_shape()
filter_Cio, filter_Ki, filter_Cii = filter_block_shape
filter_Ci = filter_Cio * filter_Cii
physical_shape_oihw8i32o4i = []
physical_shape_oihw8i32o4i.append(int(ceildiv(logical_shape_oihw[0], filter_Ki)))
physical_shape_oihw8i32o4i.append(int(ceildiv(logical_shape_oihw[1], filter_Ci)))
physical_shape_oihw8i32o4i.append(logical_shape_oihw[2])
physical_shape_oihw8i32o4i.append(logical_shape_oihw[3])
physical_shape_oihw8i32o4i.extend(filter_block_shape)
return physical_shape_oihw8i32o4i
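# Illustrative sketch of the filter packing above, assuming the 8i32o4i filter block: a
# logical (128, 64, 3, 3) OIHW filter has 128/32 = 4 output-channel blocks and
# ceil(64/32) = 2 input-channel blocks, so it packs to (4, 2, 3, 3, 8, 32, 4).
def _example_packed_filter_shape():
    return get_packed_filter_shape([128, 64, 3, 3])  # -> [4, 2, 3, 3, 8, 32, 4]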
def build_and_run(inputs, func, target: str, target_host: str, *args, **kwargs):
"""build and run the function func"""
schedule, placeholders, binds = func(*args, **kwargs)
func = tvm.build(
schedule, placeholders, target=tvm.target.Target(target, host=target_host), binds=binds
)
dev = tvm.device(target)
tensors = []
for tensor in inputs:
tensors.append(tvm.nd.array(tensor, dev))
tensors.append(
tvm.nd.array(
numpy.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype),
dev,
)
)
func(*tensors)
return tensors[-1].asnumpy()
def get_conv2d_nhwc_shape(shape_nhwc, kernel_size, strides, padding, dilation, out_channels):
assert len(shape_nhwc) == 4
kernel = []
kernel.append((kernel_size[0] - 1) * dilation[0] + 1)
kernel.append((kernel_size[1] - 1) * dilation[1] + 1)
return (
shape_nhwc[0],
(shape_nhwc[1] - kernel[0] + padding[0] + padding[1]) // strides[0] + 1,
(shape_nhwc[2] - kernel[1] + padding[2] + padding[3]) // strides[1] + 1,
out_channels,
)
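# Illustrative sketch of the output-shape arithmetic above: a (1, 64, 64, 64) NHWC input with
# a 3x3 kernel, stride 1, no padding, no dilation and 128 output channels gives
# (64 - 3 + 0 + 0) // 1 + 1 = 62 in each spatial dimension.
def _example_conv2d_nhwc_shape():
    return get_conv2d_nhwc_shape(
        (1, 64, 64, 64),
        kernel_size=(3, 3),
        strides=(1, 1),
        padding=(0, 0, 0, 0),
        dilation=(1, 1),
        out_channels=128,
    )  # -> (1, 62, 62, 128)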
def conv2d_verify(output, ref_output, dtype):
"""transpose and reshape output and compare with ref_output"""
# nhwc8h8w32c -> nhwc
logical_output_shape = get_logical_shape(output.shape)
output = output.transpose(0, 1, 4, 2, 5, 3, 6).reshape(logical_output_shape)
# slice output to match ref_output shape
# e.g. 8x8 spatial 3x3 filter = 6x6 ref output
# but still 8x8 output given the blocked layout
output = output[
0 : ref_output.shape[0] : 1,
0 : ref_output.shape[1] : 1,
0 : ref_output.shape[2] : 1,
0 : ref_output.shape[3] : 1,
]
if "int" in dtype:
tol = {"atol": 0, "rtol": 0}
elif dtype == "float32":
tol = {"rtol": 1e-4, "atol": 2e-4}
tvm.testing.assert_allclose(output, ref_output, **tol)
def conv2d_compute(X, filt, pad, stride, dilation):
"""Define conv2d compute"""
block_shape = get_block_shape()
block_H, block_W, block_C = block_shape
filter_c_io, _, filter_c_ii = get_filter_block_shape()
filter_c_i = filter_c_io * filter_c_ii
shape_filter = filt.shape
kernel_size = tuple(shape_filter[2:4])
out_channels = shape_filter[0] * shape_filter[5]
logical_input_shape = get_logical_shape(X.shape)
logical_output_shape = get_conv2d_nhwc_shape(
logical_input_shape,
kernel_size,
stride,
pad,
dilation,
out_channels,
)
output_shape = get_packed_shape(logical_output_shape)
rh = te.reduce_axis((0, kernel_size[0]), name="rh")
rw = te.reduce_axis((0, kernel_size[1]), name="rw")
rc = te.reduce_axis((0, logical_input_shape[3]), name="rc")
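    # Index sketch for the packed compute below: the output coordinate h = ho * block_H + hi
    # maps to the input row h * stride[0] + rh, which is split into a height-block id and an
    # in-block offset (likewise for width). The channel reduction rc is split into a
    # channel-block id/offset for the nhwc8h8w32c input and into (rco, rcio, rcii) for the
    # oihw8i32o4i filter.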
def compute(n, ho, wo, ko, hi, wi, ki):
h = ho * block_H + hi
h_contig = h * stride[0] + rh
h_block_id = h_contig // block_H
h_block_offset = h_contig % block_H
w = wo * block_W + wi
w_contig = w * stride[1] + rw
w_block_id = w_contig // block_W
w_block_offset = w_contig % block_W
c_block_id = rc // block_C
c_block_offset = rc % block_C
rco = rc // filter_c_i
rcio = (rc % filter_c_i) // filter_c_ii
rcii = rc % filter_c_ii
return te.sum(
X[
n,
h_block_id,
w_block_id,
c_block_id,
h_block_offset,
w_block_offset,
c_block_offset,
]
* filt[ko, rco, rh, rw, rcio, ki, rcii],
axis=[rh, rw, rc],
)
return output_shape, compute
def transform_numpy(arr_np, current_layout: str, new_layout: str):
"""Reshape and transpose numpy array according to the specified layout"""
if current_layout == "nhwc":
if new_layout == "nhwc":
return arr_np
if new_layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-1d"]:
n, h, w, c = arr_np.shape
return arr_np.reshape([n, h // 8, 8, w // 4, 2, 2, c // 32, 32]).transpose(
0, 1, 3, 6, 2, 4, 7, 5
)
if new_layout in ["nhwc-4h2w32c2w-2d"]:
n, h, w, c = arr_np.shape
return arr_np.reshape([n, h // 4, 4, w // 4, 2, 2, c // 32, 32]).transpose(
0, 1, 3, 6, 2, 4, 7, 5
)
if new_layout in ["n11c-1024c-2d", "n11c-1024c-1d"]:
n, h, w, c = arr_np.shape
assert h == 1 and w == 1, "The size of h and w must be 1"
return arr_np.reshape([n, 1, 1, c // 1024, 1024])
if new_layout == "nc-1024-2d":
n, c = arr_np.shape
return arr_np.reshape([n, c // 1024, 1024])
if new_layout == "nhwc-1024c-2d":
N, H, W, C = arr_np.shape
return arr_np.reshape([N, H, W, C // 1024, 1024])
if new_layout == "nc-2048-2d":
N, C = arr_np.shape
return arr_np.reshape([N, C // 2048, 2048])
if new_layout == "nhwc-2048c-2d":
N, H, W, C = arr_np.shape
return arr_np.reshape([N, H, W, C // 2048, 2048])
if new_layout == "nhwc-8h8w32c-2d":
n, h, w, c = arr_np.shape
return arr_np.reshape([n, h // 8, 8, w // 8, 8, c // 32, 32]).transpose(
0, 1, 3, 5, 2, 4, 6
)
if new_layout == "n11c-2048c-2d":
n, h, w, c = arr_np.shape
assert h == 1 and w == 1, "The size of h and w must be 1"
return arr_np.reshape([n, h, w, c // 2048, 2048])
raise RuntimeError(f"Unexpected new_layout '{new_layout}'")
if current_layout == "nc":
n, c = arr_np.shape
if new_layout in ["nc-1024c-2d"]:
return arr_np.reshape([n, c // 1024, 1024])
if new_layout in ["nc-512c-2d"]:
return arr_np.reshape([n, c // 512, 512])
if new_layout in ["nc-2048c-2d"]:
return arr_np.reshape([n, c // 2048, 2048])
raise RuntimeError(f"Unexpected new_layout '{new_layout}'")
if current_layout == "nhw":
if new_layout in ["nhw-32h16w-2d"]:
n, h, w = arr_np.shape
return arr_np.reshape([n, h // 32, 32, w // 16, 16]).transpose(0, 1, 3, 2, 4)
raise RuntimeError(f"Unexpected new_layout '{new_layout}'")
raise RuntimeError(f"Unexpected current_layout '{current_layout}'")
def quantize_np(arr_np: numpy.ndarray, dtype: str):
"""
Returns quantized array along with scale and zero-point
Parameters
----------
arr_np: numpy.ndarray
Input numpy array to be quantized
dtype: str
dtype of the quantized array: "uint8", "int8", etc
Returns
-------
quant_np: numpy.ndarray
Quantized numpy array
scale: float
Scale
zero_point: int
Value corresponding to float 0
"""
if dtype == "uint8":
qmax = 255
qmin = 0
elif dtype == "int8":
qmax = 127
qmin = -128
else:
raise RuntimeError(f"Unsupported quantized data type '{dtype}'")
fmin = numpy.amin(arr_np)
fmax = numpy.amax(arr_np)
# Include floating-point zero in the range
if fmax < 0:
fmax = 0.0
elif fmin > 0:
fmin = 0.0
scale = (fmax - fmin) / (qmax - qmin)
zero_point = numpy.rint((fmax * qmin - fmin * qmax) / (fmax - fmin)).astype("int32")
quant_np = (arr_np / scale + zero_point).astype(dtype)
return quant_np, scale, zero_point
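# Illustrative sketch of the affine quantization above: for data spanning roughly [-1, 1]
# quantized to "int8", the scale is (fmax - fmin) / 255 and the zero point is the integer
# that float 0.0 maps to within [-128, 127]. Dequantizing with
# (quant.astype("float32") - zero_point) * scale recovers the input to within about one
# quantization step.
def _example_quantize_np():
    arr = numpy.linspace(-1.0, 1.0, num=16).astype("float32")
    quant, scale, zero_point = quantize_np(arr, "int8")
    return quant, scale, zero_point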
def get_hexagon_target(cpu_ver: str) -> tvm.target.Target:
"""Creates a Hexagon target"""
target = tvm.target.hexagon(cpu_ver)
return tvm.target.Target(target, host=target)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/metaschedule_e2e/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Demonstration of end-to-end MetaSchedule tuning."""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon MetaSchedule test helper functions."""
import torch
from torchvision.models import resnet
from torchvision.models.quantization import resnet as qresnet
import tvm
from tvm import relay
def export_resnet50_fp16():
"""Export Resnet50 FP16."""
model = resnet.resnet50(pretrained=True).eval()
pt_inp = torch.randn(1, 3, 224, 224)
script_module = torch.jit.trace(model, pt_inp).eval()
input_name = "image"
input_shapes = [(input_name, pt_inp.shape)]
mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
mod = relay.transform.ToMixedPrecision("float16")(mod)
with open("resnet50_fp16.json", "w") as file:
file.write(tvm.ir.save_json(mod))
with open("resnet50_fp16.params", "wb") as file:
file.write(relay.save_param_dict(params))
def export_resnet50_int8():
"""Export Resnet50 INT8."""
def quantize_model(model, inp):
model.fuse_model()
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
model(inp)
torch.quantization.convert(model, inplace=True)
model = qresnet.resnet50(pretrained=True).eval()
pt_inp = torch.randn(1, 3, 224, 224)
quantize_model(model, pt_inp)
script_module = torch.jit.trace(model, pt_inp).eval()
input_name = "image"
input_shapes = [(input_name, pt_inp.shape)]
mod, params = relay.frontend.from_pytorch(
script_module, input_shapes, keep_quantized_weight=True
)
with open("resnet50_int8.json", "w") as file:
file.write(tvm.ir.save_json(mod))
with open("resnet50_int8.params", "wb") as file:
file.write(relay.save_param_dict(params))
if __name__ == "__main__":
export_resnet50_fp16()
export_resnet50_int8()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Resnet50 float16 with MetaSchedule"""
import os
import tempfile
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm import meta_schedule as ms
from tvm.contrib.hexagon.meta_schedule import get_hexagon_local_builder, get_hexagon_rpc_runner
from tvm.relay.backend import Executor
from ..infrastructure import get_hexagon_target
def convert_conv2d_layout(mod, desired_layouts):
with tvm.transform.PassContext(opt_level=3):
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
return seq(mod)
@pytest.mark.skip("End-to-end tuning is skipped on CI.")
@tvm.testing.requires_hexagon
def test_resnet50(hexagon_launcher):
"""Test Resnet50."""
model_json = "resnet50_fp16.json"
target_llvm = tvm.target.Target("llvm")
target_hexagon = get_hexagon_target("v69")
model_params = "resnet50_fp16.params"
if not os.path.exists(model_json):
pytest.skip(msg="Run python export_models.py first.")
with open(model_json, "r") as file:
mod = tvm.ir.load_json(file.read())
with open(model_params, "rb") as file:
params = relay.load_param_dict(file.read())
mod = convert_conv2d_layout(mod, {"nn.conv2d": ["NHWC", "HWIO"]})
inp = np.random.randn(1, 3, 224, 224).astype("float32")
input_name = "image"
executor = Executor("graph", {"link-params": True})
# This line is necessary for link-params to take effect during
# task extraction and relay.build(...).
mod = mod.with_attr("executor", executor)
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=target_hexagon,
params=params,
work_dir=work_dir,
# for faster tuning
max_trials_global=20000,
max_trials_per_task=8,
num_trials_per_iter=8,
strategy="replay-trace",
# max_trials_global=20000,
# num_trials_per_iter=32,
# max_trials_per_task=128,
# strategy="evolutionary",
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=20),
# Without this, the same workloads with different constant weights
# are treated as distinct tuning tasks.
module_equality="ignore-ndarray",
)
hexagon_lowered = ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=target_hexagon,
params=params,
)
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
mod,
tvm.target.Target(target_llvm, host=target_llvm),
params=params,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(input_name, inp.copy())
llvm_graph_mod.run()
ref_result = llvm_graph_mod.get_output(0).numpy()
with hexagon_launcher.create_session() as session:
graph_mod = session.get_executor_from_factory(hexagon_lowered)
graph_mod.set_input(input_name, inp.copy())
graph_mod.run()
hexagon_output = graph_mod.get_output(0).numpy()
# Example output: max and mean abs difference with the reference: 0.1406 0.0126
print(
"max and mean abs difference with the reference:",
np.max(np.abs(ref_result - hexagon_output)),
np.mean(np.abs(ref_result - hexagon_output)),
)
tvm.testing.assert_allclose(ref_result, hexagon_output, atol=2e-1)
time_ms = graph_mod.benchmark(session.device, number=1, repeat=20).mean * 1e3
print("time elapsed: ", time_ms)
debug_ex = session.get_graph_debug_executor(
hexagon_lowered.get_graph_json(), hexagon_lowered.lib
)
print(debug_ex.profile(input_name=inp.copy()))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Resnet50 int8 with MetaSchedule"""
import os
import tempfile
from typing import Optional
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm._ffi import register_func
from tvm.contrib.hexagon.meta_schedule import (
get_hexagon_local_builder,
get_hexagon_rpc_runner,
)
from tvm.meta_schedule import postproc, schedule_rule
from tvm.tir.schedule import BlockRV, Schedule
from tvm.tir.schedule.analysis import has_block
from tvm.tir.tensor_intrin.hexagon import VRMPY_u8i8i32_INTRIN, VRMPY_u8u8i32_INTRIN
from ..infrastructure import get_hexagon_target
MODEL_JSON = "resnet50_int8.json"
EXECUTOR = relay.backend.Executor("graph", {"link-params": True})
TARGET_LLVM = tvm.target.Target("llvm")
TARGET_HEXAGON = get_hexagon_target("v68")
MODEL_PARAMS = "resnet50_int8.params"
def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher):
"""Tune VRMPY with auto tensorization."""
sch_rules = [
schedule_rule.ApplyCustomRule(),
schedule_rule.AutoInline(
into_producer=False,
into_consumer=True,
inline_const_tensor=True,
disallow_if_then_else=True,
require_injective=True,
require_ordered=True,
disallow_op=["tir.exp"],
),
# VRMPY_u8i8i32_INTRIN is used for conv2d. See topi/hexagon/conv2d_alter_op.py
# for why we use different intrins for conv2d and dense.
schedule_rule.MultiLevelTilingWithIntrin(
VRMPY_u8i8i32_INTRIN,
structure="SRSRS",
tile_binds=None,
max_innermost_factor=64,
vector_load_lens=None,
reuse_read=None,
reuse_write=schedule_rule.ReuseType(
req="may",
levels=[1, 2],
scope="global",
),
),
# VRMPY_u8u8i32_INTRIN is used for dense
schedule_rule.MultiLevelTilingWithIntrin(
VRMPY_u8u8i32_INTRIN,
structure="SRSRS",
tile_binds=None,
max_innermost_factor=64,
vector_load_lens=None,
reuse_read=None,
reuse_write=schedule_rule.ReuseType(
req="may",
levels=[1, 2],
scope="global",
),
),
schedule_rule.ParallelizeVectorizeUnroll(
max_jobs_per_core=16,
max_vectorize_extent=128,
unroll_max_steps=[0, 16, 64, 512],
unroll_explicit=True,
),
]
postprocs = [
postproc.RewriteParallelVectorizeUnroll(),
postproc.RewriteReductionBlock(),
postproc.RewriteTensorize(vectorize_init_loop=True),
]
# This line is necessary for link-params to take effect during
# task extraction and relay.build(...).
mod = mod.with_attr("executor", EXECUTOR)
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=TARGET_HEXAGON,
params=params,
work_dir=work_dir,
# for faster tuning
max_trials_global=20000,
max_trials_per_task=8,
num_trials_per_iter=8,
strategy="replay-trace",
# max_trials_global=20000,
# num_trials_per_iter=32,
# max_trials_per_task=128,
# strategy="evolutionary",
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=20),
space=ms.space_generator.PostOrderApply(
sch_rules=sch_rules,
postprocs=postprocs,
mutator_probs={},
),
# This enables anchor-block tuning, where different subgraphs
# with the same anchor block workload will be identified as equal.
# It reduces the number of conv2d tuning tasks in the int8 resnet50 model
# from 36 to 23, with negligible performance difference.
module_equality="anchor-block",
)
return ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=TARGET_HEXAGON,
params=params,
)
@pytest.mark.skip("End-to-end tuning is skipped on CI.")
@tvm.testing.requires_hexagon
def test_resnet50(hexagon_launcher):
"""Test Resnet50."""
if not os.path.exists(MODEL_JSON):
pytest.skip(msg="Run python export_models.py first.")
with open(MODEL_JSON, "r") as file:
mod = tvm.ir.load_json(file.read())
with open(MODEL_PARAMS, "rb") as file:
params = relay.load_param_dict(file.read())
inp = np.random.randn(1, 3, 224, 224).astype("float32")
input_name = "image"
do_tune = True
if do_tune:
hexagon_lowered = tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher)
else:
with tvm.transform.PassContext(opt_level=3):
hexagon_lowered = relay.build(
mod,
tvm.target.Target(TARGET_HEXAGON, host=TARGET_HEXAGON),
params=params,
executor=EXECUTOR,
)
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
mod,
tvm.target.Target(TARGET_LLVM, host=TARGET_LLVM),
params=params,
)
with hexagon_launcher.create_session() as session:
graph_mod = session.get_executor_from_factory(hexagon_lowered)
graph_mod.set_input(input_name, inp.copy())
graph_mod.run()
hexagon_output = graph_mod.get_output(0).numpy()
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(input_name, inp.copy())
llvm_graph_mod.run()
ref_result = llvm_graph_mod.get_output(0).numpy()
np.testing.assert_allclose(ref_result, hexagon_output, atol=1e-4, rtol=1e-5)
time_ms = graph_mod.benchmark(session.device, number=1, repeat=20).mean * 1e3
print("time elapsed: ", time_ms)
debug_ex = session.get_graph_debug_executor(
hexagon_lowered.get_graph_json(), hexagon_lowered.lib
)
        print(debug_ex.profile(**{input_name: inp.copy()}))
def _schedule_packed_8x8x32_conv2d():
"""Manually schedule a conv2d block, created from TE compute op via CreatePrimFunc,
using 8x8x32 packed layout.
"""
def schedule_fn(sch, conv2d_block: Optional[BlockRV] = None) -> bool:
if conv2d_block is None:
if has_block(sch, "conv2d_NCHWc_int8"):
conv2d_block = sch.get_block("conv2d_NCHWc_int8")
else:
return False
assert "conv2d_NCHWc_int8" in sch.get(conv2d_block).annotations["schedule_rule"]
# Apply scheduling
post_blocks = sch.get_consumers(conv2d_block)
if len(post_blocks) > 0:
# Fuse all intermediate post ops into the last op.
# This is equivalent to the traverse_inline function used in TE schedules.
while True:
next_post_blocks = []
for post_block in post_blocks:
next_consumers = sch.get_consumers(post_block)
if len(next_consumers) > 0:
sch.compute_inline(post_block)
next_post_blocks += next_consumers
if len(next_post_blocks) == 0:
assert len(post_blocks) == 1
outer_block = post_blocks[0]
break
post_blocks = next_post_blocks
else:
outer_block = conv2d_block
# Move the conv2d mma into the injective post mma compute block
if outer_block != conv2d_block:
loops = sch.get_loops(outer_block)
# TODO(csullivan): Currently does all post conv2d mma steps
# directly after accumulation for one spatial pixel. May
# be desirable to do this with coarser spatial granularity
sch.compute_at(conv2d_block, loops[4])
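        # Index map from the logical NCHW32c layout to the packed nchw-8h8w32c layout:
        # height and width are tiled into 8x8 blocks while the 32-channel inner dim stays innermost.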
def index_map_nchw32c_nchw8h8w32c(n_batch, channel, height, width, channel_32):
return [n_batch, channel, height // 8, width // 8, height % 8, width % 8, channel_32]
# Add cache for input and output activation layout transform,
# note that weight is already in correct layout
input_cache = sch.cache_read(conv2d_block, 0, "global") # pylint: disable=unused-variable
output_cache = sch.cache_write(outer_block, 0, "global") # pylint: disable=unused-variable
# Transform the layout of the input
sch.transform_layout(
conv2d_block, ("read", 0), index_map=index_map_nchw32c_nchw8h8w32c, pad_value=0
)
# Transform the layout of the int32 accumulator
sch.transform_layout(
conv2d_block, ("write", 0), index_map=index_map_nchw32c_nchw8h8w32c, pad_value=0
)
# Transform the layout of the output
sch.transform_layout(
outer_block, ("write", 0), index_map=index_map_nchw32c_nchw8h8w32c, pad_value=0
)
return True
return schedule_fn
def tune_packed_8x8x32_template(mod, params, hexagon_launcher):
"""Generate packed 8*8*32 template."""
def schedule_rule_conv2d_packed_8x8x32(sch: Schedule, conv2d_block: BlockRV):
_schedule_packed_8x8x32_conv2d()(sch, conv2d_block)
return [sch]
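    # The custom schedule is registered under this name so that Meta Schedule's
    # ApplyCustomRule can dispatch conv2d_NCHWc_int8 blocks to it on Hexagon targets.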
register_func("meta_schedule.conv2d_NCHWc_int8.hexagon", schedule_rule_conv2d_packed_8x8x32)
def schedule_conv2d_for_tune(sch: Schedule):
_schedule_packed_8x8x32_conv2d()(sch)
# This line is necessary for link-params to take effect during
# task extraction and relay.build(...).
mod = mod.with_attr("executor", EXECUTOR)
with tempfile.TemporaryDirectory() as work_dir:
database = ms.relay_integration.tune_relay(
mod=mod,
target=TARGET_HEXAGON,
params=params,
work_dir=work_dir,
max_trials_global=20000,
max_trials_per_task=1,
num_trials_per_iter=1,
strategy="replay-trace",
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=20),
# Apply MS auto scheduling rules for all blocks, but utilize
# the custom block scheduling strategy registered above for
# blocks annotated as `schedule_rule:meta_schedule.conv2d_NCHWc_int8`
# space=ms.space_generator.PostOrderApply(
# f_block_filter=None,
# sch_rules="from-target",
# postprocs=[],
# mutator_probs="from-target",
# ),
# Constrain search space to only be the single
# schedule provided for all blocks. No auto
# scheduling will be possible.
space=ms.space_generator.ScheduleFn(
schedule_conv2d_for_tune,
sch_rules=[],
postprocs=[],
mutator_probs={},
),
# Without this, the same workloads with different constant weights
# are treated as distinct tuning tasks.
module_equality="ignore-ndarray",
)
return ms.relay_integration.compile_relay(
database=database,
mod=mod,
target=TARGET_HEXAGON,
params=params,
)
@pytest.mark.skip("End-to-end tuning is skipped on CI.")
@tvm.testing.requires_hexagon
def test_packed_8x8x32_resnet50(hexagon_launcher):
"""Test packed 8*8*32 Resnet50"""
if not os.path.exists(MODEL_JSON):
pytest.skip(msg="Run python export_models.py first.")
with open(MODEL_JSON, "r") as file:
mod = tvm.ir.load_json(file.read())
with open(MODEL_PARAMS, "rb") as file:
params = relay.load_param_dict(file.read())
inp = np.random.randn(1, 3, 224, 224).astype("float32")
input_name = "image"
do_tune = True
if do_tune:
hexagon_lowered = tune_packed_8x8x32_template(mod, params, hexagon_launcher)
else:
with tvm.transform.PassContext(opt_level=3):
hexagon_lowered = relay.build(
mod,
tvm.target.Target(TARGET_HEXAGON, host=TARGET_HEXAGON),
params=params,
executor=EXECUTOR,
)
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
mod,
tvm.target.Target(TARGET_LLVM, host=TARGET_LLVM),
params=params,
)
    with hexagon_launcher.create_session() as session:
graph_mod = session.get_executor_from_factory(hexagon_lowered)
graph_mod.set_input(input_name, inp.copy())
graph_mod.run()
hexagon_output = graph_mod.get_output(0).numpy()
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(input_name, inp.copy())
llvm_graph_mod.run()
ref_result = llvm_graph_mod.get_output(0).numpy()
np.testing.assert_allclose(ref_result, hexagon_output, atol=1e-4, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/pytest_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon pytest utility functions """
from typing import List, Optional, Union
import collections
import numpy as np
def get_test_id(*test_params, test_param_descs: List[Optional[str]] = None) -> str:
"""
An opinionated alternative to pytest's default algorithm for generating a
test's ID string. Intended to make it easier for human readers to
interpret the test IDs.
'test_params': The sequence of pytest parameter values supplied to some unit
test.
'test_param_descs': An (optional) means to provide additional text for some/all of the
       parameters in 'test_params'.
If provided, then len(test_params) must equal len(test_param_descs).
       Each element of test_param_descs that is a non-empty string will be used
in some sensible way in this function's returned string.
"""
assert len(test_params) > 0
if test_param_descs is None:
test_param_descs = [None] * len(test_params)
else:
assert len(test_param_descs) == len(test_params)
def get_single_param_chunk(param_val, param_desc: Optional[str]):
if isinstance(param_val, list):
# Like str(list), but avoid the whitespace padding.
val_str = "[" + ",".join(str(x) for x in param_val) + "]"
need_prefix_separator = False
elif isinstance(param_val, bool):
if param_val:
val_str = "T"
else:
val_str = "F"
need_prefix_separator = True
elif isinstance(param_val, TensorContentConstant):
val_str = f"const[{param_val.elem_value}]"
need_prefix_separator = True
elif isinstance(param_val, TensorContentDtypeMin):
val_str = "min"
need_prefix_separator = True
elif isinstance(param_val, TensorContentDtypeMax):
val_str = "max"
need_prefix_separator = True
elif isinstance(param_val, TensorContentRandom):
val_str = "random"
need_prefix_separator = True
elif isinstance(param_val, TensorContentSequentialCOrder):
val_str = f"seqC[start:{param_val.start_value},inc:{param_val.increment}]"
need_prefix_separator = True
else:
val_str = str(param_val)
need_prefix_separator = True
if param_desc and need_prefix_separator:
return f"{param_desc}:{val_str}"
elif param_desc and not need_prefix_separator:
return f"{param_desc}{val_str}"
else:
return val_str
chunks = [
get_single_param_chunk(param_val, param_desc)
for param_val, param_desc in zip(test_params, test_param_descs)
]
return "-".join(chunks)
def get_multitest_ids(
multitest_params_list: List[List], param_descs: Optional[List[Optional[str]]]
) -> List[str]:
"""
A convenience function for classes that use both 'tvm.testing.parameters' and 'get_test_id'.
    This function provides a workaround for a specific quirk in Python, where a list comprehension
    can't necessarily access the value of another class variable, as discussed here:
https://stackoverflow.com/q/13905741
"""
return [
get_test_id(*single_test_param_list, test_param_descs=param_descs)
for single_test_param_list in multitest_params_list
]
def get_numpy_dtype_info(dtype) -> Union[np.finfo, np.iinfo]:
"""
Return an appropriate 'np.iinfo' or 'np.finfo' object corresponding to
the specified Numpy dtype.
'dtype' must be a value that 'numpy.dtype(...)' can handle.
"""
np_dtype = np.dtype(dtype)
kind = np_dtype.kind
if kind == "f":
return np.finfo(np_dtype)
elif kind == "i":
return np.iinfo(np_dtype)
else:
raise TypeError(f"dtype ({dtype}) must indicate some floating-point or integral data type")
TensorContentConstant = collections.namedtuple("TensorContentConstant", ["elem_value"])
TensorContentSequentialCOrder = collections.namedtuple(
"TensorContentSequentialCOrder", ["start_value", "increment"]
)
TensorContentRandom = collections.namedtuple("TensorContentRandom", [])
TensorContentDtypeMin = collections.namedtuple("TensorContentDtypeMin", [])
TensorContentDtypeMax = collections.namedtuple("TensorContentDtypeMax", [])
def create_populated_numpy_ndarray(
input_shape: Union[list, tuple], dtype: str, input_tensor_populator
) -> np.ndarray:
"""
Create a numpy tensor with the specified shape, dtype, and content.
"""
itp = input_tensor_populator # just for brevity
if isinstance(itp, TensorContentConstant):
return np.full(tuple(input_shape), itp.elem_value, dtype=dtype)
elif isinstance(itp, TensorContentDtypeMin):
info = get_numpy_dtype_info(dtype)
return np.full(tuple(input_shape), info.min, dtype=dtype)
elif isinstance(itp, TensorContentDtypeMax):
info = get_numpy_dtype_info(dtype)
return np.full(tuple(input_shape), info.max, dtype=dtype)
elif isinstance(itp, TensorContentRandom):
return np.random.random(input_shape).astype(dtype)
elif isinstance(itp, TensorContentSequentialCOrder):
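        # Fill the tensor in C (row-major) iteration order with an arithmetic sequence
        # starting at start_value and stepping by increment.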
a = np.empty(tuple(input_shape), dtype)
with np.nditer(a, op_flags=["writeonly"], order="C") as iterator:
next_elem_val = itp.start_value
for elem in iterator:
elem[...] = next_elem_val
next_elem_val += itp.increment
return a
else:
raise ValueError(f"Unexpected input_tensor_populator type: {type(itp)}")
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_2d_physical_buffers.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test 2d physical buffers """
import contextlib
import numpy as np
import pytest
import tvm
# Needed to register the link_shared packedfunc.
import tvm.contrib.hexagon
import tvm.testing
from tvm import te
from tvm.contrib.hexagon.pytest_plugin import requires_hexagon_toolchain
from tvm.tir.stmt_functor import post_order_visit
from tvm.contrib.hexagon import allocate_hexagon_array
from .infrastructure import get_hexagon_target
# Disabling invalid-name as pylint assumes global variables are constants and
# expects them to be all upper-case. Since these are used as
# tvm.testing.parameters, if they are made upper-case, the functions which take
# them as arguments would also need to be upper-case, and pylint would complain
# there as well
# pylint: disable=invalid-name
schedule_type = tvm.testing.parameter("TE", "TIR")
dtype = tvm.testing.parameter("int8")
batch_size = tvm.testing.parameter(
16,
2,
)
input_channels = tvm.testing.parameter(
32,
)
input_image_shape = tvm.testing.parameter(
by_dict={
"8x8": (8, 8),
"32x32": (32, 32),
}
)
input_layout = tvm.testing.parameter(
"nhwc",
"nchw-8h8w32c-1d",
)
output_layout = tvm.testing.parameter(
"nhwc",
"nchw-8h8w32c-1d",
)
working_layout, working_scope = tvm.testing.parameters(
("nhwc", "global"),
("nhwc", "global.vtcm"),
("nchw-8h8w32c-1d", "global"),
("nchw-8h8w32c-1d", "global.vtcm"),
# 2-d memory may only occur in vtcm memory
("nchw-8h8w32c-2d", "global.vtcm"),
)
# pylint: enable=invalid-name
@tvm.testing.fixture
def target_host():
"""Return tvm target.Target with host attached"""
return get_hexagon_target("v68")
# Disabling redefined-outer-name for the whole file as there isn't any easy
# solution yet to refactor tvm.testing.fixture fixtures that avoid redefining
# outer variable names
# pylint: disable=redefined-outer-name
@tvm.testing.fixture
def input_shape(batch_size, input_channels, input_image_shape):
return [batch_size, *input_image_shape, input_channels]
def transform_shape(shape, layout):
if layout == "nhwc":
return shape
if layout in ["nchw-8h8w32c-1d", "nchw-8h8w32c-2d"]:
batch, height, width, channel = shape
return [batch, (channel + 31) // 32, (height + 7) // 8, (width + 7) // 8, 8, 8, 32]
raise RuntimeError(f"Unexpected layout '{layout}'")
def transform_numpy(arr_np, layout):
if layout == "nhwc":
return arr_np
if layout in ["nchw-8h8w32c-1d", "nchw-8h8w32c-2d"]:
batch, height, width, channel = arr_np.shape
return arr_np.reshape([batch, height // 8, 8, width // 8, 8, channel // 32, 32]).transpose(
0, 5, 1, 3, 2, 4, 6
)
raise RuntimeError(f"Unexpected layout '{layout}'")
@tvm.testing.fixture
def transformed_input_shape(input_shape, input_layout):
return transform_shape(input_shape, input_layout)
@tvm.testing.fixture
def transformed_output_shape(output_shape, output_layout):
return transform_shape(output_shape, output_layout)
@tvm.testing.fixture
def input_np(input_shape, dtype):
return (100 * np.random.uniform(size=input_shape)).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(input_np, input_layout):
return transform_numpy(input_np, input_layout)
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, output_layout):
return transform_numpy(expected_output_np, output_layout)
def layout_transform_1d(batch, height, width, channel):
return [
batch,
channel // 32,
height // 8,
width // 8,
height % 8,
width % 8,
channel % 32,
]
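# Same mapping as above, but with te.AXIS_SEPARATOR marking where the logical axes are
# split into two physical allocations, yielding the 2-d buffers that only VTCM can back.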
def layout_transform_2d(batch, height, width, channel):
return [
batch,
channel // 32,
height // 8,
width // 8,
te.AXIS_SEPARATOR,
height % 8,
width % 8,
channel % 32,
]
def extract_buffers(stmt):
buffers = []
def visitor(node):
if isinstance(node, (tvm.tir.BufferLoad, tvm.tir.BufferStore, tvm.tir.BufferRealize)):
buffers.append(node.buffer)
post_order_visit(stmt, visitor)
return buffers
class TestElementWise:
"""TestElementWise"""
@tvm.testing.fixture
def expected_output_np(self, input_np):
return 2 * input_np
@tvm.testing.fixture
def output_shape(self, input_shape):
return input_shape
@tvm.testing.fixture
def schedule_args(
self,
schedule_type,
input_shape,
dtype,
input_layout,
output_layout,
working_layout,
working_scope,
):
"""Create and return the schedule and input args after applying layout transform"""
if schedule_type == "TE":
return self._te_schedule_args(
input_shape, dtype, input_layout, output_layout, working_layout, working_scope
)
elif schedule_type == "TIR":
return self._tir_schedule_args(
input_shape, dtype, input_layout, output_layout, working_layout, working_scope
)
else:
raise ValueError(f"Unknown schedule type: {schedule_type}")
def _te_tensors(self, input_shape, dtype):
input_tensor = te.placeholder(input_shape, dtype, name="Input")
output_tensor = te.compute(
shape=input_tensor.shape,
fcompute=lambda *indices: (2 * input_tensor[indices]).astype(dtype),
name="Output",
)
return input_tensor, output_tensor
def _te_schedule_args(
self,
input_shape,
dtype,
input_layout,
output_layout,
working_layout,
working_scope,
):
input_tensor, output_tensor = self._te_tensors(input_shape, dtype)
schedule = te.create_schedule(output_tensor.op)
write_cache = schedule.cache_write(output_tensor, working_scope)
read_cache = schedule.cache_read(input_tensor, working_scope, [write_cache])
def apply_transform(tensor, layout):
if layout == "nhwc":
return None
if layout == "nchw-8h8w32c-1d":
return schedule[tensor].transform_layout(layout_transform_1d)
if layout == "nchw-8h8w32c-2d":
return schedule[tensor].transform_layout(layout_transform_2d)
raise RuntimeError(f"Unexpected layout '{layout}'")
apply_transform(input_tensor, input_layout)
compute_loopnest = apply_transform(output_tensor, output_layout) or output_tensor.op.axis
schedule[write_cache].compute_at(schedule[output_tensor], compute_loopnest[0])
apply_transform(read_cache, working_layout)
apply_transform(write_cache, working_layout)
return [schedule, [input_tensor, output_tensor]]
def _tir_schedule_args(
self, input_shape, dtype, input_layout, output_layout, working_layout, working_scope
):
tensors = self._te_tensors(input_shape, dtype)
sch = tvm.tir.Schedule(te.create_prim_func(tensors))
cache_read_block = sch.cache_read("Output", 0, working_scope)
cache_write_block = sch.cache_write("Output", 0, working_scope)
def apply_transform(block, buffer_name, layout):
if layout == "nhwc":
pass
elif layout == "nchw-8h8w32c-1d":
sch.transform_layout(block, buffer_name, layout_transform_1d)
elif layout == "nchw-8h8w32c-2d":
sch.transform_layout(block, buffer_name, layout_transform_2d)
else:
raise RuntimeError(f"Unexpected layout '{layout}'")
apply_transform(cache_read_block, ("read", 0), input_layout)
apply_transform(cache_read_block, ("write", 0), working_layout)
apply_transform(cache_write_block, ("read", 0), working_layout)
apply_transform(cache_write_block, ("write", 0), output_layout)
return [sch.mod]
@tvm.testing.fixture
def ir_module(self, schedule_args):
# If the two buffers are accessed with the same indices, CSE
# will replace them with a Let binding. Since this makes it
# harder to test what the transformed indices are, disabling
# the CSE pass for this test.
with tvm.transform.PassContext(disabled_pass=["tir.CommonSubexprElimTIR"]):
return tvm.lower(*schedule_args)
@tvm.testing.fixture
def uses_unsupported_physical_dimensions( # pylint: disable=invalid-name
self, target_host, input_layout, working_layout, output_layout
):
uses_2d_memory = "nchw-8h8w32c-2d" in [input_layout, working_layout, output_layout]
can_handle_2d_memory = target_host.kind.name == "hexagon"
return uses_2d_memory and not can_handle_2d_memory
def test_param_shapes(self, ir_module, transformed_input_shape, transformed_output_shape):
func = ir_module["main"]
primfunc_input_shape, primfunc_output_shape = [
list(func.preflattened_buffer_map[param].shape) for param in func.params
]
assert primfunc_input_shape == transformed_input_shape
assert primfunc_output_shape == transformed_output_shape
def test_cache_shape(self, ir_module, input_layout, working_layout, output_layout):
"""Test function to check expected_physical_dimensions for cached buffers"""
func = ir_module["main"]
for buffer in extract_buffers(func.body):
buffer_layout = {
"Input": input_layout,
"Input.global": working_layout,
"Output.global": working_layout,
"Input.global.vtcm": working_layout,
"Output.global.vtcm": working_layout,
"Output": output_layout,
}[buffer.name.replace("_", ".")]
expected_physical_dimensions = {
"nhwc": 1,
"nchw-8h8w32c-1d": 1,
"nchw-8h8w32c-2d": 2,
}[buffer_layout]
assert len(buffer.shape) == expected_physical_dimensions
def test_lower(self, schedule_args):
return tvm.lower(*schedule_args)
@requires_hexagon_toolchain
def test_build(self, schedule_args, target_host, input_layout, working_layout, output_layout):
"""Testing build success/failure
* On Hexagon targets, build must succeed for both 1-d and 2-d memory.
* On non-Hexagon targets, build must succeed 1-d memory.
* On non-Hexagon targets, build must fail and report an error for 2-d memory.
"""
# contextlib.nullcontext wasn't added until python3.7, and the
# CI currently runs on python3.6. Therefore, using ExitStack
# to manage an optional context instead.
stack = contextlib.ExitStack()
with stack:
is_hexagon = target_host.kind.name == "hexagon"
uses_2d_memory = "nchw-8h8w32c-2d" in [input_layout, working_layout, output_layout]
if uses_2d_memory and not is_hexagon:
stack.enter_context(pytest.raises(tvm.TVMError))
tvm.build(*schedule_args, target=target_host)
@tvm.testing.fixture
def runtime_module(self, schedule_args, target_host):
if target_host.kind.name != "hexagon":
pytest.skip("Only running on hexagon")
return tvm.build(*schedule_args, target=target_host)
@tvm.testing.requires_hexagon
def test_execute(
self,
runtime_module,
transformed_input_np,
transformed_expected_output_np,
input_layout,
output_layout,
hexagon_session,
):
"""Test execution of computes with 2d physical buffers"""
if input_layout == "nchw-8h8w32c-2d":
input_axis_separators = [4]
else:
input_axis_separators = []
if output_layout == "nchw-8h8w32c-2d":
output_axis_separators = [4]
else:
output_axis_separators = []
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=input_axis_separators,
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
data=np.zeros_like(transformed_expected_output_np),
axis_separators=output_axis_separators,
)
mod = hexagon_session.load_module(runtime_module)
mod(input_arr, output_arr)
output_np = output_arr.numpy()
np.testing.assert_array_equal(output_np, transformed_expected_output_np)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_async_dma_pipeline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test different strategies for loading data into vtcm before running HVX workloads. """
import numpy as np
import pytest
import tvm
from numpy.random import default_rng
from tvm.script import tir as T
VRMPY_SIZE_B = 128
VRMPY_SIZE_INT32 = 32
def conv_approximation(size_a, size_w):
"""Conv approximation."""
a_shape = (size_a, VRMPY_SIZE_B)
w_shape = (size_w, VRMPY_SIZE_B)
out_shape = (size_a, VRMPY_SIZE_INT32)
@T.prim_func
def operator(a_input: T.handle, b_input: T.handle, c_output: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a_input, a_shape, dtype="uint8")
w_buffer = T.match_buffer(b_input, w_shape, dtype="uint8")
c_buffer = T.match_buffer(c_output, out_shape, dtype="int32")
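        # Each (n, index_0) iteration reduces one 128-byte row of a_buffer against one
        # 128-byte row of w_buffer with the HVX vrmpy intrinsic, accumulating 32 int32 lanes.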
for n, index_0 in T.grid(size_a, size_w):
with T.block("c_buffer"):
vn_index, vi_index = T.axis.remap("SR", [n, index_0])
T.reads(
a_buffer[vn_index, 0:VRMPY_SIZE_B],
w_buffer[vi_index, 0:VRMPY_SIZE_B],
c_buffer[vn_index, 0:VRMPY_SIZE_INT32],
)
T.writes(c_buffer[vn_index, 0:VRMPY_SIZE_INT32])
with T.init():
for x in T.serial(VRMPY_SIZE_INT32):
c_buffer[vn_index, x] = 0
c_buffer[vn_index, T.ramp(0, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.acc.128B"),
T.uint32(3),
c_buffer[vn_index, T.ramp(0, 1, 32)],
T.reinterpret(a_buffer[vn_index, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(w_buffer[vi_index, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int32x32",
)
# Currently async DMA lowering does not add any wait to the end of schedules so
# for timing purposes we are manually adding a wait to ensure that all copies
# are complete when the schedule exits.
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.dma_wait",
0, # QueueId
0, # Wait for 0 in flight
dtype="int32",
)
)
return tvm.tir.Schedule(operator)
def evaluate(
hexagon_session,
sch,
a_data,
b_data,
c_data,
expected_output=None,
use_async_copy=0,
merge_async_commit_queue_scope=False,
):
"""Evaluate function."""
target_hexagon = tvm.target.hexagon("v68", link_params=True)
with tvm.transform.PassContext(
config={
"tir.use_async_copy": use_async_copy,
"tir.merge_async_commit_queue_scope": merge_async_commit_queue_scope,
}
):
func_tir = tvm.build(
sch.mod["main"], target=tvm.target.Target(target_hexagon, host=target_hexagon)
)
module = hexagon_session.load_module(func_tir)
a_hexagon = tvm.runtime.ndarray.array(a_data, device=hexagon_session.device)
b_hexagon = tvm.runtime.ndarray.array(b_data, device=hexagon_session.device)
c_hexagon = tvm.runtime.ndarray.array(c_data, device=hexagon_session.device)
if tvm.testing.utils.IS_IN_CI:
# Run with reduced number and repeat for CI
timer = module.time_evaluator("__tvm_main__", hexagon_session.device, number=1, repeat=1)
else:
timer = module.time_evaluator("__tvm_main__", hexagon_session.device, number=10, repeat=10)
time = timer(a_hexagon, b_hexagon, c_hexagon)
if expected_output is not None:
tvm.testing.assert_allclose(c_hexagon.asnumpy(), expected_output)
return round(time.mean * 1000, 4)
def get_single_dma_schedule(size_a, size_w):
"""Generate single DMA schedule."""
a_shape = (size_a, VRMPY_SIZE_B)
w_shape = (size_w, VRMPY_SIZE_B)
out_shape = (size_a, VRMPY_SIZE_INT32)
a_bytes = size_a * VRMPY_SIZE_B
w_bytes = size_w * VRMPY_SIZE_B
@T.prim_func
def operator(a_input: T.handle, b_input: T.handle, c_output: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a_input, a_shape, dtype="uint8", scope="global")
w_buffer = T.match_buffer(b_input, w_shape, dtype="uint8", scope="global")
c_buffer = T.match_buffer(c_output, out_shape, dtype="int32", scope="global")
a_global_vtcm = T.alloc_buffer(a_shape, dtype="uint8", scope="global")
w_global_vtcm = T.alloc_buffer(w_shape, dtype="uint8", scope="global")
c_global_vtcm = T.alloc_buffer(out_shape, dtype="int32", scope="global")
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
a_global_vtcm.data,
T.tvm_stack_make_shape(size_a, VRMPY_SIZE_B, dtype="handle"),
0,
2,
a_global_vtcm.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
a_buffer.data,
T.tvm_stack_make_shape(size_a, VRMPY_SIZE_B, dtype="handle"),
0,
2,
a_buffer.dtype,
0,
dtype="handle",
),
T.Cast("int", a_bytes),
dtype="int32",
)
)
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
w_global_vtcm.data,
T.tvm_stack_make_shape(size_w, VRMPY_SIZE_B, dtype="handle"),
0,
2,
w_global_vtcm.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
w_buffer.data,
T.tvm_stack_make_shape(size_w, VRMPY_SIZE_B, dtype="handle"),
0,
2,
w_buffer.dtype,
0,
dtype="handle",
),
T.Cast("int", w_bytes),
dtype="int32",
)
)
for n, index_0 in T.grid(size_a, size_w):
with T.block("c_buffer"):
vn_index, vi_index = T.axis.remap("SR", [n, index_0])
T.reads(
a_global_vtcm[vn_index, 0:VRMPY_SIZE_B],
w_global_vtcm[vi_index, 0:VRMPY_SIZE_B],
c_global_vtcm[vn_index, 0:VRMPY_SIZE_INT32],
)
T.writes(c_global_vtcm[vn_index, 0:VRMPY_SIZE_INT32])
with T.init():
for x in T.serial(VRMPY_SIZE_INT32):
c_global_vtcm[vn_index, x] = 0
c_global_vtcm[vn_index, T.ramp(0, 1, 32)] += T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(a_global_vtcm[vn_index, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(w_global_vtcm[vi_index, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int32x32",
)
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
c_buffer.data,
T.tvm_stack_make_shape(size_a, VRMPY_SIZE_B, dtype="handle"),
0,
2,
c_buffer.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
c_global_vtcm.data,
T.tvm_stack_make_shape(size_a, VRMPY_SIZE_B, dtype="handle"),
0,
2,
c_global_vtcm.dtype,
0,
dtype="handle",
),
T.Cast("int", a_bytes),
dtype="int32",
)
)
sch = tvm.tir.Schedule(operator)
return sch
def get_fake_conv_vtcm_schedule(size_a, size_w, blocks=2):
"""Generate fake conv schedule with VTCM."""
sch = conv_approximation(size_a, size_w)
compute_block = sch.get_block("c_buffer")
sch.cache_read(compute_block, 1, "global.vtcm")
n = sch.get_loops(compute_block)[0]
n_outer, _ = sch.split(n, [blocks, None])
cache_read_block_a = sch.cache_read(compute_block, 0, "global.vtcm")
sch.compute_at(cache_read_block_a, n_outer)
sch.fuse(*sch.get_loops(cache_read_block_a)[1:])
cache_write_block_c = sch.cache_write(compute_block, 0, "global.vtcm")
sch.reverse_compute_at(cache_write_block_c, n_outer)
sch.fuse(*sch.get_loops(cache_write_block_c)[1:])
return sch
def get_multi_input_fake_conv_vtcm_schedule(size_a, size_w, blocks=2):
"""Generate multi input fake Conv using VTCM."""
sch = conv_approximation(size_a, size_w)
compute_block = sch.get_block("c_buffer")
n = sch.get_loops(compute_block)[0]
n_outer, _ = sch.split(n, [blocks, None])
cache_read_block_a = sch.cache_read(compute_block, 0, "global.vtcm")
sch.compute_at(cache_read_block_a, n_outer)
sch.fuse(*sch.get_loops(cache_read_block_a)[1:])
cache_read_block_b = sch.cache_read(compute_block, 1, "global.vtcm")
sch.compute_at(cache_read_block_b, n_outer)
sch.fuse(*sch.get_loops(cache_read_block_b)[1:])
cache_write_block_c = sch.cache_write(compute_block, 0, "global.vtcm")
sch.reverse_compute_at(cache_write_block_c, n_outer)
sch.fuse(*sch.get_loops(cache_write_block_c)[1:])
return sch
def print_results(test_key, runtimes):
print(test_key)
for runtime in runtimes.items():
print("-{} took {} ms".format(runtime[0], runtime[1]))
print()
class TestAsyncDMAPipeline:
"""Async DMA pipeline test class."""
    # Removed most of these to speed up CI.
size_a = tvm.testing.parameter(
1024,
64 * 64,
128 * 64,
)
size_w = tvm.testing.parameter(
1 * 1,
3 * 3,
9 * 9,
)
@tvm.testing.fixture
def input_a(self, size_a):
return default_rng().integers(0, 8, (size_a, VRMPY_SIZE_B), dtype="uint8")
@tvm.testing.fixture
def input_w(self, size_w):
return default_rng().integers(0, 8, (size_w, VRMPY_SIZE_B), dtype="uint8")
@tvm.testing.fixture
def expected_output(self, size_a, size_w, input_a, input_w):
"""Generate expected output."""
if tvm.testing.utils.IS_IN_CI and (size_a > 1024 or size_w > 1):
pytest.skip("Skipping test since it takes too long in CI.")
expected_result = np.zeros((size_a, VRMPY_SIZE_INT32), dtype="int32")
for n in range(size_a):
for x in range(size_w):
for index_0 in range(VRMPY_SIZE_INT32):
for r_index in range(4):
expected_result[n, index_0] += np.uint32(
input_a[n, index_0 * 4 + r_index]
) * np.uint32(input_w[x, index_0 * 4 + r_index])
return expected_result
@tvm.testing.requires_hexagon
def test_loading_vtcm_for_vrmpy(
self,
hexagon_session,
size_a,
size_w,
input_a,
input_w,
expected_output,
):
"""VTCM for VRMPY test."""
if tvm.testing.utils.IS_IN_CI and (size_a > 1024 or size_w > 1):
pytest.skip("Skipping test since it takes too long in CI.")
sch = conv_approximation(size_a, size_w)
base_runtime = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
)
sch = get_fake_conv_vtcm_schedule(size_a, size_w)
base_vtcm_runtime = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
)
sch = get_fake_conv_vtcm_schedule(size_a, size_w)
n = sch.get_loops(sch.get_block("c_buffer"))[0]
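        # Three-stage software pipeline over the outer loop: input copy (0), compute (1),
        # output copy (2); only the input copy stage is made asynchronous here.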
sch.annotate(n, "software_pipeline_stage", [0, 1, 2])
sch.annotate(n, "software_pipeline_order", [0, 1, 2])
sch.annotate(n, "software_pipeline_async_stages", [0])
async_input_runtime = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
)
sch = get_fake_conv_vtcm_schedule(size_a, size_w)
n = sch.get_loops(sch.get_block("c_buffer"))[0]
sch.annotate(n, "software_pipeline_stage", [0, 1, 2])
sch.annotate(n, "software_pipeline_order", [0, 1, 2])
sch.annotate(n, "software_pipeline_async_stages", [0, 2])
async_input_output = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
)
sch = get_fake_conv_vtcm_schedule(size_a, size_w)
n = sch.get_loops(sch.get_block("c_buffer"))[0]
sch.annotate(n, "software_pipeline_stage", [0, 3, 6])
sch.annotate(n, "software_pipeline_order", [0, 1, 2])
sch.annotate(n, "software_pipeline_async_stages", [0, 6])
async_larger_buffers = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
)
sch = get_multi_input_fake_conv_vtcm_schedule(size_a, size_w)
n = sch.get_loops(sch.get_block("c_buffer"))[0]
sch.annotate(n, "software_pipeline_stage", [0, 0, 1, 2])
sch.annotate(n, "software_pipeline_order", [0, 1, 2, 3])
sch.annotate(n, "software_pipeline_async_stages", [0, 2])
async_multi_input_output = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
merge_async_commit_queue_scope=False,
)
sch = get_fake_conv_vtcm_schedule(size_a, size_w)
n = sch.get_loops(sch.get_block("c_buffer"))[0]
sch.annotate(n, "software_pipeline_stage", [0, 1, 2])
sch.annotate(n, "software_pipeline_order", [0, 1, 2])
sch.annotate(n, "software_pipeline_async_stages", [2])
async_output_runtime = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
use_async_copy=1,
)
sch = get_single_dma_schedule(size_a, size_w)
single_dma_runtime = evaluate(
hexagon_session,
sch,
input_a,
input_w,
np.zeros(expected_output.shape, "int32"),
expected_output,
)
# Total transfer size is equal to the size of
# a_buffer + w_buffer + c_buffer which is equal to 2 * size_a * 128 + size_w * 128
transfer_mb = round((2 * size_a * VRMPY_SIZE_B + size_w * VRMPY_SIZE_B) / 1e6, 2)
# Total number of operations can be calculated given
# the total number of vrmpy calls (size_a * size_w) * operations
# per vrmpy accumulate (128 multiplies + 3 adds for reduction
# per lane + 1 add for accumulate per lane)
complexity = round(size_a * size_w * (VRMPY_SIZE_B * 4) / 1e9, 3)
print_results(
(
f"Test with a_buffer.size: {size_a * VRMPY_SIZE_B}, w_buffer.size:"
f" {size_w * VRMPY_SIZE_B}, computational complexity of {complexity} GOPs"
f", and total memory transfer of {transfer_mb} MB..."
),
{
"without_vtcm": base_runtime,
"synchronous_dma": single_dma_runtime,
"base_vtcm": base_vtcm_runtime,
"async_dma_input": async_input_runtime,
"async_dma_output": async_output_runtime,
"async_dma_input_output": async_input_output,
"async_dma_multi_input_output": async_multi_input_output,
"async_input_output_runtime_larger_buffers": async_larger_buffers,
},
)
# from tvm.script import tir as T
@tvm.script.ir_module
class ModulePipelined:
"""Pipelined module class."""
# pylint: disable=no-self-argument
@T.prim_func
def main(
p0_buffer: T.Buffer[(1, 1, 230, 230, 4), "uint8"],
p1_buffer: T.Buffer[(2, 1, 7, 7, 1, 32, 4), "int8"],
t_cast: T.Buffer[(1, 2, 112, 112, 32), "int32"],
) -> None:
# pylint: disable=missing-function-docstring
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main"})
# body
# with T.block("root")
conv2d_nchwc_int8 = T.alloc_buffer([1, 2, 112, 112, 32], dtype="int32", scope="global.vtcm")
p0_global_vtcm = T.alloc_buffer([1, 1, 230, 230, 4], dtype="uint8", scope="global.vtcm")
p1_global_vtcm = T.alloc_buffer([2, 1, 7, 7, 1, 32, 4], dtype="int8", scope="global.vtcm")
for ax0, ax1, ax2, ax3, ax4, ax5, ax6 in T.grid(2, 1, 7, 7, 1, 32, 4):
with T.block("p1_global.vtcm"):
v0_ind, v1_ind, v2_ind, v3_ind, v4_ind, v5_ind, v6_ind = T.axis.remap(
"SSSSSSS", [ax0, ax1, ax2, ax3, ax4, ax5, ax6]
)
T.reads(p1_buffer[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind, v5_ind, v6_ind])
T.writes(p1_global_vtcm[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind, v5_ind, v6_ind])
p1_global_vtcm[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind, v5_ind, v6_ind] = p1_buffer[
v0_ind, v1_ind, v2_ind, v3_ind, v4_ind, v5_ind, v6_ind
]
for p_outer in T.serial(4):
for index_0 in T.serial(55876):
with T.block("p0_global.vtcm"):
v0_ind = T.axis.spatial(1, 0)
v1_ind = T.axis.spatial(1, 0)
v2_ind = T.axis.spatial(230, p_outer * 56 + index_0 // 916)
v3_ind = T.axis.spatial(230, index_0 % 916 // 4)
v4_ind = T.axis.spatial(4, index_0 % 4)
T.reads(p0_buffer[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind])
T.writes(p0_global_vtcm[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind])
p0_global_vtcm[v0_ind, v1_ind, v2_ind, v3_ind, v4_ind] = p0_buffer[
v0_ind, v1_ind, v2_ind, v3_ind, v4_ind
]
for index_0 in T.parallel(28):
for index_1, index_2, index_3 in T.grid(2, 14, 8):
with T.block("conv2d_NCHWc_int8_o_init"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(2, index_1)
o_height = T.axis.spatial(
112, (p_outer * 28 + index_0) // 14 * 14 + index_2
)
o_width = T.axis.spatial(112, (p_outer * 28 + index_0) % 14 * 8 + index_3)
oc_block_o = T.axis.spatial(1, 0) # pylint: disable=unused-variable
T.reads()
T.writes(conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32])
for i4_1 in T.vectorized(32):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(32, i4_1)
T.reads()
T.writes(
conv2d_nchwc_int8[
n, oc_chunk, o_height, o_width, oc_block_i_init
]
)
conv2d_nchwc_int8[
n, oc_chunk, o_height, o_width, oc_block_i_init
] = 0
for i1_1, i5_1, i6_1, i2_2, i3_2 in T.grid(2, 7, 7, 14, 8):
with T.block("conv2d_NCHWc_int8_o_update"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(2, i1_1)
o_height = T.axis.spatial(112, (p_outer * 28 + index_0) // 14 * 14 + i2_2)
o_width = T.axis.spatial(112, (p_outer * 28 + index_0) % 14 * 8 + i3_2)
oc_block_o = T.axis.spatial(1, 0) # pylint: disable=unused-variable
k_height = T.axis.reduce(7, i5_1)
k_width = T.axis.reduce(7, i6_1)
ic_outer = T.axis.reduce(1, 0)
ic_f_inner = T.axis.reduce(1, 0)
ic_s_inner_o = T.axis.reduce(1, 0) # pylint: disable=unused-variable
T.reads(
conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32],
p0_global_vtcm[
n,
ic_outer,
o_height * 2 + k_height,
o_width * 2 + k_width,
ic_f_inner * 4 : ic_f_inner * 4 + 4,
],
p1_global_vtcm[
oc_chunk, ic_outer, k_height, k_width, ic_f_inner, 0:32, 0:4
],
)
T.writes(conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32])
a_buffer = T.match_buffer(
p0_global_vtcm[
n,
ic_outer,
o_height * 2 + k_height,
o_width * 2 + k_width,
ic_f_inner * 4 : ic_f_inner * 4 + 4,
],
[4],
dtype="uint8",
offset_factor=1,
scope="global.vtcm",
)
b_buffer = T.match_buffer(
p1_global_vtcm[
oc_chunk, ic_outer, k_height, k_width, ic_f_inner, 0:32, 0:4
],
[32, 4],
dtype="int8",
offset_factor=1,
scope="global.vtcm",
)
c_buffer = T.match_buffer(
conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32],
[32],
dtype="int32",
offset_factor=1,
scope="global.vtcm",
)
a_u8x4: T.uint8x4 = a_buffer[0:4]
a_i32: T.int32 = T.reinterpret(a_u8x4, dtype="int32")
b_i8x128 = b_buffer[0, 0:128]
b_i32x32: T.int32x32 = T.reinterpret(b_i8x128, dtype="int32x32")
c_buffer[0:32] = T.call_llvm_pure_intrin(
4217,
T.uint32(3),
c_buffer[0:32],
T.broadcast(a_i32, 32),
b_i32x32,
dtype="int32x32",
)
for index_0 in T.serial(200704):
with T.block("conv2d_nchwc_int8.vtcm"):
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(2, index_0 % 7168 // 3584)
ax2_1 = T.axis.spatial(
112, (p_outer * 28 + index_0 // 7168) // 14 * 14 + index_0 % 3584 // 256
)
ax3_1 = T.axis.spatial(
112, (p_outer * 28 + index_0 // 7168) % 14 * 8 + index_0 % 256 // 32
)
ax4 = T.axis.spatial(32, index_0 % 32)
T.reads(conv2d_nchwc_int8[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
T.writes(t_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
t_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4] = conv2d_nchwc_int8[
ax0_1, ax1_1, ax2_1, ax3_1, ax4
]
# from tvm.script import tir as T
@tvm.script.ir_module
class ModuleBase:
"""Base module test class."""
# pylint: disable=no-self-argument
@T.prim_func
def main(
p0_buffer: T.Buffer[(1, 1, 230, 230, 4), "uint8"],
p1_buffer: T.Buffer[(2, 1, 7, 7, 1, 32, 4), "int8"],
t_cast: T.Buffer[(1, 2, 112, 112, 32), "int32"],
) -> None:
# pylint: disable=missing-function-docstring
# function attr dict
T.func_attr({"tir.noalias": True, "global_symbol": "main"})
# buffer definition
# body
# with T.block("root")
conv2d_nchwc_int8 = T.alloc_buffer([1, 2, 112, 112, 32], dtype="int32")
for i0_0_i1_0_i2_0_i3_0_fused in T.parallel(
112, annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}
):
for i4_0_0 in T.serial(1): # pylint: disable=unused-variable
for i1_1_init, i2_1_init, i3_1_init, i1_2_init, i2_2_init, i3_2_init in T.grid(
2, 1, 1, 1, 14, 8
):
with T.block("conv2d_NCHWc_int8_o_init"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(2, i1_1_init + i1_2_init)
o_height = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused // 14 * 14 + i2_1_init * 14 + i2_2_init
)
o_width = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused % 14 * 8 + i3_1_init * 8 + i3_2_init
)
oc_block_o = T.axis.spatial(1, 0) # pylint: disable=unused-variable
T.reads()
T.writes(conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32])
for i4_1 in T.vectorized(32):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(32, i4_1)
T.reads()
T.writes(
conv2d_nchwc_int8[
n, oc_chunk, o_height, o_width, oc_block_i_init
]
)
conv2d_nchwc_int8[
n, oc_chunk, o_height, o_width, oc_block_i_init
] = 0
for i5_0, i6_0, i7_0, i8_0, i9_0_0 in T.grid( # pylint: disable=unused-variable
1, 1, 1, 1, 1
): # pylint: disable=unused-variable
for (
i0_1, # pylint: disable=unused-variable
i1_1,
i2_1,
i3_1,
i4_0_1, # pylint: disable=unused-variable
i5_1,
i6_1,
i7_1, # pylint: disable=unused-variable
i8_1, # pylint: disable=unused-variable
i9_0_1, # pylint: disable=unused-variable
i0_2, # pylint: disable=unused-variable
i1_2,
i2_2,
i3_2,
i4_0_2, # pylint: disable=unused-variable
) in T.grid(1, 2, 1, 1, 1, 7, 7, 1, 1, 1, 1, 1, 14, 8, 1):
with T.block("conv2d_NCHWc_int8_o_update"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(2, i1_1 + i1_2)
o_height = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused // 14 * 14 + i2_1 * 14 + i2_2
)
o_width = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused % 14 * 8 + i3_1 * 8 + i3_2
)
oc_block_o = T.axis.spatial(1, 0) # pylint: disable=unused-variable
k_height = T.axis.reduce(7, i5_0 * 7 + i5_1)
k_width = T.axis.reduce(7, i6_0 * 7 + i6_1)
ic_outer = T.axis.reduce(1, 0)
ic_f_inner = T.axis.reduce(1, 0)
ic_s_inner_o = T.axis.reduce(1, 0) # pylint: disable=unused-variable
T.reads(
conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32],
p0_buffer[
n,
ic_outer,
o_height * 2 + k_height,
o_width * 2 + k_width,
ic_f_inner * 4 : ic_f_inner * 4 + 4,
],
p1_buffer[
oc_chunk, ic_outer, k_height, k_width, ic_f_inner, 0:32, 0:4
],
)
T.writes(conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32])
a_buffer = T.match_buffer(
p0_buffer[
n,
ic_outer,
o_height * 2 + k_height,
o_width * 2 + k_width,
ic_f_inner * 4 : ic_f_inner * 4 + 4,
],
[4],
dtype="uint8",
offset_factor=1,
)
b_buffer = T.match_buffer(
p1_buffer[
oc_chunk, ic_outer, k_height, k_width, ic_f_inner, 0:32, 0:4
],
[32, 4],
dtype="int8",
offset_factor=1,
)
c_buffer = T.match_buffer(
conv2d_nchwc_int8[n, oc_chunk, o_height, o_width, 0:32],
[32],
dtype="int32",
offset_factor=1,
)
a_u8x4: T.uint8x4 = a_buffer[0:4]
a_i32: T.int32 = T.reinterpret(a_u8x4, dtype="int32")
b_i8x128 = b_buffer[0, 0:128]
b_i32x32: T.int32x32 = T.reinterpret(b_i8x128, dtype="int32x32")
c_buffer[0:32] = T.call_llvm_pure_intrin(
4217,
T.uint32(3),
c_buffer[0:32],
T.broadcast(a_i32, 32),
b_i32x32,
dtype="int32x32",
)
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 14, 8):
for ax4_fused in T.vectorized(32):
with T.block("T_cast_2"):
ax0_1, ax1_1 = T.axis.remap("SS", [ax0, ax1])
ax2_1 = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused // 14 * 14 + ax2
)
ax3_1 = T.axis.spatial(
112, i0_0_i1_0_i2_0_i3_0_fused % 14 * 8 + ax3
)
ax4 = T.axis.spatial(32, ax4_fused)
T.reads(conv2d_nchwc_int8[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
T.writes(t_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
t_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4] = conv2d_nchwc_int8[
ax0_1, ax1_1, ax2_1, ax3_1, ax4
]
@tvm.testing.requires_hexagon
def test_meta(hexagon_session):
"""Test meta."""
if tvm.testing.utils.IS_IN_CI:
pytest.skip("Skipping test since it takes too long in CI.")
a_data = default_rng().integers(1, 8, (1, 1, 230, 230, 4), dtype="uint8")
w_data = default_rng().integers(1, 8, (2, 1, 7, 7, 1, 32, 4), dtype="int8")
c_data = np.zeros((1, 2, 112, 112, 32), dtype="int32")
sch = tvm.tir.Schedule(ModuleBase)
base_runtime = evaluate(hexagon_session, sch, a_data, w_data, c_data)
sch = tvm.tir.Schedule(ModulePipelined)
compute_block = sch.get_block("conv2d_NCHWc_int8_o_update")
outer = sch.get_loops(compute_block)[0]
unscheduled_vtcm_runtime = evaluate(
hexagon_session, sch, a_data, w_data, c_data, use_async_copy=1
)
sch = tvm.tir.Schedule(ModulePipelined)
compute_block = sch.get_block("conv2d_NCHWc_int8_o_update")
outer = sch.get_loops(compute_block)[0]
sch.annotate(outer, "software_pipeline_stage", [0, 1, 2])
sch.annotate(outer, "software_pipeline_order", [0, 1, 2])
sch.annotate(outer, "software_pipeline_async_stages", [0, 2])
pipeline_runtime = evaluate(hexagon_session, sch, a_data, w_data, c_data, use_async_copy=1)
transfer_mb = round((a_data.size + w_data.size + c_data.size) / 1e6, 2)
print_results(
(
f"Test with a_buffer.size: {a_data.size}, w_buffer.size: {w_data.size}"
f", and total memory transfer of {transfer_mb} MB..."
),
{
"without_vtcm": base_runtime,
"unscheduled_vtcm_runtime": unscheduled_vtcm_runtime,
"pipeline_runtime": pipeline_runtime,
},
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_autotvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Minimal example of tuning on hexagon. """
import contextlib
import os
import pytest
import tvm
import tvm.testing
from tvm import autotvm, te
from tvm.autotvm.tuner import GATuner, GridSearchTuner, RandomTuner, XGBTuner
from .infrastructure import get_hexagon_target
@autotvm.template("demo_template")
def demo_template():
"""Initial demo template"""
size_m, size_n, size_k = [1024] * 3
input1 = te.placeholder((size_m, size_k), dtype="float32")
input2 = te.placeholder((size_n, size_k), dtype="float32")
k = te.reduce_axis((0, 1024), name="k")
output = te.compute(
(size_m, size_n), lambda i, j: te.sum(input1[i, k] * input2[j, k], axis=[k])
)
s = te.create_schedule(output.op)
cfg = autotvm.get_config()
_, _ = s[output].op.axis
(k_iter,) = s[output].op.reduce_axis
cfg.define_split("k_split", k_iter, num_outputs=2)
_, _ = cfg["k_split"].apply(s, output, k_iter)
return s, [input1, input2, output]
class HexagonModuleLoader:
"""HexagonModuleLoader"""
def __init__(self, hexagon_session, pre_load_function=None) -> None:
self.pre_load_function = pre_load_function
self.hexagon_session = hexagon_session
@contextlib.contextmanager
def __call__(self, remote_kwargs, build_result):
remote = self.hexagon_session._rpc
if self.pre_load_function is not None:
self.pre_load_function(remote, build_result)
try:
yield remote, self.hexagon_session.load_module(build_result)
finally:
pass
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=2048,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
"""Tune tasks with different tuners"""
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
if tuner in ("xgb", "xgb-rank"):
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "xgb_knob":
tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
@pytest.mark.skip(reason="AutoTVM tuning is not yet enabled on Hexagon")
@tvm.testing.requires_hexagon
def test_autotvm(hexagon_session):
"""Top level test function for testing autotvm"""
logfilename = "./hexagon.autotvm.log"
options = {
"log_filename": logfilename,
"early_stopping": None,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(timeout=15),
runner=autotvm.RPCRunner(
module_loader=HexagonModuleLoader(hexagon_session),
key=hexagon_session._remote_kw["key"],
host=hexagon_session._remote_kw["host"],
port=hexagon_session._remote_kw["port"],
number=3,
timeout=15,
min_repeat_ms=150,
# cooldown_interval=150
),
),
}
task = autotvm.task.create(
"demo_template",
args=[],
target=get_hexagon_target("v68"),
)
tune_tasks([task], **options)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" benchmark_elemwise_add """
import os
import os.path
import sys
import tempfile
import numpy as np
import pytest
import tvm.script
import tvm.testing
from tvm.contrib.hexagon.session import Session
from tvm.script import tir as T
from . import benchmark_util as bu
from .infrastructure import get_hexagon_target
_SHOULD_SKIP_BENCHMARKS, _SKIP_BENCHMARKS_REASON = bu.skip_benchmarks_flag_and_reason()
# This is a fixed detail of the v68 architecture.
HVX_VECTOR_BYTES = 128
# NOTE on server ports:
# These tests use different port numbers for the RPC server (7070 + ...).
# The reason is that an RPC session cannot be gracefully closed without
# triggering TIME_WAIT state on the server socket. This prevents another
# server from binding to the same port until the wait time elapses.
_BT = bu.BenchmarksTable()
_CSV_COLUMN_ORDER = [
# Identifies which TE-compute / TIRScript is used as the basis for the
# benchmarked primfunc. Only needs to be meaningful to humans.
"basic_kernel",
# The tensors' element type
"dtype",
# When applicable, indicates the particular variation of schedules
# apply by the Python code. Decoding this may require looking at this
# script's source code.
"sched_type",
# The memory location of the tensors used during the execution of
# the primfunc. We currently assume just one location.
# This will likely need to be generalized as we add more sophisticated
# primfuncs.
"mem_scope",
# For primfuncs that treat tensor buffers as collections of 1D vectors,
# this is the number of vectors in each tensor.
# This will likely need to be generalized as we add more sophisticated
# primfuncs.
"num_vectors_per_tensor",
# Reserved columns defined by the BenchmarksTable class.
"row_status",
"timings_min_usecs",
"timings_max_usecs",
"timings_median_usecs",
"timings_mean_usecs",
"timings_stddev_usecs",
# For benchmarks that produce files on the host file system, this indicates
# their location. Useful for post-mortem investigation of benchmark results.
"host_files_dir_path",
# Miscellaneous comments about the benchmark.
"comments",
]
_HOST_OUTPUT_DIR = tempfile.mkdtemp()
_PRIMFUNC_NAME = "elemwise_add"
print("-" * 80)
print("OUTPUT DIRECTORY: {}".format(_HOST_OUTPUT_DIR))
print("-" * 80)
print()
def _get_irmod_elemwise_add(shape: list, dtype: str, mem_scope: str) -> tvm.ir.module.IRModule:
"""
Return an IRModule containing a single primfunc, expressed as NS-TIR.
The primfunc implements elementwise-add. Its signature is (A,B,C), where
A and B are the input tensors, and C is the output tensor.
    All three tensors have the specified shape, dtype, and mem_scope.
    If the specified primfunc is known to be unsupported, raise an UnsupportedException.
"""
assert len(shape) == 2
# TVMScript can reference simple Python variables, but it doesn't
    # currently support more complex Python expressions...
(
dim0_size,
dim1_size,
) = shape
if mem_scope == "global.vtcm":
raise bu.UnsupportedException("This benchmark kernel does not yet support VTCM buffers.")
# This check is currently elided by the one above, but it should become relevant as soon
# as we add VTCM support to this kernel generator.
#
# Also: The VTCM budget is a very rough estimate, based only on experience.
# Assuming that it's even reasonable to use a hard-coded estimate AT ALL, this number
# may need tweaking.
    # The code below is commented out to avoid an unreachable-code error
# with pylint. Please enable this once the kernel starts supporting
# VTCM buffers
# Code starts below:
# ---- ------ -----
# estimated_vtcm_budget_bytes = HVX_VECTOR_BYTES * 1024
# dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
# assert dtype_bits % 8 == 0
# dtype_bytes = dtype_bits // 8
# num_vtcm_tensors = 3
# estimated_vtcm_needed_bytes = shape[0] * shape[1] * dtype_bytes * num_vtcm_tensors
# if estimated_vtcm_needed_bytes > estimated_vtcm_budget_bytes:
# raise bu.UnsupportedException("Expect to exceed VTCM budget.")
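# For illustration only (mirrors the commented-out logic above): with the assumed
# ~128 KiB budget (HVX_VECTOR_BYTES * 1024 = 131072 bytes), a float16 run with
# shape (2048, 64) would need 2048 * 64 * 2 bytes * 3 tensors = 786432 bytes of
# VTCM and would therefore be skipped.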
@tvm.script.ir_module
class BenchmarkModule:
"""Elementwise STIR module for benchmarking"""
# pylint: disable=no-self-argument,invalid-name,missing-function-docstring
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle):
# We exchange data between functions by handles, which are similar to pointers.
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, shape, dtype=dtype)
B = T.match_buffer(b, shape, dtype=dtype)
C = T.match_buffer(c, shape, dtype=dtype)
for i in range(dim0_size):
for j in range(dim1_size):
C[i, j] = A[i, j] + B[i, j]
# pylint: enable=no-self-argument,invalid-name,missing-function-docstring
return BenchmarkModule
def _benchmark_hexagon_elementwise_add_kernel(
hexagon_session: Session, shape: list, dtype: str, mem_scope: str
):
"""
Generate and benchmark a single elementwise-add kernel for Hexagon.
Produce these outputs:
- Printed status updates / results to stdout and/or stderr.
- Create a new subdirectory under _HOST_OUTPUT_DIR, and populate it with
various logs and intermediate files.
- Add to _BT a row describing this benchmark run.
"""
# Represent the benchmark details in a form required by the benchmark table
# and for other logging...
keys_dict = {
"basic_kernel": "ewise-add",
"dtype": dtype,
"shape": shape,
"mem_scope": mem_scope,
}
desc = bu.get_benchmark_decription(keys_dict)
# Create the host-side directory for this benchmark run's files / logs...
host_files_dir_name = bu.get_benchmark_id(keys_dict)
host_files_dir_path = os.path.join(_HOST_OUTPUT_DIR, host_files_dir_name)
os.mkdir(host_files_dir_path)
keys_dict["host_files_dir_path"] = host_files_dir_path
log_file_path = os.path.join(host_files_dir_path, "out.txt")
with open(log_file_path, "w", encoding="UTF-8") as log_file:
print(f"CONFIGURATION: {desc}")
log_file.write(f"CONFIGURATION: {desc}\n")
try:
ns_tir_module = _get_irmod_elemwise_add(shape, dtype, mem_scope)
# Dump the primfunc NS-TIR (as text) to the log file...
lowered_mod = tvm.lower(ns_tir_module, _PRIMFUNC_NAME)
log_file.write("LOWERED IR MODULE:\n")
log_file.write(str(lowered_mod))
log_file.write("\n")
# Lower the primfunc's IRModule to Hexagon object code...
input1 = tvm.te.placeholder(shape, dtype=dtype)
input2 = tvm.te.placeholder(shape, dtype=dtype)
output = tvm.te.placeholder(shape, dtype=dtype)
built_module: tvm.driver.build_module.OperatorModule = tvm.build(
ns_tir_module,
[
input1,
input2,
output,
],
get_hexagon_target("v69"),
name=_PRIMFUNC_NAME,
)
# Create an actual Hexagon-native shared object file, initially stored on the
# host's file system...
host_dso_binary_path = os.path.join(host_files_dir_path, "test_binary.so")
built_module.save(host_dso_binary_path)
print(f"SAVED BINARY TO HOST PATH: {host_dso_binary_path}")
# Upload the .so to the Android device's file system (or wherever is appropriate
# when using the Hexagon simulator)...
target_dso_binary_filename = "test_binary.so"
target_dso_binary_pathname = hexagon_session.upload(
host_dso_binary_path, target_dso_binary_filename
)
# Generate our testing / validation data...
(
host_numpy_input1_data,
host_numpy_input2_data,
host_numpy_output_data_expected,
) = _get_elemwise_add_reference_value_tensors(shape, dtype)
# On the target device / simulator, make our Hexagon-native shared object
# available for use...
loaded_hexagon_module: tvm.runtime.module.Module = hexagon_session.load_module(
target_dso_binary_pathname
)
# Create the target-side tensors to hold the primfunc's inputs and outputs...
input1_data = tvm.nd.empty(shape, dtype, hexagon_session.device, mem_scope)
input2_data = tvm.nd.empty(shape, dtype, hexagon_session.device, mem_scope)
output_data = tvm.nd.empty(shape, dtype, hexagon_session.device, mem_scope)
# Populate the primfunc's input tensors...
input1_data.copyfrom(host_numpy_input1_data)
input2_data.copyfrom(host_numpy_input2_data)
# Actually benchmark the primfunc...
timer = loaded_hexagon_module.time_evaluator(
"main", hexagon_session.device, number=10, repeat=1
)
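# time_evaluator runs the kernel `number` times per measurement and collects
# `repeat` measurements; here each reported timing is the average of 10 runs,
# collected once.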
timing_result = timer(input1_data, input2_data, output_data)
print(f"TIMING RESULT: {timing_result}")
log_file.write(f"TIMING RESULT: {timing_result}\n")
# Verify that the computation actually happened, and produced the correct result.
result = output_data.numpy()
if dtype == "float16":
# These are the tightest tolerances we currently expect / require for these
# kernels. They may be changed in the future.
rel_tolerance = 0.005
abs_tolerance = 2.0
elif dtype == "int8":
rel_tolerance = 0
abs_tolerance = 0
else:
raise Exception(f"Unexpected dtype: {dtype}")
# TODO: We're assuming that *any* assertion thrown by 'assert_allclose' is because
# the numerical differences were too large. But ideally this code would
# differentiate between (a) numerical difference errors, which should simply be
# recorded as a failed benchmark run, vs. (b) more serious errors that should
# kill the overall script.
try:
tvm.testing.assert_allclose(
result, host_numpy_output_data_expected, rel_tolerance, abs_tolerance
)
except AssertionError as err:
raise bu.NumericalAccuracyException(str(err))
_BT.record_success(timing_result, **keys_dict)
except bu.NumericalAccuracyException as err:
print()
print("FAIL: Numerical accuracy error. See log file.")
log_file.write("\n")
log_file.write(f"FAIL: {err}\n")
_BT.record_fail(**keys_dict, comments="Numerical accuracy error. See log file.")
except bu.UnsupportedException as err:
print()
print(f"SKIP: {err}")
log_file.write("\n")
log_file.write(f"SKIP: {err}\n")
_BT.record_skip(**keys_dict, comments=f"Unsupported configuration: {err}")
def _get_elemwise_add_reference_value_tensors(shape: list, dtype: str):
"""
Return [A:np.array, B:np.array, C:np.array]
`A`, `B`, and `C` are reference data used to exercise and validate
an elementwise-add kernel: C = A+B.
NOTE: These data are primarily meant for performance testing.
The values may be helpful in detecting correctness issues, but that's
a secondary consideration here.
"""
assert len(shape) == 2
input1 = np.ndarray(shape, dtype=dtype)
input2 = np.ndarray(shape, dtype=dtype)
np_dtype = input1.dtype
if np_dtype.kind in ["i", "u"]:
# We allow overflow for integer types because it tends to be well-behaved
# and well-understood...
min_value = np.iinfo(np_dtype).min
max_value = np.iinfo(np_dtype).max
next_value = min_value
for i in range(shape[0]):
for j in range(shape[1]):
input1[i, j] = next_value
input2[i, j] = next_value * 2
next_value += 1
elif np_dtype.kind == "f":
# NOTE: For simplicity, we avoid test data that require
# well-defined behavior on floating-point overflow.
# But it may be reasonable to test that in the future.
min_value = np.finfo(np_dtype).min
max_value = np.finfo(np_dtype).max
min_input_value = min_value / 2.0 + 1
max_input_value = max_value / 2.0 - 2
delta = (max_input_value - min_input_value) / (shape[0] * shape[1])
next_value = min_input_value
for i in range(shape[0]):
for j in range(shape[1]):
input1[i, j] = next_value
input2[i, j] = next_value + 1
next_value += delta
else:
assert False, f"Unexpected data type: {np_dtype}"
output = input1 + input2
return [
input1,
input2,
output,
]
@pytest.mark.skipif(_SHOULD_SKIP_BENCHMARKS, reason=_SKIP_BENCHMARKS_REASON)
@tvm.testing.requires_hexagon
def test_elemwise_add(hexagon_session: Session):
"""Main elementwise add test function"""
for dtype in [
"int8",
"float16",
]:
for mem_scope in [
"global",
"global.vtcm",
]:
# These numbers are fairly arbitrary, but they're meant to stress memory/caches to
# various extents.
for num_vectors_per_tensor in [
1,
16,
64,
512,
2048,
]:
dtype_bits = tvm._ffi.runtime_ctypes.DataType(dtype).bits
assert dtype_bits % 8 == 0
dtype_bytes = dtype_bits // 8
elem_per_hvx_vector = HVX_VECTOR_BYTES // dtype_bytes
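# E.g., each 128-byte HVX vector holds 128 int8 elements or 64 float16 elements,
# so every row of the tensors below corresponds to exactly one HVX vector.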
shape = [
num_vectors_per_tensor,
elem_per_hvx_vector,
]
print()
_benchmark_hexagon_elementwise_add_kernel(hexagon_session, shape, dtype, mem_scope)
print("-" * 80)
print(f"OUTPUT DIRECTORY: {_HOST_OUTPUT_DIR}")
print("-" * 80)
print()
tabular_output_filename = os.path.join(_HOST_OUTPUT_DIR, "benchmark-results.csv")
with open(tabular_output_filename, "w", encoding="UTF-8") as csv_file:
_BT.print_csv(csv_file, _CSV_COLUMN_ORDER)
print(f"BENCHMARK RESULTS FILE: {tabular_output_filename}")
_BT.print_csv(sys.stdout, _CSV_COLUMN_ORDER)
if _BT.has_fail() > 0:
pytest.fail("At least one benchmark configuration failed", pytrace=False)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_benchmark_maxpool2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module serves two purposes:
(1) Demonstrates how to write Python code that exercises various
Hexagon-related algorithms / features.
(2) Benchmarks the resulting primfuncs.
Current limitations:
- Input shapes are limited to NHWC --> NHWC_8h8w32c.
- Testing parameters (input shapes, dtypes, etc.) currently
support only one value for each parameter.
- height, width, channel must be integer multiples of 8, 8, and 32,
respectively. I.e., partial blocks aren't currently
supported by this script.
- Requires that I/O tensors reside in "global.vtcm" memory,
rather than "global" memory.
This prevents benchmarking with I/O tensors that are too
large to fit into available VTCM.
- The script only develops one primfunc.
Future revisions to this script are expected to add more
primfuncs and demonstrate more coding strategies.
"""
from typing import List
import copy
import os
import pytest
import numpy as np
import tvm.testing
from tvm import te, topi, tir
from tvm.topi import testing
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon import allocate_hexagon_array
from .infrastructure import get_hexagon_target
from . import benchmark_util as bu
# Pytest seems to require that fixture names exist in the current module.
# E.g., it doesn't allow: @pytest.mark.usefixtures("bu.benchmark_group")
BENCHMARK_GROUP = bu.benchmark_group
_SHOULD_SKIP_BENCHMARKS, _SKIP_BENCHMARKS_REASON = bu.skip_benchmarks_flag_and_reason()
def _ceil_div(numerator, denominator):
return (numerator + (denominator - 1)) // denominator
def _int8_nhwc_8h8w32c_map(n_batch, height, width, channel):
return [
n_batch,
height // 8,
width // 8,
channel // 32,
te.AXIS_SEPARATOR,
height % 8,
width % 8,
channel % 32,
]
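# Example of the index map above: logical NHWC index (0, 13, 21, 40) maps to
# block coordinates (0, 1, 2, 1) and intra-block coordinates (5, 5, 8), i.e.
# [0, 1, 2, 1, te.AXIS_SEPARATOR, 5, 5, 8].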
def _int8_nhwc_8h8w32c_shape(n_batch, height, width, channel) -> List[int]:
return [
n_batch,
_ceil_div(height, 8),
_ceil_div(width, 8),
_ceil_div(channel, 32),
8,
8,
32,
]
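# Example: an NHWC tensor of shape (1, 64, 64, 32) packs into the 7-D blocked
# shape [1, 8, 8, 1, 8, 8, 32], i.e. an 8 x 8 grid of 8h8w32c blocks covering a
# single channel block.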
def _int8_nhwc_8h8w32c_xform_immediate(arr_in: np.ndarray) -> np.ndarray:
"""
Return a deep copy of 'arr_in', transformed from an NHWC to an
NHWC-8h8w32c shape. Any newly created array elements have value 0.
"""
stage1 = copy.copy(arr_in)
(
n_batch,
height,
width,
channel,
) = stage1.shape
(
h_minor,
w_minor,
c_minor,
) = [8, 8, 32]
h_major = _ceil_div(height, h_minor)
w_major = _ceil_div(width, w_minor)
c_major = _ceil_div(channel, c_minor)
# This handles cases where the dimensions of arr_in are not cleanly divided
# by the minor block size, i.e. [8, 8, 32].
#
# Any additional array elements that this creates will have value 0.
# We shouldn't actually care what value is used for those elements, because they
# shouldn't be treated as meaningful by any of our algorithms.
if (height % h_minor) or (width % w_minor) or (channel % c_minor):
stage1.resize(
(n_batch, h_major * h_minor, w_major * w_minor, c_major * c_minor), refcheck=False
)
stage2 = stage1.reshape(n_batch, h_major, h_minor, w_major, w_minor, c_major, c_minor)
stage3 = stage2.transpose(0, 1, 3, 5, 2, 4, 6)
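# After the transpose, the axis order is
# (n, h_major, w_major, c_major, h_minor, w_minor, c_minor), i.e. NHWC-8h8w32c.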
return stage3
def _create_test_input(shape, dtype: str) -> np.ndarray:
np_dtype = np.dtype(dtype)
min_value = np.iinfo(np_dtype).min
max_value = np.iinfo(np_dtype).max
return np.random.randint(low=min_value, high=max_value, size=tuple(shape), dtype=np_dtype)
@pytest.mark.usefixtures("BENCHMARK_GROUP")
class TestMaxPool2D:
"""maxpool2D base test class"""
csv_column_order = [
# Identifies which TE-compute / TIRScript is used as the basis for the
# benchmarked primfunc. Only needs to be meaningful to humans.
"basic_kernel",
# When applicable, indicates the particular variation of schedules
# applied by the Python code. Decoding this may require looking at this
# script's source code.
"sched_type",
# Values directly based on test parameters...
"input_shape_4d",
"block_shape",
"dtype",
"kernel",
"stride",
"dilation",
"padding",
"io_tensor_mem_scope",
# Reserved columns defined by the BenchmarksTable class.
"row_status",
"timings_min_usecs",
"timings_max_usecs",
"timings_median_usecs",
"timings_mean_usecs",
"timings_stddev_usecs",
# For benchmarks that produce files on the host file system, this indicates
# their location. Useful for post-mortem investigation of benchmark results.
"host_files_dir_path",
# Miscellaneous comments about the benchmark.
"comments",
]
dtype = tvm.testing.parameter("int8")
# FIXME(cconvey): The script currently fails when height, width, or channel is not an
# integer multiple of 8, 8, or 32, respectively.
n_batch = tvm.testing.parameter(1)
height = tvm.testing.parameter(*[x * 8 for x in [1, 4, 16]])
width = tvm.testing.parameter(*[x * 8 for x in [1, 4, 16]])
channel = tvm.testing.parameter(*[x * 32 for x in [1, 2]])
kernel = tvm.testing.parameter((1, 1), (3, 3))
stride = tvm.testing.parameter((1, 1))
dilation = tvm.testing.parameter((1, 1))
padding = tvm.testing.parameter((0, 0, 0, 0))
io_tensor_mem_scope = tvm.testing.parameter("global.vtcm")
@pytest.mark.skipif(_SHOULD_SKIP_BENCHMARKS, reason=_SKIP_BENCHMARKS_REASON)
@tvm.testing.requires_hexagon
def test_maxpool2d_nhwc(
self,
n_batch,
height,
width,
channel,
dtype,
kernel,
stride,
dilation,
padding,
io_tensor_mem_scope,
hexagon_session: Session,
):
"""Test maxpool2d NHWC"""
keys_dict = {
"basic_kernel": "max_pool2d",
"sched_type": 1,
"input_shape_4d": [n_batch, height, width, channel],
"block_shape": [8, 8, 32],
"dtype": dtype,
"kernel": kernel,
"stride": stride,
"dilation": dilation,
"padding": padding,
"io_tensor_mem_scope": io_tensor_mem_scope,
}
desc = bu.get_benchmark_decription(keys_dict)
# Create the host-side directory for this benchmark run's files / logs...
host_files_dir_name = bu.get_benchmark_id(keys_dict)
host_files_dir_path = os.path.join(self.working_dir, host_files_dir_name)
os.mkdir(host_files_dir_path)
keys_dict["host_files_dir_path"] = host_files_dir_path
log_file_path = os.path.join(host_files_dir_path, "out.txt")
with open(log_file_path, "w") as log_file:
print(f"CONFIGURATION: {desc}")
log_file.write(f"CONFIGURATION: {desc}\n")
try:
input_tensor_shape_4d = [n_batch, height, width, channel]
input_tensor_shape_7d = _int8_nhwc_8h8w32c_shape(n_batch, height, width, channel)
data = te.placeholder(tuple(input_tensor_shape_4d), dtype=dtype)
output = topi.nn.pool2d(
data, kernel, stride, dilation, padding, "max", layout="NHWC"
)
primfunc = te.create_prim_func([data, output])
sch = tir.Schedule(primfunc, debug_mask="all")
sch.transform_layout(
block="tensor", buffer="placeholder", index_map=_int8_nhwc_8h8w32c_map
)
built_module = tvm.build(
sch.mod,
target=get_hexagon_target("v69"),
)
# Save a local copy of the Hexagon object code (in the form of a .so file)
# to allow post-mortem inspection.
host_dso_binary_path = os.path.join(host_files_dir_path, "test_binary.so")
built_module.save(host_dso_binary_path)
print(f"SAVED BINARY TO HOST PATH: {host_dso_binary_path}")
hexagon_mod = hexagon_session.load_module(built_module)
# Generate the input tensor's data.
# Note that we'll eventually need it in two different layouts:
# (1) NHWC as an argument to testing.poolnd_python.
# (2) NHWC_8h8w32c as an argument to our Hexagon primfunc.
# a_numpy_4d = np.random.randint(low=-128, high=127,
# size=input_tensor_shape_4d, dtype=np.int8)
a_numpy_4d = _create_test_input(input_tensor_shape_4d, dtype)
ref_output_4d = testing.poolnd_python(
a_numpy_4d.astype("int32"),
kernel,
stride,
dilation,
padding[0:2],
padding[2:],
pool_type="max",
dtype="int32",
layout="NHWC",
).astype(dtype)
output_tensor_shape_4d = ref_output_4d.shape
a_numpy_7d = _int8_nhwc_8h8w32c_xform_immediate(a_numpy_4d)
a_hexagon_7d = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=input_tensor_shape_7d,
axis_separators=[4],
dtype=dtype,
mem_scope=io_tensor_mem_scope,
)
c_hexagon_4d = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=output_tensor_shape_4d,
axis_separators=[],
dtype=dtype,
mem_scope=io_tensor_mem_scope,
)
a_hexagon_7d.copyfrom(a_numpy_7d)
if dtype == "int8":
rel_tolerance = 0
abs_tolerance = 0
else:
assert False, f"TODO: decide acceptable tolerances for dtype {dtype}"
timer = hexagon_mod.time_evaluator(
"main", hexagon_session.device, number=10, repeat=1
)
timing_result = timer(a_hexagon_7d, c_hexagon_4d)
try:
tvm.testing.assert_allclose(
ref_output_4d, c_hexagon_4d.numpy(), rtol=rel_tolerance, atol=abs_tolerance
)
except AssertionError as exception:
raise bu.NumericalAccuracyException(str(exception))
except bu.NumericalAccuracyException as exception:
print()
print(f"FAIL: Numerical accuracy error. See log file.")
log_file.write("\n")
log_file.write(f"FAIL: {exception}\n")
self.benchmark_table.record_fail(
**keys_dict, comments="Numerical accuracy error. See log file."
)
except bu.UnsupportedException as exception:
print()
print(f"SKIP: {exception}")
log_file.write("\n")
log_file.write(f"SKIP: {exception}\n")
self.benchmark_table.record_skip(
**keys_dict, comments=f"Unsupported configuration: {exception}"
)
self.benchmark_table.record_success(timing_result, **keys_dict)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_cache_read_write.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Lower cache_read and cache_write to Hexagon DMA via tensorize """
import numpy as np
import tvm.testing
from tvm import te, tir
from tvm.contrib.hexagon.session import Session
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
def intrin_mem_copy(shape, dtype, dst_scope, src_scope):
"""Define and return tensor intrinsic for mem copy"""
src = te.placeholder(shape=shape, dtype=dtype, name="src")
dst = te.compute(shape, lambda i: src[i], name="dst")
size = shape[0] * np.dtype(dtype).itemsize
src_buffer = tvm.tir.decl_buffer(
shape,
dtype,
scope=src_scope,
offset_factor=1,
name="mem_copy_src_buffer",
)
dst_buffer = tvm.tir.decl_buffer(
shape,
dtype,
scope=dst_scope,
offset_factor=1,
name="mem_copy_dst_buffer",
)
zero_indices = [0 for _ in shape]
def intrin_func(ins, outs):
ir_builder = tvm.tir.ir_builder.create()
_src = ins[0]
_dst = outs[0]
dst_handle = ir_builder.buffer_ptr(dst_buffer)
src_handle = ir_builder.buffer_ptr(src_buffer)
ir_builder.emit(
tvm.tir.call_intrin(
"handle",
"tir.mem_copy",
tvm.tir.call_intrin("handle", "tir.address_of", dst_handle[zero_indices]),
tvm.tir.call_intrin("handle", "tir.address_of", src_handle[zero_indices]),
size,
)
)
return ir_builder.get()
return te.decl_tensor_intrin(dst.op, intrin_func, binds={src: src_buffer, dst: dst_buffer})
def verify(hexagon_session: Session, schedule, x_tensor, y_tensor, z_tensor, size):
"""Verify correctness with reference from numpy"""
print(tvm.lower(schedule, [x_tensor, y_tensor, z_tensor]))
func = tvm.build(
schedule,
[x_tensor, y_tensor, z_tensor],
get_hexagon_target("v68"),
name="dmacpy",
)
mod = hexagon_session.load_module(func)
x_array = tvm.nd.array(
np.random.randint(low=-128, high=127, size=size, dtype=x_tensor.dtype),
device=hexagon_session.device,
)
y_array = tvm.nd.array(
np.random.randint(low=-128, high=127, size=size, dtype=y_tensor.dtype),
device=hexagon_session.device,
)
z_array = tvm.nd.array(
np.random.randint(low=-128, high=127, size=size, dtype=z_tensor.dtype),
device=hexagon_session.device,
)
mod["dmacpy"](x_array, y_array, z_array)
ref = x_array.numpy() + y_array.numpy()
np.testing.assert_equal(z_array.numpy(), ref)
@tvm.testing.requires_hexagon
def test_cache_read_write(hexagon_session: Session):
"""Test cache_read and cache_write to global.vtcm for hexagon"""
size = 128
outer_shape = (size,)
factor = 16
inner_shape = (factor,)
dtype = "int8"
x_tensor = te.placeholder(shape=outer_shape, dtype=dtype, name="x")
y_tensor = te.placeholder(shape=outer_shape, dtype=dtype, name="y")
z_tensor = te.compute(outer_shape, lambda i: x_tensor[i] + y_tensor[i], name="z")
s = te.create_schedule(z_tensor.op)
x_vtcm = s.cache_read(x_tensor, "global.vtcm", [z_tensor])
y_vtcm = s.cache_read(y_tensor, "global.vtcm", [z_tensor])
z_vtcm = s.cache_write(z_tensor, "global.vtcm")
zouter, _ = s[z_vtcm].split(z_vtcm.op.axis[0], factor=factor)
s[x_vtcm].compute_at(s[z_vtcm], zouter)
s[y_vtcm].compute_at(s[z_vtcm], zouter)
mem_copy_read = intrin_mem_copy(inner_shape, dtype, "global.vtcm", "global")
(cache_read_x,) = s[x_vtcm].op.axis
s[x_vtcm].tensorize(cache_read_x, mem_copy_read)
(cache_read_y,) = s[y_vtcm].op.axis
s[y_vtcm].tensorize(cache_read_y, mem_copy_read)
mem_copy_write = intrin_mem_copy(outer_shape, dtype, "global", "global.vtcm")
(cache_write_z,) = s[z_tensor].op.axis
s[z_tensor].tensorize(cache_write_z, mem_copy_write)
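# Note the asymmetry: the reads above are tensorized over 16-element tiles
# (inner_shape), while the write-back copies the full 128-element result
# (outer_shape) in a single mem_copy intrinsic call.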
verify(hexagon_session, s, x_tensor, y_tensor, z_tensor, size)
def layout_transform_2d(n):
return [n // 16, te.AXIS_SEPARATOR, n % 16]
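# Example: flat index 37 maps to [2, te.AXIS_SEPARATOR, 5], i.e. element 5 of
# 16-element chunk 2 in the transformed 2-D layout.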
@tvm.testing.requires_hexagon
def test_cache_read_write_2d(hexagon_session: Session):
"""Test 2D cache_read and cache_write to global.vtcm for hexagon"""
size = 128
outer_shape = (size,)
factor = 16
inner_shape = (factor,)
dtype = "int8"
x_tensor = te.placeholder(shape=outer_shape, dtype=dtype, name="x")
y_tensor = te.placeholder(shape=outer_shape, dtype=dtype, name="y")
z_tensor = te.compute(outer_shape, lambda i: x_tensor[i] + y_tensor[i], name="z")
s = te.create_schedule(z_tensor.op)
x_vtcm = s.cache_read(x_tensor, "global.vtcm", [z_tensor])
y_vtcm = s.cache_read(y_tensor, "global.vtcm", [z_tensor])
z_vtcm = s.cache_write(z_tensor, "global.vtcm")
layout_x_vtcm = s[x_vtcm].transform_layout(layout_transform_2d)
layout_y_vtcm = s[y_vtcm].transform_layout(layout_transform_2d)
_ = s[z_vtcm].transform_layout(layout_transform_2d)
mem_copy_read = intrin_mem_copy(inner_shape, dtype, "global.vtcm", "global")
s[x_vtcm].tensorize(layout_x_vtcm[1], mem_copy_read)
s[y_vtcm].tensorize(layout_y_vtcm[1], mem_copy_read)
# The loop schedule over `z` is not modified when calling `transform_layout`
# on `z_vtcm` above; therefore, we must call `split` to modify the loop schedule
# over `z` to match the layout of `z_vtcm`, so that we can accurately write
# `z_vtcm` back to `z` using the memory copy intrinsic.
_, zinner = s[z_tensor].split(z_tensor.op.axis[0], factor=factor)
mem_copy_write = intrin_mem_copy(inner_shape, dtype, "global", "global.vtcm")
s[z_tensor].tensorize(zinner, mem_copy_write)
verify(hexagon_session, s, x_tensor, y_tensor, z_tensor, size)
@T.prim_func
def scale_by_two(buffer_a: T.Buffer[(8192,), "int8"], buffer_c: T.Buffer[(8192,), "int8"]):
for i in T.serial(
0,
8192,
):
with T.block("C"):
buffer_c[i] = buffer_a[i] * T.int8(2)
def test_vtcm_lowering():
"""Test lowering with vtcm mem scope"""
mod = tvm.IRModule.from_expr(scale_by_two.with_attr("global_symbol", "main"))
sch = tir.Schedule(mod, debug_mask="all")
block_c = sch.get_block("C")
(flat,) = sch.get_loops(block_c)
outer, _, _, _ = sch.split(flat, factors=[8, 4, 2, 128])
cache_block = sch.cache_read(block_c, 0, storage_scope="global.vtcm")
sch.compute_at(cache_block, outer)
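# With the split above, each of the 8 outer iterations stages a
# 4 * 2 * 128 = 1024-element slice of the input into VTCM before computing on it.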
lowered = tvm.lower(sch.mod["main"])
def ir_module_has_allocate_nodes(irmod):
nallocs = 0
def _visit(stmt):
nonlocal nallocs
if isinstance(stmt, tvm.tir.Allocate):
nallocs += 1
tvm.tir.stmt_functor.post_order_visit(irmod["main"].body, _visit)
return nallocs
assert not ir_module_has_allocate_nodes(lowered), (
"AllocateNode found in lowered IRModule, "
"VTCM allocations should have been lowered to tir.nd_mem_alloc_with_scope"
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_fixed_point_conversion.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test float to fixed-point conversion. We do it by constructing a numpy array with a
wide range of floating-point values. These values are converted into
fixed-point values using topi.hexagon.utils.get_fixed_point_value. Then, these values are
converted back into float using the scale_factor provided by the function. These converted
floating-point values are then compared against the original values, and an assertion is
raised if they fall outside of the expected tolerance.
"""
import math
import struct
import numpy as np
import tvm.topi.hexagon.utils as utils
class TestFixedPointConversion:
"""Fixed point conversation test class"""
def test_fixed_point_conversion(self):
"""Test fixed point conversion"""
# Construct array with wide range of values
fp1 = np.random.uniform(0.00001, 0.0002, size=(10))
fp2 = np.random.uniform(0.001, 0.02, size=(10))
fp3 = np.random.uniform(1, 20, size=(10))
fp4 = np.random.uniform(900, 1000, size=(10))
fp5 = np.random.uniform(1e9, 1e10, size=(10))
# Test for values with largest possible exponent as per IEEE-754 floating-point
# standard (actual exp value = 127, stored exp value = 254).
fp6 = np.random.uniform(2.4e38, 2.5e38, size=(1))
# Test for very small floating-point values.
fp7 = np.random.uniform(1.4e-34, 1.7e-34, size=(1))
float_arr = np.concatenate((fp1, fp2, fp3, fp4, fp5, fp6, fp7))
for flp in float_arr:
fxp, rsh = utils.get_fixed_point_value(flp, "int16")
# Compute scale_factor using rsh (rsh is log2 of the scale_factor). While doing this,
# we use IEEE-754 floating-point representation since rsh can be negative or positive.
scale = ((rsh + 127) & 0xFF) << 23 # Add bias (127) and position it into exponent bits
scale_i = struct.pack("I", scale) # Pack it as integer
scale_f = struct.unpack("f", scale_i) # Unpack as float
converted_flp = fxp / scale_f[0]
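# get_fixed_point_value returns fxp approximately equal to flp * 2**rsh, so
# dividing by the reconstructed scale (2**rsh) should recover the original
# value to within the ~1% tolerance checked below.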
assert math.isclose(flp, converted_flp, rel_tol=1e-2)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_fixed_point_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Fixed Point Multiply on Hexagon."""
import re
import numpy as np
import tvm.testing
from tvm import relay
from tvm.relay.backend import Executor
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon.pytest_plugin import HEXAGON_AOT_LLVM_TARGET
from .infrastructure import get_hexagon_target
@tvm.testing.requires_hexagon
def test_vmpy_intrinsic_presence():
"""
Check intrinsic lowering for the fixed_point_multiply operation.
GraphExecutor is used here since get_source("asm") is not supported with aot.
"""
ishape = (1, 128)
a = relay.var("a", relay.TensorType(ishape, "int32"))
y = relay.fixed_point_multiply(a, 1395864320, 1) # 1.3
relay_mod = tvm.IRModule.from_expr(y)
params = {}
executor = Executor("graph", {"link-params": True})
with tvm.transform.PassContext(opt_level=3):
hexagon_lowered = tvm.relay.build(
relay_mod,
get_hexagon_target("v68"),
executor=executor,
params=params,
)
asm = hexagon_lowered.lib.get_source("asm")
# Check that 'vmpye' instruction was generated in asm file.
vmpye_regex = re.compile(r"v\d{1,2}.w = vmpye\(v\d{1,2}.w,v\d{1,2}.uh\)")
assert vmpye_regex.search(asm) is not None
# Check that 'vmpyo' instruction was generated in asm file.
vmpyo_regex = re.compile(r"v\d{1,2}.w \+= vmpyo\(v\d{1,2}.w,v\d{1,2}.h\):<<1:rnd:sat:shift")
assert vmpyo_regex.search(asm) is not None
def build_module(relay_mod, target):
params = {}
executor = Executor("aot", {"link-params": True})
lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target, host=target),
executor=executor,
params=params,
)
return lowered
def run_module(mod, inputs):
mod.set_input(**inputs)
mod.run()
output = mod.get_output(0).numpy()
return output
class TestFixedPointMultiply:
"""Fixed point Multiply test class"""
in_scale_const, out_scale_const = tvm.testing.parameters(
(1.3, 30.0),
(1.37, 1.0),
(0.6, 1.0),
((1.7, 0.6), 1.0),
((0.007, 1.9), 1.0),
)
multiplier, shift = tvm.testing.parameters(
(1288490240, -2), # 0.15
(1395864320, 1), # 1.3
(1288490188, 0), # 0.6
)
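# The encoded value is roughly (multiplier / 2**31) * 2**shift; e.g.
# 1395864320 / 2**31 is approximately 0.65, and 0.65 * 2**1 == 1.3, matching
# the comment above.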
@tvm.testing.requires_hexagon
def test_fixed_point_multiply(self, hexagon_session: Session, multiplier: int, shift: int):
"""Fixed point multiply test."""
ishape = (6, 32)
a = relay.var("a", relay.TensorType(ishape, "int32"))
fpm = relay.fixed_point_multiply(a, multiplier, shift)
relay_mod = tvm.IRModule.from_expr(fpm)
with tvm.transform.PassContext(opt_level=3):
# Compile for Hexagon...
hexagon_lowered = build_module(relay_mod, HEXAGON_AOT_LLVM_TARGET)
# Compile for LLVM...
llvm_lowered = build_module(relay_mod, tvm.target.Target("llvm"))
data_in = np.arange(-96, 96).reshape(ishape)
inputs = {"a": data_in}
# Run hexagon...
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)
hexagon_output = run_module(hexagon_mod, inputs)
# Run llvm...
llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered["default"](tvm.cpu(0)))
expected_output = run_module(llvm_mod, inputs)
tvm.testing.assert_allclose(hexagon_output, expected_output)
@tvm.testing.requires_hexagon
def test_per_channel(self, hexagon_session: Session, in_scale_const, out_scale_const):
"""Per channel multiply test."""
ishape = [1, 128, 56, 56]
axis = 1
a = relay.var("a", shape=ishape, dtype="int32")
# Make list of input scales from in_scale_const parameter.
if isinstance(in_scale_const, tuple):
in_scale = list(in_scale_const) * (ishape[axis] // len(in_scale_const))
else:
in_scale = [in_scale_const] * ishape[axis]
assert len(in_scale) == ishape[axis]
# qnn.requantize is lowered to fixed_point_multiply if zp == 0 and in_dtype == out_dtype.
iscale = relay.const(in_scale)
izero = relay.const(0)
oscale = relay.const(out_scale_const)
ozero = relay.const(0)
op = relay.qnn.op.requantize(a, iscale, izero, oscale, ozero, axis=axis, out_dtype="int32")
mod = tvm.IRModule.from_expr(op)
with tvm.transform.PassContext(opt_level=3):
# Compile for Hexagon...
hexagon_lowered = build_module(mod, HEXAGON_AOT_LLVM_TARGET)
# Compile for LLVM...
llvm_lowered = build_module(mod, tvm.target.Target("llvm"))
a_np = np.random.randint(-1000, 1000, size=np.prod(ishape)).reshape(ishape)
inputs = {"a": a_np}
# Run hexagon...
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)
hexagon_output = run_module(hexagon_mod, inputs)
# Run llvm...
llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered["default"](tvm.cpu(0)))
expected_output = run_module(llvm_mod, inputs)
tvm.testing.assert_allclose(hexagon_output, expected_output)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_launcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring,redefined-outer-name
""" Test rpc based launcher for hexagon """
import pytest
import numpy as np
import tvm.testing
from tvm import relay, te
from tvm.contrib.hexagon.session import Session
from tvm.relay.backend import Executor, Runtime
from tvm.contrib.hexagon.build import HexagonLauncherRPC
from tvm.contrib.hexagon.hexagon_profiler import HexagonProfiler
from .infrastructure import get_hexagon_target
@tvm.testing.requires_hexagon
def test_add(hexagon_session: Session):
"""Test simple add"""
dtype = "int8"
placeholder_a = tvm.te.placeholder((2,), dtype=dtype)
placeholder_b = tvm.te.placeholder((1,), dtype=dtype)
compute_c = tvm.te.compute(
placeholder_a.shape, lambda i: placeholder_a[i] + placeholder_b[0], name="C"
)
sched = tvm.te.create_schedule(compute_c.op)
func = tvm.build(
sched,
[placeholder_a, placeholder_b, compute_c],
get_hexagon_target("v68"),
name="add",
)
mod = hexagon_session.load_module(func)
a_data = tvm.nd.array(np.array([2, 3], dtype=dtype), device=hexagon_session.device)
assert (a_data.numpy() == np.array([2, 3])).all()
b_data = tvm.nd.array(np.array([4], dtype=dtype), device=hexagon_session.device)
assert (b_data.numpy() == np.array([4])).all()
c_data = tvm.nd.array(np.array([0, 0], dtype=dtype), device=hexagon_session.device)
assert (c_data.numpy() == np.array([0, 0])).all()
mod["add"](a_data, b_data, c_data)
assert (c_data.numpy() == np.array([6, 7])).all()
@tvm.testing.requires_hexagon
def test_add_vtcm(hexagon_session: Session):
"""Test add on VTCM"""
dtype = "int8"
placeholder_a = tvm.te.placeholder((2,), dtype=dtype)
placeholder_b = tvm.te.placeholder((1,), dtype=dtype)
compute_c = tvm.te.compute(
placeholder_a.shape, lambda i: placeholder_a[i] + placeholder_b[0], name="C"
)
sched = tvm.te.create_schedule(compute_c.op)
func = tvm.build(
sched,
[placeholder_a, placeholder_b, compute_c],
get_hexagon_target("v68"),
name="add",
)
mod = hexagon_session.load_module(func)
a_data = tvm.nd.empty(
placeholder_a.shape, placeholder_a.dtype, hexagon_session.device, "global.vtcm"
)
a_data.copyfrom(np.array([2, 3]))
b_data = tvm.nd.empty(
placeholder_b.shape, placeholder_b.dtype, hexagon_session.device, "global.vtcm"
)
b_data.copyfrom(np.array([4]))
c_data = tvm.nd.empty(compute_c.shape, compute_c.dtype, hexagon_session.device, "global.vtcm")
c_data.copyfrom(np.array([0, 0]))
mod["add"](a_data, b_data, c_data)
result = c_data.numpy()
assert (result == np.array([6, 7])).all()
class TestMatMul:
"""Test matmul class"""
size_m = tvm.testing.parameter(32)
size_n = tvm.testing.parameter(32)
size_k = tvm.testing.parameter(32)
@tvm.testing.requires_hexagon
def test_matmul(self, hexagon_session, size_m, size_n, size_k):
"""Test matmul"""
placeholder_x = te.placeholder((size_m, size_k), dtype="float32")
placeholder_y = te.placeholder((size_k, size_n), dtype="float32")
reduce_k1 = te.reduce_axis((0, size_k), name="k1")
compute_z = te.compute(
(size_m, size_n),
lambda i, j: te.sum(
placeholder_x[i, reduce_k1] * placeholder_y[reduce_k1, j], axis=[reduce_k1]
),
)
schedule = te.create_schedule(compute_z.op)
func = tvm.build(
schedule,
[placeholder_x, placeholder_y, compute_z],
get_hexagon_target("v68"),
)
mod = hexagon_session.load_module(func)
x_data = np.random.uniform(size=[i.value for i in placeholder_x.shape]).astype(
placeholder_x.dtype
)
y_data = np.random.uniform(size=[i.value for i in placeholder_y.shape]).astype(
placeholder_y.dtype
)
z_data = np.zeros([i.value for i in compute_z.shape], dtype=compute_z.dtype)
x_array = tvm.nd.array(x_data, device=hexagon_session.device)
y_array = tvm.nd.array(y_data, device=hexagon_session.device)
z_array = tvm.nd.array(z_data, device=hexagon_session.device)
mod(x_array, y_array, z_array)
target_llvm = tvm.target.Target("llvm")
mod = tvm.build(
schedule,
[placeholder_x, placeholder_y, compute_z],
tvm.target.Target(target_llvm, host=target_llvm),
)
device = tvm.cpu(0)
xtcpu = tvm.nd.array(x_data, device)
ytcpu = tvm.nd.array(y_data, device)
ztcpu = tvm.nd.array(z_data, device)
mod(xtcpu, ytcpu, ztcpu)
tvm.testing.assert_allclose(z_array.numpy(), ztcpu.numpy(), rtol=1e-4)
@tvm.testing.requires_hexagon
def test_graph_executor(hexagon_session: Session):
"""Test graph executor"""
dtype = "float32"
data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
conv2d_op = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight], conv2d_op)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
runtime = Runtime("cpp")
executor = Executor("graph")
weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
params = {"weight": weight_in}
inputs = {"data": data_in}
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
get_hexagon_target("v68"),
runtime=runtime,
executor=executor,
)
graph_mod = hexagon_session.get_executor_from_factory(lowered)
graph_mod.set_input(**params)
graph_mod.run(**inputs)
hexagon_output = graph_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(**params)
llvm_graph_mod.run(**inputs)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_graph_executor_multiple_conv2d(hexagon_session: Session):
"""Test multiple conv2d nodes with graph_executor"""
dtype = "float32"
input_shape = (1, 8, 8, 3)
w1_shape = (5, 5, 3, 1)
w2_shape = (5, 5, 1, 3)
data = relay.var("data", relay.TensorType(input_shape, dtype))
weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
conv2d_op1 = relay.nn.conv2d(
data,
weight1,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
conv2d_op2 = relay.nn.conv2d(
conv2d_op1,
weight2,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight1, weight2], conv2d_op2)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
runtime = Runtime("cpp")
executor = Executor("graph")
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
get_hexagon_target("v68"),
runtime=runtime,
executor=executor,
)
weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
dtype=dtype
)
weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
dtype=dtype
)
input_data = np.random.rand(
input_shape[0], input_shape[1], input_shape[2], input_shape[3]
).astype(dtype=dtype)
params = {"weight1": weight1_data, "weight2": weight2_data}
inputs = {"data": input_data}
graph_mod = hexagon_session.get_executor_from_factory(lowered)
graph_mod.set_input(**params)
graph_mod.run(**inputs)
hexagon_output = graph_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(**params)
llvm_graph_mod.run(**inputs)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_aot_executor(hexagon_session: Session, aot_host_target, aot_target):
"""Test AOT executor"""
dtype = "float32"
input_shape = (1, 128, 128, 3)
w_shape = (5, 5, 3, 8)
data = relay.var("data", relay.TensorType(input_shape, dtype))
weight = relay.var("weight", relay.TensorType(w_shape, dtype))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
weight_data = np.random.rand(w_shape[0], w_shape[1], w_shape[2], w_shape[3]).astype(dtype=dtype)
input_data = np.random.rand(
input_shape[0], input_shape[1], input_shape[2], input_shape[3]
).astype(dtype=dtype)
params = {"weight": weight_data}
inputs = {"data": input_data}
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
params=params,
target=tvm.target.Target(aot_target, host=aot_host_target),
runtime=Runtime("cpp"),
executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
)
aot_mod = hexagon_session.get_executor_from_factory(lowered)
aot_mod.set_input(**inputs)
aot_mod.run()
hexagon_output = aot_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=Runtime("cpp"),
executor=Executor("graph"),
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(**params)
llvm_graph_mod.run(**inputs)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_aot_executor_multiple_conv2d(hexagon_session: Session, aot_host_target, aot_target):
"""Test multiple conv2d nodes with AOT executor"""
dtype = "float32"
input_shape = (1, 8, 8, 3)
w1_shape = (5, 5, 3, 1)
w2_shape = (5, 5, 1, 3)
data = relay.var("data", relay.TensorType(input_shape, dtype))
weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
conv2d_op1 = relay.nn.conv2d(
data,
weight1,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
conv2d_op2 = relay.nn.conv2d(
conv2d_op1,
weight2,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight1, weight2], conv2d_op2)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
dtype=dtype
)
weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
dtype=dtype
)
input_data = np.random.rand(
input_shape[0], input_shape[1], input_shape[2], input_shape[3]
).astype(dtype=dtype)
params = {"weight1": weight1_data, "weight2": weight2_data}
inputs = {"data": input_data}
with tvm.transform.PassContext(opt_level=3):
lowered = tvm.relay.build(
relay_mod,
params=params,
target=tvm.target.Target(aot_target, host=aot_host_target),
runtime=Runtime("cpp"),
executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
)
aot_mod = hexagon_session.get_executor_from_factory(lowered)
aot_mod.set_input(**inputs)
aot_mod.run()
hexagon_output = aot_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=Runtime("cpp"),
executor=Executor("graph"),
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(**params)
llvm_graph_mod.run(**inputs)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
data_dtype = tvm.testing.parameter("int8", "uint8")
weight_dtype = tvm.testing.parameter("int8", "uint8")
@tvm.testing.requires_hexagon
def test_conv2d_relay_vrmpy(hexagon_session, data_dtype, weight_dtype):
if data_dtype == "int8" and weight_dtype == "uint8":
pytest.skip("(i8, u8) input pair is not supported")
def get_conv2d_nchw(d_shape, w_shape, padding, strides=(1, 1)):
out_dtype = "int32"
data = relay.var("data", shape=d_shape, dtype=data_dtype)
weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
out_channel = w_shape[0]
return relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=out_dtype,
)
target_hexagon = tvm.target.hexagon("v68")
target = tvm.target.Target(target_hexagon, host=target_hexagon)
I, O, H, W = 64, 256, 56, 56
kH = kW = 3
padding = (1, 1)
strides = (1, 1)
data_shape = (1, I, H, W)
weight_shape = (O, I, kH, kW)
bias_shape = (weight_shape[0],)
bias = relay.var("bias", shape=bias_shape, dtype="int32")
conv2d = get_conv2d_nchw(
data_shape,
weight_shape,
padding,
strides=strides,
)
bias_add = relay.nn.bias_add(conv2d, bias)
mod = tvm.IRModule.from_expr(bias_add)
if data_dtype == "uint8":
data_np = np.random.uniform(0, 255, size=data_shape).astype("uint8")
else:
data_np = np.random.uniform(-128, 127, size=data_shape).astype("int8")
if weight_dtype == "uint8":
weight_np = np.random.uniform(0, 255, size=weight_shape).astype("uint8")
else:
weight_np = np.random.uniform(-128, 127, size=weight_shape).astype("int8")
bias_np = np.random.randint(low=-127, high=128, size=bias_shape).astype("int32")
params = {"weight": weight_np, "bias": bias_np}
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight_np, bias_np])
.numpy()
)
with tvm.transform.PassContext(
opt_level=3,
):
executor = relay.backend.Executor("graph", {"link-params": True})
lib = relay.build(mod, target=target, params=params, executor=executor)
asm = lib.lib.get_source("asm")
assert "vrmpy" in asm
rt_mod = hexagon_session.get_executor_from_factory(lib)
rt_mod.set_input("data", data_np)
rt_mod.run()
out = rt_mod.get_output(0).numpy()
np.testing.assert_equal(out, ref)
@tvm.testing.requires_hexagon
def test_dense_relay_vrmpy(hexagon_session, data_dtype, weight_dtype):
if data_dtype == "int8" and weight_dtype == "uint8":
pytest.skip("(i8, u8) input pair is not supported")
target_hexagon = tvm.target.hexagon("v68")
target = tvm.target.Target(target_hexagon, host=target_hexagon)
M = 128
N = 1000
K = 2048
data_shape = (M, K)
weight_shape = (N, K)
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype=weight_dtype)
dense = relay.nn.dense(data, weight, out_dtype="int32")
if data_dtype == "uint8":
data_np = np.random.uniform(0, 255, size=data_shape).astype("uint8")
else:
data_np = np.random.uniform(-128, 127, size=data_shape).astype("int8")
if weight_dtype == "uint8":
weight_np = np.random.uniform(0, 255, size=weight_shape).astype("uint8")
else:
weight_np = np.random.uniform(-128, 127, size=weight_shape).astype("int8")
bias_np = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
params = {"weight": weight_np, "bias": bias_np}
bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
bias_add = relay.nn.bias_add(dense, bias)
mod = tvm.IRModule.from_expr(bias_add)
with tvm.transform.PassContext(
opt_level=3,
):
executor = relay.backend.Executor("graph", {"link-params": True})
lib = relay.build(mod, target=target, params=params, executor=executor)
asm = lib.lib.get_source("asm")
assert "vrmpy" in asm
rt_mod = hexagon_session.get_executor_from_factory(lib)
rt_mod.set_input("data", data_np)
rt_mod.run()
out = rt_mod.get_output(0).numpy()
ref = np.dot(data_np.astype("int32"), weight_np.transpose().astype("int32"))
ref += bias_np
np.testing.assert_equal(out, ref)
@tvm.testing.requires_hexagon
def test_lwp(
hexagon_server_process,
hexagon_launcher: HexagonLauncherRPC,
hexagon_session: Session,
hexagon_debug,
):
dtype = "float32"
data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
weight = relay.var("weight", relay.TensorType((5, 5, 3, 8), dtype))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
target_hexagon = tvm.target.hexagon("v68")
runtime = Runtime("cpp")
executor = Executor("graph")
weight_in = np.random.rand(5, 5, 3, 8).astype(dtype=dtype)
data_in = np.random.rand(1, 64, 64, 3).astype(dtype=dtype)
params = {"weight": weight_in}
inputs = {"data": data_in}
with tvm.transform.PassContext(opt_level=3, config={"tir.instrument_lwp": True}):
lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_hexagon, host=target_hexagon),
runtime=runtime,
executor=executor,
)
# Create HexagonProfiler object
dso_binary = "test_binary.so"
profiler = HexagonProfiler(dso_binary, lowered, hexagon_server_process, hexagon_debug)
graph_mod = hexagon_session.get_executor_from_factory(lowered)
graph_mod.set_input(**params)
graph_mod.run(**inputs)
hexagon_output = graph_mod.get_output(0).numpy()
# Get lightweight profiling output as a CSV file
profiler.get_profile_output(hexagon_launcher, hexagon_session)
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(weight=weight_in)
llvm_graph_mod.run(data=data_in)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_hexagon
def test_lwp_multiple_conv2d(
hexagon_server_process,
hexagon_launcher: HexagonLauncherRPC,
hexagon_session: Session,
hexagon_debug,
):
dtype = "float32"
input_shape = (1, 8, 8, 3)
w1_shape = (5, 5, 3, 1)
w2_shape = (5, 5, 1, 3)
data = relay.var("data", relay.TensorType(input_shape, dtype))
weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
y1 = relay.nn.conv2d(
data,
weight1,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
y2 = relay.nn.conv2d(
y1,
weight2,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight1, weight2], y2)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
target_hexagon = tvm.target.hexagon("v68")
runtime = Runtime("cpp")
executor = Executor("graph")
weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
dtype=dtype
)
weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
dtype=dtype
)
input_data = np.random.rand(
input_shape[0], input_shape[1], input_shape[2], input_shape[3]
).astype(dtype=dtype)
params = {"weight1": weight1_data, "weight2": weight2_data}
inputs = {"data": input_data}
with tvm.transform.PassContext(opt_level=3, config={"tir.instrument_lwp": True}):
lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_hexagon, host=target_hexagon),
runtime=runtime,
executor=executor,
)
# Create HexagonProfiler object
dso_binary = "test_binary.so"
profiler = HexagonProfiler(dso_binary, lowered, hexagon_server_process, hexagon_debug)
graph_mod = hexagon_session.get_executor_from_factory(lowered)
graph_mod.set_input(**params)
graph_mod.run(**inputs)
hexagon_output = graph_mod.get_output(0).numpy()
# Get lightweight profiling output as a CSV file
profiler.get_profile_output(hexagon_launcher, hexagon_session)
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=runtime,
executor=executor,
)
llvm_graph_mod = tvm.contrib.graph_executor.GraphModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_graph_mod.set_input(**params)
llvm_graph_mod.run(**inputs)
expected_output = llvm_graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_maxpool2d_blocked.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contrib tests for blocked conv2d and maxpool2d"""
import numpy as np
import tvm
import tvm.testing
from tvm import te, topi
from tvm.topi import testing
from .infrastructure import build_and_run, get_block_shape, get_packed_shape
# Blocked layout: NHWC8h8w32c :: [N, H//8, W//8, C//32, 8h, 8w, 32c]
def maxpool2d_logical(
shape_nhwc,
window_shape,
stride,
padding,
dtype,
storage_scope="global",
):
"""
Maxpool2d TE wherein the input activation is defined by its
logical NHWC shape. The packed physical layout for the
activation is nhwc8h8w32c.
"""
block_h, block_w, block_c = get_block_shape()
shape = get_packed_shape(shape_nhwc)
logical_output_shape = (
shape_nhwc[0],
(shape_nhwc[1] - window_shape[0] + padding[0] + padding[1]) // stride[0] + 1,
(shape_nhwc[2] - window_shape[1] + padding[2] + padding[3]) // stride[0] + 1,
shape_nhwc[3],
)
output_shape = get_packed_shape(logical_output_shape)
_, height, width, _ = shape_nhwc
placeholder_x = te.placeholder(shape_nhwc, dtype=dtype)
    # Combination of the padding required by the maxpool operator and the padding needed
    # to reach an evenly divisible number of blocks. Note that this padding should be
    # inlined in the schedule so as to avoid input copying.
pad_h = (block_h - ((height + padding[1]) % block_h)) % block_h
pad_w = (block_w - ((width + padding[3]) % block_w)) % block_w
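    # E.g. for height = 112 with padding[1] = 1 and block_h = 8:
    # pad_h = (8 - ((112 + 1) % 8)) % 8 = 7, i.e. the 1 row of pool padding plus
    # 6 more rows to round the padded height up to a whole 8-row block.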
x_pad = topi.nn.pad(
placeholder_x, [0, padding[0], padding[2], 0], [0, pad_h, pad_w, 0], pad_value=0
)
# Calculate packed layout
x_packed = te.compute(
shape,
lambda n, ho, wo, co, hi, wi, ci: x_pad[
n, ho * block_h + hi, wo * block_w + wi, co * block_c + ci
],
)
reduce_h = te.reduce_axis((0, window_shape[0]), name="rh")
reduce_w = te.reduce_axis((0, window_shape[1]), name="rw")
def compute(batch, h_outer, w_outer, c_outer, h_inner, w_inner, c_inner):
# Construct blockized strided maxpool height indices
h = h_outer * block_h + h_inner
h_contig = h * stride[0] + reduce_h
h_block_id = h_contig // block_h
h_block_offset = h_contig % block_h
# Construct blockized strided maxpool width indices
w_idx = w_outer * block_w + w_inner
w_contig = w_idx * stride[1] + reduce_w
w_block_id = w_contig // block_w
w_block_offset = w_contig % block_w
return te.max(
x_packed[
batch, h_block_id, w_block_id, c_outer, h_block_offset, w_block_offset, c_inner
],
axis=[reduce_h, reduce_w],
)
compute_y = te.compute(output_shape, compute)
schedule = te.create_schedule(compute_y.op)
# Ensure the padding and array packing is performed inline
schedule[x_pad].compute_inline()
schedule[x_packed].compute_inline()
binds = {}
if storage_scope and storage_scope != "global":
with tvm.transform.PassContext():
x_buffer = tvm.tir.decl_buffer(shape, name="Xb", dtype=dtype, scope=storage_scope)
y_buffer = tvm.tir.decl_buffer(
output_shape, name="Yb", dtype=dtype, scope=storage_scope
)
binds = {placeholder_x: x_buffer, compute_y: y_buffer}
return (schedule, [placeholder_x, compute_y], binds)
class BaseMaxPooling:
batch = tvm.testing.parameter(1)
in_size = tvm.testing.parameter(8, 112)
in_channel = tvm.testing.parameter(64)
window_size = tvm.testing.parameter(3)
stride = tvm.testing.parameter(2)
pad = tvm.testing.parameter(1)
dtype = tvm.testing.parameter("float32")
class TestMaxPooling(BaseMaxPooling):
"""Test MaxPool class"""
@tvm.testing.parametrize_targets("llvm")
def test_maxpool(self, shape_nhwc, window_size, stride, pad, dtype, target):
"""Test blocked maxpool"""
inputs = [np.random.uniform(0, 255, size=shape_nhwc).astype(dtype)]
ref_output = testing.poolnd_python(
inputs[0],
(window_size, window_size),
strides=(stride, stride),
dilation=(1, 1),
padding_before=(pad, pad),
padding_after=(pad, pad),
pool_type="max",
)
output = build_and_run(
inputs,
maxpool2d_logical,
target,
target,
shape_nhwc,
window_shape=(window_size, window_size),
stride=(stride, stride),
padding=(pad, pad, pad, pad),
dtype=dtype,
)
return output, ref_output
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_memory_alloc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test memory allocation."""
import numpy as np
import tvm
from tvm.script import tir as T
from tvm.contrib.hexagon import allocate_hexagon_array
from .infrastructure import get_hexagon_target
def generated_func(shape: tuple, dtype: str, axis_separators: list):
"""Generate element wise function."""
dim0, dim1 = shape
@T.prim_func
def elwise(a: T.handle, b: T.handle):
a_buffer = T.match_buffer(a, shape, dtype=dtype, axis_separators=axis_separators)
b_buffer = T.match_buffer(b, shape, dtype=dtype, axis_separators=axis_separators)
for i, j in T.grid(dim0, dim1):
with T.block("compute"):
b_buffer[i, j] = a_buffer[i, j] * T.cast(2, dtype=dtype)
return elwise
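# A rough sketch of what axis_separators mean here: an empty list keeps the usual flat
# 1-D physical layout, while [1] asks the lowering to split the flattened index after the
# first axis, so each row becomes its own region; this is the form Hexagon uses for
# discontiguous VTCM/DDR allocations.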
class TestMemoryAlloc:
"""Memory allocation test."""
dtype = tvm.testing.parameter("int8")
shape = tvm.testing.parameter((128, 128))
(scope, axis_separators,) = tvm.testing.parameters(
("global", []),
("global.vtcm", []),
("global.vtcm", [1]),
("global.ddr", []),
("global.ddr", [1]),
)
def test_global_axis_separator(self, hexagon_session, shape, dtype, scope, axis_separators):
"""Test with global axis separator."""
mod1 = tvm.build(
generated_func(shape, dtype, axis_separators),
target=get_hexagon_target("v69"),
)
mod2 = hexagon_session.load_module(mod1)
a_np = np.ones(shape=shape, dtype=dtype)
a = allocate_hexagon_array(
hexagon_session.device, data=a_np, mem_scope=scope, axis_separators=axis_separators
)
b_np = np.zeros(shape=shape, dtype=dtype)
b = allocate_hexagon_array(
hexagon_session.device, data=b_np, mem_scope=scope, axis_separators=axis_separators
)
mod2(a, b)
tvm.testing.assert_allclose(a.numpy() * 2, b.numpy(), atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_meta_schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test rpc based launcher for hexagon """
import tempfile
import numpy as np
import pytest
import tvm.testing
import tvm.topi.testing
from tvm import meta_schedule as ms
from tvm import relay, te
from tvm.contrib.hexagon.meta_schedule import (
get_hexagon_local_builder,
get_hexagon_rpc_runner,
)
from tvm.meta_schedule import postproc, schedule_rule
from tvm.meta_schedule.arg_info import TensorInfo
from tvm.meta_schedule.builder import BuilderInput
from tvm.meta_schedule.runner import RunnerInput
from tvm.script import tir as T
from tvm.tir import FloatImm
from tvm.tir.tensor_intrin.hexagon import VRMPY_u8u8i32_INTRIN
from .infrastructure import get_hexagon_target
MATMUL_N = 16
MATMUL_M = 32
@tvm.script.ir_module
class MatmulModule:
"""Matmultest class"""
# pylint: disable=no-self-argument
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None: # type: ignore
# pylint: disable=missing-function-docstring
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, (16, 16), "float32")
b_buffer = T.match_buffer(b, (16, 16), "float32")
c_buffer = T.match_buffer(c, (16, 16), "float32")
for i, j, k in T.grid(16, 16, 16):
with T.block("matmul"):
vi_axis, vj_axis, vk_axis = T.axis.remap("SSR", [i, j, k])
with T.init():
c_buffer[vi_axis, vj_axis] = 0.0 # type: ignore
c_buffer[vi_axis, vj_axis] = (
c_buffer[vi_axis, vj_axis]
+ a_buffer[vi_axis, vk_axis] * b_buffer[vk_axis, vj_axis]
)
@tvm.testing.requires_hexagon
def test_builder_runner(hexagon_launcher):
"""Test builder and runner."""
if hexagon_launcher.is_simulator():
pytest.skip(msg="Tuning on simulator not supported.")
mod = MatmulModule
builder = get_hexagon_local_builder()
runner = get_hexagon_rpc_runner(hexagon_launcher, number=1, repeat=1, min_repeat_ms=0)
(builder_result,) = builder.build([BuilderInput(mod, get_hexagon_target("v68"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
def dense_compute(m, n, k):
"""dense compute"""
X = te.placeholder((m, k), name="X", dtype="uint8")
packed_width = te.placeholder((n // 32, k // 4, 32, 4), name="packed_width", dtype="uint8")
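    # packed_width holds the (n, k) weight matrix in blocks of 32 output channels by 4
    # reduction elements: logical element (j, k) sits at [j // 32, k // 4, j % 32, k % 4],
    # matching the repacking loop in verify_dense below.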
axis_k = te.reduce_axis((0, k), name="k")
out = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, axis_k].astype("int32")
* packed_width[
tvm.tir.indexdiv(j, 32), tvm.tir.indexdiv(axis_k, 4), j % 32, axis_k % 4
].astype("int32"),
axis=axis_k,
),
name="compute",
)
return [X, packed_width, out]
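# schedule_dense below tiles the (m, n) loops into 32x32 blocks, splits the reduction
# axis by 4, parallelizes the fused outer block loop and maps the resulting 32-output by
# 4-reduction tile onto the VRMPY u8u8->i32 intrinsic; the decomposed init loop is
# vectorized separately.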
def schedule_dense(sch, block, m_size, do_tune):
"""dense schedule"""
a_y, a_x, _ = sch.get_loops(block)[-3:]
if do_tune:
y_factors = sch.sample_perfect_tile(a_y, n=2, max_innermost_factor=128)
a_yo, a_yi = sch.split(a_y, factors=y_factors)
else:
a_yo, a_yi = sch.split(a_y, factors=[None, min(m_size, 32)])
a_xo, a_xi = sch.split(a_x, factors=[None, 32])
sch.reorder(a_yo, a_xo, a_yi, a_xi)
a_xi, a_k = sch.get_loops(block)[-2:]
a_ko, a_ki = sch.split(a_k, factors=[None, 4])
sch.reorder(a_ko, a_xi, a_ki)
fused = sch.fuse(a_yo, a_xo)
sch.parallel(fused)
dec = sch.decompose_reduction(block, a_ko)
init_loop = sch.get_loops(dec)[-1]
sch.vectorize(init_loop)
sch.tensorize(a_xi, VRMPY_u8u8i32_INTRIN)
def verify_dense(sch, target, m_size, n_size, k_size, hexagon_session):
"""Verify dense operator."""
f = tvm.build(sch.mod["main"], target=target, name="dense")
mod = hexagon_session.load_module(f)
dev = hexagon_session.device
a_np = np.random.uniform(1, 10, size=(m_size, k_size)).astype("uint8")
b_np = np.random.uniform(1, 10, size=(n_size, k_size)).astype("uint8")
c_np = np.dot(a_np.astype("int32"), b_np.transpose().astype("int32"))
pack_width = np.random.uniform(1, 10, size=(n_size // 32, (k_size // 4), 32, 4)).astype("uint8")
for r_idx in range(n_size // 32):
for k_output in range(k_size // 4):
for s_idx in range(32):
for t_idx in range(4):
pack_width[r_idx][k_output][s_idx][t_idx] = b_np[r_idx * 32 + s_idx][
k_output * 4 + t_idx
]
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(pack_width, dev)
c = tvm.nd.array(np.zeros((m_size, n_size), dtype="int32"), dev)
mod(a, b, c)
np.testing.assert_equal(c.numpy(), c_np)
evaluator = mod.time_evaluator(mod.entry_name, dev, number=10)
gflops = (n_size * m_size * k_size) * 2 / 1e9
time_ms = evaluator(a, b, c).mean * 1e3
print("%f ms, %f GOPS" % (time_ms, gflops / (time_ms / 1e3)))
@tvm.testing.requires_hexagon
def test_vrmpy_dense(hexagon_launcher):
"""Test vector reduce muliply dense."""
if hexagon_launcher.is_simulator():
pytest.skip(msg="Tuning on simulator not supported.")
do_tune = True
m_size, n_size, k_size = 128, 768, 768
workload = te.create_prim_func(dense_compute(m_size, n_size, k_size))
if not do_tune:
ir_module = tvm.IRModule({"main": workload})
sch = tvm.tir.Schedule(ir_module)
block = sch.get_block("compute")
schedule_dense(sch, block, m_size, do_tune)
else:
with tempfile.TemporaryDirectory() as work_dir:
def schedule_dense_for_tune(sch):
block = sch.get_block("compute")
return schedule_dense(sch, block, None, True)
target = get_hexagon_target("v69")
database = ms.tir_integration.tune_tir(
mod=workload,
target=target,
work_dir=work_dir,
max_trials_global=8,
space=ms.space_generator.ScheduleFn(
schedule_dense_for_tune,
sch_rules=[],
postprocs=[],
mutator_probs={},
),
strategy="replay-trace",
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=10),
)
sch = ms.tir_integration.compile_tir(database, workload, target)
with hexagon_launcher.create_session() as session:
verify_dense(sch, get_hexagon_target("v68"), m_size, n_size, k_size, session)
# This is an example of a schedule found by vrmpy auto tensorization.
# It gets 440 GFLOPS on SD888.
@tvm.script.ir_module
class ModuleVRMPYAutoTensorize:
"""Vector Reduce Multimply auto tensorize test class."""
# pylint: disable=no-self-argument
@T.prim_func
def main( # type: ignore
X: T.Buffer[(128, 768), "uint8"], # type: ignore
packed_width: T.Buffer[(24, 192, 32, 4), "uint8"], # type: ignore
compute: T.Buffer[(128, 768), "int32"], # type: ignore
) -> None:
# pylint: disable=missing-function-docstring
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0_0_i1_0_0_fused in T.parallel(
512, annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}
):
for i0_1_init, i1_0_1_init, i0_2_init, i1_0_2_init in T.grid(2, 3, 1, 1):
with T.block("compute_o_init"):
i = T.axis.spatial(128, i0_0_i1_0_0_fused // 8 * 2 + i0_1_init + i0_2_init)
j_o = T.axis.spatial(24, i1_0_2_init + i0_0_i1_0_0_fused % 8 * 3 + i1_0_1_init)
T.reads()
T.writes(compute[i, j_o * 32 : j_o * 32 + 32]) # type: ignore
for i1_1 in T.vectorized(32):
with T.block("compute_init"):
j_i_init = T.axis.spatial(32, i1_1)
T.reads()
T.writes(compute[i, j_o * 32 + j_i_init])
compute[i, j_o * 32 + j_i_init] = 0 # type: ignore
for i2_0_0, i0_1, i1_0_1, i2_0_1, i0_2, i1_0_2 in T.grid(32, 2, 3, 6, 1, 1):
with T.block("compute_o_update"):
i = T.axis.spatial(128, i0_0_i1_0_0_fused // 8 * 2 + i0_1 + i0_2)
j_o = T.axis.spatial(24, i1_0_2 + i0_0_i1_0_0_fused % 8 * 3 + i1_0_1)
k_o = T.axis.reduce(192, i2_0_0 * 6 + i2_0_1)
T.reads(
compute[i, j_o * 32 : j_o * 32 + 32], # type: ignore
X[i, k_o * 4 : k_o * 4 + 4], # type: ignore
packed_width[j_o, k_o, 0:32, 0:4], # type: ignore
)
T.writes(compute[i, j_o * 32 : j_o * 32 + 32]) # type: ignore
a_buffer = T.match_buffer(
X[i, k_o * 4 : k_o * 4 + 4],
[4],
dtype="uint8",
offset_factor=1, # type: ignore
)
b_buffer = T.match_buffer(
packed_width[j_o, k_o, 0:32, 0:4], [32, 4], dtype="uint8", offset_factor=1
)
c_buffer = T.match_buffer(
compute[i, j_o * 32 : j_o * 32 + 32],
[32],
dtype="int32",
offset_factor=1, # type: ignore
)
a_u8x4: T.uint8x4 = a_buffer[0:4] # type: ignore
a_i32: T.int32 = T.reinterpret(a_u8x4, dtype="int32") # type: ignore
b_i32x32: T.int32x32 = T.reinterpret(
b_buffer[0, 0:128], dtype="int32x32"
) # type: ignore
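                    # 4390 is a hard-coded LLVM intrinsic id (presumably
                    # llvm.hexagon.V6.vrmpyubv.acc.128B for the LLVM build this trace was
                    # captured with); it multiply-accumulates a_i32 against b_i32x32 into c_buffer.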
c_buffer[0:32] = T.call_llvm_pure_intrin( # type: ignore
4390, T.uint32(3), c_buffer[0:32], b_i32x32, a_i32, dtype="int32x32"
)
@tvm.testing.requires_hexagon
def test_vrmpy_dense_auto_tensorize(hexagon_launcher):
"""Test VRMPY dense operator."""
if hexagon_launcher.is_simulator():
pytest.skip(msg="Tuning on simulator not supported.")
m_size, n_size, k_size = 128, 768, 768
workload = te.create_prim_func(dense_compute(m_size, n_size, k_size))
sch_rules = [
schedule_rule.MultiLevelTilingWithIntrin(
VRMPY_u8u8i32_INTRIN,
structure="SRSRS",
tile_binds=None,
max_innermost_factor=64,
vector_load_lens=None,
reuse_read=None,
reuse_write=schedule_rule.ReuseType(
req="may",
levels=[1, 2],
scope="global",
),
),
schedule_rule.ParallelizeVectorizeUnroll(
max_jobs_per_core=16,
max_vectorize_extent=128,
unroll_max_steps=[0, 16, 64, 512],
unroll_explicit=True,
),
]
postprocs = [
postproc.RewriteParallelVectorizeUnroll(),
postproc.RewriteReductionBlock(),
postproc.RewriteTensorize(vectorize_init_loop=True),
]
    # Set this to False to skip tuning and run the best previously found schedule instead.
run_tuning = True
if run_tuning:
with tempfile.TemporaryDirectory() as work_dir:
target = get_hexagon_target("v68")
database = ms.tir_integration.tune_tir(
mod=workload,
target=target,
max_trials_global=8,
num_trials_per_iter=8,
work_dir=work_dir,
space=ms.space_generator.PostOrderApply(
f_block_filter=None,
sch_rules=sch_rules,
postprocs=postprocs,
mutator_probs={},
),
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=10),
)
sch = ms.tir_integration.compile_tir(database, workload, target)
else:
sch = tvm.tir.Schedule(ModuleVRMPYAutoTensorize, debug_mask="all")
with hexagon_launcher.create_session() as session:
verify_dense(sch, get_hexagon_target("v68"), m_size, n_size, k_size, session)
@tvm.testing.requires_hexagon
def test_conv2d_relay_auto_schedule(hexagon_launcher):
"""Test conv2d using auto schedule."""
if hexagon_launcher.is_simulator():
pytest.skip(msg="Tuning on simulator not supported.")
i_size, o_size, h_size, w_size = 64, 64, 56, 56
k_height_size = k_width_size = 3
strides = (1, 1)
padding = (1, 1)
d_shape = (1, h_size, w_size, i_size)
w_shape = (k_height_size, k_width_size, i_size, o_size)
bias_shape = (1, 1, 1, w_shape[3])
out_channel = w_shape[3]
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
bias = relay.var("bias", shape=bias_shape, dtype="float16")
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=(k_height_size, k_width_size),
channels=out_channel,
padding=padding,
strides=strides,
out_dtype="float16",
data_layout="NHWC",
kernel_layout="HWIO",
)
mod = tvm.IRModule.from_expr(conv2d + bias)
mod = mod.with_attr("executor", relay.backend.Executor("graph", {"link-params": True}))
data_np = np.random.randn(*d_shape).astype("float16")
weight_np = np.random.randn(*w_shape).astype("float16")
bias_np = np.random.randn(*bias_shape).astype("float16")
params = {"weight": weight_np, "bias": bias_np}
ref = (
relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
.evaluate()(*[data_np, weight_np, bias_np])
.numpy()
)
with tempfile.TemporaryDirectory() as work_dir:
target = get_hexagon_target("v69")
database = ms.relay_integration.tune_relay(
mod=mod,
params=params,
target=target,
max_trials_global=8,
strategy="replay-trace",
work_dir=work_dir,
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=20),
)
lib = ms.relay_integration.compile_relay(
database=database,
mod=mod,
params=params,
target=target,
)
with hexagon_launcher.create_session() as session:
rt_mod = session.get_executor_from_factory(lib)
rt_mod.set_input("data", data_np)
rt_mod.run()
out = rt_mod.get_output(0).numpy()
# Fairly loose check since fp16 results between x86 and Hexagon have
# non-trivial difference.
assert np.mean(np.abs(ref - out)) < 0.5
@tvm.testing.requires_hexagon
def test_dense_relay_auto_schedule(hexagon_launcher):
"""
This is for testing RewriteLayout postproc. Without this postproc,
dense on Hexagon is extremely slow.
"""
if hexagon_launcher.is_simulator():
pytest.skip(msg="Tuning on simulator not supported.")
target_hexagon = tvm.target.hexagon("v69")
target = tvm.target.Target(target_hexagon, host=target_hexagon)
data_shape = (128, 128)
weight_shape = (128, 128)
data = relay.var("data", shape=data_shape, dtype="float16")
weight = relay.var("weight", shape=weight_shape, dtype="float16")
dense = relay.nn.dense(data, weight)
mod = tvm.IRModule.from_expr(dense)
mod = mod.with_attr("executor", relay.backend.Executor("graph", {"link-params": True}))
    weight_np = np.random.randn(*weight_shape).astype("float16")
    data_np = np.random.randn(*data_shape).astype("float16")
params = {"weight": weight_np}
ref = np.dot(data_np, weight_np.transpose())
with tempfile.TemporaryDirectory() as work_dir:
target = get_hexagon_target("v69")
database = ms.relay_integration.tune_relay(
mod=mod,
params=params,
target=target,
max_trials_global=8,
strategy="replay-trace",
work_dir=work_dir,
builder=get_hexagon_local_builder(),
runner=get_hexagon_rpc_runner(hexagon_launcher, number=20),
)
lib = ms.relay_integration.compile_relay(
database=database,
mod=mod,
params=params,
target=target,
)
with hexagon_launcher.create_session() as session:
rt_mod = session.get_executor_from_factory(lib)
rt_mod.set_input("data", data_np)
rt_mod.run()
out = rt_mod.get_output(0).numpy()
# Fairly loose check since fp16 results between x86 and Hexagon have
# non-trivial difference.
assert np.mean(np.abs(ref - out)) < 0.1
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test mobilenet model with aot executor"""
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.contrib.hexagon.session import Session
from tvm.relay.backend import Executor, Runtime
def get_mobilenet():
"""Download and import mobilenet model with ONNX"""
import onnx # pylint: disable=import-outside-toplevel
model_url = "https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx" # pylint: disable=line-too-long
model_path = tvm.contrib.download.download_testdata(
model_url, "mobilenetv2-7.onnx", module="onnx"
)
return onnx.load(model_path)
@pytest.mark.parametrize("enable_usmp", [False, True])
@tvm.testing.requires_hexagon
def test_mobilenet_aot(hexagon_session: Session, aot_host_target, aot_target, enable_usmp):
"""Test mobilenet with aot executor"""
dtype = "float32"
onnx_model = get_mobilenet()
data_in = np.random.rand(1, 3, 224, 224).astype(dtype=dtype)
input_name = "data"
shape_dict = {input_name: data_in.shape}
relay_mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
inputs = {input_name: data_in}
target_llvm = tvm.target.Target("llvm")
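    # "tir.usmp.enable" toggles the Unified Static Memory Planner, which pre-plans all
    # intermediate buffers at compile time; the test runs with USMP both off and on.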
config = {"tir.usmp.enable": enable_usmp}
with tvm.transform.PassContext(opt_level=3, config=config):
hexagon_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(aot_target, host=aot_host_target),
runtime=Runtime("cpp"),
executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
params=params,
)
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)
hexagon_mod.set_input(**inputs)
hexagon_mod.run()
hexagon_output = hexagon_mod.get_output(0).numpy()
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=Runtime("cpp"),
executor=Executor("aot", {"interface-api": "packed"}),
params=params,
)
llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_mod.set_input(**inputs)
llvm_mod.run()
expected_output = llvm_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_parallel_hvx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test parallelized HVX workloads and compare them against single-thread baselines.
"""
import numpy as np
from numpy.random import default_rng
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test {} with {} operations... \n"
" -Single Thread: {} ms \n"
" -Parallel: {} ms\n -Speedup: {}x\n"
)
def get_vrmpy_shape_dtypes(operations):
return ((operations, 128), "uint8", (operations, 128), "uint8", (operations, 32), "int32")
def get_vmpy_vadd_shape_dtype(operations):
return ((operations, 128), "uint8", (operations, 128), "uint8", (operations, 128), "int16")
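# The widening vmpy/vadd reference results below model the HVX de-interleaving: results
# from even input lanes land in output lanes 0..63 and results from odd input lanes in
# lanes 64..127.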
def vmpy_expected_producer(shape, a, b):
expected = np.zeros(shape, dtype="int16")
for n in range(shape[0]):
for i in range(0, 128, 2):
expected[n, i // 2] = np.int16(a[n, i]) * np.int16(b[n, i])
for i in range(1, 128, 2):
expected[n, i // 2 + 64] = np.int16(a[n, i]) * np.int16(b[n, i])
return expected
def vadd_expected_producer(shape, a, b):
expected = np.zeros(shape, dtype="int16")
for n in range(shape[0]):
for i in range(0, 128, 2):
expected[n, i // 2] = np.int16(a[n, i]) + np.int16(b[n, i])
for i in range(1, 128, 2):
expected[n, i // 2 + 64] = np.int16(a[n, i]) + np.int16(b[n, i])
return expected
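# vrmpy ("vector reduce multiply") reduces groups of four adjacent uint8 lanes: each of
# the 32 int32 output lanes accumulates the dot product of a[4*i .. 4*i+3] and b[4*i .. 4*i+3].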
def vrmpy_expected_producer(shape, a, b):
expected = np.zeros(shape, dtype="int32")
for n in range(shape[0]):
for i in range(32):
for r_ind in range(4):
expected[n, i] = expected[n, i] + np.uint32(a[n, i * 4 + r_ind]) * np.uint32(
b[n, i * 4 + r_ind]
)
return expected
def get_vmpy_operator(operations):
"""Generate vector multiply operator"""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
c_buffer = T.match_buffer(c, [operations, 128], dtype="int16")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind, T.ramp(0, 1, 128)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vmpybusv.128B"),
T.uint32(2),
T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int16x128",
)
return operator
def get_vadd_operator(operations):
"""Generate vadd operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
c_buffer = T.match_buffer(c, [operations, 128], dtype="int16")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind, T.ramp(0, 1, 128)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vaddubh.128B"),
T.uint32(2),
T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int16x128",
)
return operator
def get_vrmpy_operator(operations):
"""Generate vrmpy operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8")
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8")
c_buffer = T.match_buffer(c, [operations, 32], dtype="int32")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind, T.ramp(0, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int32x32",
)
return operator
def evaluate(hexagon_session, shape_dtypes, expected_output_producer, sch):
"""Evaluate schedule."""
a_shape, a_dtype, b_shape, b_dtype, c_shape, c_dtype = shape_dtypes
func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v68"))
module = hexagon_session.load_module(func_tir)
rng = default_rng()
a = rng.integers(0, 16, a_shape, dtype=a_dtype)
b = rng.integers(0, 16, b_shape, dtype=b_dtype)
c = np.zeros(c_shape, dtype=c_dtype)
a_hexagon = tvm.runtime.ndarray.array(a, device=hexagon_session.device)
b_hexagon = tvm.runtime.ndarray.array(b, device=hexagon_session.device)
c_hexagon = tvm.runtime.ndarray.array(c, device=hexagon_session.device)
# These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
number = 1
repeat = 1
timer = module.time_evaluator(
"__tvm_main__", hexagon_session.device, number=number, repeat=repeat
)
runtime = timer(a_hexagon, b_hexagon, c_hexagon)
tvm.testing.assert_allclose(c_hexagon.asnumpy(), expected_output_producer(c_shape, a, b))
return round(runtime.mean * 1000, 6)
class TestMatMulVec:
"""MatMul test class."""
(
operation_name,
operator_producer,
shape_dtypes_producer,
expected_output_producer,
) = tvm.testing.parameters(
("vrmpy", get_vrmpy_operator, get_vrmpy_shape_dtypes, vrmpy_expected_producer),
("vmpy", get_vmpy_operator, get_vmpy_vadd_shape_dtype, vmpy_expected_producer),
("vadd", get_vadd_operator, get_vmpy_vadd_shape_dtype, vadd_expected_producer),
)
    # Experimentally the best split factor, but all multiples of 4 perform well.
    # This is because there are 4 HVX units available on the device and pipelining
    # works best when the work is split across a multiple of the available HVX units.
split_factor = tvm.testing.parameter(4)
# Removed most of these to speedup CI.
operation_count = tvm.testing.parameter(
128,
# 256,
# 512,
# Single thread runs faster since L2 cache can handle the entire request quickly
# 1024,
# 2048,
        # Significant performance degradation once the inputs and outputs cannot all fit in L2
# 4096,
# 8192,
# 16384,
)
@tvm.testing.requires_hexagon
def test(
self,
hexagon_session,
operation_count,
operation_name,
operator_producer,
shape_dtypes_producer,
expected_output_producer,
split_factor,
):
"""Test function handler."""
sch = tvm.tir.Schedule(operator_producer(operation_count))
single_thread_runtime = evaluate(
hexagon_session, shape_dtypes_producer(operation_count), expected_output_producer, sch
)
sch = tvm.tir.Schedule(operator_producer(operation_count))
block = sch.get_block("c_buffer")
b = sch.get_loops(block)
b_output, _ = sch.split(b[0], factors=[split_factor, None])
sch.parallel(b_output)
parallel_runtime = evaluate(
hexagon_session, shape_dtypes_producer(operation_count), expected_output_producer, sch
)
speedup = round(single_thread_runtime / parallel_runtime, 2)
print(
TEST_OUTPUT_TEMPLATE.format(
operation_name, operation_count, single_thread_runtime, parallel_runtime, speedup
)
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_parallel_hvx_load_vtcm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test different strategies for loading data into vtcm before running HVX workloads. """
import numpy as np
import tvm
from numpy.random import default_rng
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test with {} MB of data to load... \n"
" -No VTCM: {} Gops \n -Basic VTCM: {} Gops \n"
" -Vectorized: {} Gops\n -Vectorized and"
" Parallelized: {} Gops\n -Preallocated and Vectorized: {} Gops\n"
" -Preallocated, Vectorized, and Parallelized: {} Gops\n"
" -Single DMA: {} Gops\n -Preloaded: {} Gops\n"
)
def apply_parallel_unroll_vectorize(sch, blocks, outer_split, unroll_split, vector_split):
"""Apply parallel unroll vectorized."""
for block in blocks:
vb_index, vi_index = sch.get_loops(block)
v = sch.fuse(vb_index, vi_index)
vbo, vbi, vio, vii = sch.split( # pylint: disable=unused-variable
v, factors=[outer_split, None, unroll_split, vector_split]
) # pylint: disable=unused-variable
sch.vectorize(vii)
sch.unroll(vio)
sch.parallel(vbo)
return sch
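# Worked example with the parameters used below (operations=1024, outer_split=4,
# unroll_split=8, vector_split=64): the fused 1024*128-element copy loop is split into
# [4, 64, 8, 64], with the outer 4 parallelized, the 8 unrolled and the inner 64 vectorized.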
def apply_unroll_vectorize(sch, blocks, unroll_split, vector_split):
for block in blocks:
vb_index, vi_index = sch.get_loops(block)
v = sch.fuse(vb_index, vi_index)
_, vio, vii = sch.split(v, factors=[None, unroll_split, vector_split])
sch.vectorize(vii)
sch.unroll(vio)
return sch
def apply_vrmpy_parallelization(sch):
block = sch.get_block("c_buffer")
b = sch.get_loops(block)
b_outer, _ = sch.split(b[0], factors=[4, None])
sch.parallel(b_outer)
return sch
def apply_vtcm_cache_read_write(sch):
block = sch.get_block("c_buffer")
sch.cache_read(block, 0, "global.vtcm")
sch.cache_read(block, 1, "global.vtcm")
sch.cache_write(block, 0, "global.vtcm")
return sch
def vrmpy(operations):
"""Generate VRMPY operator"""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128)
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128)
c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128)
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind, T.ramp(0, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(a_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
T.reinterpret(b_buffer[vn_ind, T.ramp(0, 1, 128)], dtype="int32x32"),
dtype="int32x32",
)
return operator
def preloaded_vrmpy(operations):
"""Generate preloaded VRMPY operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(
a,
[T.cast(operations, "int32") * 128],
dtype="uint8",
align=128,
scope="global.vtcm",
)
b_buffer = T.match_buffer(
b,
[T.cast(operations, "int32") * 128],
dtype="uint8",
align=128,
scope="global.vtcm",
)
c_buffer = T.match_buffer(
c, [T.cast(operations, "int32") * 32], dtype="int32", align=128, scope="global.vtcm"
)
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(
a_buffer[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)], dtype="int32x32"
),
T.reinterpret(
b_buffer[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)], dtype="int32x32"
),
dtype="int32x32",
)
return operator
def preallocated_vrmpy(operations):
"""Generate preallocated VRMPY operator."""
size = operations * 128
out_size = operations * 32
@T.prim_func
def operator(
a: T.handle, b: T.handle, c: T.handle, a_v: T.handle, b_v: T.handle, c_v: T.handle
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128, scope="global")
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128, scope="global")
c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128, scope="global")
a_global_vtcm = T.match_buffer(a_v, [size], dtype="uint8", align=128, scope="global.vtcm")
b_global_vtcm = T.match_buffer(b_v, [size], dtype="uint8", align=128, scope="global.vtcm")
c_global_vtcm = T.match_buffer(
c_v, [out_size], dtype="int32", align=128, scope="global.vtcm"
)
for n, i in T.grid(operations, 128):
with T.block("a_buffer_global.vtcm"):
vn_ind, vi_index = T.axis.remap("SS", [n, i])
a_global_vtcm[vn_ind * 128 + vi_index] = a_buffer[vn_ind, vi_index]
for n, i in T.grid(operations, 128):
with T.block("b_buffer_global.vtcm"):
vn_ind, vi_index = T.axis.remap("SS", [n, i])
b_global_vtcm[vn_ind * 128 + vi_index] = b_buffer[vn_ind, vi_index]
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(
a_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
dtype="int32x32",
),
T.reinterpret(
b_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
dtype="int32x32",
),
dtype="int32x32",
)
for n, i in T.grid(operations, 32):
with T.block("c_buffer_global.vtcm"):
vn_ind, vi_index = T.axis.remap("SS", [n, i])
c_buffer[vn_ind, vi_index] = c_global_vtcm[vn_ind * 32 + vi_index]
return operator
def preallocated_single_dma_vrmpy(operations):
"""Generate preallocated single DMA VRMPY operator."""
size = operations * 128
out_size = operations * 32
@T.prim_func
def operator(
a: T.handle,
b: T.handle,
c: T.handle,
a_v: T.handle,
b_v: T.handle,
c_v: T.handle,
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations, 128], dtype="uint8", align=128, scope="global")
b_buffer = T.match_buffer(b, [operations, 128], dtype="uint8", align=128, scope="global")
c_buffer = T.match_buffer(c, [operations, 32], dtype="int32", align=128, scope="global")
a_global_vtcm = T.match_buffer(a_v, [size], dtype="uint8", align=128, scope="global.vtcm")
b_global_vtcm = T.match_buffer(b_v, [size], dtype="uint8", align=128, scope="global.vtcm")
c_global_vtcm = T.match_buffer(
c_v, [out_size], dtype="int32", align=128, scope="global.vtcm"
)
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
a_global_vtcm.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
a_global_vtcm.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
a_buffer.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
a_buffer.dtype,
0,
dtype="handle",
),
T.cast(size, dtype="int"),
dtype="int32",
)
)
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
b_global_vtcm.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
b_global_vtcm.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
b_buffer.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
b_buffer.dtype,
0,
dtype="handle",
),
T.cast(size, dtype="int"),
dtype="int32",
)
)
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 32, 1, 32)] = T.call_llvm_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyubv.128B"),
T.uint32(2),
T.reinterpret(
a_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
dtype="int32x32",
),
T.reinterpret(
b_global_vtcm[T.ramp(T.cast(vn_ind, "int32") * 128, 1, 128)],
dtype="int32x32",
),
dtype="int32x32",
)
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
c_buffer.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
c_buffer.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
c_global_vtcm.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
c_global_vtcm.dtype,
0,
dtype="handle",
),
T.cast(size, dtype="int"),
dtype="int32",
)
)
return operator
def evaluate_result(operations, tag, time, result, expected_output):
transfer_mb = round(3 * operations * 128 / 1e6, 2)
gops = round(operations * 128 * 3 / time.mean / 1e9, 3)
mean_ms = round(time.mean * 1000, 6)
print(f"\ntest_{transfer_mb}MB_{tag} took {mean_ms} ms @ GOPS: {gops}")
tvm.testing.assert_allclose(result, expected_output)
def setup_and_run(hexagon_session, sch, a, b, c, operations, mem_scope="global"):
"""Setup and run operator."""
func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v69"))
module = hexagon_session.load_module(func_tir)
a_hexagon = tvm.runtime.ndarray.array(a, device=hexagon_session.device, mem_scope=mem_scope)
b_hexagon = tvm.runtime.ndarray.array(b, device=hexagon_session.device, mem_scope=mem_scope)
c_hexagon = tvm.runtime.ndarray.array(c, device=hexagon_session.device, mem_scope=mem_scope)
# These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
number = 1
repeat = 1
timer = module.time_evaluator(
"__tvm_main__", hexagon_session.device, number=number, repeat=repeat
)
time = timer(a_hexagon, b_hexagon, c_hexagon)
gops = round(operations * 128 * 3 / time.mean / 1e9, 4)
return gops, c_hexagon.asnumpy()
def setup_and_run_preallocated(hexagon_session, sch, a, b, c, operations):
"""Setup and run for preallocated."""
func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v69"))
module = hexagon_session.load_module(func_tir)
a_vtcm = np.zeros((a.size), dtype="uint8")
b_vtcm = np.zeros((b.size), dtype="uint8")
c_vtcm = np.zeros((c.size), dtype="int32")
a_hexagon = tvm.runtime.ndarray.array(a, device=hexagon_session.device, mem_scope="global")
b_hexagon = tvm.runtime.ndarray.array(b, device=hexagon_session.device, mem_scope="global")
c_hexagon = tvm.runtime.ndarray.array(c, device=hexagon_session.device, mem_scope="global")
a_vtcm_hexagon = tvm.runtime.ndarray.array(
a_vtcm, device=hexagon_session.device, mem_scope="global.vtcm"
)
b_vtcm_hexagon = tvm.runtime.ndarray.array(
b_vtcm, device=hexagon_session.device, mem_scope="global.vtcm"
)
c_vtcm_hexagon = tvm.runtime.ndarray.array(
c_vtcm, device=hexagon_session.device, mem_scope="global.vtcm"
)
# These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
number = 1
repeat = 1
timer = module.time_evaluator(
"__tvm_main__", hexagon_session.device, number=number, repeat=repeat
)
time = timer(a_hexagon, b_hexagon, c_hexagon, a_vtcm_hexagon, b_vtcm_hexagon, c_vtcm_hexagon)
gops = round(operations * 128 * 3 / time.mean / 1e9, 4)
return gops, c_hexagon.asnumpy()
class TestMatMulVec:
"""MatMul test class."""
# Removed most of these to speedup CI.
operations = tvm.testing.parameter(
1024,
# 2048,
# 4096,
# 5 * 2048, # 3.93MB of total transfer
        # 16384,  # Only works on 8Gen1 HDKs
        # 5 * 4096,  # 7.86MB of total transfer. Only works on 8Gen1 HDKs
)
# Experimentally best configurations for the memcopy
outer_split = tvm.testing.parameter(4)
unroll_split = tvm.testing.parameter(8)
vector_split = tvm.testing.parameter(64)
c_vector_split = tvm.testing.parameter(16)
c_vector_split_unallocated = tvm.testing.parameter(8)
@tvm.testing.fixture
def input_a(self, operations):
return default_rng().integers(0, 16, (operations, 128), dtype="uint8")
@tvm.testing.fixture
def input_b(self, operations):
return default_rng().integers(0, 16, (operations, 128), dtype="uint8")
@tvm.testing.fixture
def input_c(self, operations):
return np.zeros((operations, 32), dtype="int32")
@tvm.testing.fixture
def expected_output(self, operations, input_a, input_b, input_c):
expected_output = np.zeros(input_c.shape, dtype="int32")
for n in range(operations):
for i in range(32):
for r_ind in range(4): # pylint: disable=unused-variable
expected_output[n, i] = expected_output[n, i] + np.uint32(
input_a[n, i * 4 + r_ind]
) * np.uint32(input_b[n, i * 4 + r_ind])
return expected_output
@tvm.testing.requires_hexagon
def test_loading_vtcm_for_vrmpy(
self,
hexagon_session,
operations,
input_a,
input_b,
input_c,
expected_output,
outer_split,
unroll_split,
vector_split,
c_vector_split,
c_vector_split_unallocated,
):
"""Load VTCM for VRMPY operator test."""
# Run parallel vrmpy without loading to VTCM.
sch = tvm.tir.Schedule(vrmpy(operations))
sch = apply_vrmpy_parallelization(sch)
base_runtime, result = setup_and_run(
hexagon_session, sch, input_a, input_b, input_c, operations
)
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with basic memory loads to VTCM.
sch = tvm.tir.Schedule(vrmpy(operations))
sch = apply_vtcm_cache_read_write(sch)
sch = apply_vrmpy_parallelization(sch)
basic_load_runtime, result = setup_and_run(
hexagon_session, sch, input_a, input_b, input_c, operations
)
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with vectorized memory loads to VTCM.
sch = tvm.tir.Schedule(vrmpy(operations))
sch = apply_vtcm_cache_read_write(sch)
sch = apply_vrmpy_parallelization(sch)
sch = apply_unroll_vectorize(
sch,
[sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
unroll_split,
vector_split,
)
sch = apply_unroll_vectorize(
sch, [sch.get_block("c_buffer_global.vtcm")], unroll_split, c_vector_split_unallocated
)
vectorized_runtime, result = setup_and_run(
hexagon_session, sch, input_a, input_b, input_c, operations
)
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with vectorized and parallelized memory loads to VTCM.
sch = tvm.tir.Schedule(vrmpy(operations))
sch = apply_vtcm_cache_read_write(sch)
sch = apply_vrmpy_parallelization(sch)
sch = apply_parallel_unroll_vectorize(
sch,
[sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
outer_split,
unroll_split,
vector_split,
)
sch = apply_parallel_unroll_vectorize(
sch,
[sch.get_block("c_buffer_global.vtcm")],
outer_split,
unroll_split,
c_vector_split_unallocated,
)
vectorized_parallelized_runtime, result = setup_and_run(
hexagon_session, sch, input_a, input_b, input_c, operations
)
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with preallocated and vectorized memory loads to VTCM.
sch = tvm.tir.Schedule(preallocated_vrmpy(operations))
sch = apply_vrmpy_parallelization(sch)
sch = apply_unroll_vectorize(
sch,
[sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
unroll_split,
vector_split,
)
sch = apply_unroll_vectorize(
sch, [sch.get_block("c_buffer_global.vtcm")], unroll_split, c_vector_split
)
preallocated_vectorized_runtime, result = setup_and_run_preallocated(
hexagon_session, sch, input_a, input_b, input_c, operations
)
result = result.reshape((operations, 32))
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with preallocated, vectorized, and parallelized memory loads to VTCM.
sch = tvm.tir.Schedule(preallocated_vrmpy(operations))
sch = apply_vrmpy_parallelization(sch)
sch = apply_parallel_unroll_vectorize(
sch,
[sch.get_block("a_buffer_global.vtcm"), sch.get_block("b_buffer_global.vtcm")],
outer_split,
unroll_split,
vector_split,
)
sch = apply_parallel_unroll_vectorize(
sch, [sch.get_block("c_buffer_global.vtcm")], outer_split, unroll_split, c_vector_split
)
prealloc_vector_parallelized, result = setup_and_run_preallocated(
hexagon_session, sch, input_a, input_b, input_c, operations
)
result = result.reshape((operations, 32))
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with preallocated single dma memory load to VTCM.
sch = tvm.tir.Schedule(preallocated_single_dma_vrmpy(operations))
sch = apply_vrmpy_parallelization(sch)
single_dma_runtime, result = setup_and_run_preallocated(
hexagon_session, sch, input_a, input_b, input_c, operations
)
result = result.reshape((operations, 32))
tvm.testing.assert_allclose(result, expected_output)
# Run parallel vrmpy with data preloaded in VTCM.
sch = tvm.tir.Schedule(preloaded_vrmpy(operations))
sch = apply_vrmpy_parallelization(sch)
input_a = input_a.reshape(operations * 128)
input_b = input_b.reshape(operations * 128)
input_c = input_c.reshape(operations * 32)
preloaded_runtime, result = setup_and_run(
hexagon_session, sch, input_a, input_b, input_c, operations, "global.vtcm"
)
result = result.reshape((operations, 32))
tvm.testing.assert_allclose(result, expected_output)
transfer_mb = round(3 * operations * 128 / 1e6, 2)
print(
TEST_OUTPUT_TEMPLATE.format(
transfer_mb,
base_runtime,
basic_load_runtime,
vectorized_runtime,
vectorized_parallelized_runtime,
preallocated_vectorized_runtime,
prealloc_vector_parallelized,
single_dma_runtime,
preloaded_runtime,
)
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_parallel_scalar.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test parallelism for multiple different scalar workloads. """
import numpy as np
from numpy.random import default_rng
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
TEST_OUTPUT_TEMPLATE = (
"Test {} with {} operations... \n"
" -Single Thread: {} ms \n"
" -Parallel: {} ms\n -Speedup: {}x\n"
)
def get_add_operator(operations):
"""Generate add operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations], dtype="float64")
b_buffer = T.match_buffer(b, [operations], dtype="float64")
c_buffer = T.match_buffer(c, [operations], dtype="float64")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind] = a_buffer[vn_ind] + b_buffer[vn_ind]
return operator
def get_multiply_operator(operations):
"""Generate multiply operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations], dtype="float64")
b_buffer = T.match_buffer(b, [operations], dtype="float64")
c_buffer = T.match_buffer(c, [operations], dtype="float64")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind] = a_buffer[vn_ind] * b_buffer[vn_ind]
return operator
def get_sub_operator(operations):
"""Generate subtract operator."""
@T.prim_func
def operator(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
a_buffer = T.match_buffer(a, [operations], dtype="float64")
b_buffer = T.match_buffer(b, [operations], dtype="float64")
c_buffer = T.match_buffer(c, [operations], dtype="float64")
for n in T.grid(operations):
with T.block("c_buffer"):
vn_ind = T.axis.remap("S", [n])
c_buffer[vn_ind] = a_buffer[vn_ind] - b_buffer[vn_ind]
return operator
def evaluate(hexagon_session, operations, expected, sch):
"""Evalute schedule."""
shape = operations
dtype = "float64"
func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v68"))
module = hexagon_session.load_module(func_tir)
rng = default_rng()
a = rng.random(shape, dtype=dtype)
b = rng.random(shape, dtype=dtype)
c = np.zeros(shape, dtype=dtype)
a_hexagon = tvm.runtime.ndarray.array(a, device=hexagon_session.device)
b_hexagon = tvm.runtime.ndarray.array(b, device=hexagon_session.device)
c_hexagon = tvm.runtime.ndarray.array(c, device=hexagon_session.device)
# These are reduced for CI but number=100 and repeat=10 does a good job of removing noise.
number = 1
repeat = 1
timer = module.time_evaluator(
"__tvm_main__", hexagon_session.device, number=number, repeat=repeat
)
runtime = timer(a_hexagon, b_hexagon, c_hexagon)
tvm.testing.assert_allclose(c_hexagon.asnumpy(), expected(a, b))
return round(runtime.mean * 1000, 6)
class TestMatMulVec:
"""MatMul test class."""
(operation_name, operator_producer, expected_output_producer,) = tvm.testing.parameters(
("add", get_add_operator, (lambda a, b: a + b)),
("mul", get_multiply_operator, (lambda a, b: a * b)),
("sub", get_sub_operator, (lambda a, b: a - b)),
)
# Removed most of these to speedup CI.
operations = tvm.testing.parameter(
128,
# 256,
# 512,
# Single thread runs faster since L2 cache can handle the entire request quickly
# 1024,
# 2048,
        # Significant performance degradation once the inputs and outputs cannot all fit in L2
# 4096,
# 8192,
# 16384,
)
split_factor = tvm.testing.parameter(4)
@tvm.testing.requires_hexagon
def test_add(
self,
hexagon_session,
operation_name,
operator_producer,
expected_output_producer,
operations,
split_factor,
):
"""Test Add operator."""
sch = tvm.tir.Schedule(operator_producer(operations))
single_thread_runtime = evaluate(hexagon_session, operations, expected_output_producer, sch)
sch = tvm.tir.Schedule(operator_producer(operations))
block = sch.get_block("c_buffer")
b = sch.get_loops(block)
b_output, _ = sch.split(b[0], factors=[split_factor, None])
sch.parallel(b_output)
parallel_runtime = evaluate(hexagon_session, operations, expected_output_producer, sch)
speedup = round(single_thread_runtime / parallel_runtime, 2)
print(
TEST_OUTPUT_TEMPLATE.format(
operation_name, operations, single_thread_runtime, parallel_runtime, speedup
)
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_run_unit_tests.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" capture gtest output and return over FFI """
import tvm
from tvm.contrib.hexagon.session import Session
# use pytest -sv to observe gtest output
# use --gtest_args to pass arguments to gtest
# for example to run all "foo" tests twice and observe gtest output run
# pytest -sv <this file> --gtests_args="--gtest_filter=*foo* --gtest_repeat=2"
@tvm.testing.requires_hexagon
def test_run_unit_tests(hexagon_session: Session, gtest_args):
"""Try running gtest unit tests and capture output and error code"""
try:
func = hexagon_session._rpc.get_function("hexagon.run_unit_tests")
    except Exception:
        print(
            (
                "This test requires TVM Runtime to be built with a Hexagon gtest "
                "version using the Hexagon API cmake flag "
                "-DUSE_HEXAGON_GTEST=/path/to/hexagon/sdk/utils/googletest/gtest"
)
)
raise
gtest_error_code_and_output = func(gtest_args)
gtest_error_code = int(gtest_error_code_and_output.splitlines()[0])
gtest_output = gtest_error_code_and_output.split("\n", 1)[-1]
print(gtest_output)
if gtest_error_code != 0:
raise RuntimeError(
f"Hexagon gtest retruned non-zero error code = {gtest_error_code}:\n{gtest_output}"
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_sigmoid.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sigmoid operator tests."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import tir
from tvm import topi
from tvm.contrib.hexagon import allocate_hexagon_array
from .infrastructure import get_hexagon_target
def sigmoid_compute(sigmoid_input):
return topi.sigmoid(sigmoid_input)
def sigmoid_stir_schedule(sigmoid_input, sigmoid_output):
sigmoid_func = te.create_prim_func([sigmoid_input, sigmoid_output])
sch = tir.Schedule(sigmoid_func, debug_mask="all")
block = sch.get_block("compute")
(n,) = sch.get_loops(block)
sch.vectorize(n)
return sch
class BaseSigmoid:
(in_shape, dtype, min_val, max_val,) = tvm.testing.parameters(
((64,), "float16", -8.0, 8.0),
((64,), "float16", -6.0, 7.0),
((64,), "float16", -10.0, 15.0),
((64,), "float16", -10.0, 0.0),
((64,), "float16", 0.0, 10.0),
)
class TestSigmoid(BaseSigmoid):
"""Sigmoid test class."""
@tvm.testing.fixture
def input_np(self, in_shape, dtype, min_val, max_val):
return np.random.uniform(low=min_val, high=max_val, size=in_shape).astype(dtype)
@tvm.testing.fixture
def ref_output_np(self, input_np):
output_np = 1 / (1 + np.exp(-input_np))
return output_np
@tvm.testing.requires_hexagon
def test_sigmoid(
self,
in_shape,
dtype,
input_np,
ref_output_np,
hexagon_session,
):
"""Sigmoid test."""
input_tensor = te.placeholder(in_shape, name="input_tensor", dtype=dtype)
output_tensor = sigmoid_compute(input_tensor)
tir_s = sigmoid_stir_schedule(input_tensor, output_tensor)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=input_np,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=ref_output_np.shape,
dtype=ref_output_np.dtype,
)
func_name = "sigmoid"
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(tir_s.mod, target=get_hexagon_target("v69"), name=func_name)
assert "hvx_sigmoid" in runtime_module.get_source("asm")
assert "vmin" in runtime_module.get_source("asm")
assert "vmax" in runtime_module.get_source("asm")
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
ref_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_software_pipeline_async.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Async software pipeline tests."""
import numpy as np
import tvm
from tvm import tir
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
def compute(comp_type, outer, inner, dtype):
"""Generate compute function."""
if comp_type == "single_input":
@T.prim_func
def a_plus_1_primfunc(
a_buffer: T.Buffer[(outer, inner), dtype], out: T.Buffer[(outer, inner), dtype]
):
for i in T.serial(outer):
for j in T.serial(inner):
with T.block("compute"):
with T.block():
out[i, j] = a_buffer[i, j] + T.cast(1, dtype)
return a_plus_1_primfunc
else:
@T.prim_func
def a_plus_b_plus_1_primfunc(
a_buffer: T.Buffer[(outer, inner), dtype],
b_buffer: T.Buffer[(outer, inner), dtype],
out: T.Buffer[(outer, inner), dtype],
):
for i in T.serial(outer):
for j in T.serial(inner):
with T.block("compute"):
with T.block():
out[i, j] = a_buffer[i, j] + b_buffer[i, j] + T.cast(1, dtype)
return a_plus_b_plus_1_primfunc
class TestAsyncSoftwarePipeline:
"""Async software pipeline test class."""
outer = tvm.testing.parameter(8, 16)
inner = tvm.testing.parameter(64, 128)
dtype = tvm.testing.parameter("uint8", "float16")
scope = tvm.testing.parameter("global", "global.vtcm")
# TODO(Joseph) Turn on "multi_input_diffQ" compute type once we have upstreamed
# changes in the InjectSoftwarePipeline pass to alleviate this restriction:
# 'a_buffer dependency on multiple async stages is not supported'
comp_type = tvm.testing.parameter("single_input", "multi_input_sameQ")
# TODO(Straw) Add back "cache_write" schedule type once we have upstreamed
# buffer dependency analysis in InjectSoftwarePipeline pass
    # to insert appropriate TIR "wait" attributes for this schedule
sched_type = tvm.testing.parameter("cache_read", "cache_read_write")
@tvm.testing.fixture
def data(self, comp_type, outer, inner, dtype):
out_np = np.random.uniform(low=0, high=128, size=(outer, inner)).astype(dtype)
a_np = np.random.uniform(low=0, high=128, size=(outer, inner)).astype(dtype)
if comp_type == "single_input":
return out_np, a_np
else:
b_np = np.random.uniform(low=0, high=128, size=(outer, inner)).astype(dtype)
return out_np, a_np, b_np
@tvm.testing.fixture
def verify(self, dtype):
def check(out, ref):
if "int" in dtype:
np.testing.assert_equal(out.numpy(), ref)
else:
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-3, atol=1e-3)
return check
@tvm.testing.fixture
def reference(self, comp_type):
"""Returns reference data."""
if comp_type == "single_input":
def a_plus_1_ref(a):
return a + 1
return a_plus_1_ref
else:
def a_plus_b_plus_1_ref(a, b):
return a + b + 1
return a_plus_b_plus_1_ref
@tvm.testing.fixture
def schedule(self, comp_type, sched_type, outer, inner, dtype, scope):
"""Generate schedule."""
sch = tir.Schedule(compute(comp_type, outer, inner, dtype))
compute_block = sch.get_block("compute")
i, _ = sch.get_loops(compute_block)
if "read" in sched_type:
cache_read_a = sch.cache_read(compute_block, 0, scope)
sch.compute_at(cache_read_a, i)
if "multi_input" in comp_type:
cache_read_b = sch.cache_read(compute_block, 1, scope)
sch.compute_at(cache_read_b, i)
if "write" in sched_type:
cache_write_out = sch.cache_write(compute_block, 0, scope)
sch.reverse_compute_at(cache_write_out, i)
if "read" in sched_type and "write" in sched_type:
if comp_type == "single_input":
sch.annotate(i, "software_pipeline_stage", [0, 1, 2])
sch.annotate(i, "software_pipeline_order", [0, 1, 2])
sch.annotate(i, "software_pipeline_async_stages", [0, 2])
elif comp_type == "multi_input_sameQ":
sch.annotate(i, "software_pipeline_stage", [0, 0, 1, 2])
sch.annotate(i, "software_pipeline_order", [0, 1, 2, 3])
sch.annotate(i, "software_pipeline_async_stages", [0, 2])
elif comp_type == "multi_input_diffQ":
sch.annotate(i, "software_pipeline_stage", [0, 1, 2, 3])
sch.annotate(i, "software_pipeline_order", [0, 1, 2, 3])
sch.annotate(i, "software_pipeline_async_stages", [0, 1, 2])
elif "read" in sched_type:
if comp_type == "single_input":
sch.annotate(i, "software_pipeline_stage", [0, 1])
sch.annotate(i, "software_pipeline_order", [0, 1])
sch.annotate(i, "software_pipeline_async_stages", [0])
elif comp_type == "multi_input_sameQ":
sch.annotate(i, "software_pipeline_stage", [0, 0, 1])
sch.annotate(i, "software_pipeline_order", [0, 1, 2])
sch.annotate(i, "software_pipeline_async_stages", [0])
elif comp_type == "multi_input_diffQ":
sch.annotate(i, "software_pipeline_stage", [0, 1, 2])
sch.annotate(i, "software_pipeline_order", [0, 1, 2])
sch.annotate(i, "software_pipeline_async_stages", [0, 1])
elif "write" in sched_type:
sch.annotate(i, "software_pipeline_stage", [0, 1])
sch.annotate(i, "software_pipeline_order", [0, 1])
sch.annotate(i, "software_pipeline_async_stages", [1])
return sch
@tvm.testing.requires_hexagon
def test_async_software_pipeline(
self, hexagon_launcher, comp_type, data, reference, schedule, verify
):
"""Async software pipeline test."""
out_np = data[0]
a_np = data[1]
if comp_type == "single_input":
ref = reference(a_np)
else:
b_np = data[2]
ref = reference(a_np, b_np)
with tvm.transform.PassContext(
config={"tir.use_async_copy": 1, "tir.merge_async_commit_queue_scope": False}
):
# tvm.lower(schedule.mod["main"]).show()
func = tvm.build(schedule.mod["main"], target=get_hexagon_target("v68"))
with hexagon_launcher.create_session() as hexagon_session:
dev = hexagon_session.device
mod = hexagon_session.load_module(func)
out = tvm.nd.array(out_np, device=dev)
a = tvm.nd.array(a_np, device=dev)
if comp_type == "single_input":
mod(a, out)
else:
b = tvm.nd.array(b_np, device=dev)
mod(a, b, out)
verify(out, ref)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_thread_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add hexagon thread pool test"""
import numpy as np
import tvm
import tvm.contrib.hexagon
import tvm.script
import tvm.testing
from tvm.contrib.hexagon.session import Session
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
@tvm.script.ir_module
class ElemwiseSumIRModule:
"""IRModule definition for elementwise sum"""
# pylint: disable=no-self-argument,invalid-name,missing-function-docstring
@T.prim_func
def elemwise_sum_serial(a: T.handle, b: T.handle, c: T.handle, n: T.int32):
T.func_attr({"global_symbol": "elemwise_sum_serial", "tir.noalias": True})
A = T.match_buffer(a, (n,), dtype="float32")
B = T.match_buffer(b, (n,), dtype="float32")
C = T.match_buffer(c, (n,), dtype="float32")
for i in T.serial(n):
with T.block("C"):
vi = T.axis.spatial(n, i)
C[vi] = A[vi] + B[vi]
@T.prim_func
def elemwise_sum_parallel(a: T.handle, b: T.handle, c: T.handle, n: T.int32):
T.func_attr({"global_symbol": "elemwise_sum_parallel", "tir.noalias": True})
A = T.match_buffer(a, (n,), dtype="float32")
B = T.match_buffer(b, (n,), dtype="float32")
C = T.match_buffer(c, (n,), dtype="float32")
for i in T.parallel(n):
with T.block("C"):
vi = T.axis.spatial(n, i)
C[vi] = A[vi] + B[vi]
# pylint: enable=no-self-argument,invalid-name,missing-function-docstring
def generate_add_test_data(hexagon_session: Session, n=128 * 1024):
a = tvm.nd.array(np.random.uniform(size=n).astype("float32"), hexagon_session.device)
b = tvm.nd.array(np.random.uniform(size=n).astype("float32"), hexagon_session.device)
c = tvm.nd.array(np.zeros(n, dtype="float32"), hexagon_session.device)
return (a, b, c, n)
def benchmark_func(mod, name, args, hexagon_session):
(a, b, c, n) = args
evaluator = mod.time_evaluator(name, hexagon_session.device, number=100)
return evaluator(a, b, c, n).mean
@tvm.testing.requires_hexagon
def test_speedup(hexagon_session: Session, capsys):
"""Test speedup"""
func = tvm.build(
ElemwiseSumIRModule,
target=get_hexagon_target("v68"),
)
mod = hexagon_session.load_module(func)
args = generate_add_test_data(hexagon_session)
parallel_mean = benchmark_func(mod, "elemwise_sum_parallel", args, hexagon_session)
serial_mean = benchmark_func(mod, "elemwise_sum_serial", args, hexagon_session)
with capsys.disabled():
print("... speedup of {:.2f}".format(serial_mean / parallel_mean), end=" ")
@tvm.testing.requires_hexagon
def test_elemwise_sum_parallel(hexagon_session: Session):
"""Test parallel elementwise sum"""
func = tvm.build(
ElemwiseSumIRModule,
target=get_hexagon_target("v68"),
)
mod = hexagon_session.load_module(func)
(a, b, c, n) = generate_add_test_data(hexagon_session)
mod["elemwise_sum_parallel"](a, b, c, n)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_usmp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP tests"""
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.contrib.hexagon.session import Session
from tvm.relay.backend import Executor, Runtime
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls
@pytest.mark.parametrize("usmp_enabled", [False, True])
@tvm.testing.requires_hexagon
def test_conv2d(hexagon_session: Session, aot_host_target, aot_target, usmp_enabled):
"""Try conv2d on AOT target with usmp_enabled and check for TVMBackendAllocWorkspace calls"""
dtype = "float32"
input_shape = (1, 8, 8, 3)
w1_shape = (5, 5, 3, 1)
w2_shape = (5, 5, 1, 3)
data = relay.var("data", relay.TensorType(input_shape, dtype))
weight1 = relay.var("weight1", relay.TensorType(w1_shape, dtype))
weight2 = relay.var("weight2", relay.TensorType(w2_shape, dtype))
    output1 = relay.nn.conv2d(
data,
weight1,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
output2 = relay.nn.conv2d(
        output1,
weight2,
padding=(2, 2),
kernel_size=(5, 5),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="float32",
)
f = relay.Function([data, weight1, weight2], output2)
relay_mod = tvm.IRModule.from_expr(f)
relay_mod = relay.transform.InferType()(relay_mod)
weight1_data = np.random.rand(w1_shape[0], w1_shape[1], w1_shape[2], w1_shape[3]).astype(
dtype=dtype
)
weight2_data = np.random.rand(w2_shape[0], w2_shape[1], w2_shape[2], w2_shape[3]).astype(
dtype=dtype
)
input_data = np.random.rand(
input_shape[0], input_shape[1], input_shape[2], input_shape[3]
).astype(dtype=dtype)
params = {"weight1": weight1_data, "weight2": weight2_data}
inputs = {"data": input_data}
with tvm.transform.PassContext(opt_level=3, config={"tir.usmp.enable": usmp_enabled}):
lowered = tvm.relay.build(
relay_mod,
params=params,
target=tvm.target.Target(aot_target, host=aot_host_target),
runtime=Runtime("cpp"),
executor=Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
)
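    # With USMP enabled, workspace buffers are planned into a static memory pool at
    # compile time, so no TVMBackendAllocWorkspace calls should remain in the library;
    # with USMP disabled they are expected to be present.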
assert is_tvm_backendallocworkspace_calls(lowered.lib) != usmp_enabled
aot_mod = hexagon_session.get_executor_from_factory(lowered)
aot_mod.set_input(**inputs)
aot_mod.run()
hexagon_output = aot_mod.get_output(0).numpy()
target_llvm = tvm.target.Target("llvm")
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
runtime=Runtime("cpp"),
executor=Executor("aot"),
)
llvm_mod = tvm.runtime.executor.AotModule(llvm_lowered["default"](tvm.cpu(0)))
llvm_mod.set_input(**params)
llvm_mod.run(**inputs)
expected_output = llvm_mod.get_output(0).numpy()
tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_vtcm_bandwidth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test theoretical bandwith for data transfers to VTCM for different strategies."""
import numpy as np
from numpy.random import default_rng
import tvm
from tvm.script import tir as T
from .infrastructure import get_hexagon_target
MB = 1024**2
KB = 1024
TEST_OUTPUT_TEMPLATE = (
"Test bandwidth with buffer size {}MB... \n"
" -Base: {} GBps \n -Vectorized: {} GBps\n"
" -Vectorized and Parallelized: {} GBps\n"
" -Single DMA Copy: {} GBps\n"
)
def memcopy_operator(size):
"""Generate memory copy operator."""
@T.prim_func
def operator(a: T.handle, a_v: T.handle) -> None:
a_buffer = T.match_buffer(a, size, dtype="int8", align=128, scope="global")
a_global_vtcm = T.match_buffer(a_v, size, dtype="int8", align=128, scope="global.vtcm")
for ax0 in T.serial(size):
with T.block("A_global.vtcm"):
v0_ind = T.axis.spatial(size, ax0)
T.reads(a_buffer[v0_ind])
T.writes(a_global_vtcm[v0_ind])
a_global_vtcm[v0_ind] = a_buffer[v0_ind]
return operator
def single_dma_operator(size):
"""Generate single dma operator."""
@T.prim_func
def operator(a: T.handle, a_v: T.handle) -> None:
a_buffer = T.match_buffer(a, size, dtype="int8", align=128, scope="global")
a_global_vtcm = T.match_buffer(a_v, size, dtype="int8", align=128, scope="global.vtcm")
T.evaluate(
T.tvm_call_packed(
"device_api.hexagon.mem_copy_DLTensor",
T.tvm_stack_make_array(
a_global_vtcm.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
a_global_vtcm.dtype,
0,
dtype="handle",
),
T.tvm_stack_make_array(
a_buffer.data,
T.tvm_stack_make_shape(size, dtype="handle"),
0,
1,
a_buffer.dtype,
0,
dtype="handle",
),
T.cast(size, dtype="int"),
dtype="int32",
)
)
return operator
def evaluate(hexagon_session, sch, size):
"""Evaluate schedule."""
a_shape = size
func_tir = tvm.build(sch.mod["main"], target=get_hexagon_target("v69"))
module = hexagon_session.load_module(func_tir)
rng = default_rng()
a = rng.integers(-128, 127, a_shape, dtype="int8")
a_vtcm = np.zeros(a_shape, dtype="int8")
a_hexagon = tvm.runtime.ndarray.array(a, device=hexagon_session.device, mem_scope="global")
a_vtcm_hexagon = tvm.runtime.ndarray.array(
a_vtcm, device=hexagon_session.device, mem_scope="global.vtcm"
)
    # These are reduced for CI, but number=100 and repeat=10 do a good job of removing noise.
number = 1
repeat = 1
timer = module.time_evaluator(
"__tvm_main__", hexagon_session.device, number=number, repeat=repeat
)
runtime = timer(a_hexagon, a_vtcm_hexagon)
gbps = round((size / 2**30) / runtime.mean, 4)
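    # e.g. (illustrative numbers) copying 640 KB in 20 us gives
    # (655360 / 2**30) GB / 2.0e-5 s ~= 30.5 GBps.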
tvm.testing.assert_allclose(a_vtcm_hexagon.asnumpy(), a)
return gbps
class TestMatMulVec:
"""MatMul test class."""
    # Removed most of these to speed up CI.
size = tvm.testing.parameter(
# 10 * KB,
# 20 * KB,
# 40 * KB,
# 80 * KB,
# 160 * KB,
# 320 * KB,
640 * KB,
# MB,
# 2 * MB,
# 3 * MB,
# 4 * MB,
# 8 * MB, # Only works on 8gen1 HDKs
)
outer_split = tvm.testing.parameter(4)
unroll_split = tvm.testing.parameter(2)
vector_split = tvm.testing.parameter(128)
@tvm.testing.requires_hexagon
def test_bandwidth(self, hexagon_session, size, outer_split, unroll_split, vector_split):
"""Test bandwidth."""
# Run the base memcopy operator.
sch = tvm.tir.Schedule(memcopy_operator(size))
        base_gbps = evaluate(hexagon_session, sch, size)
# Run with some basic unroll and vectorize scheduling.
sch = tvm.tir.Schedule(memcopy_operator(size))
vtcm_block_a = sch.get_block("A_global.vtcm")
v_block = sch.get_loops(vtcm_block_a)
_, vio_a, vii_a = sch.split(v_block[0], factors=[None, unroll_split, vector_split])
sch.unroll(vio_a)
sch.vectorize(vii_a)
vectorize_gbps = evaluate(hexagon_session, sch, size)
# Run with some basic unroll and vectorize scheduling and parallelization.
sch = tvm.tir.Schedule(memcopy_operator(size))
vtcm_block_a = sch.get_block("A_global.vtcm")
v_block = sch.get_loops(vtcm_block_a)
vbo_a, _, vio_a, vii_a = sch.split(
v_block[0], factors=[outer_split, None, unroll_split, vector_split]
)
sch.unroll(vio_a)
sch.vectorize(vii_a)
sch.parallel(vbo_a)
parallel_gbps = evaluate(hexagon_session, sch, size)
# Run using a single dma copy to transfer the data.
sch = tvm.tir.Schedule(single_dma_operator(size))
single_dma_gbps = evaluate(hexagon_session, sch, size)
mbs = round(size / MB, 2)
print(
TEST_OUTPUT_TEMPLATE.format(
                mbs, base_gbps, vectorize_gbps, parallel_gbps, single_dma_gbps
)
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/test_wo_qnn_canonicalization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""No QNN canonicalization tests."""
import numpy as np
import tvm.testing
from tvm import relay
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon.pytest_plugin import HEXAGON_AOT_LLVM_TARGET
from tvm.relay.backend import Executor
@tvm.testing.requires_hexagon
def test_no_qnn_pass():
"""No QNN pass test."""
x = relay.var("x", shape=(4, 8), dtype="float32")
op0 = relay.qnn.op.quantize(x, relay.const(2.0), relay.const(10), out_dtype="uint8")
op1 = relay.qnn.op.dequantize(op0, relay.const(0.5), relay.const(5))
mod = tvm.IRModule.from_expr(op1)
target_hexagon = tvm.target.hexagon("v68")
# Default compilation flow
with tvm.transform.PassContext(opt_level=3):
opt_mod_1, _ = relay.optimize(mod, tvm.target.Target(target_hexagon, host=target_hexagon))
# Disable QNN legalization and canonicalization passes
with tvm.transform.PassContext(opt_level=3, disabled_pass=["qnn.Legalize"]):
opt_mod_2, _ = relay.optimize(mod, tvm.target.Target(target_hexagon, host=target_hexagon))
# Check that QNN ops are absent with default compilation flow.
assert "qnn.quantize" not in opt_mod_1.astext(show_meta_data=False)
assert "qnn.dequantize" not in opt_mod_1.astext(show_meta_data=False)
# Check that QNN ops are present without "qnn.Legalize" passes.
assert "qnn.quantize" in opt_mod_2.astext(show_meta_data=False)
assert "qnn.dequantize" in opt_mod_2.astext(show_meta_data=False)
def execute(executor, data_np, weight_np, bias_np=None):
executor.set_input("data", data_np)
executor.set_input("weight", weight_np)
if bias_np is not None:
executor.set_input("bias", bias_np)
executor.run()
return executor.get_output(0)
@tvm.testing.requires_hexagon
def test_qnn_conv2d_rq(hexagon_session: Session):
"""QNN conv2d test."""
data_shape = [1, 8, 32, 32]
weight_shape = [16, 8, 3, 3]
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
op0 = relay.qnn.op.quantize(data, relay.const(0.078), relay.const(0), out_dtype="int8")
op1 = relay.qnn.op.quantize(weight, relay.const(0.07), relay.const(0), out_dtype="int8")
op2 = relay.qnn.op.conv2d(
op0,
op1,
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(0.078),
kernel_scale=relay.const(0.07),
padding=[0, 0, 0, 0],
channels=16,
kernel_size=[3, 3],
)
op5 = relay.qnn.op.requantize(
op2,
input_scale=relay.const(0.05),
input_zero_point=relay.const(0),
output_scale=relay.const(0.21),
output_zero_point=relay.const(61),
out_dtype="int8",
)
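    # requantize rescales the int32 convolution result back to int8, roughly
    # q_out = clip(round((q_in - input_zp) * input_scale / output_scale) + output_zp).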
relay_mod = tvm.IRModule.from_expr(op5)
target_llvm = tvm.target.Target("llvm")
executor = Executor("aot")
with tvm.transform.PassContext(opt_level=3, disabled_pass=["qnn.Legalize"]):
hexagon_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET),
executor=executor,
)
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
executor=executor,
)
data_np = np.random.rand(*data_shape) - 0.5
weight_np = np.random.rand(*weight_shape) - 0.5
hx_m = hexagon_session.get_executor_from_factory(hexagon_lowered)
hexagon_output = execute(hx_m, data_np, weight_np)
dev = tvm.cpu(0)
llvm_m = tvm.runtime.executor.AotModule(llvm_lowered["default"](dev))
llvm_out = execute(llvm_m, data_np, weight_np)
np.testing.assert_equal(hexagon_output.numpy(), llvm_out.numpy())
@tvm.testing.requires_hexagon
def test_qnn_dense_bias_rq(hexagon_session: Session):
"""QNN dense with bias test."""
data_shape = [8, 8]
weight_shape = [16, 8]
bias_shape = [16]
data = relay.var("data", shape=data_shape, dtype="float32")
weight = relay.var("weight", shape=weight_shape, dtype="float32")
bias = relay.var("bias", shape=bias_shape, dtype="float32")
op0 = relay.qnn.op.quantize(data, relay.const(0.08), relay.const(0), out_dtype="int8")
op1 = relay.qnn.op.quantize(weight, relay.const(0.07), relay.const(0), out_dtype="int8")
op2 = relay.qnn.op.dense(
op0,
op1,
input_zero_point=relay.const(0),
kernel_zero_point=relay.const(0),
input_scale=relay.const(0.08),
kernel_scale=relay.const(0.07),
units=None,
)
op3 = relay.qnn.op.quantize(bias, relay.const(0.5), relay.const(0), out_dtype="int32")
op4 = relay.nn.bias_add(op2, op3)
op5 = relay.qnn.op.requantize(
op4,
input_scale=relay.const(0.05),
input_zero_point=relay.const(0),
output_scale=relay.const(0.212),
output_zero_point=relay.const(10),
out_dtype="int8",
)
relay_mod = tvm.IRModule.from_expr(op5)
target_llvm = tvm.target.Target("llvm")
executor = Executor("aot")
with tvm.transform.PassContext(opt_level=3, disabled_pass=["qnn.Legalize"]):
hexagon_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET),
executor=executor,
)
with tvm.transform.PassContext(opt_level=3):
llvm_lowered = tvm.relay.build(
relay_mod,
tvm.target.Target(target_llvm, host=target_llvm),
executor=executor,
)
data_np = np.random.rand(*data_shape) - 0.5
weight_np = np.random.rand(*weight_shape) - 0.5
bias_np = np.random.rand(*bias_shape)
hx_m = hexagon_session.get_executor_from_factory(hexagon_lowered)
hexagon_output = execute(hx_m, data_np, weight_np, bias_np)
dev = tvm.cpu(0)
llvm_m = tvm.runtime.executor.AotModule(llvm_lowered["default"](dev))
llvm_out = execute(llvm_m, data_np, weight_np, bias_np)
np.testing.assert_equal(hexagon_output.numpy(), llvm_out.numpy())
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon TOPI tests """
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon TOPI Slice OP tests """
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_argmax_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Hexagon slice argmax op """
import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.topi.hexagon.slice_ops as sl
import tvm.contrib.hexagon
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
class TestArgMaxSlice:
"""Argmax Slice Op Tests"""
(
input_shape,
input_layout,
output_layout,
dtype,
in_axis,
in_axis_sep,
out_axis_sep,
) = tvm.testing.parameters(
((1, 64, 64, 32), "nhwc-8h2w32c2w-2d", "nhw-32h16w-2d", "float16", [3], [4], [3]),
((3, 32, 16, 32), "nhwc-8h2w32c2w-2d", "nhw-32h16w-2d", "float16", [3], [4], [3]),
((1, 32, 32, 64), "nhwc-8h2w32c2w-2d", "nhw-32h16w-2d", "float16", [3], [4], [3]),
((1, 64, 64, 32), "nhwc-8h8w32c-2d", "nhw-32h16w-2d", "int8", [3], [4], [3]),
((3, 32, 16, 32), "nhwc-8h8w32c-2d", "nhw-32h16w-2d", "int8", [3], [4], [3]),
((1, 32, 32, 64), "nhwc-8h8w32c-2d", "nhw-32h16w-2d", "int8", [3], [4], [3]),
)
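    # The layout strings above follow the Hexagon blocked ("crouton") convention: e.g.
    # "nhwc-8h2w32c2w-2d" tiles an NHWC tensor into blocks of 8 along H, 4 (2w x 2w)
    # along W and 32 along C, stored as a 2-d blocked buffer; "nhw-32h16w-2d"
    # similarly tiles H by 32 and W by 16.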
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
return np.random.uniform(size=input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, input_layout):
return transform_numpy(input_np, "nhwc", input_layout)
@tvm.testing.fixture
def expected_output_np(self, input_np, in_axis):
ref_np = np.argmax(input_np, *in_axis).astype("int32")
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, output_layout):
return transform_numpy(expected_output_np, "nhw", output_layout)
@tvm.testing.requires_hexagon
def test_argmax_slice(
self,
input_shape,
dtype,
input_layout,
output_layout,
in_axis,
transformed_input_np,
transformed_expected_output_np,
in_axis_sep,
out_axis_sep,
hexagon_session,
working_scope,
):
"""Top level testing function for argmax"""
argmax_input = te.placeholder(input_shape, name="A", dtype=dtype)
output = sl.argmax.argmax_compute(argmax_input, in_axis)
argmax_func = te.create_prim_func([argmax_input, output])
tir_s = sl.argmax_schedule(argmax_func, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=in_axis_sep,
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=out_axis_sep,
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
tir_irm = tvm.lower(tir_s.mod, [argmax_input, output], name="argmax")
runtime_module = tvm.build(
tir_irm, [argmax_input, output], target=get_hexagon_target("v69"), name="argmax"
)
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_expected_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_avg_pool2d_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test avg_pool2d slice op for Hexagon."""
import numpy as np
from typing import *
from tvm import te
import tvm.testing
from tvm.contrib.hexagon.session import Session
import tvm.topi.hexagon.slice_ops as sl
import tvm.topi.hexagon.qnn as qn
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import (
transform_numpy,
quantize_np,
get_hexagon_target,
)
from ...pytest_util import (
get_multitest_ids,
create_populated_numpy_ndarray,
TensorContentRandom,
)
input_layout = tvm.testing.parameter(
"nhwc-8h2w32c2w-2d",
)
dtype = tvm.testing.parameter("float16", "uint8")
@tvm.testing.fixture
def output_layout(output_shape, dtype):
o_b, o_h, o_w, o_c = output_shape
if dtype == "float16":
if o_h == 1 and o_w == 1:
return "n11c-1024c-2d"
else:
assert o_h % 8 == 0 and o_w % 4 == 0, "Invalid output shape"
return "nhwc-8h2w32c2w-2d"
elif dtype == "int8" or "uint8":
if o_h == 1 and o_w == 1:
return "n11c-2048c-2d"
else:
assert o_h % 8 == 0 and o_w % 8 == 0, "Invalid output shape"
return "nhwc-8h8w32c-2d"
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def input_np(input_shape, dtype: str, input_tensor_populator):
if dtype == "uint8":
dtype = "float32" # Use "float32" input which will be quantized later
return create_populated_numpy_ndarray(input_shape, dtype, input_tensor_populator)
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, output_layout, dtype):
if dtype == "float16":
return transform_numpy(expected_output_np, "nhwc", output_layout)
elif dtype in ("uint8", "int8"):
quant_arr, scale, zero_point = quantize_np(expected_output_np, dtype)
return [transform_numpy(quant_arr, "nhwc", output_layout), scale, zero_point]
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def transformed_input_np_padded(input_np_padded, input_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_padded, "nhwc", input_layout)
elif dtype in ("uint8", "int8"):
quant_arr, scale, zero_point = quantize_np(input_np_padded, dtype)
return [transform_numpy(quant_arr, "nhwc", input_layout), scale, zero_point]
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
class TestAvgPool2dSlice:
_param_descs = [
"out_shape", # output_shape
"kernel", # kernel
"stride", # stride
"dil", # dilation
"pad", # padding
"ceil", # ceil_mode
"cnt_padded", # count_include_pad
None, # input_tensor_populator
]
_multitest_params = [
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 16, 16, 32],
[3, 3],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[8, 8],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
# Test non-one stride and dilation
(
[1, 8, 8, 32],
[3, 3],
[2, 3],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[2, 2],
[2, 2],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[2, 2],
[2, 3],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
# Test non-zero padding
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[1, 1, 1, 1],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[1, 2, 3, 4],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[1, 2, 3, 4],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[3, 2],
[2, 3],
[1, 2, 3, 4],
False,
True,
TensorContentRandom(),
),
        # Test n11c-1024c-2d layout, which requires input and output to have different layouts
(
[1, 1, 1, 2048],
[8, 8],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[6, 6],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[3, 3],
[2, 2],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[4, 4],
[2, 2],
[2, 3],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
]
_param_ids = get_multitest_ids(_multitest_params, _param_descs)
# NOTE: input_layout is always assumed to be "nhwc-8h2w32c2w-2d"
(
output_shape,
kernel,
stride,
dilation,
padding,
ceil_mode,
count_include_pad,
input_tensor_populator,
) = tvm.testing.parameters(*_multitest_params, ids=_param_ids)
@tvm.testing.fixture
def expected_output_np(
self,
input_np,
kernel,
stride,
dilation,
padding,
ceil_mode,
count_include_pad,
):
pad_before = padding[:2]
pad_after = padding[2:]
ref_np = tvm.topi.testing.poolnd_python(
input_np,
kernel,
stride,
dilation,
pad_before,
pad_after,
"avg", # pool_type
count_include_pad,
False, # ceil_mode,
layout="NHWC",
)
return ref_np
@tvm.testing.fixture
def input_shape(self, output_shape, kernel, padding, stride, dilation, output_layout):
        # Input shape without any padding; 'ceil_mode' is ignored in this calculation:
o_b, o_h, o_w, o_c = output_shape
d_h, d_w = dilation
s_h, s_w = stride
k_h, k_w = kernel
pad_before_h, pad_before_w = padding[:2]
pad_after_h, pad_after_w = padding[2:]
if output_layout == "n11c-1024c-2d":
assert (
pad_before_w == 0 and pad_after_w == 0 and pad_before_h == 0 and pad_after_h == 0
), "Padding must be zero for n11c-1024c-2d layout"
assert o_h == 1 and o_w == 1, "Output height and width must be 1"
in_h = (o_h - 1) * s_h + d_h * (k_h - 1) + 1 - pad_before_h - pad_after_h
in_w = (o_w - 1) * s_w + d_w * (k_w - 1) + 1 - pad_before_w - pad_after_w
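        # e.g. an 8x8 output with a 3x3 kernel, stride 1, dilation 1 and no padding
        # needs a 10x10 input: (8 - 1) * 1 + 1 * (3 - 1) + 1 = 10.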
return [o_b, in_h, in_w, o_c]
@tvm.testing.fixture
def input_shape_padded(self, input_shape, padding, output_layout, dtype):
# Input shape is adjusted to account for 'padding'. Also, due to the physical
# layout of the buffer, height and width are adjusted so that they are a
# multiple of the buffer size dictated by the layout.
# NOTE: For float16, the input layout is always assumed to be nhwc-8h2w32c2w-2d and
# for int8/uint8, it's nhwc-8h8w32c-2d.
# For both nhwc-8h2w32c2w-2d and nhwc-8h8w32c-2d, the height should be a multiple
# of 8. However, the width should be a multiple of 4 for the first case and 8 for
# the second case.
height_mult = 8
if dtype == "float16":
width_mult = 4 # input layout : nhwc-8h2w32c2w-2d
elif dtype in ("uint8", "int8"):
width_mult = 8 # input layout : nhwc-8h8w32c-2d
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
pad_before_h, pad_before_w = padding[:2]
pad_after_h, pad_after_w = padding[2:]
padded_input_height = (
(input_shape[1] + pad_before_h + pad_after_h + height_mult - 1) // height_mult
) * height_mult
padded_input_width = (
(input_shape[2] + pad_before_w + pad_after_w + width_mult - 1) // width_mult
) * width_mult
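        # e.g. a 10x10 float16 input with padding (1, 2, 3, 4) rounds up to 16x16:
        # ((10 + 1 + 3 + 7) // 8) * 8 = 16 and ((10 + 2 + 4 + 3) // 4) * 4 = 16.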
return [input_shape[0], padded_input_height, padded_input_width, input_shape[3]]
@tvm.testing.fixture
def input_np_padded(self, input_np, input_shape, input_shape_padded, padding):
pad_before_h, pad_before_w = padding[:2]
pad_after_h = input_shape_padded[1] - input_shape[1] - pad_before_h
pad_after_w = input_shape_padded[2] - input_shape[2] - pad_before_w
input_padded = np.pad(
input_np,
((0, 0), (pad_before_h, pad_after_h), (pad_before_w, pad_after_w), (0, 0)),
"constant",
)
return input_padded
@tvm.testing.fixture
def schedule_args(
self,
stride,
kernel,
dtype,
dilation,
input_layout,
output_layout,
output_shape,
input_shape_padded,
transformed_input_np_padded,
transformed_expected_output_np,
):
"""
Construct schedule args based on dtype
"""
A = te.placeholder(input_shape_padded, name="A", dtype=dtype)
if dtype == "float16":
M = sl.avg_pool2d_compute(A, kernel, stride, dilation, output_shape)
tir_schedule = sl.avg_pool2d_schedule(M, A, output_layout, input_layout)
elif dtype in ("uint8", "int8"):
in_data, in_scale, in_zero_point = transformed_input_np_padded
_, out_scale, out_zero_point = transformed_expected_output_np
M = qn.qnn_avg_pool2d_compute(
A,
kernel,
stride,
dilation,
output_shape,
dtype,
in_zero_point,
in_scale,
out_zero_point,
out_scale,
)
tir_schedule = qn.qnn_avg_pool2d_schedule(M, A, output_layout, input_layout)
return [tir_schedule.mod, [A, M]]
@tvm.testing.requires_hexagon
def test_avg_pool2d_slice(
self,
dtype,
output_layout,
output_shape,
transformed_input_np_padded,
transformed_expected_output_np,
schedule_args,
hexagon_session: Session,
):
in_data = transformed_input_np_padded
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
*schedule_args,
get_hexagon_target("v69"),
name="avg_pool2d",
)
input_axis_separator = [4]
if output_layout in (
"nhwc-8h2w32c2w-2d",
"nhwc-8h8w32c-2d",
"n11c-1024c-2d",
"n11c-2048c-2d",
):
output_axis_separator = [4]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
if dtype == "float16":
in_data_np = transformed_input_np_padded
out_data_np = transformed_expected_output_np
elif dtype in ("uint8", "int8"):
in_data_np, _, _ = transformed_input_np_padded
out_data_np, _, _ = transformed_expected_output_np
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=in_data_np,
axis_separators=input_axis_separator,
mem_scope="global.vtcm",
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
out_data_np.shape,
dtype,
axis_separators=output_axis_separator,
mem_scope="global.vtcm",
)
mod = hexagon_session.load_module(func)
mod(input_arr, output_arr)
b, h, w, c = output_shape
if output_layout == "nhwc-8h2w32c2w-2d":
output_np = output_arr.numpy().reshape([b, h // 8, w // 4, c // 32, 8, 2, 32, 2])
elif output_layout == "nhwc-8h8w32c-2d":
output_np = output_arr.numpy().reshape([b, h // 8, w // 8, c // 32, 8, 8, 32])
elif output_layout == "n11c-2048c-2d":
output_np = output_arr.numpy().reshape([b, 1, 1, c // 2048, 2048])
elif output_layout == "n11c-1024c-2d":
output_np = output_arr.numpy().reshape([b, 1, 1, c // 1024, 1024])
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
if dtype == "float16":
np.testing.assert_allclose(output_np, out_data_np, rtol=1e-3, atol=1e-3)
else:
np.testing.assert_allclose(output_np, out_data_np, rtol=1, atol=1)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Hexagon slice cast ops """
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.topi.hexagon.slice_ops as sl
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
class TestCastF16F32Slice2d:
"""
For testing Cast F16 to F32 Slice ops
"""
input_shape, orig_layout, input_layout, output_layout, axis_sep = tvm.testing.parameters(
((1, 16, 12, 64), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 64, 64, 32), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 16, 12, 64), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-4h2w32c2w-2d", [4]),
((1, 64, 64, 32), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-4h2w32c2w-2d", [4]),
((1, 1024), "nc", "nc-1024c-2d", "nc-1024c-2d", [2]),
((1, 1024), "nc", "nc-1024c-2d", "nc-512c-2d", [2]),
)
dtype = tvm.testing.parameter("float16")
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
return np.random.uniform(size=input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, orig_layout, input_layout):
return transform_numpy(input_np, orig_layout, input_layout)
@tvm.testing.fixture
def expected_output_np(self, input_np):
ref_np = input_np.astype("float32")
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, orig_layout, output_layout):
return transform_numpy(expected_output_np, orig_layout, output_layout)
@tvm.testing.requires_hexagon
def test_cast_fp16_fp32_slice(
self,
input_shape,
dtype,
input_layout,
output_layout,
transformed_input_np,
transformed_expected_output_np,
axis_sep,
hexagon_session,
working_scope,
):
"""
Top level testing function for cast fp16 to fp32
"""
if hexagon_session.is_simulator():
pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
cast_output = sl.cast_f16_f32_compute(cast_input)
cast_func = te.create_prim_func([cast_input, cast_output])
tir_s = sl.cast_f16_f32_schedule(cast_func, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=axis_sep,
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=axis_sep,
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
tir_irm = tvm.lower(tir_s.mod, [cast_input, cast_output], name="cast_f16_f32")
runtime_module = tvm.build(
tir_irm, target=get_hexagon_target("v69"), name="cast_f16_f32"
)
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_expected_output_np,
1e-3,
1e-3,
)
class TestCastF32F16Slice2d:
"""
For testing Cast F32 to F16 Slice ops
"""
(input_shape, orig_layout, input_layout, output_layout, axis_sep,) = tvm.testing.parameters(
((1, 16, 12, 64), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 64, 64, 32), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 16, 12, 64), "nhwc", "nhwc-4h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 64, 64, 32), "nhwc", "nhwc-4h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 1024), "nc", "nc-1024c-2d", "nc-1024c-2d", [2]),
((1, 1024), "nc", "nc-512c-2d", "nc-1024c-2d", [2]),
)
dtype = tvm.testing.parameter("float32")
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
return np.random.uniform(size=input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, orig_layout, input_layout):
return transform_numpy(input_np, orig_layout, input_layout)
@tvm.testing.fixture
def expected_output_np(self, input_np):
ref_np = input_np.astype("float16")
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, orig_layout, output_layout):
return transform_numpy(expected_output_np, orig_layout, output_layout)
@tvm.testing.requires_hexagon
def test_cast_fp32_fp16_slice(
self,
input_shape,
dtype,
input_layout,
output_layout,
transformed_input_np,
transformed_expected_output_np,
axis_sep,
hexagon_session,
working_scope,
):
"""
Top level testing function for cast fp32 to fp16
"""
if hexagon_session.is_simulator():
pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
cast_output = sl.cast_f32_f16_compute(cast_input)
cast_func = te.create_prim_func([cast_input, cast_output])
tir_s = sl.cast_f32_f16_schedule(cast_func, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=axis_sep,
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=axis_sep,
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
tir_irm = tvm.lower(tir_s.mod, [cast_input, cast_output], name="cast_f32_f16")
runtime_module = tvm.build(
tir_irm, target=get_hexagon_target("v69"), name="cast_f32_f16"
)
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_expected_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_clip_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Test clip slice op for Hexagon."""
import numpy as np
from tvm import te
import tvm.testing
import tvm.topi.hexagon.slice_ops as sl
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
input_layout = tvm.testing.parameter(
"nhwc-8h2w32c2w-2d",
)
@tvm.testing.fixture
def input_np(input_shape, dtype):
return np.random.random(input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, output_layout):
return transform_numpy(expected_output_np, "nhwc", output_layout)
@tvm.testing.fixture
def transformed_input_np(input_np, input_layout):
return transform_numpy(input_np, "nhwc", input_layout)
class TestClipSlice:
input_shape, output_shape, A_min, A_max, output_layout, dtype = tvm.testing.parameters(
([1, 8, 4, 32], [1, 8, 4, 32], 0.1, 0.5, "nhwc-8h2w32c2w-2d", "float16")
)
@tvm.testing.fixture
def expected_output_np(self, input_np, A_min, A_max):
ref_np = np.clip(input_np, A_min, A_max)
return ref_np
@tvm.testing.requires_hexagon
def test_clip_slice(
self,
input_shape,
output_shape,
input_np,
input_layout,
output_layout,
dtype,
A_min,
A_max,
transformed_input_np,
transformed_expected_output_np,
hexagon_session,
):
# establish target and input placeholder
A = te.placeholder(input_shape, name="A", dtype=dtype)
# get the compute function and schedule
M = sl.clip_compute(A, A_min, A_max)
# Assume layout is nhwc-8h2w32c2w-2d
tir_schedule = sl.clip_schedule(M, A, output_layout, input_layout)
# build the function
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
tir_schedule.mod,
target=get_hexagon_target("v69"),
name="clip",
)
# allocate input and output nd arrays
axis_separators = [4]
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
dtype=dtype,
axis_separators=axis_separators,
mem_scope="global.vtcm",
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
transformed_expected_output_np.shape,
dtype=dtype,
axis_separators=axis_separators,
mem_scope="global.vtcm",
)
# execute
mod = hexagon_session.load_module(func)
mod(input_arr, output_arr)
# convert output nd array to numpy array
output_np = output_arr.numpy()
b, h, w, c = output_shape
reshaped_output_np = np.reshape(output_np, [b, h // 8, w // 4, c // 32, 8, 2, 32, 2])
# test results
np.testing.assert_allclose(
reshaped_output_np, transformed_expected_output_np, rtol=1e-3, atol=1e-3
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_conv2d_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long, redefined-outer-name
"""Test conv2d slice op for hexagon"""
import numpy as np
import tvm
import tvm.testing
from tvm.topi.hexagon.slice_ops.conv2d import conv2d_compute, conv2d_schedule
from tvm.topi.testing import conv2d_nhwc_python
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
input_layout = tvm.testing.parameter(
"nhwc-8h2w32c2w-2d",
)
output_layout = tvm.testing.parameter(
"nhwc-8h2w32c2w-2d",
)
weights_layout = tvm.testing.parameter("iohw-16i32o2i-1d")
@tvm.testing.fixture
def input_np(in_shape, dtype):
return np.random.uniform(size=in_shape).astype(dtype)
@tvm.testing.fixture
def weights_np(filt_shape, dtype):
return (np.random.uniform(size=filt_shape)).astype(dtype)
@tvm.testing.fixture
def dilated_filt_shape(filt_shape, dilation):
"""Compute the dilated filter shape when dilation > 1"""
filt_height, filt_width, in_channel, out_channel = filt_shape
dilation_height, dilation_width = dilation
if dilation_height == 1 and dilation_width == 1:
return filt_shape
dilated_height, dilated_width = (
dilation_height * (filt_height - 1) + 1,
dilation_width * (filt_width - 1) + 1,
)
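    # e.g. a 3x3 kernel with dilation (2, 2) effectively covers a 5x5 window:
    # 2 * (3 - 1) + 1 = 5.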
return dilated_height, dilated_width, in_channel, out_channel
@tvm.testing.fixture
def dilated_weights_np(weights_np, dilation, dilated_filt_shape):
"""Get dilated weights from original weights for testing"""
filt_height, filt_width, in_channels, out_channels = weights_np.shape
dilation_height, dilation_width = dilation
if dilation_height == 1 and dilation_width == 1:
return weights_np
dilated_height, dilated_width = dilated_filt_shape[0], dilated_filt_shape[1]
dilated_weights = np.zeros(dilated_filt_shape, dtype="float16")
for in_channel in range(in_channels):
for out_channel in range(out_channels):
for dilation_i, height_i in zip(
range(0, dilated_height, dilation_height), range(filt_height)
):
for dilation_j, width_j in zip(
range(0, dilated_width, dilation_width), range(filt_width)
):
dilated_weights[dilation_i, dilation_j, in_channel, out_channel] = weights_np[
height_i, width_j, in_channel, out_channel
]
return dilated_weights
@tvm.testing.fixture
def input_np_padded(input_np, in_shape, padded_in_shape):
pad_height = padded_in_shape[1] - in_shape[1]
pad_width = padded_in_shape[2] - in_shape[2]
pad_channel = padded_in_shape[3] - in_shape[3]
input_padded = np.pad(
input_np, ((0, 0), (0, pad_height), (0, pad_width), (0, pad_channel)), "constant"
)
return input_padded
@tvm.testing.fixture
def padded_filt_shape(filt_shape):
filt_height, filt_width, in_channels, out_channels = filt_shape
in_channels = ((in_channels + 31) // 32) * 32
out_channels = ((out_channels + 31) // 32) * 32
return filt_height, filt_width, in_channels, out_channels
@tvm.testing.fixture
def weights_np_padded(weights_np, filt_shape, padded_filt_shape):
pad_in_channels = padded_filt_shape[2] - filt_shape[2]
pad_out_channels = padded_filt_shape[3] - filt_shape[3]
filt_padded = np.pad(weights_np, ((0, 0), (0, 0), (0, pad_in_channels), (0, pad_out_channels)))
return filt_padded
@tvm.testing.fixture
def weights_np_transformed(weights_np_padded):
height, width, in_channel, out_channel = weights_np_padded.shape
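    # Pack the padded HWIO weights into the "iohw-16i32o2i-1d" layout consumed by the
    # conv2d slice schedule: the filter width axis is reversed, every 32 input channels
    # are split into (16, 2) blocks and every 32 output channels form one block, giving
    # a final shape of (in_channel // 32, out_channel // 32, height, width, 16, 32, 2).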
weights_np_reverse_width = weights_np_padded[:, ::-1, :, :]
transformed_weights_np = weights_np_reverse_width.reshape(
[height, width, in_channel // 32, 16, 2, out_channel // 32, 32]
).transpose(2, 5, 0, 1, 3, 6, 4)
return transformed_weights_np
def generate_test_config(test_params):
"""Utility function to generate test config with meaningful ids"""
test_config = {}
dims = lambda vals: "x".join(map(str, vals))
for param in test_params:
in_shape, filt_shape, stride, dilation = param
test_name = f"nhwc{dims(in_shape)}-hwio{dims(filt_shape)}-stride{dims(stride)}-dilation{dims(dilation)}"
test_config[test_name] = param
return test_config
class TestConv2dSlice:
"""Test class that defines the conv2d slice test"""
test_params = [
[
(1, 10, 6, 32),
(3, 3, 32, 32),
(1, 1),
(1, 1),
],
[
(1, 18, 10, 32),
(3, 3, 32, 32),
(1, 1),
(1, 1),
],
[
(1, 10, 6, 64),
(3, 3, 64, 64),
(1, 1),
(1, 1),
],
[
(1, 12, 8, 4),
(3, 3, 4, 32),
(1, 1),
(2, 2),
],
[
(1, 12, 8, 32),
(5, 5, 32, 32),
(1, 1),
(1, 1),
],
[
(1, 16, 12, 32),
(5, 5, 32, 32),
(1, 1),
(2, 2),
],
[
(1, 13, 9, 32),
(6, 6, 32, 32),
(1, 1),
(1, 1),
],
[
(1, 18, 10, 32),
(3, 3, 32, 32),
(2, 2),
(1, 1),
],
[
(1, 20, 12, 32),
(5, 5, 32, 32),
(2, 2),
(1, 1),
],
[
(1, 22, 14, 32),
(7, 7, 32, 32),
(2, 2),
(1, 1),
],
[
(1, 28, 20, 32),
(7, 7, 32, 32),
(2, 2),
(2, 2),
],
[
(1, 10, 4, 4),
(3, 1, 4, 32),
(1, 1),
(1, 1),
],
[
(1, 18, 8, 4),
(3, 1, 4, 32),
(2, 2),
(1, 1),
],
[
(1, 20, 8, 4),
(3, 1, 4, 32),
(2, 2),
(2, 2),
],
]
test_config = generate_test_config(test_params)
in_shape, filt_shape, stride, dilation = tvm.testing.parameters(
*test_config.values(), ids=test_config.keys()
)
dtype = tvm.testing.parameter("float16")
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def padded_in_shape(self, in_shape):
in_batch, in_height, in_width, in_channel = in_shape
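        # The fp16 nhwc-8h2w32c2w-2d input layout needs the height padded to a multiple
        # of 8, the width to a multiple of 4 and the channels to a multiple of 32.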
in_height = ((in_height + 7) // 8) * 8
in_width = ((in_width + 3) // 4) * 4
in_channel = ((in_channel + 31) // 32) * 32
return in_batch, in_height, in_width, in_channel
@tvm.testing.fixture
def out_shape(self, in_shape, dilated_filt_shape, stride):
in_batch, in_height, in_width, _ = in_shape
filt_height, filt_width, _, num_filt = dilated_filt_shape
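        # Standard "VALID" convolution output extent: out = (in - dilated_kernel) // stride + 1.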
out_height = (in_height - filt_height) // stride[0] + 1
out_width = (in_width - filt_width) // stride[1] + 1
out_channel = num_filt
return in_batch, out_height, out_width, out_channel
@tvm.testing.fixture
def expected_output_np(self, input_np, dilated_weights_np, stride):
ref_np = conv2d_nhwc_python(
input_np.astype("float32"), dilated_weights_np.astype("float32"), stride, padding=0
).astype("float16")
return ref_np
@tvm.testing.requires_hexagon
def test_conv2d(
self,
padded_in_shape,
padded_filt_shape,
stride,
dilation,
dtype,
out_shape,
input_layout,
weights_layout,
output_layout,
input_np_padded,
weights_np_transformed,
expected_output_np,
working_scope,
hexagon_session,
):
"""Main test function that tests the conv2d slice op"""
input_tensor = tvm.te.placeholder(padded_in_shape, name="InputTensor", dtype=dtype)
weights = tvm.te.placeholder(padded_filt_shape, name="Weights", dtype=dtype)
output_name = "output"
output_tensor = conv2d_compute(
input_tensor, weights, out_shape, stride, dilation, dtype, output_name
)
tir_schedule = conv2d_schedule(
output_tensor,
[input_tensor, weights],
input_layout,
weights_layout,
output_layout,
output_name,
)
func_name = f"fconv2d_{dtype}"
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(
tir_schedule.mod,
target=get_hexagon_target("v69"),
name=func_name,
)
input_np_transformed = transform_numpy(input_np_padded, "nhwc", input_layout)
output_np_transformed = transform_numpy(expected_output_np, "nhwc", output_layout)
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=input_np_transformed,
axis_separators=[4],
mem_scope=working_scope,
)
weights_arr = allocate_hexagon_array(
hexagon_session.device, data=weights_np_transformed, mem_scope=working_scope
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=output_np_transformed.shape,
dtype=output_np_transformed.dtype,
axis_separators=[4],
mem_scope=working_scope,
)
mod = hexagon_session.load_module(runtime_module)
mod(input_arr, weights_arr, output_arr)
output_np = output_arr.numpy()
np.testing.assert_allclose(output_np, output_np_transformed, atol=1.0, rtol=0.05)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_depthwise_conv2d_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, disable=line-too-long, redefined-outer-name
"""Test depthwise_conv2d slice op for hexagon."""
import numpy as np
import tvm
import tvm.testing
import tvm.topi.hexagon.qnn as qn
from tvm.topi.testing import depthwise_conv2d_python_nhwc
from tvm.topi.hexagon.slice_ops.dwconv2d import dwconv2d_compute, dwconv2d_schedule
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, quantize_np
@tvm.testing.fixture
def input_np(in_shape, dtype, low, high):
if dtype in ("uint8"):
return np.random.uniform(low=low, high=high, size=in_shape).astype("float32")
if dtype in ("int8"):
return np.random.uniform(low=-low, high=high, size=in_shape).astype("float32")
return np.random.uniform(size=in_shape).astype(dtype)
@tvm.testing.fixture
def input_np_padded(input_np, in_shape, padded_in_shape):
pad_height = padded_in_shape[1] - in_shape[1]
pad_width = padded_in_shape[2] - in_shape[2]
pad_channel = padded_in_shape[3] - in_shape[3]
input_padded = np.pad(
input_np, ((0, 0), (0, pad_height), (0, pad_width), (0, pad_channel)), "constant"
)
return input_padded
@tvm.testing.fixture
def in_out_layout(dtype):
if dtype == "float16":
return "nhwc-8h2w32c2w-2d"
elif dtype in ("uint8", "int8"):
return "nhwc-8h8w32c-2d"
else:
raise RuntimeError(f"Unsupported quantized data type '{dtype}'")
@tvm.testing.fixture
def expected_output_np(input_np, dilated_weights_np, stride, dtype):
dilated_weights_np_t = dilated_weights_np.transpose(0, 1, 3, 2)
ref_type = dtype
if dtype in ("uint8", "int8"):
# for quantized versions, return float32 output
ref_type = "float32"
ref_np = depthwise_conv2d_python_nhwc(
input_np.astype("float32"), dilated_weights_np_t.astype("float32"), stride, padding=0
).astype(ref_type)
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, in_out_layout, dtype):
if dtype == "float16":
return transform_numpy(expected_output_np, "nhwc", in_out_layout)
elif dtype in ("uint8", "int8"):
quant_arr, scale, zero_point = quantize_np(expected_output_np, dtype)
return [transform_numpy(quant_arr, "nhwc", in_out_layout), scale, zero_point]
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def transformed_input_np_padded(input_np_padded, in_out_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_padded, "nhwc", in_out_layout)
if dtype in ("uint8", "int8"):
quant_arr, scale, zero_point = quantize_np(input_np_padded, dtype)
return [transform_numpy(quant_arr, "nhwc", in_out_layout), scale, zero_point]
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def weights_np(filt_shape, dtype):
if dtype == "float16":
return np.random.uniform(size=filt_shape).astype(dtype)
elif dtype in ("uint8", "int8"):
weight_arr = np.random.uniform(low=-5, high=5, size=filt_shape).astype("float32")
return weight_arr
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def dilated_filt_shape(filt_shape, dilation):
"""Compute the dilated filter shape when dilation > 1"""
filt_height, filt_width, in_channel, out_channel = filt_shape
dilation_height, dilation_width = dilation
if dilation_height == 1 and dilation_width == 1:
return filt_shape
dilated_height = dilation_height * (filt_height - 1) + 1
dilated_width = dilation_width * (filt_width - 1) + 1
return dilated_height, dilated_width, in_channel, out_channel
@tvm.testing.fixture
def dilated_weights_np(weights_np, dilation, dilated_filt_shape, dtype):
"""Get dilated weights from original weights for testing"""
if dtype in ["int8", "uint8"]:
dtype = "float32"
filt_height, filt_width, in_channels, out_channels = weights_np.shape
    dilated_weights = np.zeros(dilated_filt_shape, dtype=dtype)
dilation_height, dilation_width = dilation
if dilation_height == 1 and dilation_width == 1:
return weights_np
dilated_height, dilated_width = dilated_filt_shape[0], dilated_filt_shape[1]
for in_channel in range(in_channels):
for out_channel in range(out_channels):
for dilation_i, height_i in zip(
range(0, dilated_height, dilation_height), range(filt_height)
):
for dilation_j, width_j in zip(
range(0, dilated_width, dilation_width), range(filt_width)
):
dilated_weights[dilation_i, dilation_j, in_channel, out_channel] = weights_np[
height_i, width_j, in_channel, out_channel
]
return dilated_weights
@tvm.testing.fixture
def transformed_weights_np(weights_np, dtype):
height, width, in_channel, out_channel = weights_np.shape
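    # "ohwi32o-1d" weights layout for the depthwise slice op: output channels are
    # blocked by 32, giving the shape (out_channel // 32, height, width, in_channel, 32).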
t = weights_np.reshape([height, width, in_channel, out_channel // 32, 32]).transpose(
3, 0, 1, 2, 4
)
if dtype == "float16":
return t
if dtype in ("uint8", "int8"):
quant_arr, scale, zero_point = quantize_np(t, dtype)
return [quant_arr, scale, zero_point]
raise RuntimeError(f"Unsupported data type '{dtype}'")
def generate_test_config(test_params):
"""Utility function to generate test config with meaningful ids"""
test_config = {}
dims = lambda vals: "x".join(map(str, vals))
for param in test_params:
in_shape, filt_shape, stride, dilation = param[:4]
test_name = f"nhwc{dims(in_shape)}-hwio{dims(filt_shape)}-stride{dims(stride)}-dilation{dims(dilation)}"
test_config[test_name] = param
return test_config
class Testdwconv2dSlice:
"""Test class that defines the dwconv2d slice test"""
test_params = [
[(1, 10, 10, 32), (3, 3, 1, 32), (1, 1), (1, 1), 0.0, 10.0],
[(1, 10, 10, 64), (3, 3, 1, 64), (1, 1), (1, 1), 0.0, 10.0],
[(1, 12, 12, 32), (5, 5, 1, 32), (1, 1), (1, 1), 0.0, 20.0],
[(1, 16, 16, 32), (5, 5, 1, 32), (1, 1), (2, 2), 0.0, 1.0],
[(1, 18, 10, 32), (3, 3, 1, 32), (1, 1), (1, 1), 0.0, 10.0],
[(1, 18, 18, 32), (3, 3, 1, 32), (2, 2), (1, 1), 0.0, 10.0],
[(1, 18, 10, 96), (3, 3, 1, 96), (1, 1), (1, 1), 0.0, 10.0],
[(1, 21, 21, 32), (7, 7, 1, 32), (2, 2), (1, 1), 0.0, 10.0],
[(1, 28, 28, 32), (7, 7, 1, 32), (2, 2), (2, 2), 0.0, 10.0],
[(1, 28, 28, 96), (7, 7, 1, 96), (2, 2), (2, 2), 0.0, 10.0],
[(1, 10, 16, 32), (3, 1, 1, 32), (1, 1), (1, 1), 0.0, 10.0],
]
test_config = generate_test_config(test_params)
in_shape, filt_shape, stride, dilation, low, high = tvm.testing.parameters(
*test_config.values(), ids=test_config.keys()
)
dtype = tvm.testing.parameter("float16", "uint8")
working_scope = tvm.testing.parameter("global.vtcm")
weights_layout = tvm.testing.parameter("ohwi32o-1d")
@tvm.testing.fixture
def padded_in_shape(self, in_shape, dtype):
"""Padding the input shape according to layout"""
# NOTE: For float16, the input layout is always assumed to be nhwc-8h2w32c2w-2d and
# for int8/uint8, it's nhwc-8h8w32c-2d.
# For both nhwc-8h2w32c2w-2d and nhwc-8h8w32c-2d, the height should be a multiple
# of 8. However, the width should be a multiple of 4 for the first case and 8 for
# the second case.
in_batch, in_height, in_width, in_channel = in_shape
in_height = ((in_height + 7) // 8) * 8
if dtype == "float16":
in_width = ((in_width + 3) // 4) * 4
elif dtype in ("uint8", "int8"):
in_width = ((in_width + 7) // 8) * 8
in_channel = ((in_channel + 31) // 32) * 32
return in_batch, in_height, in_width, in_channel
@tvm.testing.fixture
def out_shape(self, in_shape, dilated_filt_shape, stride):
in_batch, in_height, in_width, _ = in_shape
filt_height, filt_width, _, num_filt = dilated_filt_shape
out_height = (in_height - filt_height) // stride[0] + 1
out_width = (in_width - filt_width) // stride[1] + 1
out_channel = num_filt
return in_batch, out_height, out_width, out_channel
@tvm.testing.requires_hexagon
def test_dwconv2d(
self,
dtype,
in_out_layout,
weights_layout,
padded_in_shape,
weights_np,
filt_shape,
stride,
dilation,
out_shape,
input_np,
input_np_padded,
transformed_weights_np,
expected_output_np,
target,
working_scope,
transformed_input_np_padded,
transformed_expected_output_np,
hexagon_session,
):
"""Main test function that tests the dwconv2d slice op"""
input_tensor = tvm.te.placeholder(padded_in_shape, name="InputTensor", dtype=dtype)
weights = tvm.te.placeholder(filt_shape, name="Weights", dtype=dtype)
target_hexagon = tvm.target.hexagon("v69")
target = tvm.target.Target(target_hexagon, host=target_hexagon)
# Construct compute and schedule based on dtype
if dtype in ("uint8", "int8"):
in_data_np, activation_scale, activation_zero_point = transformed_input_np_padded
(
weights_data_np,
weight_scale,
weight_zero_point,
) = transformed_weights_np
out_data_np, output_scale, output_zero_point = transformed_expected_output_np
output_tensor = qn.qdepthwise_conv2d_compute(
input_tensor,
weights,
out_shape,
stride,
dilation,
dtype,
activation_zero_point,
activation_scale,
weight_zero_point,
weight_scale,
output_zero_point,
output_scale,
)
tir_schedule = qn.qdepthwise_conv2d_schedule(
output_tensor, [input_tensor, weights], in_out_layout, weights_layout
)
elif dtype == "float16":
in_data_np = transformed_input_np_padded
out_data_np = transformed_expected_output_np
weights_data_np = transformed_weights_np
output_tensor = dwconv2d_compute(
input_tensor, weights, out_shape, stride, dilation, dtype
)
tir_schedule = dwconv2d_schedule(
output_tensor, [input_tensor, weights], in_out_layout, weights_layout
)
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
func_name = "depthwise_conv2d_slice"
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(
tir_schedule.mod,
[input_tensor, output_tensor],
target=target,
name=func_name,
)
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=in_data_np,
axis_separators=[4],
mem_scope=working_scope,
)
weights_arr = allocate_hexagon_array(
hexagon_session.device, data=weights_data_np, mem_scope=working_scope
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
out_data_np.shape,
dtype=dtype,
axis_separators=[4],
mem_scope=working_scope,
)
mod = hexagon_session.load_module(runtime_module)
mod(input_arr, weights_arr, output_arr)
n, h, w, c = out_shape
if dtype in ("uint8", "int8"):
output_np = output_arr.numpy().reshape([n, h // 8, w // 8, c // 32, 8, 8, 32])
np.testing.assert_allclose(output_np, out_data_np, atol=3, rtol=0.02)
elif dtype == "float16":
output_np = output_arr.numpy()
np.testing.assert_allclose(output_np, out_data_np, atol=0.01, rtol=0.01)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_dequantize_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Tests for Hexagon dequantize """
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.hexagon import qnn
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import (
transform_numpy,
quantize_np,
get_hexagon_target,
)
class TestDequantizeSlice2d:
"""
For testing Dequantize Slice ops
"""
input_shape, orig_layout, input_layout, output_layout, axis_sep, dtype = tvm.testing.parameters(
((1, 16, 64, 128), "nhwc", "nhwc-8h8w32c-2d", "nhwc-4h2w32c2w-2d", [4], "int8"),
((1, 16, 64, 128), "nhwc", "nhwc-8h8w32c-2d", "nhwc-4h2w32c2w-2d", [4], "uint8"),
((1, 8, 8, 32), "nhwc", "nhwc-8h8w32c-2d", "nhwc-4h2w32c2w-2d", [4], "int8"),
((1, 8, 8, 32), "nhwc", "nhwc-8h8w32c-2d", "nhwc-4h2w32c2w-2d", [4], "uint8"),
((1, 2048), "nc", "nc-2048c-2d", "nc-512c-2d", [2], "int8"),
((1, 2048), "nc", "nc-2048c-2d", "nc-512c-2d", [2], "uint8"),
)
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape):
arr_np = np.random.random(size=input_shape).astype("float32")
return arr_np
@tvm.testing.fixture
def transformed_input_np(self, input_np, orig_layout, input_layout, dtype):
quant_arr, scale, zero_point = quantize_np(input_np, dtype)
return [transform_numpy(quant_arr, orig_layout, input_layout), scale, zero_point]
@tvm.testing.fixture
def expected_output_np(self, input_np, dtype):
quant_np, scale, zero_point = quantize_np(input_np, dtype)
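        # Dequantize reference: real_value = scale * (quantized_value - zero_point).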
ref_np = (scale * (quant_np.astype("int32") - zero_point)).astype("float32")
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, orig_layout, output_layout):
return transform_numpy(expected_output_np, orig_layout, output_layout)
@tvm.testing.requires_hexagon
def test_dequant_qnn(
self,
input_shape,
dtype,
input_layout,
output_layout,
transformed_input_np,
transformed_expected_output_np,
axis_sep,
hexagon_session,
working_scope,
):
"""
Top level testing function for dequantize
"""
dequant_input = te.placeholder(input_shape, name="A", dtype=dtype)
in_data_np, in_scale, in_zero_pt = transformed_input_np
dequant_output = qnn.dequantize_compute(dequant_input, in_scale, in_zero_pt)
tir_s = qnn.dequantize_schedule(dequant_input, dequant_output, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=in_data_np,
axis_separators=axis_sep,
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=axis_sep,
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
tir_irm = tvm.lower(tir_s.mod, [dequant_input, dequant_output], name="dequantize")
runtime_module = tvm.build(tir_irm, target=get_hexagon_target("v69"), name="dequantize")
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_expected_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_max_pool2d_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
from typing import *
from tvm import te
import tvm.testing
from tvm.contrib.hexagon.session import Session
import tvm.topi.hexagon.slice_ops as sl
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
from ...pytest_util import (
get_multitest_ids,
create_populated_numpy_ndarray,
TensorContentRandom,
)
@tvm.testing.fixture
def input_np(input_shape, dtype: str, input_tensor_populator):
return create_populated_numpy_ndarray(input_shape, dtype, input_tensor_populator)
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, output_layout):
return transform_numpy(expected_output_np, "nhwc", output_layout)
@tvm.testing.fixture
def transformed_input_np_padded(input_np_padded, input_layout):
return transform_numpy(input_np_padded, "nhwc", input_layout)
(input_layout, dtype) = tvm.testing.parameters(
("nhwc-8h2w32c2w-2d", "float16"),
("nhwc-8h8w32c-2d", "uint8"),
)
@tvm.testing.fixture
def output_layout(output_shape, dtype):
o_b, o_h, o_w, o_c = output_shape
if dtype == "float16":
if o_h == 1 and o_w == 1:
return "n11c-1024c-2d"
else:
assert o_h % 8 == 0 and o_w % 4 == 0, "Invalid output shape"
return "nhwc-8h2w32c2w-2d"
elif dtype == "int8" or "uint8":
if o_h == 1 and o_w == 1:
return "n11c-2048c-2d"
else:
assert o_h % 8 == 0 and o_w % 8 == 0, "Invalid output shape"
return "nhwc-8h8w32c-2d"
else:
raise RuntimeError(f"Unsupported data type '{dtype}'")
class TestmaxPool2dSlice:
_param_descs = [
"out_shape", # output_shape
"kernel", # kernel
"stride", # stride
"dil", # dilation
"pad", # padding
"ceil", # ceil_mode
"cnt_padded", # count_include_pad
None, # input_tensor_populator
]
_multitest_params = [
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 16, 16, 32],
[3, 3],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[8, 8],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
# Test non-one stride and dilation
(
[1, 8, 8, 32],
[3, 3],
[2, 3],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[2, 2],
[2, 2],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[2, 2],
[2, 3],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
# Test non-zero padding
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[1, 1, 1, 1],
False,
True,
TensorContentRandom(),
),
(
[1, 8, 8, 32],
[3, 3],
[1, 1],
[1, 1],
[1, 2, 3, 4],
False,
True,
TensorContentRandom(),
),
# Test n11c-1024c-2d layout which will require input and output to have different layout
(
[1, 1, 1, 2048],
[8, 8],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[6, 6],
[1, 1],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[3, 3],
[2, 2],
[1, 1],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
[4, 4],
[2, 2],
[2, 3],
[0, 0, 0, 0],
False,
True,
TensorContentRandom(),
),
]
_param_ids = get_multitest_ids(_multitest_params, _param_descs)
# NOTE: input_layout is always assumed to be "nhwc-8h2w32c2w-2d" for float16
# and "nhwc-8h8w32c-2d" for uint8
(
output_shape,
kernel,
stride,
dilation,
padding,
ceil_mode,
count_include_pad,
input_tensor_populator,
) = tvm.testing.parameters(*_multitest_params, ids=_param_ids)
@tvm.testing.fixture
def expected_output_np(
self,
input_np,
kernel,
stride,
dilation,
padding,
ceil_mode,
count_include_pad,
):
pad_before = padding[:2]
pad_after = padding[2:]
ref_np = tvm.topi.testing.poolnd_python(
input_np,
kernel,
stride,
dilation,
pad_before,
pad_after,
"max", # pool_type
count_include_pad,
False, # ceil_mode,
layout="NHWC",
)
return ref_np
@tvm.testing.fixture
def input_shape(self, output_shape, kernel, padding, stride, dilation, output_layout):
# Input shape without any padding; 'ceil' is being ignored from calculation:
o_b, o_h, o_w, o_c = output_shape
d_h, d_w = dilation
s_h, s_w = stride
k_h, k_w = kernel
pad_before_h, pad_before_w = padding[:2]
pad_after_h, pad_after_w = padding[2:]
if output_layout == "n11c-1024c-2d":
assert (
pad_before_w == 0 and pad_after_w == 0 and pad_before_h == 0 and pad_after_h == 0
), "Padding must be zero for n11c-1024c-2d layout"
assert o_h == 1 and o_w == 1, "Output height and width must be 1"
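        # Invert the pooling size relation:
        #   in = (out - 1) * stride + dilation * (kernel - 1) + 1 - pad_before - pad_after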
in_h = (o_h - 1) * s_h + d_h * (k_h - 1) + 1 - pad_before_h - pad_after_h
in_w = (o_w - 1) * s_w + d_w * (k_w - 1) + 1 - pad_before_w - pad_after_w
return [o_b, in_h, in_w, o_c]
@tvm.testing.fixture
def input_shape_padded(self, dtype, input_shape, padding, output_layout):
# Input shape is adjusted to account for 'padding'. Also, due to the physical
# layout of the buffer, height and width are adjusted so that they are a
# multiple of 8 and 4 respectively.
# NOTE: For float16, the input layout is always assumed to be nhwc-8h2w32c2w-2d and
# for int8/uint8, it's nhwc-8h8w32c-2d.
# For both nhwc-8h2w32c2w-2d and nhwc-8h8w32c-2d, the height should be a multiple
# of 8. However, the width should be a multiple of 4 for the first case and 8 for
# the second case.
height_mult = 8
if dtype == "float16":
width_mult = 4 # input layout : nhwc-8h2w32c2w-2d
elif dtype in ("uint8", "int8"):
width_mult = 8 # input layout : nhwc-8h8w32c-2d
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
pad_before_h, pad_before_w = padding[:2]
pad_after_h, pad_after_w = padding[2:]
padded_input_height = (
(input_shape[1] + pad_before_h + pad_after_h + height_mult - 1) // height_mult
) * height_mult
padded_input_width = (
(input_shape[2] + pad_before_w + pad_after_w + width_mult - 1) // width_mult
) * width_mult
return [input_shape[0], padded_input_height, padded_input_width, input_shape[3]]
@tvm.testing.fixture
def input_np_padded(self, input_np, input_shape, input_shape_padded, padding):
pad_before_h, pad_before_w = padding[:2]
pad_after_h = input_shape_padded[1] - input_shape[1] - pad_before_h
pad_after_w = input_shape_padded[2] - input_shape[2] - pad_before_w
input_padded = np.pad(
input_np,
((0, 0), (pad_before_h, pad_after_h), (pad_before_w, pad_after_w), (0, 0)),
"constant",
)
return input_padded
@tvm.testing.requires_hexagon
def test_max_pool2d_slice(
self,
stride,
kernel,
dtype,
dilation,
padding,
ceil_mode, # only needed for manually obtaining the test id string
input_tensor_populator, # only needed for manually obtaining the test id string
count_include_pad,
input_layout,
output_layout,
output_shape,
input_shape,
input_shape_padded,
input_np,
input_np_padded,
transformed_input_np_padded,
transformed_expected_output_np,
expected_output_np,
hexagon_session: Session,
):
A = te.placeholder(input_shape_padded, name="A", dtype=dtype)
M = sl.max_pool2d_compute(A, output_shape, kernel, stride, dilation)
# tir schedule
tir_schedule = sl.max_pool2d_STIR_schedule(M, A, output_layout, input_layout)
sch = tir_schedule.mod
input_axis_separator = [4]
if output_layout in (
"nhwc-8h2w32c2w-2d",
"nhwc-8h8w32c-2d",
"n11c-1024c-2d",
"n11c-2048c-2d",
):
output_axis_separator = [4]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[A, M],
get_hexagon_target("v69"),
name="max_pool2d",
)
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np_padded,
axis_separators=input_axis_separator,
mem_scope="global.vtcm",
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
transformed_expected_output_np.shape,
dtype,
axis_separators=output_axis_separator,
mem_scope="global.vtcm",
)
mod = hexagon_session.load_module(func)
mod(input_arr, output_arr)
b, h, w, c = output_shape
if output_layout == "nhwc-8h2w32c2w-2d":
output_np = output_arr.numpy().reshape([b, h // 8, w // 4, c // 32, 8, 2, 32, 2])
elif output_layout == "nhwc-8h8w32c-2d":
output_np = output_arr.numpy().reshape([b, h // 8, w // 8, c // 32, 8, 8, 32])
elif output_layout == "n11c-2048c-2d":
output_np = output_arr.numpy().reshape([b, 1, 1, c // 2048, 2048])
elif output_layout == "n11c-1024c-2d":
output_np = output_arr.numpy().reshape([b, 1, 1, c // 1024, 1024])
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
if dtype == "float16":
np.testing.assert_allclose(
output_np, transformed_expected_output_np, rtol=1e-3, atol=1e-3
)
elif dtype == "uint8":
np.testing.assert_allclose(output_np, transformed_expected_output_np, atol=1)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_relu_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm.topi.hexagon.slice_ops.relu import relu_compute, relu_stir_schedule
from tvm import te
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
@tvm.testing.fixture
def input_np(in_shape, dtype):
return np.random.uniform(size=in_shape).astype(dtype)
@tvm.testing.fixture
def ref_output_np(input_np):
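    # Reference ReLU: mask out negative entries, equivalent to np.maximum(input_np, 0).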
output_np = input_np * (input_np > 0)
return output_np
@tvm.testing.fixture
def transformed_input_np(input_np, input_layout):
return transform_numpy(input_np, "nhwc", input_layout)
@tvm.testing.fixture
def transformed_ref_output_np(ref_output_np, output_layout):
return transform_numpy(ref_output_np, "nhwc", output_layout)
class BaseRelu:
in_shape = tvm.testing.parameter(
(1, 8, 4, 32),
(1, 16, 4, 32),
(1, 16, 8, 32),
(1, 16, 8, 64),
(2, 8, 4, 32),
(2, 16, 4, 32),
(2, 16, 8, 32),
(2, 16, 8, 64),
)
dtype = tvm.testing.parameter("float16")
working_scope = tvm.testing.parameter("global.vtcm")
input_layout = tvm.testing.parameter("nhwc-8h2w32c2w-2d")
output_layout = tvm.testing.parameter("nhwc-8h2w32c2w-2d")
class TestReluSlice(BaseRelu):
@tvm.testing.requires_hexagon
def test_relu(
self,
in_shape,
dtype,
input_layout,
output_layout,
transformed_input_np,
transformed_ref_output_np,
working_scope,
hexagon_session,
):
InputTensor = te.placeholder(in_shape, name="InputTensor", dtype=dtype)
OutputTensor = relu_compute(InputTensor)
tir_s = relu_stir_schedule(InputTensor, OutputTensor, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=[4],
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_ref_output_np.shape,
dtype=transformed_ref_output_np.dtype,
axis_separators=[4],
mem_scope=working_scope,
)
func_name = "relu"
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(tir_s.mod, target=get_hexagon_target("v69"), name=func_name)
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_ref_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_softmax_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.testing import softmax_python
import tvm.topi.hexagon.slice_ops as sl
from tvm.contrib.hexagon import allocate_hexagon_array
def transform_numpy(arr_np, layout):
if layout in ["nc-512c-2d"]:
N, C = arr_np.shape
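        # "nc-512c-2d": the channel axis is blocked into chunks of 512 elements.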
return arr_np.reshape([N, C // 512, 512])
raise RuntimeError(f"Unexpected layout '{layout}'")
@tvm.testing.fixture
def input_np(input_shape, dtype):
return (np.random.uniform(size=input_shape)).astype(dtype)
@tvm.testing.fixture
def transformed_expected_output_np(expected_output_np, output_layout):
return transform_numpy(expected_output_np, output_layout)
@tvm.testing.fixture
def transformed_input_np(input_np, input_layout):
return transform_numpy(input_np, input_layout)
class Basesoftmax2d:
input_shape, input_layout, output_layout, axis_sep = tvm.testing.parameters(
((1, 1024), "nc-512c-2d", "nc-512c-2d", [2])
)
dtype = tvm.testing.parameter("float32")
working_scope = tvm.testing.parameter("global.vtcm")
class TestSoftmax2d(Basesoftmax2d):
@tvm.testing.fixture
def expected_output_np(self, input_np):
if len(input_np.shape) == 2:
ref_np_2d = softmax_python(input_np)
return ref_np_2d
raise RuntimeError(f"Unexpected input shape '{input_np.shape}'")
@tvm.testing.requires_hexagon
def test_softmax_f32(
self,
dtype,
input_layout,
output_layout,
input_shape,
input_np,
transformed_input_np,
transformed_expected_output_np,
expected_output_np,
working_scope,
axis_sep,
hexagon_session,
):
target_hexagon = tvm.target.hexagon(
"v69",
llvm_options="--disable-loop-unrolling-pass",
)
A = te.placeholder(input_shape, name="A", dtype=dtype)
O = sl.softmax_compute(A)
if input_layout == "nc-512c-2d":
tir_s = sl.softmax_stir_schedule(O, A, output_layout, input_layout)
sch = tir_s.mod
else:
raise RuntimeError(f"Unexpected input layout '{input_layout}'")
with tvm.transform.PassContext(
opt_level=3,
config={
"tir.LoopPartition": {"partition_const_loop": True},
},
):
func = tvm.build(
sch,
[A, O],
tvm.target.Target(target_hexagon, host=target_hexagon),
name="softmax_slice",
)
input_arr = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=axis_sep,
mem_scope=working_scope,
)
output_arr = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=axis_sep,
mem_scope=working_scope,
)
mod = hexagon_session.load_module(func)
mod(input_arr, output_arr)
n, c = input_np.shape
output_np = output_arr.numpy().reshape(1, c // 512, 512)
np.testing.assert_allclose(output_np, transformed_expected_output_np, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/slice_op/test_tanh_slice.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test for Hexagon slice tanh op """
import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.topi.hexagon.slice_ops as sl
import tvm.contrib.hexagon
from tvm.contrib.hexagon import allocate_hexagon_array
from ...infrastructure import transform_numpy, get_hexagon_target
# pylint: disable=invalid-name
class TestTanhSlice:
"""For Testing Tanh fp16 op"""
input_shape, orig_layout, input_layout, output_layout, axis_sep = tvm.testing.parameters(
((1, 8, 4, 32), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 16, 12, 64), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
((1, 64, 64, 32), "nhwc", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", [4]),
)
dtype = tvm.testing.parameter("float16")
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
return np.random.uniform(size=input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, orig_layout, input_layout):
return transform_numpy(input_np, orig_layout, input_layout)
@tvm.testing.fixture
def expected_output_np(self, input_np):
ref_np = np.tanh(input_np)
return ref_np
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, orig_layout, output_layout):
return transform_numpy(expected_output_np, orig_layout, output_layout)
@tvm.testing.requires_hexagon
def test_tanh(
self,
input_shape,
dtype,
input_layout,
output_layout,
transformed_input_np,
transformed_expected_output_np,
axis_sep,
hexagon_session,
working_scope,
):
"""Top Level testing function for tanh fp16 op"""
A = te.placeholder(input_shape, name="A", dtype=dtype)
M = sl.tanh_te_compute(A)
tanhf16_func = te.create_prim_func([A, M])
tir_s = sl.tanhf16_schedule(tanhf16_func, input_layout, output_layout)
A_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=axis_sep,
mem_scope=working_scope,
)
M_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=transformed_expected_output_np.dtype,
axis_separators=axis_sep,
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
tir_irm = tvm.lower(tir_s.mod, [A, M], name="tanhf16")
runtime_module = tvm.build(tir_irm, target=get_hexagon_target("v69"), name="tanhf16")
mod = hexagon_session.load_module(runtime_module)
mod(A_data, M_data)
output_np = M_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_expected_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for Add, Subtract and Multiply."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.topi.hexagon.slice_ops as sl
import tvm.topi.hexagon.qnn as qn
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import (
transform_numpy,
quantize_np,
get_hexagon_target,
)
ZERO_POINT_A_VAL = None
SCALE_A_VAL = None
ZERO_POINT_B_VAL = None
SCALE_B_VAL = None
ZERO_POINT_M_VAL = None
SCALE_M_VAL = None
def hexagon_wrapper_allocation(
device,
layout,
axis_separators,
tensor_shape=None,
data_original=None,
transformed_data=None,
dtype=None,
):
"""Input layout can either be nhwc-8h2w32c2w-2d or nhwc"""
if layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"]:
data_nd = allocate_hexagon_array(
device,
tensor_shape=tensor_shape,
data=transformed_data,
dtype=dtype,
axis_separators=axis_separators,
mem_scope="global.vtcm",
)
elif layout == "nhwc":
data_nd = allocate_hexagon_array(
device,
data=data_original,
)
    else:
        raise RuntimeError(f"Unexpected layout '{layout}'")
    return data_nd
class TestAddSubtractMultiplyBroadcast2d:
"""Test Add, Subtract and Multiply class."""
(
input_shape_a,
input_shape_b,
input_a_layout,
input_b_layout,
output_layout,
dtype,
) = tvm.testing.parameters(
# no broadcast needed - short input
(
[1, 8, 4, 32],
[1, 8, 4, 32],
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"float16",
),
# no broadcast needed - large input
(
[1, 56, 64, 128],
[1, 56, 64, 128],
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"nhwc-8h2w32c2w-2d",
"float16",
),
# one input needs broadcast
(
[1, 56, 64, 128],
[1, 1, 64, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# Both input needs broadcast
(
[1, 56, 1, 128],
[1, 1, 64, 1],
"nhwc",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# One axis in one input needs broadcast
(
[1, 56, 20, 128],
[1, 56, 20, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
# broadcast all axes in one input
(
[1, 48, 56, 32],
[1, 1, 1, 1],
"nhwc-8h2w32c2w-2d",
"nhwc",
"nhwc-8h2w32c2w-2d",
"float16",
),
(
[1, 48, 32, 64],
[1, 48, 32, 64],
"nhwc-8h8w32c-2d",
"nhwc-8h8w32c-2d",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 2 in one input
(
[1, 48, 32, 64],
[1, 48, 1, 64],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 1 in one input
(
[1, 48, 32, 64],
[1, 1, 32, 64],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast axis 3 in one input
(
[1, 8, 8, 32],
[1, 8, 8, 1],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 56, 1, 128],
[1, 1, 64, 1],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 48, 1, 1],
[1, 1, 32, 32],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast both inputs
(
[1, 48, 1, 32],
[1, 1, 32, 1],
"nhwc",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
# broadcast all axes in one input
(
[1, 48, 56, 32],
[1, 1, 1, 1],
"nhwc-8h8w32c-2d",
"nhwc",
"nhwc-8h8w32c-2d",
"uint8",
),
)
op_name = tvm.testing.parameter("add", "subtract", "multiply")
@tvm.testing.fixture
def expected_output_np(self, input_np_a, input_np_b, op_name):
"""Generate expected output."""
if op_name == "add":
out_ref = np.add(input_np_a, input_np_b)
elif op_name == "subtract":
out_ref = np.subtract(input_np_a, input_np_b)
elif op_name == "multiply":
out_ref = np.multiply(input_np_a, input_np_b)
return out_ref
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, output_layout, dtype):
"""Generate expected output."""
if dtype == "float16":
return transform_numpy(expected_output_np, "nhwc", output_layout)
if dtype in ["uint8", "int8"]:
global ZERO_POINT_M_VAL, SCALE_M_VAL
out_ref_quantized, SCALE_M_VAL, ZERO_POINT_M_VAL = quantize_np(
expected_output_np, dtype
)
return transform_numpy(out_ref_quantized, "nhwc", output_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def input_np_a(self, input_shape_a, dtype):
"""Generate numpy input for variable a."""
if dtype in ["uint8", "int8"]:
dtype = "float32"
return np.random.random(input_shape_a).astype(dtype)
@tvm.testing.fixture
def input_np_b(self, input_shape_b, dtype):
"""Generate numpy input for variable b."""
if dtype in ["uint8", "int8"]:
dtype = "float32"
return np.random.random(input_shape_b).astype(dtype)
@tvm.testing.fixture
def quantize_input_np_a(self, input_np_a, dtype):
if dtype in ["uint8", "int8"]:
global ZERO_POINT_A_VAL, SCALE_A_VAL
input_np_a_quantized, SCALE_A_VAL, ZERO_POINT_A_VAL = quantize_np(input_np_a, dtype)
return input_np_a_quantized
return None
@tvm.testing.fixture
def quantize_input_np_b(self, input_np_b, dtype):
if dtype in ["uint8", "int8"]:
global ZERO_POINT_B_VAL, SCALE_B_VAL
input_np_b_quantized, SCALE_B_VAL, ZERO_POINT_B_VAL = quantize_np(input_np_b, dtype)
return input_np_b_quantized
return None
@tvm.testing.fixture
def transformed_input_np_a(self, input_np_a, quantize_input_np_a, input_a_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_a, "nhwc", input_a_layout)
if dtype in ["uint8", "int8"]:
return transform_numpy(quantize_input_np_a, "nhwc", input_a_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def transformed_input_np_b(self, input_np_b, quantize_input_np_b, input_b_layout, dtype):
if dtype == "float16":
return transform_numpy(input_np_b, "nhwc", input_b_layout)
if dtype in ["uint8", "int8"]:
return transform_numpy(quantize_input_np_b, "nhwc", input_b_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.requires_hexagon
def test_transform(
self,
dtype,
input_shape_a,
input_shape_b,
input_np_a,
input_np_b,
quantize_input_np_a,
quantize_input_np_b,
transformed_input_np_a,
transformed_input_np_b,
expected_output_np,
transformed_expected_output_np,
hexagon_session,
output_layout,
input_a_layout,
input_b_layout,
op_name,
):
"""Test transform."""
output_shape = expected_output_np.shape
a_tensor = te.placeholder(input_shape_a, name="a_tensor", dtype=dtype)
b_tensor = te.placeholder(input_shape_b, name="b_tensor", dtype=dtype)
if dtype == "float16":
if op_name == "add":
m_tensor = sl.add_broadcast_compute(a_tensor, b_tensor)
elif op_name == "subtract":
m_tensor = sl.subtract_broadcast_compute(a_tensor, b_tensor)
elif op_name == "multiply":
m_tensor = sl.multiply_broadcast_compute(a_tensor, b_tensor)
tir_schedule = sl.tir_broadcast_schedule(
m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout, op_name
)
elif dtype in ["uint8", "int8"]:
args = [
a_tensor,
b_tensor,
output_shape,
ZERO_POINT_A_VAL,
SCALE_A_VAL,
ZERO_POINT_B_VAL,
SCALE_B_VAL,
ZERO_POINT_M_VAL,
SCALE_M_VAL,
dtype,
]
if op_name == "add":
m_tensor = qn.qadd_broadcast_compute(*args)
elif op_name == "subtract":
m_tensor = qn.qsubtract_broadcast_compute(*args)
elif op_name == "multiply":
m_tensor = qn.qmultiply_broadcast_compute(*args)
tir_schedule = qn.tir_schedule_quant(
m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout
)
sch = tir_schedule.mod
input_axis_separator = [4]
if output_layout in (
"nhwc-8h2w32c2w-2d",
"nhwc-8h8w32c-2d",
):
output_axis_separator = [4]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[a_tensor, b_tensor, m_tensor],
get_hexagon_target("v69"),
name="slice_op_with_transform",
)
if dtype == "float16":
in_data_np_a = input_np_a
in_data_np_b = input_np_b
elif dtype in ["int8", "uint8"]:
in_data_np_a = quantize_input_np_a
in_data_np_b = quantize_input_np_b
else:
raise RuntimeError(f"Unsupport dtype '{dtype}'")
a_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=input_a_layout,
data_original=in_data_np_a,
transformed_data=transformed_input_np_a,
axis_separators=input_axis_separator,
)
b_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=input_b_layout,
data_original=in_data_np_b,
transformed_data=transformed_input_np_b,
axis_separators=input_axis_separator,
)
m_data_nd = hexagon_wrapper_allocation(
hexagon_session.device,
layout=output_layout,
tensor_shape=transformed_expected_output_np.shape,
axis_separators=output_axis_separator,
dtype=dtype,
)
mod = hexagon_session.load_module(func)
mod(a_data_nd, b_data_nd, m_data_nd)
batch, height, width, channel = output_shape
# convert nd to np and reshape to fixed chunk size layout
if output_layout == "nhwc-8h2w32c2w-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch, height // 8, width // 4, channel // 32, 8, 2, 32, 2]
)
elif output_layout == "nhwc-8h8w32c-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch, height // 8, width // 8, channel // 32, 8, 8, 32]
)
if dtype == "float16":
np.testing.assert_allclose(
transformed_expected_output_np, m_data_np, rtol=1e-3, atol=1e-3
)
elif dtype in ["int8", "uint8"]:
np.testing.assert_allclose(transformed_expected_output_np, m_data_np, rtol=1, atol=1)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for matmul"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
class TestMatMulFloat:
"""Test MatMul Float class."""
x_batch, y_batch, m_size, n_size, k_size = tvm.testing.parameters(
(1, 1, 16, 16, 32),
(5, 5, 16, 16, 32),
(5, 5, 16, 20, 32),
(30, 30, 16, 20, 32),
# Test batch broadcasting.
(1, 5, 16, 16, 32),
(5, 1, 16, 16, 32),
)
dtype = tvm.testing.parameter(
"float32",
"float16",
)
# TODO(mehrdadh): add dynamic testing
@tvm.testing.requires_hexagon
def test_batch_matmul(
self, hexagon_session: Session, x_batch, y_batch, m_size, n_size, k_size, dtype
):
"""Test batch MatMul."""
if dtype == "float16":
pytest.xfail("float16 is not supported.")
x = te.placeholder((x_batch, m_size, k_size), name="x")
y = te.placeholder((y_batch, n_size, k_size), name="y")
def get_ref_data():
a_np = np.random.uniform(size=(x_batch, m_size, k_size)).astype(dtype)
b_np = np.random.uniform(size=(y_batch, n_size, k_size)).astype(dtype)
c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
with tvm.target.Target(get_hexagon_target("v68")):
fcompute = topi.nn.batch_matmul
fschedule = topi.hexagon.schedule_batch_matmul
out = fcompute(x, y)
s = fschedule([out])
out_shape = out.shape
func = tvm.build(
s,
[x, y, out],
get_hexagon_target("v68"),
name="batch_matmul",
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(out_shape), dtype=dtype), dev)
mod["batch_matmul"](a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
class TestMatMulInt8:
"""Test MatMul INT8 class."""
x_batch, y_batch, m_size, n_size, k_size = tvm.testing.parameters(
(1, 1, 2, 3, 1),
(1, 1, 16, 24, 32),
(5, 5, 24, 16, 32),
(30, 30, 16, 20, 32),
(1, 5, 16, 16, 32),
(5, 1, 16, 16, 32),
)
dtype = tvm.testing.parameter(
"float32",
"float16",
)
@tvm.testing.requires_hexagon
def test_batch_matmul_int8(
self, hexagon_session: Session, x_batch, y_batch, m_size, n_size, k_size
):
"""Test batch matmul INT8."""
dtype = "int8"
out_dtype = "int8"
assert x_batch == y_batch or x_batch == 1 or y_batch == 1
x = te.placeholder((x_batch, m_size, k_size), name="x", dtype=dtype)
y = te.placeholder((y_batch, n_size, k_size), name="y", dtype=dtype)
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=(x_batch, m_size, k_size)).astype(
dtype
)
b_np = np.random.randint(low=-128, high=127, size=(y_batch, n_size, k_size)).astype(
dtype
)
c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype=out_dtype)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
with tvm.target.Target(get_hexagon_target("v68")):
fcompute = topi.nn.batch_matmul
fschedule = topi.hexagon.schedule_batch_matmul
out = fcompute(x, y)
s = fschedule([out])
func = tvm.build(
s,
[x, y, out],
get_hexagon_target("v68"),
name="batch_matmul_int8",
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
mod["batch_matmul_int8"](a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test conv2d HVX intrinsic implementation"""
import numpy as np
import tvm
import tvm.contrib.hexagon
from tvm.topi.testing import conv2d_nhwc_python
from ..infrastructure import get_hexagon_target
def build_conv2d(target):
"""Build and the return the conv2d module that calls the intrinsic implementation"""
act_n, act_h, act_w, act_c = (
tvm.te.var("act_n"),
tvm.te.var("act_h"),
tvm.te.var("act_w"),
tvm.te.var("act_c"),
)
    filt_h, filt_w, filt_o = tvm.te.var("filt_h"), tvm.te.var("filt_w"), tvm.te.var("filt_o")
off_l, off_t = tvm.te.var("off_l"), tvm.te.var("off_t")
stride_h, stride_w = tvm.te.var("stride_h"), tvm.te.var("stride_w")
act_flat = tvm.te.placeholder(
shape=(act_n, act_h, act_w, act_c), dtype="float16", name="act_flat"
)
wgt_flat = tvm.te.placeholder(
shape=(filt_h, filt_w, act_c, filt_o), dtype="float16", name="wgt_flat"
)
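    # The convolution itself is an opaque external call: te.extern wires the activations
    # and weights into the runtime-provided "conv2d_packed_fp16" packed function, so TVM
    # only schedules the surrounding glue, not the convolution body.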
out_flat = tvm.te.extern(
shape=(act_n, (act_h - filt_h) // stride_h + 1, (act_w - filt_w) // stride_w + 1, filt_o),
inputs=[act_flat, wgt_flat],
fcompute=lambda ins, outs: tvm.tir.call_cpacked(
"conv2d_packed_fp16", # Function from TVM runtime
ins[0],
ins[1],
off_t,
off_l,
stride_h,
stride_w,
outs[0],
tvm.runtime.const(0), # resource_handle (unused)
),
dtype="float16",
)
s = tvm.te.create_schedule(out_flat.op)
func_name = "extern_conv"
with tvm.transform.PassContext(opt_level=3):
module = tvm.build(
s,
[act_flat, wgt_flat, off_t, off_l, stride_h, stride_w, out_flat],
target=target,
name=func_name,
)
return module
def gen_config(params):
"""Utility function to generate useful ids for shape_parameters"""
dims = lambda vals: "x".join(map(str, vals))
config = {}
for param in params:
act_shape, wgt_shape, inp_stride = param
name = f"nhwc{dims(act_shape)}-hwio{dims(wgt_shape)}-stride{dims(inp_stride)}"
config[name] = param
return config
class TestConv2dIntrin:
"""Test Conv2d Intrin class"""
shape_parameters = [
(
(1, 8, 4, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 10, 14, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 14, 6, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 14, 6, 3),
(3, 3, 3, 64),
(1, 1),
),
(
(1, 14, 6, 3),
(5, 5, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(2, 2, 3, 3),
(1, 1),
),
(
(1, 14, 6, 64),
(3, 3, 64, 3),
(1, 1),
),
(
(1, 4, 4, 40),
(3, 3, 40, 3),
(1, 1),
),
(
(1, 4, 4, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 5, 5, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 6, 6, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 7, 7, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(5, 5, 3, 3),
(1, 1),
),
(
(1, 8, 8, 64),
(2, 2, 64, 64),
(1, 1),
),
(
(1, 8, 4, 3),
(3, 3, 3, 3),
(2, 2),
),
(
(1, 14, 6, 3),
(3, 3, 3, 64),
(2, 2),
),
(
(1, 14, 6, 3),
(5, 5, 3, 3),
(2, 2),
),
(
(1, 8, 8, 3),
(2, 2, 3, 3),
(2, 2),
),
]
config = gen_config(shape_parameters)
act_shape, wgt_shape, inp_stride = tvm.testing.parameters(*config.values(), ids=config.keys())
inp_offset = tvm.testing.parameter((0, 0), ids=["offset0x0"])
@tvm.testing.requires_hexagon
def test_conv2d(self, act_shape, wgt_shape, inp_stride, inp_offset, hexagon_session):
"""Test conv2d intrinsic implementation"""
assert act_shape[3] == wgt_shape[2]
# Currently, input offset does not affect the output shape
def get_out_shape(ash, wsh, inp_stride):
assert ash[3] == wsh[2]
osh = (
ash[0],
(ash[1] - wsh[0]) // inp_stride[0] + 1,
(ash[2] - wsh[1]) // inp_stride[1] + 1,
wsh[3],
)
            assert all(x > 0 for x in osh)
return osh
act = np.random.rand(*act_shape).astype("float16")
wgt = np.random.rand(*wgt_shape).astype("float16")
module = build_conv2d(get_hexagon_target("v68"))
mod = hexagon_session.load_module(module)
output = tvm.nd.array(
np.zeros(get_out_shape(act_shape, wgt_shape, inp_stride), dtype="float16"),
device=hexagon_session.device,
)
mod(
tvm.nd.array(act, device=hexagon_session.device),
tvm.nd.array(wgt, device=hexagon_session.device),
inp_offset[0], # off_t
inp_offset[1], # off_l
inp_stride[0], # stride_height
inp_stride[1], # stride_width
output,
)
out = output.numpy()
# Generate reference output and compare:
ref_out = conv2d_nhwc_python(
act.astype("float32"), wgt.astype("float32"), stride=inp_stride, padding="VALID"
).astype("float16")
tvm.testing.assert_allclose(out, ref_out, rtol=5e-2, atol=5e-2)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for convolution."""
import numpy as np
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from ..infrastructure import get_hexagon_target
class BaseConv2DTests:
"""Conv2D test class."""
add_bias = tvm.testing.parameter(False)
apply_relu = tvm.testing.parameter(False)
dilation = tvm.testing.parameter(1)
batch = tvm.testing.parameter(1)
dtype = tvm.testing.parameter("float32")
random_seed = tvm.testing.parameter(0)
@tvm.testing.fixture
def input_shape(self, batch, in_channel, in_size):
return (batch, in_channel, in_size, in_size)
@tvm.testing.fixture
def weight_shape(self, num_filter, in_channel, kernel):
return (num_filter, in_channel, kernel, kernel)
@tvm.testing.fixture
def bias_shape(self, num_filter):
return (num_filter, 1, 1)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
self,
random_seed,
input_shape,
weight_shape,
bias_shape,
dtype,
stride,
padding,
dilation,
add_bias,
apply_relu,
):
"""Generate reference data."""
np.random.seed(random_seed)
# scipy.signal.convolve2d does not support float16 data types, and
# the python fallback is too slow for general use. Computing
# ref_data in float32 will have fewer rounding errors than the TVM
        # float16 compute, but those vary based on schedule anyway.
conv_dtype = "float32" if dtype == "float16" else dtype
a_np = np.random.uniform(size=input_shape).astype(dtype)
w_np = np.random.uniform(size=weight_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding
).astype(dtype)
if add_bias:
c_np = c_np + b_np
if apply_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
@tvm.testing.requires_hexagon
def test_conv2d_nchw(
self,
hexagon_session: Session,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dtype,
ref_data,
dilation,
add_bias,
apply_relu,
):
"""Test Conv2d NCHW."""
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
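        # padding_sum is only used to build a unique function name below.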
padding_sum = pad_top + pad_left + pad_bottom + pad_right
a_np, w_np, b_np, c_np = ref_data
a_tensor = te.placeholder(a_np.shape, name="a_tensor", dtype=dtype)
w_tensor = te.placeholder(w_np.shape, name="w_tensor", dtype=dtype)
bias = te.placeholder(b_np.shape, name="bias", dtype=dtype)
if "int" in dtype:
tol = {"atol": 0, "rtol": 0}
elif dtype == "float32":
tol = {"rtol": 1e-4, "atol": 2e-4}
elif dtype == "float16":
# a_tensor summation in float16 with a single accumulator very
# quickly runs into large rounding errors. At some point,
            # this tolerance should be made schedule-dependent to avoid
# false negatives.
num_values_summed = in_channel * kernel * kernel
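            # gap_size is the float16 spacing (one ULP) at the largest output value.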
gap_size = np.nextafter(c_np.max(), np.inf, dtype=c_np.dtype) - c_np.max()
tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
with tvm.target.Target(get_hexagon_target("v68")):
fcompute = topi.nn.conv2d_nchw
fschedule = topi.hexagon.schedule_conv2d_nchw
c_tensor = fcompute(
a_tensor, w_tensor, (stride, stride), padding, (dilation, dilation), dtype
)
if add_bias:
c_tensor = topi.add(c_tensor, bias)
if apply_relu:
c_tensor = topi.nn.relu(c_tensor)
s = fschedule([c_tensor])
func_name = "conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding_sum,
dilation,
)
func = tvm.build(
s,
[a_tensor, w_tensor, bias, c_tensor],
get_hexagon_target("v68"),
name=func_name,
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a_data = tvm.nd.array(a_np, dev)
weight = tvm.nd.array(w_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev)
mod[func_name](a_data, weight, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, **tol)
class TestBatchSize(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(32, 28, 32, 3, 1, 1),
)
batch = tvm.testing.parameter(1, 4, 9)
class TestBiasRelu(BaseConv2DTests):
apply_relu = tvm.testing.parameter(True, False, ids=["relu", "no_relu"])
add_bias = tvm.testing.parameter(True, False, ids=["bias", "no_bias"])
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(64, 56, 64, 3, 1, 1),
(64, 8, 64, 3, 1, (1, 2, 2, 1)),
(64, 8, 64, 5, 2, (1, 3)),
(64, 8, 64, 3, 1, "VALID"),
(32, 8, 32, 24, 1, "SAME"),
)
class TestResNet18Workloads(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(3, 224, 64, 7, 2, 3),
(64, 56, 64, 3, 1, 1),
(64, 56, 64, 1, 1, 0),
(64, 56, 32, 3, 2, 1),
(64, 56, 32, 1, 2, 0),
(64, 28, 32, 3, 1, 1),
)
class TestMobilenet(BaseConv2DTests):
batch, in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(1, 32, 112, 32, 3, 1, 1),
)
class TestWeirdWorkloads(BaseConv2DTests):
batch, in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(2, 2, 2, 2, 2, 2, 2),
(3, 3, 3, 3, 3, 3, 3),
(4, 4, 4, 4, 4, 4, 4),
(5, 5, 5, 5, 5, 5, 5),
(6, 6, 6, 6, 6, 6, 6),
(1, 1, 1, 1, 1, 1, 1),
(2, 13, 71, 59, 3, 1, 1),
)
class TestAsymmetricPadding(BaseConv2DTests):
dilation = tvm.testing.parameter(1, 2)
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(3, 35, 64, 7, 2, (0, 0, 1, 1)),
(64, 8, 128, 3, 1, (3, 3, 2, 2)),
(64, 8, 64, 1, 1, (1, 2, 2, 1)),
(64, 17, 48, 1, 1, (1, 2)),
(64, 8, 64, 3, 1, (3, 1)),
(128, 8, 96, 3, 1, (0, 2)),
(64, 35, 64, 3, 1, (1, 2)),
(64, 8, 64, 1, 1, "VALID"),
(388, 8, 64, 3, 1, "VALID"),
(64, 10, 48, 3, 1, "VALID"),
(64, 19, 64, 1, 1, "SAME"),
(64, 5, 32, 2, 1, "SAME"),
(32, 8, 32, 3, 1, "SAME"),
(64, 8, 64, 3, 1, (1, 2, 2, 1)),
(64, 8, 64, 5, 2, (1, 3)),
(64, 8, 64, 3, 1, "VALID"),
(32, 8, 32, 24, 1, "SAME"),
(32, 35, 64, 7, 2, (0, 0, 2, 2)),
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for convolution."""
import numpy as np
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
class BaseConv2DTests:
"""Test Conv2D base class."""
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
self, dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation
):
"""Generate reference data."""
in_height = in_width = in_size
a_shape = (batch, in_height, in_width, in_channel)
w_shape = (kernel, kernel, in_channel, num_filter)
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
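        # Reference output: dilate the kernel, then run the numpy NHWC conv2d.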
dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
@tvm.testing.requires_hexagon
def test_conv2d_nhwc(
self,
hexagon_session: Session,
ref_data,
batch,
in_channel,
in_size,
num_filter,
kernel,
dtype,
stride,
padding,
dilation,
):
"""Test Conv2D NHWC."""
a_np, w_np, b_np = ref_data
a_tensor = te.placeholder(a_np.shape, name="a_tensor", dtype=dtype)
w_tensor = te.placeholder(w_np.shape, name="w_tensor", dtype=dtype)
with tvm.target.Target(get_hexagon_target("v68")):
fcompute = topi.nn.conv2d_nhwc
fschedule = topi.hexagon.schedule_conv2d_nhwc
b_tensor = fcompute(a_tensor, w_tensor, stride, padding, dilation, dtype)
s = fschedule([b_tensor])
func_name = "conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
dtype,
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
)
func = tvm.build(
s, [a_tensor, w_tensor, b_tensor], get_hexagon_target("v68"), name=func_name
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a_data = tvm.nd.array(a_np, dev)
weight = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev)
mod[func_name](a_data, weight, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
class TestConv2dNHWC(BaseConv2DTests):
(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
) = tvm.testing.parameters(
(1, 64, 32, 64, 3, 1, "SAME", 1),
(4, 32, 16, 32, 5, 2, "SAME", 1),
(1, 64, 32, 64, 3, 1, "VALID", 1),
(4, 32, 16, 32, 5, 2, "VALID", 1),
(1, 32, 16, 64, 3, 2, (0, 0, 1, 1), 1),
(1, 32, 16, 64, 3, 2, (1, 1, 2, 2), 1),
(1, 32, 16, 32, 5, 2, (3, 3, 2, 2), 1),
(1, 32, 16, 64, 3, 2, (0, 1, 2, 3), 1),
(1, 64, 32, 64, 3, 1, "SAME", 2),
(1, 64, 32, 64, 3, 1, (1, 1, 2, 2), 2),
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm.contrib.hexagon.session import Session
import tvm.testing
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
# TODO: Should add kernel to tvm.testing.fixture
class BaseConv2DTransposeTests:
"""Conv2D transpose base class."""
random_seed = tvm.testing.parameter(0)
@tvm.testing.requires_hexagon
def test_conv2d(
self,
hexagon_session: Session,
batch,
in_channel,
in_size,
num_filter,
stride,
padding,
output_padding,
random_seed,
):
"""Test conv2D."""
in_height, in_width = in_size
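        # The kernel size is fixed at 1x1 for these tests (see the TODO above).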
kernel_height, kernel_width = (1, 1)
stride_height, stride_width = stride
pad_top, pad_left, pad_bottom, pad_right = padding
a_tensor = te.placeholder((batch, in_channel, in_height, in_width), name="a_tensor")
w_tensor = te.placeholder(
(in_channel, num_filter, kernel_height, kernel_width), name="w_tensor"
)
a_shape = get_const_tuple(a_tensor.shape)
w_shape = get_const_tuple(w_tensor.shape)
dtype = a_tensor.dtype
def get_ref_data():
np.random.seed(random_seed)
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np, w_np, stride, padding, output_padding
)
c_np = np.maximum(b_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
fcompute_args = (
a_tensor,
w_tensor,
[stride_height, stride_width],
[pad_top, pad_left, pad_bottom, pad_right],
a_tensor.dtype,
output_padding,
)
with tvm.target.Target(get_hexagon_target("v68")):
fcompute = topi.nn.conv2d_transpose_nchw
fschedule = topi.hexagon.schedule_conv2d_transpose_nchw
b_tensor = fcompute(*fcompute_args)
c_tensor = topi.nn.relu(b_tensor)
schedule_1 = fschedule([b_tensor])
schedule_2 = fschedule([c_tensor])
dev = hexagon_session.device
a_data = tvm.nd.array(a_np, dev)
weight = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev)
func1 = tvm.build(schedule_1, [a_tensor, w_tensor, b_tensor], get_hexagon_target("v68"))
func2 = tvm.build(schedule_2, [a_tensor, w_tensor, c_tensor], get_hexagon_target("v68"))
mod1 = hexagon_session.load_module(func1)
mod2 = hexagon_session.load_module(func2)
mod1(a_data, weight, b)
mod2(a_data, weight, c)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
class TestConv2DTranspose(BaseConv2DTransposeTests):
"""Test Conv2D transpose class."""
(batch, in_channel, in_size, num_filter, stride) = tvm.testing.parameters(
(1, 3, (224, 224), 1, (1, 1)),
(1, 8, (224, 224), 1, (1, 1)),
(1, 512, (8, 1), 128, (31, 1)),
(1, 32, (8192, 1), 1, (1, 1)),
)
padding = tvm.testing.parameter((0, 0, 0, 0))
output_padding = tvm.testing.parameter((0, 0))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for dense"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
class TestDense:
"""Dense test class."""
random_seed = tvm.testing.parameter(0)
use_bias = tvm.testing.parameter(True, False)
    # batch_size greater than 8 would break
batch_size = tvm.testing.parameter(1, 2, 8)
in_dim, out_dim = tvm.testing.parameters((1024, 1000))
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
("float16", "float32"),
("int8", "int32"),
)
@tvm.testing.fixture(cache_return_value=True)
def dense_ref_data(
self, random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype
):
"""Generate reference data."""
np.random.seed(random_seed)
if "float" in in_dtype:
a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype)
elif in_dtype == "int8":
a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype)
b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype)
c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype)
else:
raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
if use_bias:
matmul += c_np
d_np = np.maximum(matmul, 0)
return (a_np, b_np, c_np, d_np)
@tvm.testing.requires_hexagon
def test_dense(
self,
hexagon_session: Session,
batch_size,
in_dim,
out_dim,
use_bias,
in_dtype,
out_dtype,
dense_ref_data,
):
"""Test dense."""
if in_dtype == "float16":
pytest.xfail("float16 is not supported.")
if "int" in in_dtype:
tol = {"atol": 0, "rtol": 0}
elif in_dtype == "float32":
tol = {"rtol": 1e-5, "atol": 1e-5}
a_tensor = te.placeholder((batch_size, in_dim), name="a_tensor", dtype=in_dtype)
b_tensor = te.placeholder((out_dim, in_dim), name="b_tensor", dtype=in_dtype)
c_tensor = te.placeholder((out_dim,), name="c_tensor", dtype=out_dtype)
a_np, b_np, c_np, d_np = dense_ref_data
fcompute = topi.nn.dense
fschedule = topi.hexagon.schedule_dense
with tvm.target.Target(get_hexagon_target("v68")):
d_tensor = fcompute(a_tensor, b_tensor, c_tensor if use_bias else None, out_dtype)
d_tensor = topi.nn.relu(d_tensor)
schedule = fschedule([d_tensor])
func = tvm.build(
schedule,
[a_tensor, b_tensor, c_tensor, d_tensor],
get_hexagon_target("v68"),
name="dense",
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a_data = tvm.nd.array(a_np, dev)
b_data = tvm.nd.array(b_np, dev)
c_data = tvm.nd.array(c_np, dev)
d_data = tvm.nd.array(np.zeros(get_const_tuple(d_tensor.shape), dtype=out_dtype), dev)
mod["dense"](a_data, b_data, c_data, d_data)
tvm.testing.assert_allclose(d_data.numpy(), d_np, **tol)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long, redefined-outer-name
"""Test depth_to_space slice op for hexagon"""
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm.topi.hexagon.slice_ops.depth_to_space import d2s_compute, d2s_schedule
from tvm.topi.testing import depth_to_space_python
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import transform_numpy, get_hexagon_target
class TestD2SSlice:
"""Test class that defines the Depth to Space slice test"""
d2s_fp16_tests = (
((1, 8, 8, 256), 2, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 8, 8, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 16, 16, 256), 2, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 16, 16, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 8, 8, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 8, 8, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 16, 16, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
((1, 16, 16, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"),
)
d2s_uint8_tests = (
((1, 8, 8, 256), 2, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"),
((1, 8, 8, 1024), 4, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"),
((1, 8, 8, 256), 2, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"),
((1, 8, 8, 1024), 4, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"),
)
(input_shape, block_size, mode, dtype, input_layout, output_layout,) = tvm.testing.parameters(
*d2s_fp16_tests,
*d2s_uint8_tests,
)
working_scope = tvm.testing.parameter("global.vtcm")
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
return np.random.uniform(size=input_shape).astype(dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, input_layout):
return transform_numpy(input_np, "nhwc", input_layout)
@tvm.testing.fixture
def ref_output_np(self, input_np, block_size, mode):
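        # depth_to_space_python expects NCHW data, so transpose in and back out.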
a_np = np.transpose(input_np, axes=[0, 3, 1, 2])
ref_np = depth_to_space_python(a_np, block_size, mode=mode)
ref_np = np.transpose(ref_np, axes=[0, 2, 3, 1])
return ref_np
@tvm.testing.fixture
def transformed_ref_output_np(self, ref_output_np, output_layout):
return transform_numpy(ref_output_np, "nhwc", output_layout)
@tvm.testing.requires_hexagon
def test_d2s_slice(
self,
input_shape,
block_size,
mode,
dtype,
input_layout,
output_layout,
hexagon_session,
working_scope,
transformed_input_np,
transformed_ref_output_np,
):
"""Top level testing function for depth to space"""
input_tensor = te.placeholder(input_shape, name="input_tensor", dtype=dtype)
output = d2s_compute(input_tensor, block_size, "NHWC", mode)
tir_s = d2s_schedule(input_tensor, output, input_layout, output_layout)
input_data = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
axis_separators=[4],
mem_scope=working_scope,
)
output_data = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_ref_output_np.shape,
dtype=transformed_ref_output_np.dtype,
axis_separators=[4],
mem_scope=working_scope,
)
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(
tir_s.mod,
[input_tensor, output],
target=get_hexagon_target("v69"),
name="depth_to_space",
)
mod = hexagon_session.load_module(runtime_module)
mod(input_data, output_data)
output_np = output_data.numpy()
tvm.testing.assert_allclose(
output_np,
transformed_ref_output_np,
1e-3,
1e-3,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Depthwise Conv2D Tests."""
import numpy as np
import tvm
from tvm.contrib.hexagon.session import Session
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from ..infrastructure import get_hexagon_target
class BaseDepthwiseConv2D:
"""Provides the test_conv2d test function, to be used by other test classes.
Test parameter sets are split out into different classes for
readability (e.g. used for mobilenet), and for restrictions
(e.g. implemented only for llvm).
"""
random_seed = tvm.testing.parameter(0)
in_dtype, out_dtype = tvm.testing.parameters(
("float32", "float32"),
)
@tvm.testing.fixture
def input_shape(self, layout, batch, in_channel, in_size, filter_shape):
"""Returns input shape."""
if layout == "NCHW":
return (batch, in_channel, in_size, in_size)
elif layout == "NHWC":
return (batch, in_size, in_size, in_channel)
elif layout == "NCHWc":
oc_block = filter_shape[-1]
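            # Pick the largest block size that is <= oc_block and evenly divides in_channel.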
ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0)
return (batch, in_channel // ic_block, in_size, in_size, ic_block)
else:
raise RuntimeError(f"Not supported layout {layout}")
@tvm.testing.fixture
def filter_shape(self, layout, in_channel, channel_multiplier, kernel):
"""Returns filter shape."""
filter_channel = in_channel
if layout == "NCHW":
return (filter_channel, channel_multiplier, kernel, kernel)
elif layout == "NHWC":
return (kernel, kernel, filter_channel, channel_multiplier)
elif layout == "NCHWc":
out_channel = in_channel * channel_multiplier
# For testing the functionality, we choose an arbitrary block
# size that can divide out_channel, regardless of the
# performance.
oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0)
return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block)
else:
raise RuntimeError(f"Not supported layout {layout}")
@tvm.testing.fixture
def scale_shape(self, layout, in_channel, channel_multiplier, filter_shape):
"""Returns scale shape."""
out_channel = in_channel * channel_multiplier
if layout in ("NCHW", "NHWC"):
return (out_channel,)
if layout == "NCHWc":
oc_block = filter_shape[-1]
return (out_channel // oc_block, oc_block)
raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def shift_shape(self, scale_shape):
"""Returns shift shape."""
return scale_shape
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
self,
random_seed,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
dilation,
stride,
padding,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
):
"""Generate reference data."""
np.random.seed(random_seed)
# scipy.signal.convolve2d does not support float16 data types, and
# the python fallback is too slow for general use. Computing
# ref_data in float32 will have fewer rounding errors than the TVM
        # float16 compute, but those vary based on schedule anyway.
conv_dtype = "float32" if in_dtype == "float16" else in_dtype
input_np = np.random.uniform(size=input_shape).astype(in_dtype)
filter_np = np.random.uniform(size=filter_shape).astype(in_dtype)
scale_np = np.random.uniform(size=scale_shape).astype(out_dtype)
shift_np = np.random.uniform(size=shift_shape).astype(out_dtype)
if layout == "NCHW":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw
dilation = (1, 1, dilation, dilation)
reshape = (1, -1, 1, 1)
elif layout == "NHWC":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc
dilation = (dilation, dilation, 1, 1)
reshape = (1, 1, 1, -1)
elif layout == "NCHWc":
np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc
dilation = (1, 1, dilation, dilation, 1, 1)
reshape = (1, scale_shape[0], 1, 1, scale_shape[1])
dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation)
output_np = np_depthwise_conv2d(
input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding
).astype(out_dtype)
if use_scale_shift:
output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape)
if apply_relu:
output_np = np.maximum(output_np, 0)
return (
input_np,
filter_np,
scale_np,
shift_np,
output_np,
)
@tvm.testing.requires_hexagon
def test_conv2d(
self,
hexagon_session: Session,
in_dtype,
out_dtype,
layout,
input_shape,
filter_shape,
scale_shape,
shift_shape,
use_scale_shift,
apply_relu,
kernel,
stride,
padding,
dilation,
ref_data,
):
"""Test conv2D."""
        # Transform the padding argument from 'str' to 'tuple' to
        # match the "workload" tuple in TopHub. The padding_args
        # chosen for each layout reproduce the previous behavior.
if dilation == 1:
padding_args = get_pad_tuple(padding, (kernel, kernel))
padding_args_i = [0, 1, 2, 3] if layout == "NCHW" else [0, 1]
padding_args = [padding_args[i] for i in padding_args_i]
else:
padding_args = padding
# placeholder
input_tensor = te.placeholder(input_shape, name="input_tensor", dtype=in_dtype)
filter_tensor = te.placeholder(filter_shape, name="filter_tensor", dtype=in_dtype)
scale = te.placeholder(scale_shape, name="scale", dtype=out_dtype)
shift = te.placeholder(shift_shape, name="shift", dtype=out_dtype)
if layout == "NCHW":
topi_scale_shift = topi.nn.scale_shift_nchw
fcompute_args = (input_tensor, filter_tensor, stride, padding_args, dilation, out_dtype)
elif layout == "NHWC":
topi_scale_shift = topi.nn.scale_shift_nhwc
fcompute_args = (input_tensor, filter_tensor, stride, padding_args, dilation, out_dtype)
elif layout == "NCHWc":
topi_scale_shift = topi.nn.scale_shift_nchwc
in_layout = "NCHW{}c".format(input_shape[-1])
out_layout = "NCHW{}c".format(filter_shape[-1])
fcompute_args = (
input_tensor,
filter_tensor,
stride,
padding,
dilation,
in_layout,
out_layout,
out_dtype,
)
with tvm.target.Target(get_hexagon_target("v68")):
# Declare, build schedule
if layout == "NCHW":
fcompute = topi.nn.depthwise_conv2d_nchw
fschedule = topi.hexagon.schedule_depthwise_conv2d_nchw
elif layout == "NHWC":
fcompute = topi.nn.depthwise_conv2d_nhwc
fschedule = topi.hexagon.schedule_depthwise_conv2d_nhwc
c_tensor = fcompute(*fcompute_args)
if use_scale_shift:
c_tensor = topi_scale_shift(c_tensor, scale, shift)
if apply_relu:
c_tensor = topi.nn.relu(c_tensor)
schedule = fschedule([c_tensor])
# Build and run
f = tvm.build(
schedule,
[input_tensor, filter_tensor, scale, shift, c_tensor],
get_hexagon_target("v68"),
)
mod = hexagon_session.load_module(f)
input_np, filter_np, scale_np, shift_np, output_np = ref_data
dev = hexagon_session.device
input_tvm = tvm.nd.array(input_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
scale_tvm = tvm.nd.array(scale_np, dev)
shift_tvm = tvm.nd.array(shift_np, dev)
output_tvm = tvm.nd.array(
np.zeros(shape=get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype),
dev,
)
mod(input_tvm, filter_tvm, scale_tvm, shift_tvm, output_tvm)
tol = {"rtol": 1e-4, "atol": 1e-5}
tvm.testing.assert_allclose(output_np, output_tvm.numpy(), **tol)
class TestDepthwiseConv2DMobilenetWorkloads(BaseDepthwiseConv2D):
"""Extra tests to verify functionality for workloads used by mobilenet."""
layout = tvm.testing.parameter("NCHW", "NHWC")
use_scale_shift = tvm.testing.parameter(False, ids=["no_scale_shift"])
apply_relu = tvm.testing.parameter(False, ids=["no_relu"])
batch = tvm.testing.parameter(1)
channel_multiplier = tvm.testing.parameter(1)
kernel = tvm.testing.parameter(3)
padding = tvm.testing.parameter("SAME")
dilation = tvm.testing.parameter(1)
in_channel, in_size, stride = tvm.testing.parameters(
(32, 112, 1),
(64, 112, 2),
(128, 56, 1),
(128, 56, 2),
(256, 28, 1),
)
class TestDepthwiseConv2D(BaseDepthwiseConv2D):
"""Test depthwise conv2D class."""
layout = tvm.testing.parameter("NCHW", "NHWC")
use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
apply_relu = tvm.testing.parameter(True, False, ids=["with_relu", "no_relu"])
(batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
(1, 64, 32, 1, 3, 1),
(1, 128, 64, 2, 5, 2),
)
padding = tvm.testing.parameter("VALID")
dilation = tvm.testing.parameter(1)
# TODO(hexagon-team): add TestDepthwiseConv2D_NCHWc test.
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_pad.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for reduce"""
import numpy as np
import tvm
import tvm.testing
from tvm import te, topi
from tvm.contrib.hexagon.session import Session
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
@tvm.testing.requires_hexagon
def test_nn_pad(hexagon_session: Session):
"""Test nn pad."""
dtype = "uint8"
in_shape = (1, 56, 56, 32)
data_in = np.ones(in_shape).astype(dtype)
a_tensor = te.placeholder(shape=in_shape, name="a_tensor", dtype=dtype)
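    # Pad H and W by 1 on each side: (1, 56, 56, 32) -> (1, 58, 58, 32).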
c_tensor = topi.nn.pad(a_tensor, [0, 1, 1, 0], [0, 1, 1, 0], pad_value=0)
with tvm.target.Target(get_hexagon_target("v68")):
fschedule = topi.hexagon.schedule_pad
s = fschedule(c_tensor)
func = tvm.build(s, [a_tensor, c_tensor], get_hexagon_target("v68"), name="pad")
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(data_in, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev)
mod["pad"](a, b)
# Reference numpy pad output
ref_out = np.pad(data_in, pad_width=((0, 0), (1, 1), (1, 1), (0, 0)))
tvm.testing.assert_allclose(b.numpy(), ref_out)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for pooling"""
import numpy as np
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
class TestAdaptivePool:
"""Adaptive pool test class."""
dshape, out_size, pool_type, layout = tvm.testing.parameters(
((1, 3, 112, 112), (1, 1), "max", "NCHW"),
((1, 3, 112, 112), (1, 1), "avg", "NCHW"),
((1, 14, 56, 78), (34, 13), "max", "NCHW"),
((1, 5, 46, 97), (4, 96), "avg", "NCHW"),
((1, 112, 112, 3), (1, 1), "max", "NHWC"),
((1, 5, 46, 97), (4, 96), "avg", "NHWC"),
((1, 16, 32, 32, 32), (1, 1, 1), "max", "NCDHW"),
((1, 16, 32, 32, 32), (1, 1, 1), "avg", "NCDHW"),
((1, 16, 32, 32, 32), (2, 2, 2), "avg", "NCDHW"),
(
(1, 16, 64, 32, 32),
(7, 8, 9),
"avg",
"NCDHW",
),
(
(1, 16, 64, 32, 32),
(8, 16, 16),
"avg",
"NCDHW",
),
((1, 16, 32, 32, 32), (1, 1, 1), "avg", "NDHWC"),
((1, 16, 32, 32, 32), (2, 2, 2), "max", "NDHWC"),
((1, 16, 32, 32, 32), (2, 4, 4), "max", "NDHWC"),
)
@tvm.testing.requires_hexagon
def test_adaptive_pool(self, hexagon_session: Session, dshape, out_size, pool_type, layout):
"""Test adaptive pool."""
dtype = "float32"
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
oshape = np_out.shape
data = te.placeholder(dshape, name="data", dtype=dtype)
if len(out_size) == 2:
out = topi.nn.adaptive_pool(data, out_size, pool_type, layout)
else:
assert len(out_size) == 3
out = topi.nn.adaptive_pool3d(data, out_size, pool_type, layout)
with tvm.target.Target(get_hexagon_target("v68")):
fschedule = topi.hexagon.schedule_adaptive_pool
s = fschedule(out)
func = tvm.build(
s,
[data, out],
get_hexagon_target("v68"),
name="adaptive-pool",
)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(np_data, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), dev)
mod["adaptive-pool"](a, b)
tvm.testing.assert_allclose(b.numpy(), np_out, rtol=4e-5, atol=1e-6)
def verify_poolnd(
hexagon_session,
n,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad=True,
layout="NCW",
):
"""Pool test verification."""
a_tensor = te.placeholder(input_shape, name="a_tensor")
if n == 1:
b_tensor = topi.nn.pool1d(
a_tensor,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 2:
b_tensor = topi.nn.pool2d(
a_tensor,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
elif n == 3:
b_tensor = topi.nn.pool3d(
a_tensor,
kernel=kernel,
stride=stride,
dilation=dilation,
padding=padding,
pool_type=pool_type,
ceil_mode=ceil_mode,
layout=layout,
count_include_pad=count_include_pad,
)
else:
raise ValueError(f"PoolND only supports n=1, 2, 3 got n={n}")
b_tensor = topi.nn.relu(b_tensor)
dtype = a_tensor.dtype
output_shape = [int(i) for i in b_tensor.shape]
input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
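    # The padding list holds the "before" values followed by the "after" values.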
padding_before = padding[:n]
padding_after = padding[n:]
ref_np = tvm.topi.testing.poolnd_python(
input_np,
kernel,
stride,
dilation,
padding_before,
padding_after,
pool_type,
count_include_pad,
ceil_mode,
layout=layout,
)
np.testing.assert_equal(tuple(output_shape), tuple(ref_np.shape))
with tvm.target.Target(get_hexagon_target("v68")):
fschedule = topi.hexagon.schedule_pool
s = fschedule(b_tensor, layout)
func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name="pool")
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(input_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=dtype), dev)
mod["pool"](a, b)
tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5)
class TestPool1D:
"""Pool1D test class."""
(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
) = tvm.testing.parameters(
([1, 16, 32], [2], [2], [1], [0, 0], "avg", False, True, "NCW"),
([1, 16, 31], [3], [3], [1], [1, 2], "avg", False, True, "NCW"),
([1, 16, 32], [2], [2], [1], [1, 2], "avg", False, False, "NCW"),
([1, 16, 31], [4], [4], [1], [3, 3], "avg", False, False, "NCW"),
([1, 16, 31], [4], [4], [1], [0, 0], "avg", False, False, "NCW"),
([1, 16, 32], [2], [2], [1], [0, 0], "max", False, True, "NCW"),
([1, 16, 31], [3], [3], [1], [2, 1], "max", False, True, "NCW"),
([1, 16, 31], [3], [3], [1], [2, 1], "max", True, True, "NCW"),
([1, 16, 31], [3], [3], [1], [2, 5], "avg", False, True, "NCW"),
([1, 16, 32], [2], [2], [1], [0, 3], "avg", False, False, "NCW"),
([1, 16, 31], [3], [3], [1], [1, 4], "max", False, True, "NCW"),
([1, 16, 31], [3], [3], [1], [3, 0], "max", True, True, "NCW"),
# Test non-1 dilations
([1, 16, 31], [3], [3], [2], [2, 5], "avg", False, True, "NCW"),
([1, 16, 32], [2], [2], [3], [0, 3], "avg", False, False, "NCW"),
([1, 16, 31], [3], [3], [2], [1, 4], "max", False, True, "NCW"),
([1, 16, 31], [3], [3], [3], [3, 0], "max", True, True, "NCW"),
# Test Channel last
([1, 32, 16], [2], [2], [1], [0, 0], "avg", False, True, "NWC"),
([1, 31, 16], [3], [3], [1], [1, 2], "avg", False, True, "NWC"),
([1, 32, 16], [2], [2], [1], [1, 2], "avg", False, False, "NWC"),
([1, 31, 16], [4], [4], [1], [3, 3], "avg", False, False, "NWC"),
([1, 31, 16], [4], [4], [1], [0, 0], "avg", False, False, "NWC"),
([1, 32, 16], [2], [2], [1], [0, 0], "max", False, True, "NWC"),
([1, 31, 16], [3], [3], [1], [2, 1], "max", False, True, "NWC"),
([1, 31, 16], [3], [3], [1], [2, 1], "max", True, True, "NWC"),
([1, 31, 16], [3], [3], [1], [2, 5], "avg", False, True, "NWC"),
([1, 31, 16], [2], [2], [1], [0, 3], "avg", False, False, "NWC"),
([1, 31, 16], [3], [3], [1], [1, 4], "max", False, True, "NWC"),
([1, 31, 16], [3], [3], [1], [3, 0], "max", True, True, "NWC"),
([1, 31, 16], [3], [3], [2], [2, 5], "avg", False, True, "NWC"),
([1, 32, 16], [2], [2], [3], [0, 3], "avg", False, False, "NWC"),
([1, 31, 16], [3], [3], [2], [1, 4], "max", False, True, "NWC"),
([1, 31, 16], [3], [3], [3], [3, 0], "max", True, True, "NWC"),
)
@tvm.testing.requires_hexagon
def test_pool1d(
self,
hexagon_session: Session,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
):
"""Test Pool1D."""
verify_poolnd(
hexagon_session,
1,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
)
class TestPool2D:
"""Pool2D test class."""
(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
) = tvm.testing.parameters(
([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True, "NCHW"),
([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False, "NCHW"),
([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False, "NCHW"),
([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False, "NCHW"),
([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True, "NCHW"),
([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, True, "NCHW"),
# Test non-1 dilations
([1, 16, 31, 31], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True, "NCHW"),
([1, 16, 32, 32], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, True, "NCHW"),
([1, 16, 31, 31], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, True, "NCHW"),
# Test channel last
([1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True, "NHWC"),
([1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False, "NHWC"),
([1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False, "NHWC"),
([1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False, "NHWC"),
([1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True, "NHWC"),
([1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True, "NHWC"),
([1, 32, 32, 16], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, True, "NHWC"),
([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, True, "NHWC"),
)
@tvm.testing.requires_hexagon
def test_pool2d(
self,
hexagon_session: Session,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
):
"""Test Pool2D."""
verify_poolnd(
hexagon_session,
2,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
)
class TestPool3D:
"""Pool3D test class."""
(
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
) = tvm.testing.parameters(
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
True,
"NCDHW",
),
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"max",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
True,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
"NCDHW",
),
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 0, 5, 4, 3, 2],
"max",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[3, 2, 1, 0, 5, 4],
"max",
True,
True,
"NCDHW",
),
# Test non-1 dilation
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
"NCDHW",
),
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[2, 1, 3],
[1, 0, 5, 4, 3, 2],
"max",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[2, 2, 3],
[3, 2, 1, 0, 5, 4],
"max",
True,
True,
"NCDHW",
),
# Test channel last layouts
(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
True,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
True,
"NDHWC",
),
(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[1, 1, 2, 2, 2, 1],
"avg",
False,
False,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[3, 3, 3, 3, 3, 3],
"avg",
False,
False,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[4, 4, 4],
[4, 4, 4],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"avg",
False,
False,
"NDHWC",
),
(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 0, 0, 0, 0, 0],
"max",
False,
True,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
False,
True,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 2, 1, 1, 1, 2],
"max",
True,
True,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
"NDHWC",
),
(
[1, 32, 32, 32, 16],
[2, 2, 2],
[2, 2, 2],
[1, 1, 1],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[1, 0, 5, 4, 3, 2],
"max",
False,
True,
"NDHWC",
),
(
[1, 31, 31, 31, 16],
[3, 3, 3],
[3, 3, 3],
[1, 1, 1],
[3, 2, 1, 0, 5, 4],
"max",
True,
True,
"NDHWC",
),
# Test non-1 dilation
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[2, 1, 0, 5, 4, 3],
"avg",
False,
True,
"NCDHW",
),
(
[1, 16, 32, 32, 32],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[0, 5, 4, 3, 2, 1],
"avg",
False,
False,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[2, 1, 3],
[1, 0, 5, 4, 3, 2],
"max",
False,
True,
"NCDHW",
),
(
[1, 16, 31, 31, 31],
[3, 3, 3],
[3, 3, 3],
[2, 2, 3],
[3, 2, 1, 0, 5, 4],
"max",
True,
True,
"NCDHW",
),
)
@tvm.testing.requires_hexagon
def test_pool3d(
self,
hexagon_session: Session,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
):
"""Test Pool3D."""
verify_poolnd(
hexagon_session,
3,
input_shape,
kernel,
stride,
dilation,
padding,
pool_type,
ceil_mode,
count_include_pad,
layout,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TIR quantize schedule tests."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.topi.hexagon.qnn as s1
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import (
transform_numpy,
quantize_np,
get_hexagon_target,
)
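# Scale and zero point are produced by quantize_np in the expected_output_np
# fixture and reused when building the quantize compute below.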
QUANTIZE_SCALE = None
QUANTIZE_ZERO_POINT = None
class TestQuantize:
"""Test quantize class."""
@tvm.testing.fixture
def expected_output_np(self, input_np, output_dtype):
global QUANTIZE_SCALE, QUANTIZE_ZERO_POINT
quant_np, QUANTIZE_SCALE, QUANTIZE_ZERO_POINT = quantize_np(input_np, output_dtype)
return quant_np
@tvm.testing.fixture
def input_np(self, input_shape, input_dtype):
return np.random.random(input_shape).astype(input_dtype)
@tvm.testing.fixture
def transformed_input_np(self, input_np, input_crouton_layout):
return transform_numpy(input_np, "nhwc", input_crouton_layout)
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, output_layout):
return transform_numpy(expected_output_np, "nhwc", output_layout)
input_crouton_layout, output_layout, input_dtype = tvm.testing.parameters(
("nhwc-4h2w32c2w-2d", "nhwc-8h8w32c-2d", "float32"),
)
output_dtype = tvm.testing.parameter("uint8", "int8")
input_shape = tvm.testing.parameter(
(1, 8, 8, 32), (1, 16, 16, 32), (1, 16, 16, 128), (1, 64, 64, 64)
)
@tvm.testing.requires_hexagon
def test_quantize(
self,
input_dtype,
output_dtype,
transformed_input_np,
input_shape,
expected_output_np,
transformed_expected_output_np,
input_crouton_layout,
output_layout,
hexagon_session,
):
"""Test quantize."""
a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=input_dtype)
m_tensor = s1.quantize_compute(a_tensor, QUANTIZE_SCALE, QUANTIZE_ZERO_POINT, output_dtype)
tir_schedule = s1.tir_quantize_schedule(
m_tensor, a_tensor, input_crouton_layout, output_layout
)
sch = tir_schedule.mod
input_axis_separator = [4]
output_axis_separator = [4]
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[a_tensor, m_tensor],
get_hexagon_target("v69"),
name="quantize",
)
a_data_nd = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
dtype=input_dtype,
axis_separators=input_axis_separator,
mem_scope="global.vtcm",
)
m_data_nd = allocate_hexagon_array(
hexagon_session.device,
tensor_shape=transformed_expected_output_np.shape,
dtype=output_dtype,
axis_separators=output_axis_separator,
mem_scope="global.vtcm",
)
mod = hexagon_session.load_module(func)
mod(a_data_nd, m_data_nd)
        b, h, width, c = expected_output_np.shape
        # Convert the nd result to numpy and reshape it to the fixed chunk-size layout.
        m_data_np = m_data_nd.numpy().reshape([b, h // 8, width // 8, c // 32, 8, 8, 32])
np.testing.assert_allclose(transformed_expected_output_np, m_data_np, atol=1)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for reduce"""
import numpy as np
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
from ..infrastructure import get_hexagon_target
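# Helpers that emulate keepdims for argmax/argmin (numpy's argmax/argmin do not
# accept a keepdims argument in older numpy versions).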
def _my_npy_argmax(arr, axis, keepdims):
if not keepdims:
return arr.argmax(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmax(axis=axis).reshape(out_shape)
def _my_npy_argmin(arr, axis, keepdims):
if not keepdims:
return arr.argmin(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmin(axis=axis).reshape(out_shape)
class TestReduce:
"""Test reduce class."""
in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters(
((32,), 0, False, "argmax", "float32"),
((32, 24, 32, 24), (1, 2, 3), True, "sum", "float32"),
((2, 3), None, True, "all", "bool"),
((32, 24 * 32 * 24), (1,), False, "max", "float32"),
((32, 128, 24), None, True, "sum", "float32"),
((32, 128, 24), None, True, "all", "bool"),
((32, 24, 32, 24), (0, 2), False, "min", "float32"),
((32, 128), 1, True, "argmax", "float32"),
((32, 24, 32, 24), 2, False, "argmin", "float32"),
((31, 21, 15), None, True, "argmax", "float32"),
((31, 21, 15), None, False, "sum", "float32"),
((2, 3), None, True, "any", "bool"),
((32, 128, 24), None, True, "any", "bool"),
((1, 4, 7), 1, True, "any", "bool"),
((32, 24, 32, 24), 2, False, "any", "bool"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, in_shape, axis, keepdims, reduce_type, dtype):
"""Generate test reference data."""
if dtype == "bool":
in_npy_map = in_npy = np.random.choice([True, False], size=in_shape)
else:
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)
if reduce_type == "sum":
out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
elif reduce_type == "all" and dtype == "bool":
out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
elif reduce_type == "any" and dtype == "bool":
out_npy = in_npy_map.any(axis=axis, keepdims=keepdims)
elif reduce_type == "max":
out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
elif reduce_type == "min":
out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
elif reduce_type == "argmin":
out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
return in_npy, in_npy_map, out_npy
@tvm.testing.requires_hexagon
def test_reduce_map(
self, hexagon_session: Session, ref_data, in_shape, axis, keepdims, reduce_type, dtype
):
"""Test reduce map."""
in_npy, in_npy_map, out_npy = ref_data
# Build the logic and compile the function
a_tensor = te.placeholder(shape=in_shape, name="a_tensor", dtype=dtype)
a1_tensor = topi.sqrt(topi.exp(a_tensor))
out_dtype = dtype
if reduce_type == "sum":
b_tensor = topi.sum(a1_tensor, axis=axis, keepdims=keepdims)
elif reduce_type == "all":
b_tensor = topi.all(a_tensor, axis=axis, keepdims=keepdims)
elif reduce_type == "any":
b_tensor = topi.any(a_tensor, axis=axis, keepdims=keepdims)
elif reduce_type == "max":
b_tensor = topi.max(a1_tensor, axis=axis, keepdims=keepdims)
elif reduce_type == "min":
b_tensor = topi.min(a1_tensor, axis=axis, keepdims=keepdims)
elif reduce_type == "argmax":
b_tensor = topi.argmax(a1_tensor, axis=axis, keepdims=keepdims)
out_dtype = "int32"
elif reduce_type == "argmin":
b_tensor = topi.argmin(a1_tensor, axis=axis, keepdims=keepdims)
out_dtype = "int32"
else:
raise NotImplementedError
with tvm.target.Target(get_hexagon_target("v68")):
fschedule = topi.hexagon.schedule_reduce
s = fschedule(b_tensor)
func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name=reduce_type)
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
data_tvm = tvm.nd.array(in_npy, device=dev)
out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype)
mod[reduce_type](data_tvm, out_tvm)
if reduce_type in ["argmax", "argmin"]:
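# argmax/argmin return indices, so gather the selected values from the reference input
# and compare them with the true max/min instead of comparing raw indices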
out_tvm_indices = out_tvm.numpy()
if keepdims:
out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
if axis is None:
out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
else:
other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :]))
sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
out_tvm_val = in_npy_map[sel_indices]
if reduce_type == "argmax":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3)
elif reduce_type == "argmin":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3)
else:
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test reshape class."""
import numpy as np
import tvm
import tvm.testing
import tvm.topi.hexagon.slice_ops as sl
from tvm import te
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import transform_numpy, get_hexagon_target
BATCH_FLATTEN_FP16_TESTS = (
([1, 1, 1, 2048], [1, 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"),
([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"),
([1, 8, 8, 1024], [1, 8 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"),
([2, 4, 8, 1024], [2, 4 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"),
)
BATCH_FLATTEN_UINT8_TESTS = (
([1, 1, 1, 2048], [1, 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"),
([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"),
)
def reshape_helper(
func,
fcompute,
fschedule,
data_type,
input_shape,
input_layout,
output_shape,
output_layout,
hexagon_session,
):
"""Reshape helper function."""
a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=data_type)
if func == "reshape":
d_tensor = fcompute(a_tensor, output_shape)
elif func == "batch_flatten":
d_tensor = fcompute(a_tensor)
else:
raise RuntimeError(f"Unexpected func '{func}'")
tir_s = fschedule(
d_tensor,
a_tensor,
output_layout,
input_layout,
)
with tvm.transform.PassContext(opt_level=3):
runtime_module = tvm.build(tir_s.mod, target=get_hexagon_target("v69"), name=func)
mod = hexagon_session.load_module(runtime_module)
a_numpy = (np.random.uniform(-10, 10, input_shape)).astype(data_type)
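# both reshape and batch_flatten amount to a plain reshape here, so a single NumPy
# reference covers either mode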
ref = np.reshape(a_numpy, output_shape)
input_np_transformed = transform_numpy(a_numpy, "nhwc", input_layout)
ref_np_transformed = transform_numpy(ref, "nhwc", output_layout)
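# axis separators tell allocate_hexagon_array where to split the flattened layout into
# separate VTCM allocations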
input_axis_sep = [4]
if output_layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"]:
output_axis_sep = [4]
elif output_layout in ["nc-1024-2d", "nc-2048-2d"]:
output_axis_sep = [2]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
a_tvm = allocate_hexagon_array(
hexagon_session.device,
data=input_np_transformed,
axis_separators=input_axis_sep,
mem_scope="global.vtcm",
)
output = allocate_hexagon_array(
hexagon_session.device,
ref_np_transformed.shape,
data_type,
axis_separators=output_axis_sep,
mem_scope="global.vtcm",
)
mod(a_tvm, output)
np.testing.assert_allclose(output.numpy(), ref_np_transformed, atol=1e-07, rtol=0)
class BaseTestBatchFlatten:
"""Test batch flatten class."""
(input_shape, output_shape, input_layout, output_layout, data_type,) = tvm.testing.parameters(
*BATCH_FLATTEN_FP16_TESTS,
*BATCH_FLATTEN_UINT8_TESTS,
)
class TestBatchFlatten(BaseTestBatchFlatten):
"""Test batch flatten class."""
@tvm.testing.requires_hexagon
def test_batch_flatten(
self,
data_type,
input_shape,
input_layout,
output_shape,
output_layout,
hexagon_session,
):
"""Test batch flatten."""
reshape_helper(
"batch_flatten",
sl.batch_flatten_compute,
sl.batch_flatten_stir_schedule,
data_type,
input_shape,
input_layout,
output_shape,
output_layout,
hexagon_session,
)
class BaseTestReshape(BaseTestBatchFlatten):
"""Test reshape base class."""
reshape_fp16_tests = (
([1, 8, 4, 64], [1, 8, 8, 32], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"),
([1, 16, 8, 128], [1, 16, 16, 64], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"),
)
reshape_uint8_tests = (
([1, 8, 8, 128], [1, 8, 16, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"),
([1, 16, 64, 128], [1, 16, 128, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"),
)
(input_shape, output_shape, input_layout, output_layout, data_type,) = tvm.testing.parameters(
*BATCH_FLATTEN_FP16_TESTS,
*BATCH_FLATTEN_UINT8_TESTS,
*reshape_fp16_tests,
*reshape_uint8_tests,
)
class TestReshape(BaseTestReshape):
"""Test reshape class."""
@tvm.testing.requires_hexagon
def test_reshape(
self,
data_type,
input_shape,
input_layout,
output_shape,
output_layout,
hexagon_session,
):
"""Test reshape."""
reshape_helper(
"reshape",
sl.reshape_compute,
sl.reshape_stir_schedule,
data_type,
input_shape,
input_layout,
output_shape,
output_layout,
hexagon_session,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_resize2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Resize 2D tesst.
"""
import numpy as np
import tvm
from tvm import te
from tvm.topi.testing import resize2d_python
import tvm.topi.hexagon as s1
from tvm.contrib.hexagon import allocate_hexagon_array
from ..infrastructure import transform_numpy, get_hexagon_target
class TestResize2d:
"""Test resize 2D class."""
(batch, channel, in_height, in_width, out_height, out_width,) = tvm.testing.parameters(
(
1,
32,
8,
8,
16,
16,
),
(
1,
32,
48,
48,
8,
8,
),
)
(layout, input_crouton_layout, output_layout, dtype,) = tvm.testing.parameters(
("NHWC", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"),
("NHWC", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"),
)
coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
method = tvm.testing.parameter("nearest_neighbor", "linear")
@tvm.testing.fixture
def expected_output_np(
self,
input_np,
in_height,
in_width,
out_height,
out_width,
layout,
method,
coord_trans,
):
"""Generate expected output."""
scale_h = out_height / in_height
scale_w = out_width / in_width
return resize2d_python(input_np, (scale_h, scale_w), layout, method, coord_trans)
@tvm.testing.fixture
def input_np(self, input_shape, dtype):
if dtype == "float16":
return np.random.random(input_shape).astype(dtype)
if dtype == "uint8":
return np.random.randint(0, 255, input_shape).astype(dtype)
if dtype == "int8":
return np.random.randint(-128, 127, input_shape).astype(dtype)
raise RuntimeError(f"dtype {dtype} is not valid.")
@tvm.testing.fixture
def transformed_input_np(self, input_np, layout, input_crouton_layout, dtype):
if dtype in ["float16", "uint8", "int8"]:
return transform_numpy(input_np, layout.lower(), input_crouton_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def transformed_expected_output_np(self, expected_output_np, layout, output_layout, dtype):
if dtype in ["float16", "uint8", "int8"]:
return transform_numpy(expected_output_np, layout.lower(), output_layout)
raise RuntimeError(f"Unsupported data type '{dtype}'")
@tvm.testing.fixture
def input_shape(self, batch, channel, in_height, in_width):
return (batch, in_height, in_width, channel)
@tvm.testing.fixture
def output_shape(self, batch, channel, out_height, out_width):
return (batch, out_height, out_width, channel)
@tvm.testing.requires_hexagon
def test_resize2d(
self,
dtype,
input_np,
transformed_input_np,
input_shape,
output_shape,
expected_output_np,
transformed_expected_output_np,
layout,
input_crouton_layout,
output_layout,
coord_trans,
method,
hexagon_session,
):
"""Test resize 2D."""
a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=dtype)
m_tensor = s1.resize2d_compute(
a_tensor,
[0.0] * 4,
(output_shape[1], output_shape[2]),
layout=layout,
coordinate_transformation_mode=coord_trans,
method=method,
out_dtype=dtype,
)
tir_schedule = s1.tir_resize2d_schedule(
m_tensor, a_tensor, input_crouton_layout, output_layout
)
sch = tir_schedule.mod
input_axis_separator = [4]
if output_layout in (
"nhwc-8h2w32c2w-2d",
"nhwc-8h8w32c-2d",
):
output_axis_separator = [4]
else:
raise RuntimeError(f"Unexpected layout '{output_layout}'")
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(
sch,
[a_tensor, m_tensor],
get_hexagon_target("v69"),
name="resize2d",
)
a_data_nd = allocate_hexagon_array(
hexagon_session.device,
data=transformed_input_np,
dtype=dtype,
axis_separators=input_axis_separator,
mem_scope="global.vtcm",
)
m_data_nd = allocate_hexagon_array(
hexagon_session.device,
transformed_expected_output_np.shape,
dtype=dtype,
axis_separators=output_axis_separator,
mem_scope="global.vtcm",
)
mod = hexagon_session.load_module(func)
mod(a_data_nd, m_data_nd)
batch_size, height, width, channel = output_shape
# convert nd to np and reshape to fixed chunk size layout
if output_layout == "nhwc-8h2w32c2w-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch_size, height // 8, width // 4, channel // 32, 8, 2, 32, 2]
)
elif output_layout == "nhwc-8h8w32c-2d":
m_data_np = m_data_nd.numpy().reshape(
[batch_size, height // 8, width // 8, channel // 32, 8, 8, 32]
)
if dtype == "float16":
np.testing.assert_allclose(
transformed_expected_output_np, m_data_np, rtol=1e-3, atol=1e-3
)
elif dtype in ["int8", "uint8"]:
np.testing.assert_allclose(transformed_expected_output_np, m_data_np, rtol=1, atol=1)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_hexagon/topi/test_softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for softmax"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import topi
from tvm import te
from tvm.contrib.hexagon.session import Session
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from ..infrastructure import get_hexagon_target
# TODO(mehrdadh): add log_softmax to config
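# each entry maps an operator name to its TOPI compute, NumPy reference, and the input ranks it supports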
OPERATOR_CONFIGS = {
"softmax": {
"topi": topi.nn.softmax,
"ref": tvm.topi.testing.softmax_python,
"dimensions": [2, 4],
},
}
class TestSoftmax:
"""Softmax test class."""
dtype = tvm.testing.parameter(
"float16",
"float32",
)
# TODO(mehrdadh): larger size like (1, 16, 256, 256)
# would fail due to TVM_HEXAGON_RPC_BUFF_SIZE_BYTES
shape = tvm.testing.parameter((32, 10), (3, 4), (1, 16, 32, 32))
@tvm.testing.fixture
def softmax_operation(self, shape) -> str:
"""Return the operator name that supports the given input rank."""
for name, config in OPERATOR_CONFIGS.items():
if len(shape) in config["dimensions"]:
return name
else:
raise ValueError(f"Shape {shape} is not supported.")
@tvm.testing.requires_hexagon
def test_softmax(self, hexagon_session: Session, dtype, shape, softmax_operation):
"""Test softmax."""
if dtype == "float16":
pytest.xfail("float16 is not supported.")
a_tensor = te.placeholder(shape, dtype=dtype, name="a_tensor")
topi_op = OPERATOR_CONFIGS[softmax_operation]["topi"]
b_tensor = topi_op(a_tensor, axis=1)
def get_ref_data(shape):
ref_func = tvm.topi.testing.softmax_python
a_np = np.random.uniform(size=shape).astype(dtype)
if len(shape) == 2:
b_np = ref_func(a_np)
elif len(shape) == 4:
_, c, height, width = a_np.shape
a_np_2d = a_np.transpose(0, 2, 3, 1).reshape(height * width, c)
b_np_2d = tvm.topi.testing.softmax_python(a_np_2d)
b_np = b_np_2d.reshape(1, height, width, c).transpose(0, 3, 1, 2)
return a_np, b_np
# get the test data
a_np, b_np = get_ref_data(shape)
with tvm.target.Target(get_hexagon_target("v68")):
fschedule = topi.hexagon.schedule_softmax
s = fschedule(b_tensor)
func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name="softmax")
mod = hexagon_session.load_module(func)
dev = hexagon_session.device
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev)
mod["softmax"](a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_libtorch_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm.relay
from tvm.relay.op.contrib import torchop
from tvm.testing import requires_libtorch
import_torch_error = None
try:
import torch
except ImportError as e:
torch = None
import_torch_error = str(e)
@pytest.mark.skipif(torch is None, reason=f"PyTorch is not available: {import_torch_error}")
@requires_libtorch
def test_backend():
@torch.jit.script
def script_fn(x, y):
res = x * y
return res
for torch_dt, dt in (
(torch.int32, "int32"),
(torch.float32, "float32"),
(torch.float64, "float64"),
):
x2 = tvm.relay.var("x", shape=[1, 2], dtype=dt)
y2 = tvm.relay.var("y", shape=[2, 2], dtype=dt)
x3 = tvm.relay.var("x", shape=[1, 3], dtype=dt)
y3 = tvm.relay.var("y", shape=[3, 3], dtype=dt)
test_body = tvm.relay.sum(torchop(script_fn, x2, y2)) + tvm.relay.sum(
torchop(script_fn, x3, y3)
)
test_fn = tvm.relay.Function([x2, y2, x3, y3], test_body)
mod = tvm.IRModule({"main": test_fn})
tvm.relay.transform.InferType()(mod)
# mod = tvm.relay.transform.AnnotateTarget("target.torch")(mod)
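# merge the torchop regions and partition them into external functions handled by the libtorch codegen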
mod = tvm.relay.transform.MergeCompilerRegions()(mod)
mod = tvm.relay.transform.PartitionGraph()(mod)
mod = tvm.relay.transform.InferType()(mod)
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, target, params={})
ctx = tvm.cpu(0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](ctx))
# int does not have randn, so we cast...
x2t = torch.randn(1, 2).to(dtype=torch_dt)
y2t = torch.randn(2, 2).to(dtype=torch_dt)
x3t = torch.randn(1, 3).to(dtype=torch_dt)
y3t = torch.randn(3, 3).to(dtype=torch_dt)
# Set inputs
rt_mod.set_input(0, x2t)
rt_mod.set_input(1, y2t)
rt_mod.set_input(2, x3t)
rt_mod.set_input(3, y3t)
# Execute
rt_mod.run()
# Get outputs
tvm_output = rt_mod.get_output(0).numpy()
expected = (script_fn(x2t, y2t).sum() + script_fn(x3t, y3t).sum()).numpy()
print(tvm_output.dtype)
print(expected.dtype)
tvm.testing.assert_allclose(tvm_output, expected)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |