file_path | content | repo
---|---|---|
tests/python/contrib/test_arm_compute_lib/test_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration dense tests."""
import numpy as np
import tvm
from tvm import relay
from tvm import testing
from test_arm_compute_lib.infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
def _get_model(shape, weight_shape, units, dtype, var_names, has_bias=False):
"""Return a model and any parameters it may have"""
a = relay.var(next(var_names), shape=shape, dtype=dtype)
w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
weights = relay.const(w, dtype)
out = relay.nn.dense(a, weights, units=units, out_dtype=dtype)
params = {"w": w}
if has_bias:
b = tvm.nd.array(np.random.randint(-128, 127, weight_shape[0]).astype(dtype))
biasc = relay.const(b, dtype)
out = relay.nn.bias_add(out, biasc)
params["b"] = b
return out, params
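# Example (illustrative values): _get_model((1, 128), (16, 128), 16, "float32",
# iter(["a"]), has_bias=True) builds nn.dense followed by nn.bias_add, i.e. a
# (1, 128) x (128, 16) matmul (nn.dense transposes the weights) plus a
# 16-element bias, producing a (1, 16) output.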
def _get_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w):
"""Get output qnn parameters given input and kernel parameters."""
input_max = input_sc * (255 - input_zp)
input_min = -input_sc * input_zp
kernel_max = kernel_sc * (255 - kernel_zp)
kernel_min = -kernel_sc * kernel_zp
output_limits = [
kernel_max * kernel_h * kernel_w * input_max,
kernel_min * kernel_h * kernel_w * input_max,
kernel_min * kernel_h * kernel_w * input_min,
kernel_max * kernel_h * kernel_w * input_min,
]
output_max = max(output_limits)
output_min = min(output_limits)
output_sc = (output_max - output_min) / 255
output_zp = -int(output_min / output_sc)
return output_zp, output_sc
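# Worked example (illustrative values): with input_zp=100, input_sc=0.5,
# kernel_zp=50, kernel_sc=0.03 and a 2x2 kernel, the input range is
# [-50.0, 77.5] and the kernel range is [-1.5, 6.15]. The accumulator extremes
# are [1906.5, -465.0, 300.0, -1230.0], so output_sc = 3136.5 / 255 = 12.3 and
# output_zp = -int(-1230.0 / 12.3) = 100, i.e.
# _get_qnn_params(100, 0.5, 50, 0.03, 2, 2) -> (100, 12.3).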
def _get_qnn_model(
shape,
weight_shape,
units,
dtype,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
var_names,
has_bias=False,
):
a = relay.var(next(var_names), shape=shape, dtype=dtype)
w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
weights = relay.const(w, dtype)
out = relay.qnn.op.dense(
a,
weights,
units=units,
input_zero_point=relay.const(input_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_scale=relay.const(kernel_sc, "float32"),
out_dtype="int32",
)
params = {"w": w}
if has_bias:
b = tvm.nd.array(np.random.randint(0, 255, weight_shape[0]).astype("int32"))
biasc = relay.const(b, "int32")
out = relay.nn.bias_add(out, biasc)
params["b"] = b
out = relay.qnn.op.requantize(
out,
relay.const(input_sc * kernel_sc, "float32"), # input scale
relay.const(0, "int32"), # input zero point
relay.const(output_sc, "float32"), # output scale
relay.const(output_zp, "int32"), # output zero point
out_dtype="uint8",
)
return out, params
def _get_expected_codegen(shape, weight_shape, units, dtype, has_bias=False):
output_shape = (shape[0], units)
out_dtype = "int32" if dtype == "uint8" else "float32"
node = {
"op": "kernel",
"name": "nn.dense",
"inputs": [],
"attrs": {
"num_outputs": "1",
"out_dtype": [[out_dtype]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"units": [[str(units)]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
},
]
# qnn.dense params, input and kernel
if dtype == "uint8":
node["name"] = "qnn.dense"
for param_dtype in ["int32", "float32"]:
for _ in range(2):
inputs.append(
{
"op": "const",
"name": "",
"attrs": {"shape": [[[]]], "dtype": [[param_dtype]]},
}
)
if has_bias:
bias_dtype = "int32" if dtype == "uint8" else "float32"
bias_shape = (
[1, weight_shape[0]]
if dtype == "float32" and weight_shape[0] != 1
else [weight_shape[0]]
)
inputs.append(
{
"op": "const",
"name": "",
"attrs": {"shape": [[bias_shape]], "dtype": [[bias_dtype]]},
}
)
# qnn.dense params, output
if dtype == "uint8":
for param_dtype in ["float32", "int32"]:
inputs.append(
{"op": "const", "name": "", "attrs": {"shape": [[[]]], "dtype": [[param_dtype]]}}
)
input_idx = 0
for _ in range(len(inputs)):
node["inputs"].append([input_idx, 0, 0])
input_idx += 1
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
def test_dense():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
dtype = "float32"
trials = [
[(1, 128), (16, 128), 16, True],
[(1, 128), (16, 128), 16, False],
[(32, 32), (32, 32), 32, True],
[(32, 32), (32, 32), 32, False],
[(1, 64), (1, 64), 1, True],
[(1, 64), (1, 64), 1, False],
[(11, 2), (2, 2), 2, True],
[(11, 2), (2, 2), 2, False],
]
for shape, weight_shape, units, composite in trials:
outputs = []
inputs = {"a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype))}
func, params = _get_model(
shape, weight_shape, units, dtype, var_names=iter(inputs), has_bias=composite
)
for acl in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
params,
device,
enable_acl=acl,
)[0]
)
config = {
"shape": shape,
"weight_shape": weight_shape,
"units": units,
"dtype": dtype,
"composite operators (bias)": composite,
}
verify(outputs, atol=0.001, rtol=0.01, config=config)
def test_codegen_dense():
if skip_codegen_test():
return
np.random.seed(0)
dtype = "float32"
trials = [
[(1, 128), (16, 128), 16, True],
[(1, 128), (16, 128), 16, False],
[(32, 32), (32, 32), 32, True],
[(32, 32), (32, 32), 32, False],
[(1, 64), (1, 64), 1, True],
[(1, 64), (1, 64), 1, False],
[(11, 2), (2, 2), 2, True],
[(11, 2), (2, 2), 2, False],
]
for shape, weight_shape, units, composite in trials:
inputs = {"a"}
args = (shape, weight_shape, units, dtype)
func, params = _get_model(*args, var_names=iter(inputs), has_bias=composite)
exp_codegen = _get_expected_codegen(*args, has_bias=composite)
verify_codegen(func, exp_codegen)
def test_qnn_dense():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
dtype = "uint8"
trials = [
[(1, 2), (2, 2), 2, True],
[(1, 2), (2, 2), 2, False],
[(4, 4), (4, 4), 4, True],
[(4, 4), (4, 4), 4, False],
[(16, 16), (4, 16), 4, True],
[(16, 16), (4, 16), 4, False],
[(1, 128), (16, 128), 16, True],
[(1, 128), (16, 128), 16, False],
[(32, 32), (32, 32), 32, True],
[(32, 32), (32, 32), 32, False],
[(1, 64), (1, 64), 1, True],
[(1, 64), (1, 64), 1, False],
]
for shape, weight_shape, units, composite in trials:
outputs = []
inputs = {"a": tvm.nd.array(np.random.uniform(0, 255, shape).astype(dtype))}
input_zp = 100
input_sc = 0.5
kernel_zp = 50
kernel_sc = 0.03
output_zp, output_sc = _get_qnn_params(
input_zp, input_sc, kernel_zp, kernel_sc, weight_shape[0], weight_shape[1]
)
func, params = _get_qnn_model(
shape,
weight_shape,
units,
dtype,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
var_names=iter(inputs),
has_bias=composite,
)
for acl in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
params,
device,
enable_acl=acl,
)[0]
)
config = {
"shape": shape,
"weight_shape": weight_shape,
"units": units,
"dtype": dtype,
"composite operators (bias)": composite,
"input scale": input_sc,
"input zero point": input_zp,
"kernel scale": kernel_sc,
"kernel zero point": kernel_zp,
"output scale": output_sc,
"output zero point": output_zp,
}
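        # atol=1 with rtol=0 tolerates a one-step rounding difference between
        # the TVM and ACL quantized kernels, while verify_saturation checks
        # that outputs have not collapsed to the uint8 extremes 0 or 255.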
verify(outputs, atol=1, rtol=0, config=config, verify_saturation=True)
def test_codegen_qnn_dense():
if skip_codegen_test():
return
np.random.seed(0)
dtype = "uint8"
trials = [
[(1, 2), (2, 2), 2, True],
[(1, 2), (2, 2), 2, False],
[(4, 4), (4, 4), 4, True],
[(4, 4), (4, 4), 4, False],
[(16, 16), (4, 16), 4, True],
[(16, 16), (4, 16), 4, False],
[(1, 128), (16, 128), 16, True],
[(1, 128), (16, 128), 16, False],
[(32, 32), (32, 32), 32, True],
[(32, 32), (32, 32), 32, False],
[(1, 64), (1, 64), 1, True],
[(1, 64), (1, 64), 1, False],
]
for shape, weight_shape, units, composite in trials:
inputs = {"a"}
args = (shape, weight_shape, units, dtype)
input_zp = 100
input_sc = 0.5
kernel_zp = 25
kernel_sc = 0.03
output_zp, output_sc = _get_qnn_params(
input_zp, input_sc, kernel_zp, kernel_sc, weight_shape[0], weight_shape[1]
)
func, params = _get_qnn_model(
*args,
var_names=iter(inputs),
input_zp=input_zp,
input_sc=input_sc,
kernel_zp=kernel_zp,
kernel_sc=kernel_sc,
output_zp=output_zp,
output_sc=output_sc,
has_bias=composite,
)
exp_codegen = _get_expected_codegen(*args, has_bias=composite)
verify_codegen(func, exp_codegen)
if __name__ == "__main__":
test_dense()
test_qnn_dense()
test_codegen_dense()
test_codegen_qnn_dense()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_arm_compute_lib/test_maximum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration reshape tests."""
import numpy as np
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _get_model(input_shape, dtype, var_names):
"""Return a model and any parameters it may have."""
a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
b = relay.var(next(var_names), shape=input_shape, dtype=dtype)
max = relay.maximum(a, b)
return max
def _get_expected_codegen(shape, dtype):
node = {
"op": "kernel",
"name": "maximum",
"inputs": [[0, 0, 0], [1, 0, 0]],
"attrs": {
"num_inputs": "2",
"num_outputs": "1",
"shape": [[list(shape)]],
"dtype": [[dtype]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}},
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}},
]
inputs.append(node)
return inputs
def test_maximum():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
for dtype, low, high, atol, rtol in [
("float32", -127, 128, 0.001, 0.001),
("float32", -1, 1, 0.001, 0.001),
]:
inputs = {
"a": tvm.nd.array(np.random.uniform(low, high, (100, 100)).astype(dtype)),
"b": tvm.nd.array(np.random.uniform(low, high, (100, 100)).astype(dtype)),
}
outputs = []
func = _get_model(inputs["a"].shape, dtype, iter(inputs))
for acl in [False, True]:
outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0])
verify(outputs, atol=1e-7, rtol=1e-7)
def test_codegen_maximum():
if skip_codegen_test():
return
shape = (100, 100)
inputs = {"a", "b"}
for dtype in ["float32"]:
args = (shape, dtype)
func = _get_model(*args, iter(inputs))
exp_codegen = _get_expected_codegen(*args)
verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
test_maximum()
test_codegen_maximum()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_arm_compute_lib/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library network tests."""
from distutils.version import LooseVersion
import numpy as np
import pytest
from tvm import relay
from test_arm_compute_lib.infrastructure import Device, skip_runtime_test, build_and_run, verify
def _build_and_run_network(mod, params, inputs, device, tvm_ops, acl_partitions, atol, rtol):
"""Helper function to build and run a network."""
data = {}
np.random.seed(0)
for name, (shape, dtype) in inputs.items():
if dtype == "uint8":
low, high = 0, 255
else:
low, high = -127, 128
data[name] = np.random.uniform(low, high, shape).astype(dtype)
outputs = []
for acl in [False, True]:
outputs.append(
build_and_run(
mod,
data,
1,
params,
device,
enable_acl=acl,
tvm_ops=tvm_ops,
acl_partitions=acl_partitions,
)[0]
)
verify(outputs, atol=atol, rtol=rtol, verify_saturation=False)
def _get_tflite_model(tflite_model_path, inputs_dict):
"""Convert TFlite graph to relay."""
try:
import tflite.Model
except ImportError:
pytest.skip("Missing Tflite support")
with open(tflite_model_path, "rb") as f:
tflite_model_buffer = f.read()
try:
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
except AttributeError:
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buffer, 0)
shape_dict = {}
dtype_dict = {}
for input in inputs_dict:
input_shape, input_dtype = inputs_dict[input]
shape_dict[input] = input_shape
dtype_dict[input] = input_dtype
return relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
def _get_keras_model(keras_model, inputs_dict):
"""Convert Keras graph to relay."""
    inputs = {}
    for name, (shape, _) in inputs_dict.items():
        inputs[name] = shape
return relay.frontend.from_keras(keras_model, inputs, layout="NHWC")
def test_vgg16():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
def get_model():
try:
from keras.applications import VGG16
except ImportError:
pytest.skip("Missing Keras Package")
vgg16 = VGG16(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
inputs = {vgg16.input_names[0]: ((1, 224, 224, 3), "float32")}
mod, params = _get_keras_model(vgg16, inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(),
device=device,
tvm_ops=4,
acl_partitions=21,
atol=0.002,
rtol=0.01,
)
def test_mobilenet():
keras = pytest.importorskip("keras")
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
def get_model():
try:
from keras.applications import MobileNet
except ImportError:
pytest.skip("Missing keras module")
mobilenet = MobileNet(
include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
)
inputs = {mobilenet.input_names[0]: ((1, 224, 224, 3), "float32")}
mod, params = _get_keras_model(mobilenet, inputs)
return mod, params, inputs
if keras.__version__ < LooseVersion("2.9"):
# This can be removed after we migrate to TF/Keras >= 2.9
expected_tvm_ops = 56
expected_acl_partitions = 31
else:
        # In Keras >= 2.7, one reshape operator was removed from the
        # MobileNet model, so the expected op and partition counts below
        # are reduced by one.
        # The change in Keras is `b6abfaed1326e3c`
expected_tvm_ops = 55
expected_acl_partitions = 30
_build_and_run_network(
*get_model(),
device=device,
tvm_ops=expected_tvm_ops,
acl_partitions=expected_acl_partitions,
atol=0.002,
rtol=0.01,
)
def test_quantized_mobilenet():
Device.load("test_config.json")
if skip_runtime_test():
return
try:
import tvm.relay.testing.tf as tf_testing
except ImportError:
pytest.skip("Missing Tflite support")
device = Device()
def get_model():
model_path = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/"
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"mobilenet_v1_1.0_224_quant.tflite",
)
inputs = {"input": ((1, 224, 224, 3), "uint8")}
mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(),
device=device,
tvm_ops=3,
acl_partitions=30,
atol=10,
rtol=0,
)
def test_squeezenet():
Device.load("test_config.json")
if skip_runtime_test():
return
try:
import tvm.relay.testing.tf as tf_testing
except ImportError:
pytest.skip("Missing TF Support")
device = Device()
def get_model():
model_path = tf_testing.get_workload_official(
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
"squeezenet.tflite",
)
inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
return mod, params, inputs
_build_and_run_network(
*get_model(),
device=device,
tvm_ops=9,
acl_partitions=31,
atol=8,
rtol=0,
)
if __name__ == "__main__":
test_vgg16()
test_mobilenet()
test_quantized_mobilenet()
test_squeezenet()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_arm_compute_lib/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration pooling tests."""
import numpy as np
import tvm
from tvm import relay, testing
from test_arm_compute_lib.infrastructure import (
Device,
build_and_run,
skip_codegen_test,
skip_runtime_test,
verify,
verify_codegen,
)
def _calculate_output_shape(shape, sizes, padding, strides, dilation):
"""Calculate pooling output shape."""
height_receptive_field = (sizes[0] - 1) * dilation[0] + 1
width_receptive_field = (sizes[1] - 1) * dilation[1] + 1
output_height = ((shape[1] - height_receptive_field + padding[0] + padding[2]) / strides[0]) + 1
output_width = ((shape[2] - width_receptive_field + padding[1] + padding[3]) / strides[1]) + 1
return 1, int(output_height), int(output_width), shape[3]
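# Worked example (illustrative): an NHWC input of shape (1, 16, 16, 16) pooled
# with sizes=(2, 2), strides=(2, 2), dilation=(1, 1) and padding=(0, 0, 0, 0)
# has a 2x2 receptive field, so each spatial dim is (16 - 2 + 0) / 2 + 1 = 8
# and the output shape is (1, 8, 8, 16).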
def _get_pooling_model(
shape, dtype, typef, sizes, strides, dilation, padding, ceil_mode, count_include_pad, var_names
):
"""Return a model and any parameters it may have."""
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
out = relay.var(next(var_names), shape=shape, dtype=dtype)
if typef == "nn.max_pool2d":
out = relay.nn.max_pool2d(
out,
pool_size=sizes,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
layout="NHWC",
)
elif typef == "nn.avg_pool2d":
if dtype == "uint8":
out = relay.cast(out, "int32")
out = relay.nn.avg_pool2d(
out,
pool_size=sizes,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
layout="NHWC",
)
if dtype == "uint8":
out = relay.cast(out, "uint8")
elif typef == "nn.l2_pool2d":
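        # Relay has no dedicated L2 pooling operator, so it is expressed here
        # as sqrt(avg_pool2d(x ** 2)); the ACL partitioner is expected to
        # match this chain as a single composite.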
out = relay.power(out, relay.const(2.0))
out = relay.nn.avg_pool2d(
out,
pool_size=sizes,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
layout="NHWC",
)
out = relay.sqrt(out)
else:
raise ValueError("Function not supported")
return out
def _get_global_pooling_model(shape, dtype, typef, var_names):
"""Return a model and any parameters it may have."""
out = relay.var(next(var_names), shape=shape, dtype=dtype)
if typef == "nn.global_max_pool2d":
out = relay.nn.global_max_pool2d(out, layout="NHWC")
elif typef == "nn.global_avg_pool2d":
if dtype == "uint8":
out = relay.cast(out, "int32")
out = relay.nn.global_avg_pool2d(out, layout="NHWC")
if dtype == "uint8":
out = relay.cast(out, "uint8")
else:
raise ValueError("Function not supported")
return out
def _get_expected_pooling_codegen(
shape, dtype, typef, sizes, strides, dilation, padding, ceil_mode, count_include_pad
):
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
output_shape = _calculate_output_shape(shape, sizes, padding, strides, dilation)
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NHWC"]],
"out_layout": [[""]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"padding": [[str(p) for p in padding]],
"strides": [[str(s) for s in strides]],
"dilation": [[str(d) for d in dilation]],
"pool_size": [[str(s) for s in sizes]],
"ceil_mode": [[str(1 if ceil_mode else 0)]],
},
}
if typef == "nn.avg_pool2d" or typef == "nn.l2_pool2d":
node["attrs"]["count_include_pad"] = [["1" if count_include_pad else "0"]]
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
def _get_expected_global_pooling_codegen(shape, dtype, typef):
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NHWC"]],
"out_layout": [[""]],
"shape": [[[1, 1, 1, shape[3]]]],
"dtype": [[dtype]],
},
}
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
def test_pooling():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
fp32_dtype = ("float32", -127, 128, 0.001, 0.001)
uint8_dtype = ("uint8", 0, 255, 1, 0)
# fmt: off
trials = [
["nn.max_pool2d", fp32_dtype, (3, 3), (2, 2), (1, 1), (0, 0), False, False, (27, 27, 512), (0, 1),],
["nn.max_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),],
["nn.max_pool2d", fp32_dtype, (3, 3), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),],
["nn.max_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),],
["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", fp32_dtype, (3, 3), (2, 2), (3, 2), (0, 1), True, False, (15, 15, 16), (1, 0),],
# 20.05: "exclude_padding equal false is not supported for AVG Pooling with padding on quantized types"
# ["nn.avg_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), False, True, (16, 16, 16)],
["nn.avg_pool2d", uint8_dtype, (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 1), True, False, (16, 16, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (3, 3), (2, 2), (1, 1), (0, 0), False, False, (16, 16, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, True, (15, 15, 16), (0, 1),],
]
# fmt: on
for (
typef,
(dtype, low, high, atol, rtol),
size,
stride,
dilation,
pad,
ceil_mode,
count_include_pad,
input_shape,
(tvm_ops, acl_partitions),
) in trials:
shape = (1, *input_shape)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
}
func = _get_pooling_model(
shape,
dtype,
typef,
size,
stride,
dilation,
pad,
ceil_mode,
count_include_pad,
iter(inputs),
)
config = {
"size": size,
"stride": stride,
"shape": shape,
"pooling type": typef,
"dtype": dtype,
"padding": pad,
"dilation": dilation,
"ceil_mode": ceil_mode,
"count_include_pad": count_include_pad,
"inputs": inputs,
}
verify_saturation = True if dtype == "uint8" else False
for acl in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
None,
device,
enable_acl=acl,
tvm_ops=tvm_ops,
acl_partitions=acl_partitions,
config=config,
)[0]
)
verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=verify_saturation)
def test_global_pooling():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
fp32_dtype = ("float32", -127, 128, 0.001, 0.001)
uint8_dtype = ("uint8", 0, 255, 1, 0)
trials = [
["nn.global_max_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_max_pool2d", fp32_dtype, (9, 9, 16)],
["nn.global_max_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_max_pool2d", uint8_dtype, (8, 8, 16)],
["nn.global_max_pool2d", uint8_dtype, (9, 9, 16)],
["nn.global_avg_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", fp32_dtype, (9, 9, 16)],
["nn.global_avg_pool2d", uint8_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", uint8_dtype, (8, 8, 16)],
]
for typef, (dtype, low, high, atol, rtol), input_shape in trials:
shape = (1, *input_shape)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(low, high, shape).astype(dtype)),
}
func = _get_global_pooling_model(shape, dtype, typef, iter(inputs))
config = {
"shape": shape,
"pooling type": typef,
"dtype": dtype,
}
verify_saturation = True if dtype == "uint8" else False
for acl in [False, True]:
outputs.append(
build_and_run(func, inputs, 1, None, device, enable_acl=acl, config=config)[0]
)
verify(outputs, atol=atol, rtol=rtol, config=config, verify_saturation=verify_saturation)
def test_codegen_pooling():
if skip_codegen_test():
return
fp32_dtype = ("float32", -127, 128)
uint8_dtype = ("uint8", 0, 255)
# fmt: off
trials = [
["nn.max_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),],
["nn.max_pool2d", fp32_dtype, (3, 3), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),],
["nn.max_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), (1, 1), True, True, (15, 15, 16), (0, 1),],
["nn.max_pool2d", uint8_dtype, (2, 2), (2, 2), (3, 2), (1, 1), True, True, (15, 15, 16), (1, 0),],
["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, False, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 0), False, True, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", fp32_dtype, (3, 3), (2, 2), (3, 2), (0, 1), True, False, (15, 15, 16), (1, 0),],
["nn.avg_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, True, (16, 16, 16), (0, 1),],
["nn.avg_pool2d", uint8_dtype, (3, 3), (2, 2), (1, 1), (0, 1), False, False, (16, 16, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (0, 1), True, False, (15, 15, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (3, 3), (2, 2), (1, 1), (0, 0), False, False, (16, 16, 16), (0, 1),],
["nn.l2_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), (1, 1), False, True, (15, 15, 16), (0, 1),],
]
# fmt: on
for (
typef,
(dtype, low, high),
size,
stride,
dilation,
pad,
ceil_mode,
count_include_pad,
input_shape,
(tvm_ops, acl_partitions),
) in trials:
shape = (1, *input_shape)
inputs = {"a"}
args = (shape, dtype, typef, size, stride, dilation, pad, False, False)
func = _get_pooling_model(*args, iter(inputs))
exp_codegen = _get_expected_pooling_codegen(*args)
verify_codegen(func, exp_codegen, acl_partitions, tvm_ops)
def test_codegen_global_pooling():
if skip_codegen_test():
return
fp32_dtype = ("float32", -127, 128)
uint8_dtype = ("uint8", 0, 255)
trials = [
["nn.global_max_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_max_pool2d", fp32_dtype, (9, 9, 16)],
["nn.global_max_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_max_pool2d", uint8_dtype, (8, 8, 16)],
["nn.global_max_pool2d", uint8_dtype, (9, 9, 16)],
["nn.global_avg_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", fp32_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", fp32_dtype, (9, 9, 16)],
["nn.global_avg_pool2d", uint8_dtype, (8, 8, 16)],
["nn.global_avg_pool2d", uint8_dtype, (8, 8, 16)],
]
for typef, (dtype, low, high), input_shape in trials:
shape = (1, *input_shape)
inputs = {"a"}
args = (shape, dtype, typef)
func = _get_global_pooling_model(*args, iter(inputs))
exp_codegen = _get_expected_global_pooling_codegen(*args)
verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
test_pooling()
test_global_pooling()
test_codegen_pooling()
test_codegen_global_pooling()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_arm_compute_lib/test_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library integration reshape tests."""
import numpy as np
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _get_model(input_shape, output_shape, dtype, var_names):
"""Return a model and any parameters it may have."""
a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
reshape = relay.reshape(a, output_shape)
return reshape
def _get_expected_codegen(input_shape, output_shape, dtype):
node = {
"op": "kernel",
"name": "reshape",
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"newshape": [[str(s) for s in output_shape]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"allowzero": [["0"]],
},
}
input = {
"op": "input",
"name": "",
"attrs": {"shape": [[list(input_shape)]], "dtype": [[dtype]]},
}
return [input, node]
def test_reshape():
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
for dtype, low, high, atol, rtol in [
("float32", -127, 128, 0.001, 0.001),
("uint8", 0, 255, 0, 0),
]:
inputs = {"a": tvm.nd.array(np.random.uniform(low, high, (1, 1, 1, 1000)).astype(dtype))}
for new_shape in [(1, 1000), (10, 10, 10), (10, 100, 1), (1, 1000, 1)]:
outputs = []
func = _get_model(inputs["a"].shape, new_shape, dtype, iter(inputs))
for acl in [False, True]:
outputs.append(build_and_run(func, inputs, 1, None, device, enable_acl=acl)[0])
            config = {
                "shape": inputs["a"].shape,
                "new shape": new_shape,
                "dtype": dtype,
            }
verify(outputs, atol=1e-7, rtol=1e-7, config=config)
def test_codegen_reshape():
if skip_codegen_test():
return
shape = (1, 1, 1, 1000)
inputs = {"a"}
for dtype in ["float32", "uint8"]:
for new_shape in [(1, 1000), (10, 10, 10), (10, 100, 1)]:
args = (shape, new_shape, dtype)
func = _get_model(*args, iter(inputs))
exp_codegen = _get_expected_codegen(*args)
verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
test_reshape()
test_codegen_reshape()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_arm_compute_lib/test_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library runtime tests."""
import numpy as np
import tvm
from tvm import relay
from .infrastructure import skip_runtime_test, build_and_run, verify
from .infrastructure import Device
def test_multiple_ops():
"""
Test multiple operators destined for ACL.
The ACL runtime will expect these ops as 2 separate functions for
the time being.
"""
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
def get_model(input_shape, var_names):
"""Return a model and any parameters it may have."""
a = relay.var(next(var_names), shape=input_shape, dtype="float32")
out = relay.reshape(a, (1, 1, 1000))
out = relay.reshape(out, (1, 1000))
return out
inputs = {"a": tvm.nd.array(np.random.uniform(0, 1, (1, 1, 1, 1000)).astype("float32"))}
outputs = []
for acl in [False, True]:
func = get_model(inputs["a"].shape, iter(inputs))
outputs.append(
build_and_run(func, inputs, 1, None, device, enable_acl=acl, acl_partitions=2)[0]
)
verify(outputs, atol=0.002, rtol=0.01)
def test_heterogeneous():
"""
    Test to check that offloading only the supported operators works,
    while leaving unsupported operators to be computed via TVM.
"""
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
np.random.seed(0)
def get_model(input_shape, var_names):
"""Return a model and any parameters it may have."""
a = relay.var(next(var_names), shape=input_shape, dtype="float32")
out = relay.reshape(a, (1, 1, 1000))
out = relay.sigmoid(out)
out = relay.reshape(out, (1, 1000))
return out
inputs = {"a": tvm.nd.array(np.random.uniform(-127, 128, (1, 1, 1, 1000)).astype("float32"))}
outputs = []
for acl in [False, True]:
func = get_model(inputs["a"].shape, iter(inputs))
outputs.append(
build_and_run(
func, inputs, 1, None, device, enable_acl=acl, tvm_ops=1, acl_partitions=2
)[0]
)
verify(outputs, atol=0.002, rtol=0.01)
def test_multiple_runs():
"""
Test that multiple runs of an operator work.
"""
Device.load("test_config.json")
if skip_runtime_test():
return
device = Device()
def get_model():
a = relay.var("a", shape=(1, 28, 28, 512), dtype="float32")
w = tvm.nd.array(np.ones((256, 1, 1, 512), dtype="float32"))
weights = relay.const(w, "float32")
conv = relay.nn.conv2d(
a,
weights,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="OHWI",
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
)
params = {"w": w}
return conv, params
inputs = {
"a": tvm.nd.array(np.random.uniform(-127, 128, (1, 28, 28, 512)).astype("float32")),
}
func, params = get_model()
outputs = build_and_run(func, inputs, 1, params, device, enable_acl=True, no_runs=3)
verify(outputs, atol=0.002, rtol=0.01)
if __name__ == "__main__":
test_multiple_ops()
test_heterogeneous()
test_multiple_runs()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for BNNS"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
from tvm.contrib import graph_executor
from tvm.relay.op.contrib.bnns import partition_for_bnns
from tvm.contrib import utils
from tvm.autotvm.measure import request_remote
from tvm.relay.analysis import analysis
class Device:
"""
Common device configuration for python tests.
    Check tests/python/contrib/test_bnns/ for the presence of a test_config.json file.
    This file can be used to override the default configuration here, which attempts to run the
    BNNS runtime tests locally if the runtime is available. Changing the configuration allows
    these runtime tests to be offloaded to a remote device with BNNS via a tracker, for example.
Notes
-----
The test configuration will be loaded once when the class is created. If the configuration
changes between tests, any changes will not be picked up.
Attributes
----------
connection_type : str
Details the type of RPC connection to use. Options:
local - Use the local device,
tracker - Connect to a tracker to request a remote device,
remote - Connect to a remote device directly.
host : str
Specify IP address or hostname of remote target.
port : int
Specify port number of remote target.
target : str
The compilation target.
device_key : str
The device key of the remote target. Use when connecting to a remote device via a tracker.
cross_compile : str
Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
"""
connection_type = "local"
host = "127.0.0.1"
port = 9090
target = "llvm"
device_key = ""
cross_compile = ""
def __init__(self):
"""Keep remote device for lifetime of object."""
self.device = self._get_remote()
@classmethod
def _get_remote(cls):
"""Get a remote (or local) device to use for testing."""
if cls.connection_type == "tracker":
device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
elif cls.connection_type == "remote":
device = rpc.connect(cls.host, cls.port)
elif cls.connection_type == "local":
device = rpc.LocalSession()
else:
raise ValueError(
"connection_type in test_config.json should be one of: " "local, tracker, remote."
)
return device
@classmethod
def load(cls, file_name):
"""Load test config
Load the test configuration by looking for file_name relative
to the test_bnns directory.
"""
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
config_file = os.path.join(location, file_name)
if not os.path.exists(config_file):
warnings.warn("Config file doesn't exist, resuming tests with default config.")
return
with open(config_file, mode="r") as config:
test_config = json.load(config)
cls.connection_type = test_config["connection_type"]
cls.host = test_config["host"]
cls.port = test_config["port"]
cls.target = test_config["target"]
cls.device_key = test_config.get("device_key") or ""
cls.cross_compile = test_config.get("cross_compile") or ""
Device.target = "llvm"
def skip_runtime_test():
"""Skip test if it requires the runtime and it's not present."""
# BNNS codegen not present.
if not tvm.get_global_func("relay.ext.bnns", True):
print("Skip because BNNS codegen is not available.")
return True
return False
def skip_codegen_test():
    """Skip test if it requires the BNNS codegen and it's not present."""
    if not tvm.get_global_func("relay.ext.bnns", True):
        print("Skip because BNNS codegen is not available.")
        return True
    return False
def build_module(mod, target, params=None, enable_bnns=True, tvm_ops=0):
"""Build module with option to build for BNNS."""
if isinstance(mod, tvm.relay.expr.Call):
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3):
if enable_bnns:
mod = partition_for_bnns(mod)
relay.backend.te_compiler.get().clear()
return relay.build(mod, target=target, params=params)
def build_and_run(
mod,
inputs,
outputs,
params,
device,
enable_bnns=True,
no_runs=1,
tvm_ops=0,
config=None,
):
"""Build and run the relay module."""
if config is None:
config = {}
try:
lib = build_module(mod, device.target, params, enable_bnns, tvm_ops)
except Exception as e:
err_msg = "The module could not be built.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise Exception(err_msg)
lib = update_lib(lib, device.device, device.cross_compile)
gen_module = graph_executor.GraphModule(lib["default"](device.device.cpu(0)))
gen_module.set_input(**inputs)
out = []
for _ in range(no_runs):
gen_module.run()
out.append([gen_module.get_output(i) for i in range(outputs)])
return out
def update_lib(lib, device, cross_compile):
"""Export the library to the remote/local device."""
lib_name = "mod.so"
temp = utils.tempdir()
lib_path = temp.relpath(lib_name)
if cross_compile:
lib.export_library(lib_path, cc=cross_compile)
else:
lib.export_library(lib_path)
device.upload(lib_path)
lib = device.load_module(lib_name)
return lib
def extract_bnns_modules(module):
"""Get the BNNS module(s) from llvm module."""
return list(filter(lambda mod: mod.type_key == "bnns_json", module.get_lib().imported_modules))
def verify(answers, atol, rtol, verify_saturation=False, config=None):
"""Compare the array of answers. Each entry is a list of outputs."""
if config is None:
config = {}
if len(answers) < 2:
raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}")
for answer in zip_longest(*answers):
for outs in combinations(answer, 2):
try:
if verify_saturation:
assert (
np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
assert (
np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
except AssertionError as e:
err_msg = "Results not within the acceptable tolerance.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise AssertionError(err_msg)
def verify_codegen(
module,
known_good_codegen,
num_bnns_modules,
tvm_ops=0,
target=Device.target,
):
"""Check BNNS codegen against a known good output."""
module = build_module(module, target, tvm_ops=tvm_ops)
bnns_modules = extract_bnns_modules(module)
assert len(bnns_modules) == num_bnns_modules, (
f"The number of BNNS modules produced ({len(bnns_modules)}) does not "
f"match the expected value ({num_bnns_modules})."
)
for mod in bnns_modules:
source = mod.get_source("json")
codegen = json.loads(source)["nodes"]
# remove input and const names as these cannot be predetermined
for node in range(len(codegen)):
if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
codegen[node]["name"] = ""
codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
assert codegen_str == known_good_codegen_str, (
f"The JSON produced by codegen does not match the expected result. \n"
f"Actual={codegen_str} \n"
f"Expected={known_good_codegen_str}"
)
def compare_inference_with_ref(func, params, atol=0.002, rtol=0.007):
"""Compare scoring results for compilation with and without BNNS.
Provided function will be compiled two times with and without BNNS.
The scoring results for both type of compilation will be compared
with provided atol and rtol. The input data will be automatically
generated based of shape and dtype info provided for var nodes.
"""
# Generate input tensor values
inputs = {}
for free_param in analysis.free_vars(func):
name = free_param.name_hint
dtype = free_param.type_annotation.dtype
shape = [s.value for s in free_param.type_annotation.shape]
inputs[name] = tvm.nd.array(np.random.uniform(0, 127, shape).astype(dtype))
# Run for both type of compilation
device = Device()
outputs = []
for bnns in [False, True]:
outputs.append(build_and_run(func, inputs, 1, params, device, enable_bnns=bnns)[0])
# Compare result tensors
verify(outputs, atol=atol, rtol=rtol)
def generate_trials(space, r_factor=3):
"""Generates a series of trials.
This algorithm generates a series of non-deterministic trials given a
space of options to test. A trial is generated by pulling a value from
each option in the space. On some occasions the values are shuffled to
ensure a different trial on each r_factor iteration. The algorithm ensures
    that each value from an option is used at least once. The total number of
    trials is r_factor multiplied by the number of values in the largest option.
Parameters
----------
space: List[List[Any]]
A list of different options with varying values to test.
r_factor: Optional[int]
The repeat factor.
Returns
-------
result: List[Tuple]
A list of trials specifying values for each option.
"""
np.random.seed(0)
max_len = 1
for option in space:
max_len = max(max_len, len(option))
num_trials = r_factor * max_len
trials = []
for i in range(num_trials):
trial = []
for option in space:
if i % len(option) == 0:
np.random.shuffle(option)
trial.append(option[i % len(option)])
trials.append(trial)
return trials
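# Example (illustrative): generate_trials([[1, 2], ["a", "b", "c"]], r_factor=2)
# produces 2 * 3 = 6 trials, each a (number, letter) pair, with every value
# from both options used at least once.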
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration conv2d tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from .infrastructure import skip_runtime_test, compare_inference_with_ref, generate_trials
# TODO: Missing cases
#   1. Bias as add with a 3d const tensor. Leads to an additional unsqueeze op in between.
#   2. Check unsupported cases of fusion, like bias add with axis != 1, or add with broadcast
#      over spatial dims.
#   3. Check if bias/weights are not constants. Should fall back to LLVM or be decomposed.
#   4. Check if bias/weights are constant exprs. Should work somehow.
def _get_model(
shape,
kernel=(3, 3),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
groups=1,
dtype="float32",
channels=-1, # -1 means same as input channels
bias_type="none",
activation_type="none",
):
"""Return a model and any parameters it may have"""
if channels == -1:
channels = shape[1]
a = relay.var("a", shape=shape, dtype=dtype)
weight_shape = (channels, shape[1] // groups, *kernel)
w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
weights = relay.const(w, dtype)
out = relay.nn.conv2d(
a,
weights,
kernel_size=kernel,
dilation=dilation,
strides=strides,
padding=padding,
groups=groups,
channels=channels,
out_dtype=dtype,
)
params = {"w": w}
if bias_type == "bias_add":
b = tvm.nd.array(np.random.uniform(-10, 10, weight_shape[0]).astype(dtype))
biasc = relay.const(b, dtype)
out = relay.nn.bias_add(out, biasc, axis=1)
params["b"] = b
elif bias_type == "add_3d" or bias_type == "add_4d":
bias_shape = (
(weight_shape[0], 1, 1) if bias_type == "add_3d" else (1, weight_shape[0], 1, 1)
)
b = tvm.nd.array(np.random.uniform(-10, 10, bias_shape).astype(dtype))
biasc = relay.const(b, dtype)
out = relay.add(out, biasc)
params["b"] = b
if activation_type == "relu":
out = relay.nn.relu(out)
elif activation_type == "sigmoid":
out = relay.op.sigmoid(out)
return out, params
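# Example (illustrative): shape=(1, 4, 10, 10) with groups=2, channels=8 and
# kernel=(3, 3) gives an OIHW weight shape of (8, 4 // 2, 3, 3) = (8, 2, 3, 3),
# i.e. each output channel convolves 2 of the 4 input channels.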
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d():
np.random.seed(0)
kernel_hs = [1, 2, 3, 5]
kernel_ws = [1, 2, 3, 5]
pad = [(1, 1), (2, 2), (2, 1)]
strides = [(1, 1), (2, 2)]
dilation = [(1, 1)]
out_channels = [1, 4, 8, 16]
input_shapes = [(10, 10, 14), (12, 15, 16), (20, 20, 20)]
batches = [1, 2]
groups = [1, 2]
    bias_kind = ["none", "add_3d", "add_4d", "bias_add"]
activation_kind = ["none", "relu", "sigmoid"]
trials = generate_trials(
[
kernel_hs,
kernel_ws,
pad,
strides,
dilation,
out_channels,
input_shapes,
groups,
batches,
bias_kind,
activation_kind,
],
3,
)
for (
kernel_h,
kernel_w,
pad,
stride,
dilation,
out_channels,
input_shapes,
group,
batch,
bias,
activation,
) in trials:
if out_channels % group != 0:
continue
func, params = _get_model(
shape=(batch, *input_shapes),
kernel=(kernel_h, kernel_w),
padding=pad,
strides=stride,
dilation=dilation,
groups=group,
channels=out_channels,
bias_type=bias,
activation_type=activation,
)
compare_inference_with_ref(func, params)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d_dw():
if skip_runtime_test():
return
np.random.seed(0)
shape = [4, 5, 5]
for batch in [1, 2]:
mod, params = _get_model(shape=(batch, *shape), groups=shape[0])
compare_inference_with_ref(mod, params)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_conv2d_with_oc1():
if skip_runtime_test():
return
np.random.seed(0)
shape = [3, 5, 5]
for batch in [1, 2]:
for bias in ["none", "add_4d"]:
mod, params = _get_model(shape=(batch, *shape), channels=1, bias_type=bias)
compare_inference_with_ref(mod, params)
if __name__ == "__main__":
test_conv2d()
test_conv2d_dw()
test_conv2d_with_oc1()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_conv2d_patterns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS pattern detection check"""
import tvm
from tvm import relay
import numpy as np
from tvm.relay.op.contrib.bnns import partition_for_bnns
fp32 = "float32"
def partition(exp):
"""Apply BNNS specific partitioning transformation"""
mod = tvm.IRModule.from_expr(exp)
with tvm.transform.PassContext(opt_level=3):
mod = partition_for_bnns(mod)
return mod
def is_op_fused(func, op_name):
is_fused = False
def visit(op):
if (
isinstance(op, tvm.relay.function.Function)
and op_name in op.attrs["PartitionedFromPattern"]
):
nonlocal is_fused
is_fused = True
tvm.relay.analysis.post_order_visit(func.body, visit)
return is_fused
def test_pattern_conv2d_with_bias_add():
for axis in (1, 2):
a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
b = relay.const(np.random.uniform(-10, 10, 8).astype(fp32))
res = relay.nn.bias_add(res, b, axis=axis)
mod = partition(res)
bias_is_fused = is_op_fused(mod["tvmgen_default_bnns_main_0"], "nn.bias_add")
assert bias_is_fused if axis == 1 else not bias_is_fused
def test_pattern_conv2d_with_add():
workloads = {8: False, (8, 1): False, (8, 1, 1): True, (1, 8, 1, 1): True}
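    # Only bias shapes that broadcast purely along the channel axis of the
    # NCHW output, i.e. (8, 1, 1) and (1, 8, 1, 1), are expected to fuse into
    # the BNNS convolution; (8,) and (8, 1) would broadcast along the spatial
    # dims instead.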
for b_shape, should_be_fused in workloads.items():
a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
b = relay.const(np.random.uniform(-10, 10, b_shape).astype(fp32))
res = relay.add(res, b)
mod = partition(res)
bias_is_fused = is_op_fused(mod["tvmgen_default_bnns_main_0"], "add")
assert bias_is_fused == should_be_fused
def test_pattern_conv2d_with_non_cons_weights():
for const_weights in (True, False):
a = relay.var("a", shape=(2, 7, 8, 8), dtype=fp32)
if const_weights:
w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
else:
w = relay.var("w", shape=(8, 7, 3, 3), dtype=fp32)
res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
mod = partition(res)
use_bnns = len(mod.get_global_vars()) == 2 # GlobalVar: "main" and "bnns_0"
assert use_bnns == const_weights
def test_pattern_conv2d_with_non_cons_bias():
a = relay.var("a", shape=[2, 7, 8, 8], dtype=fp32)
w = relay.const(np.random.uniform(-10, 10, (8, 7, 3, 3)).astype(fp32))
res = relay.nn.conv2d(a, w, kernel_size=(3, 3), padding=(1, 1), channels=8, out_dtype=fp32)
b = relay.var("b", shape=[8], dtype=fp32)
res = relay.nn.bias_add(res, b, axis=1)
mod = partition(res)
bias_is_fused = is_op_fused(mod["tvmgen_default_bnns_main_0"], "nn.bias_add")
assert not bias_is_fused
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration dense tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
generate_trials,
)
def _get_model(shape, weight_shape, units, dtype, var_names, has_bias=False, has_gelu=False):
"""Return a model and any parameters it may have"""
a = relay.var(next(var_names), shape=shape, dtype=dtype)
w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype))
weights = relay.const(w, dtype)
out = relay.nn.dense(a, weights, units=units, out_dtype=dtype)
params = {"w": w}
if has_bias:
b = tvm.nd.array(np.random.randint(-128, 127, weight_shape[0]).astype(dtype))
biasc = relay.const(b, dtype)
out = relay.op.add(out, biasc)
params["b"] = b
if has_gelu:
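        # The chain below builds the tanh approximation of GELU:
        # gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))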
const1 = relay.const(0.044715)
const2 = relay.const(math.sqrt(2 / math.pi))
bias = out
out = relay.op.power(bias, relay.const(3.0, "float32"))
out = relay.op.multiply(out, const1)
out = relay.op.add(out, bias)
out = relay.op.multiply(out, const2)
out = relay.op.tanh(out)
out = relay.op.add(out, relay.const(1, "float32"))
out = relay.op.multiply(out, relay.const(0.5))
out = relay.op.multiply(out, bias)
return out, params
def _get_expected_codegen(shape, weight_shape, units, dtype, has_bias=False, has_gelu=False):
output_shape = (shape[0], units)
name = "nn.dense"
    if has_bias and has_gelu:
        name = "bnns.dense_bias_gelu"
    elif has_bias:
        name = "bnns.dense_bias"
node = {
"op": "kernel",
"name": name,
"inputs": [],
"attrs": {
"num_outputs": "1",
"out_dtype": [["float32"]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"units": [[str(units)]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[list(weight_shape)]], "dtype": [[str(dtype)]]},
},
]
if has_bias:
inputs.append(
{
"op": "const",
"name": "",
"attrs": {"shape": [[[weight_shape[0]]]], "dtype": [["float32"]]},
}
)
    for input_idx in range(len(inputs)):
        node["inputs"].append([input_idx, 0, 0])
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
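# Sketch of what verify_codegen receives from the helper above: input/const
# descriptors first, then the kernel node referencing them by positional index,
# e.g. for a plain dense:
#   [{"op": "input", ...}, {"op": "const", ...},
#    {"op": "kernel", "name": "nn.dense", "inputs": [[0, 0, 0], [1, 0, 0]], ...}]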
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_dense():
device = Device()
np.random.seed(0)
dtype = ["float32"]
shape = [
((1, 128), (16, 128), 16),
((32, 32), (32, 32), 32),
((1, 64), (1, 64), 1),
((11, 2), (2, 2), 2),
((2, 2), (1, 2), 1),
]
composite = [False, True]
trials = generate_trials([dtype, shape, composite, composite], 3)
for dtype, (shape, weight_shape, units), with_bias, with_gelu in trials:
outputs = []
inputs = {"a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype))}
func, params = _get_model(
shape,
weight_shape,
units,
dtype,
var_names=iter(inputs),
has_bias=with_bias,
has_gelu=with_gelu,
)
for bnns in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
params,
device,
enable_bnns=bnns,
)[0]
)
config = {
"shape": shape,
"weight_shape": weight_shape,
"units": units,
"dtype": dtype,
"with_bias": with_bias,
"with_gelu": with_gelu,
}
verify(outputs, atol=0.001, rtol=0.01, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_dense():
np.random.seed(0)
dtype = ["float32"]
shape = [
((1, 128), (16, 128), 16),
((32, 32), (32, 32), 32),
((1, 64), (1, 64), 1),
((11, 2), (2, 2), 2),
((2, 2), (1, 2), 1),
]
composite = [False, True]
trials = generate_trials([dtype, shape, composite, composite], 3)
for dtype, (shape, weight_shape, units), with_bias, with_gelu in trials:
inputs = {"a"}
args = (shape, weight_shape, units, dtype)
func, params = _get_model(
*args, var_names=iter(inputs), has_bias=with_bias, has_gelu=with_gelu
)
exp_codegen = _get_expected_codegen(*args, has_bias=with_bias, has_gelu=with_gelu)
verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
test_dense()
test_codegen_dense()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration dense tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
verify_codegen,
build_and_run,
verify,
generate_trials,
)
def _get_model(a_shape, b_shape, dtype, var_names, is_a_constant=False, is_b_constant=False):
"""Return a model and any parameters it may have"""
a = relay.var(next(var_names), shape=a_shape, dtype=dtype)
b = relay.var(next(var_names), shape=b_shape, dtype=dtype)
params = {}
if is_b_constant is True:
b = tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype))
params["b"] = b
b = relay.const(b, dtype)
if is_a_constant is True:
a = tvm.nd.array(np.random.uniform(-128, 127, a_shape).astype(dtype))
params["a"] = a
a = relay.const(a, dtype)
out = relay.nn.batch_matmul(a, b)
return out, params
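# Reference semantics (sketch): relay.nn.batch_matmul takes its second operand
# pre-transposed, so C[n, i, j] = sum_k A[n, i, k] * B[n, j, k], or in NumPy:
#   c = np.einsum("nik,njk->nij", a, b)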
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_matmul():
device = Device()
np.random.seed(0)
dtype = "float32"
# C[N, I, J] = A[N, I, K] * B[N, J, K]
shapes_config = [
        # N, I, J, K
[1, 4, 4, 3],
[1, 16, 32, 32],
[2, 1, 1, 3],
[2, 16, 32, 32],
[5, 1, 1, 3],
]
data_config = [
# A_is_constant, B_is_constant
[False, True],
[True, False],
[False, False],
]
for N, I, J, K in shapes_config:
a_shape = [N, I, K]
b_shape = [N, J, K]
for is_a_constant, is_b_constant in data_config:
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(-128, 127, a_shape).astype(dtype)),
"b": tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype)),
}
func, params = _get_model(
a_shape,
b_shape,
dtype,
var_names=iter(inputs),
is_a_constant=is_a_constant,
is_b_constant=is_b_constant,
)
for enable_bnns in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
params,
device,
enable_bnns=enable_bnns,
)[0]
)
config = {
"a_shape": a_shape,
"b_shape": b_shape,
"dtype": dtype,
}
verify(outputs, atol=0.001, rtol=0.01, config=config)
if __name__ == "__main__":
test_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_normalization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration normalization tests."""
import numpy as np
import math
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
Device,
skip_runtime_test,
skip_codegen_test,
verify_codegen,
build_and_run,
verify,
generate_trials,
)
def _get_model(
shape, b_shape, s_shape, dtype, var_names, axis=1, epsilon=1e-5, center=True, scale=True
):
"""Return a model and any parameters it may have"""
src = relay.var(next(var_names), shape=shape, dtype=dtype)
params = {}
b = tvm.nd.array(np.random.uniform(-128, 127, b_shape).astype(dtype))
params["b"] = b
b = relay.const(b, dtype)
    s = tvm.nd.array(np.random.uniform(-128, 127, s_shape).astype(dtype))
    params["s"] = s
s = relay.const(s, dtype)
out = relay.nn.instance_norm(src, s, b, axis, epsilon, center, scale)
return out, params
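# Instance norm reference (sketch) for the default NCHW/axis=1 case: statistics
# are taken per sample and channel over the remaining spatial axes:
#   mean = x.mean(axis=(2, 3), keepdims=True)
#   var = x.var(axis=(2, 3), keepdims=True)
#   out = s * (x - mean) / np.sqrt(var + epsilon) + b   # s, b broadcast over C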
def _get_expected_codegen(shape, axis, center, scale, dtype, offload_on_bnns):
output_shape = shape
name = "nn.instance_norm"
node = {
"op": "kernel",
"name": name,
"inputs": [],
"attrs": {
"num_outputs": "1",
"axis": [[str(axis)]],
"center": [[str(int(center))]],
"scale": [[str(int(scale))]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"epsilon": [["1.0000000000000001e-05"]],
},
}
inputs = [
{"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[str(dtype)]]}},
{
"op": "const",
"name": "",
"attrs": {"shape": [[[shape[axis]]]], "dtype": [[str(dtype)]]},
},
{
"op": "const",
"name": "",
"attrs": {"shape": [[[shape[axis]]]], "dtype": [[str(dtype)]]},
},
]
    for input_idx in range(len(inputs)):
        node["inputs"].append([input_idx, 0, 0])
node["attrs"]["num_inputs"] = str(len(inputs))
inputs.append(node)
return inputs
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_normalization():
device = Device()
np.random.seed(0)
dtype = "float32"
shapes_config = [
[1, 2, 3, 4],
[3, 2, 3, 4],
[2, 2, 3],
[16, 32, 32],
[5, 3],
]
axes = [-1, 0, 1, 2]
for shape in shapes_config:
for axis in axes:
if len(shape) == 2 and axis != 0:
continue
for center in [False, True]:
for scale in [False, True]:
outputs = []
inputs = {
"src": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),
}
func, params = _get_model(
shape,
[shape[axis]],
[shape[axis]],
dtype,
var_names=iter(inputs),
axis=axis,
center=center,
scale=scale,
)
for enable_bnns in [False, True]:
outputs.append(
build_and_run(
func,
inputs,
1,
params,
device,
enable_bnns=enable_bnns,
)[0]
)
config = {
"dtype": dtype,
}
verify(outputs, atol=0.001, rtol=0.01, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_normalization():
np.random.seed(0)
dtype = "float32"
shapes_config = [
[1, 2, 3, 4],
[3, 2, 3, 4],
[2, 2, 3],
[16, 32, 32],
[5, 3],
]
axes = [-1, 0, 1, 2]
def check_normalization(rank, axis):
if rank < 3 or rank > 4:
return False
        if (axis == 0 and rank == 3) or (axis == 1 and rank == 4):
return True
return False
for shape in shapes_config:
for axis in axes:
if len(shape) == 2 and axis != 0:
continue
for center in [False, True]:
for scale in [False, True]:
inputs = {"src"}
args = (shape, axis, center, scale, dtype)
func, params = _get_model(
shape,
[shape[axis]],
[shape[axis]],
dtype,
var_names=iter(inputs),
axis=axis,
center=center,
scale=scale,
)
                    offload_on_bnns = check_normalization(len(shape), axis)
                    bnns_blocks = 1 if offload_on_bnns else 0
exp_codegen = _get_expected_codegen(*args, offload_on_bnns)
verify_codegen(func, exp_codegen, bnns_blocks)
if __name__ == "__main__":
test_normalization()
test_codegen_normalization()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_onnx_topologies.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS pattern detection check"""
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.contrib import utils, graph_executor
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib.bnns import partition_for_bnns
import numpy as np
pytest.importorskip("onnx")
bnns_is_absent = tvm.get_global_func("relay.ext.bnns", True) is None
TARGET = "llvm"
INPUT_SHAPE = [1, 3, 224, 224]
BASE_MODEL_URL = "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/"
MODEL_URL_COLLECTION = {
"BERT": "text/machine_comprehension/bert-squad/model/bertsquad-10.onnx",
"MobileNet-v2": "vision/classification/mobilenet/model/mobilenetv2-7.onnx",
"ResNet50-v1": "vision/classification/resnet/model/resnet50-v1-7.onnx",
"ResNet50-v2": "vision/classification/resnet/model/resnet50-v2-7.onnx",
"SqueezeNet-v1.1": "vision/classification/squeezenet/model/squeezenet1.1-7.onnx",
"SqueezeNet-v1.0": "vision/classification/squeezenet/model/squeezenet1.0-7.onnx",
"Inception-v1": "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-7.onnx",
"Inception-v2": "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-7.onnx",
}
def get_onnx_input_name(model):
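    # ONNX lists initializers among graph inputs; subtracting them leaves the
    # true runtime inputs.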
inputs = [node.name for node in model.graph.input]
initializer = [node.name for node in model.graph.initializer]
inputs = list(set(inputs) - set(initializer))
return inputs
def get_model_url(model_name):
return BASE_MODEL_URL + MODEL_URL_COLLECTION[model_name]
def get_name_from_url(url):
return url[url.rfind("/") + 1 :].strip()
def find_of_download(model_name):
model_url = get_model_url(model_name)
model_file_name = get_name_from_url(model_url)
return download_testdata(model_url, model_file_name, module="models")
def get_model(model_name):
model_path = find_of_download(model_name)
onnx_model = onnx.load(model_path)
input_names = get_onnx_input_name(onnx_model)
input_dict = {}
for name in input_names:
        input_dict[name] = INPUT_SHAPE  # TODO: input shape is hardcoded; derive it from the model
mod, params = relay.frontend.from_onnx(onnx_model, input_dict, freeze_params=True)
return mod, params, input_dict
def simplify_model(mod):
"""
Simplify execution graph
At least merge BatchNorm into convolution. For this purpose decompose BN primitive
into simple operation which can be calculated as const expr and after that merged
into nearest conv/dense primitive.
"""
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.FoldConstant(),
transform.SimplifyInference(),
transform.FoldScaleAxis(),
]
)
return seq(mod)
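# Typical use in this file (sketch): simplify before partitioning so the folded
# BatchNorm constants can merge into adjacent conv/dense, then offload to BNNS:
#   mod = simplify_model(mod)
#   mod = partition_for_bnns(mod)
#   lib = relay.build(mod, target=TARGET, params=params)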
def process(model_name):
temp = utils.tempdir()
model, params, input_dict = get_model(model_name)
def run(mod, target, simplify=True, with_bnns=False):
with tvm.transform.PassContext(opt_level=3):
if simplify:
mod = simplify_model(mod)
if with_bnns:
mod = partition_for_bnns(mod)
graph_module = relay.build(mod, target=target, params=params)
lib_name = "deploy.tar"
path_dso = temp.relpath(lib_name)
graph_module.export_library(path_dso)
dev = tvm.cpu(0)
loaded_lib = tvm.runtime.load_module(path_dso)
module = graph_executor.GraphModule(loaded_lib["default"](dev))
module.run()
return module.get_output(0).numpy()
res_llvm = run(model, TARGET, simplify=True, with_bnns=False)
res_bnns = run(model, TARGET, simplify=True, with_bnns=True)
tvm.testing.assert_allclose(
res_llvm,
res_bnns,
atol=0.002,
rtol=0.007,
)
@pytest.mark.skip(reason="Manually disabled because of huge complexity")
@pytest.mark.skipif(bnns_is_absent, reason="BNNS runtime is absent")
@pytest.mark.parametrize("model_name", MODEL_URL_COLLECTION.keys())
def test_topology(model_name):
process(model_name)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_bnns/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BNNS integration pooling tests."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm import testing
from .infrastructure import (
skip_runtime_test,
skip_codegen_test,
build_and_run,
verify,
verify_codegen,
)
from .infrastructure import Device
def _calculate_output_shape(shape, sizes, padding, strides):
"""Calculate pooling output shape."""
output_height = ((shape[2] - sizes[0] + padding[0] + padding[2]) / strides[0]) + 1
output_width = ((shape[3] - sizes[1] + padding[1] + padding[3]) / strides[1]) + 1
return 1, shape[1], int(output_height), int(output_width)
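# Worked example (sketch): shape=(1, 16, 16, 16), sizes=(2, 2), strides=(2, 2),
# padding=(0, 0, 0, 0) gives ((16 - 2 + 0 + 0) / 2) + 1 = 8 along each spatial
# axis, i.e. an output of (1, 16, 8, 8).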
def _get_pooling_model(
shape, dtype, typef, sizes, strides, padding, ceil_mode, count_include_pad, var_names
):
"""Return a model and any parameters it may have."""
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
out = relay.var(next(var_names), shape=shape, dtype=dtype)
if typef == "nn.max_pool2d":
out = relay.nn.max_pool2d(
out,
pool_size=sizes,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
)
elif typef == "nn.avg_pool2d":
out = relay.nn.avg_pool2d(
out,
pool_size=sizes,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
else:
raise ValueError("Function not supported")
return out
def _get_global_pooling_model(shape, dtype, typef, var_names):
"""Return a model and any parameters it may have."""
out = relay.var(next(var_names), shape=shape, dtype=dtype)
if typef == "nn.global_max_pool2d":
out = relay.nn.global_max_pool2d(out)
elif typef == "nn.global_avg_pool2d":
out = relay.nn.global_avg_pool2d(out)
else:
raise ValueError("Function not supported")
return out
def _get_expected_pooling_codegen(
shape, dtype, typef, sizes, strides, padding, ceil_mode, count_include_pad
):
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
output_shape = _calculate_output_shape(shape, sizes, padding, strides)
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NCHW"]],
"shape": [[list(output_shape)]],
"dtype": [[dtype]],
"padding": [[str(p) for p in padding]],
"strides": [[str(s) for s in strides]],
"pool_size": [[str(s) for s in sizes]],
"ceil_mode": [[str(1 if ceil_mode else 0)]],
},
}
if typef == "nn.avg_pool2d" or typef == "nn.l2_pool2d":
node["attrs"]["count_include_pad"] = [["1" if count_include_pad else "0"]]
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
def _get_expected_global_pooling_codegen(shape, dtype, typef):
node = {
"op": "kernel",
"name": typef,
"inputs": [[0, 0, 0]],
"attrs": {
"num_inputs": "1",
"num_outputs": "1",
"layout": [["NCHW"]],
"shape": [[[1, shape[1], 1, 1]]],
"dtype": [[dtype]],
},
}
input = {"op": "input", "name": "", "attrs": {"shape": [[list(shape)]], "dtype": [[dtype]]}}
return [input, node]
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_pooling():
device = Device()
np.random.seed(0)
dtype = "float32"
trials = [
["nn.max_pool2d", (3, 3), (2, 2), (0, 0), False, False, (27, 27, 512)],
["nn.max_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
["nn.max_pool2d", (3, 3), (2, 2), (1, 1), True, True, (15, 15, 16)],
["nn.max_pool2d", (2, 2), (2, 2), (0, 1), False, False, (16, 16, 16)],
["nn.avg_pool2d", (2, 2), (2, 2), (1, 1), False, False, (16, 16, 16)],
["nn.avg_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
["nn.avg_pool2d", (3, 3), (2, 2), (0, 1), True, False, (15, 15, 16)],
]
for (
typef,
size,
stride,
pad,
ceil_mode,
count_include_pad,
input_shape,
) in trials:
shape = (1, *input_shape)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(-127, 128, shape).astype(dtype)),
}
func = _get_pooling_model(
shape, dtype, typef, size, stride, pad, ceil_mode, count_include_pad, iter(inputs)
)
config = {
"size": size,
"stride": stride,
"shape": shape,
"pooling type": typef,
"dtype": dtype,
"padding": pad,
"ceil_mode": ceil_mode,
"count_include_pad": count_include_pad,
"inputs": inputs,
}
params = None
for enable_bnns in [False, True]:
outputs.append(
build_and_run(
func, inputs, 1, params, device, enable_bnns=enable_bnns, config=config
)[0]
)
verify(outputs, atol=0.001, rtol=0.001, config=config)
@pytest.mark.skipif(skip_runtime_test(), reason="Skip because BNNS codegen is not available")
def test_global_pooling():
device = Device()
np.random.seed(0)
dtype = "float32"
trials = [
["nn.global_max_pool2d", (8, 8, 16)],
["nn.global_max_pool2d", (9, 9, 16)],
["nn.global_max_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (9, 9, 16)],
]
for typef, input_shape in trials:
shape = (1, *input_shape)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(-127, 128, shape).astype(dtype)),
}
func = _get_global_pooling_model(shape, dtype, typef, iter(inputs))
config = {
"shape": shape,
"pooling type": typef,
"dtype": dtype,
}
for enable_bnns in [False, True]:
outputs.append(
build_and_run(
func, inputs, 1, None, device, enable_bnns=enable_bnns, config=config
)[0]
)
verify(outputs, atol=0.001, rtol=0.001, config=config)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_pooling():
dtype = "float32"
trials = [
["nn.max_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
["nn.max_pool2d", (3, 3), (2, 2), (1, 1), True, True, (15, 15, 16)],
["nn.max_pool2d", (2, 2), (2, 2), (0, 1), False, False, (16, 16, 16)],
["nn.avg_pool2d", (2, 2), (2, 2), (1, 1), False, False, (16, 16, 16)],
["nn.avg_pool2d", (2, 2), (2, 2), (0, 0), False, True, (16, 16, 16)],
["nn.avg_pool2d", (3, 3), (2, 2), (0, 1), True, False, (15, 15, 16)],
]
for (
typef,
size,
stride,
pad,
ceil_mode,
count_include_pad,
input_shape,
) in trials:
shape = (1, *input_shape)
inputs = {"a"}
args = (shape, dtype, typef, size, stride, pad, False, False)
func = _get_pooling_model(*args, iter(inputs))
exp_codegen = _get_expected_pooling_codegen(*args)
verify_codegen(func, exp_codegen, 1)
@pytest.mark.skipif(skip_codegen_test(), reason="Skip because BNNS codegen is not available")
def test_codegen_global_pooling():
dtype = "float32"
trials = [
["nn.global_max_pool2d", (8, 8, 16)],
["nn.global_max_pool2d", (9, 9, 16)],
["nn.global_max_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (8, 8, 16)],
["nn.global_avg_pool2d", (9, 9, 16)],
]
for typef, input_shape in trials:
shape = (1, *input_shape)
inputs = {"a"}
args = (shape, dtype, typef)
func = _get_global_pooling_model(*args, iter(inputs))
exp_codegen = _get_expected_global_pooling_codegen(*args)
verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":
test_pooling()
test_global_pooling()
test_codegen_pooling()
test_codegen_global_pooling()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cblas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import numpy as np
import tvm.topi.testing
from tvm.contrib import cblas
from tvm.contrib import mkl
from tvm.contrib import dnnl
import tvm.testing
def verify_matmul_add(m, l, n, lib, transa=False, transb=False, dtype="float32"):
bias = te.var("bias", dtype=dtype)
ashape = (l, n) if transa else (n, l)
bshape = (m, l) if transb else (l, m)
A = te.placeholder(ashape, name="A", dtype=dtype)
B = te.placeholder(bshape, name="B", dtype=dtype)
C = lib.matmul(A, B, transa, transb)
D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
s = te.create_schedule(D.op)
def get_numpy(a, b, bb, transa, transb):
if transa:
a = a.transpose()
if transb:
b = b.transpose()
return np.dot(a, b) + bb
def compile(f, name="test_matmul_add", ext=".so"):
path = name + ext
f.export_library(path)
mod = tvm.runtime.load_module(path)
f = mod[name]
return f
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func(lib.__name__ + ".matmul", True):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
name = "test_matmul_add"
f = tvm.build(s, [A, B, D, bias], target, name=name)
if target == "c":
f = compile(f, name)
a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
bb = 10.0
f(a, b, d, bb)
tvm.testing.assert_allclose(
d.numpy(), get_numpy(a.numpy(), b.numpy(), bb, transa, transb), rtol=1e-5
)
verify("llvm")
verify("c")
def test_matmul_add():
verify_matmul_add(235, 128, 1024, cblas)
verify_matmul_add(235, 128, 1024, cblas, True, False)
verify_matmul_add(235, 128, 1024, cblas, False, True)
verify_matmul_add(235, 128, 1024, cblas, True, True)
verify_matmul_add(235, 128, 1024, mkl)
verify_matmul_add(235, 128, 1024, mkl, True, False)
verify_matmul_add(235, 128, 1024, mkl, False, True)
verify_matmul_add(235, 128, 1024, mkl, True, True)
verify_matmul_add(235, 128, 1024, dnnl)
verify_matmul_add(235, 128, 1024, dnnl, True, False)
verify_matmul_add(235, 128, 1024, dnnl, False, True)
verify_matmul_add(235, 128, 1024, dnnl, True, True)
verify_matmul_add(1, 16, 4, cblas)
verify_matmul_add(1, 16, 3, cblas, True, False)
verify_matmul_add(1, 16, 3, cblas, False, False)
verify_matmul_add(1, 16, 3, cblas, True, True)
verify_matmul_add(1, 16, 4, mkl)
verify_matmul_add(1, 16, 3, mkl, True, False)
verify_matmul_add(1, 16, 3, mkl, False, False)
verify_matmul_add(1, 16, 3, mkl, True, True)
verify_matmul_add(1, 16, 4, dnnl)
verify_matmul_add(1, 16, 3, dnnl, True, False)
verify_matmul_add(1, 16, 3, dnnl, False, False)
verify_matmul_add(1, 16, 3, dnnl, True, True)
def verify_quantized_matmul_add(m, l, n, transa=False, transb=False):
if not tvm.get_global_func("tvm.contrib.mkl.matmul_u8s8s32", True):
pytest.skip("Quantized dense is supported only for MKL. TVM GPU CI uses openblas")
data_dtype = "uint8"
kernel_dtype = "int8"
out_dtype = "int32"
bias = te.var("bias", dtype=out_dtype)
ashape = (l, n) if transa else (n, l)
bshape = (m, l) if transb else (l, m)
A = te.placeholder(ashape, name="A", dtype=data_dtype)
B = te.placeholder(bshape, name="B", dtype=kernel_dtype)
C = mkl.matmul_u8s8s32(A, B, transa, transb, dtype=out_dtype)
D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
s = te.create_schedule(D.op)
def get_numpy(a, b, bb, transa, transb):
if transa:
a = a.transpose()
if transb:
b = b.transpose()
return np.dot(a, b) + bb
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func("tvm.contrib.mkl.matmul_u8s8s32", True):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], target)
a = tvm.nd.array(np.random.randint(low=0, high=50, size=ashape).astype(A.dtype), dev)
b = tvm.nd.array(np.random.randint(low=0, high=50, size=bshape).astype(B.dtype), dev)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
bb = 10
f(a, b, d, bb)
tvm.testing.assert_allclose(
d.numpy(),
get_numpy(a.numpy().astype("int32"), b.numpy().astype("int32"), bb, transa, transb),
rtol=1e-5,
)
verify()
def test_quantized_matmul_add():
verify_quantized_matmul_add(235, 128, 1024)
verify_quantized_matmul_add(235, 128, 1024, True, False)
verify_quantized_matmul_add(235, 128, 1024, False, True)
verify_quantized_matmul_add(235, 128, 1024, True, True)
verify_quantized_matmul_add(1, 16, 4)
verify_quantized_matmul_add(1, 16, 3, True, False)
verify_quantized_matmul_add(1, 16, 3, False, True)
verify_quantized_matmul_add(1, 16, 3, True, True)
def verify_batch_matmul(
batch_a, batch_b, m, l, n, lib, transa=False, transb=False, iterative=False, dtype="float32"
):
batch = max(batch_a, batch_b)
ashape = (batch_a, l, n) if transa else (batch_a, n, l)
bshape = (batch_b, m, l) if transb else (batch_b, l, m)
A = te.placeholder(ashape, name="A", dtype=dtype)
B = te.placeholder(bshape, name="B", dtype=dtype)
C = lib.batch_matmul(A, B, transa, transb)
D = te.compute(C.shape, lambda k, i, j: C[k, i, j], name="D")
s = te.create_schedule(D.op)
def get_numpy(a, b, transa, transb):
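        # tvm.topi.testing.batch_matmul expects its second operand as
        # (batch, N, K), i.e. already transposed, hence the flip below when
        # transb is False.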
if transa:
a = a.transpose(0, 2, 1)
if not transb:
b = b.transpose(0, 2, 1)
return tvm.topi.testing.batch_matmul(a, b)
def compile(f, name="test_batch_matmul", ext=".so"):
path = name + ext
f.export_library(path)
mod = tvm.runtime.load_module(path)
f = mod[name]
return f
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
if not tvm.get_global_func(lib.__name__ + ".matmul", True):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
name = "test_batch_matmul"
f = tvm.build(s, [A, B, D], target, name=name)
if target == "c":
f = compile(f, name)
a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
d = tvm.nd.array(np.zeros((batch, n, m), dtype=D.dtype), dev)
f(a, b, d)
tvm.testing.assert_allclose(
d.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5
)
verify("llvm")
verify("c")
def test_batch_matmul():
verify_batch_matmul(16, 16, 235, 128, 1024, cblas)
verify_batch_matmul(16, 16, 235, 128, 1024, cblas, True, False)
verify_batch_matmul(16, 16, 235, 128, 1024, cblas, False, True)
verify_batch_matmul(16, 16, 235, 128, 1024, cblas, True, True)
verify_batch_matmul(16, 16, 235, 128, 1024, mkl)
verify_batch_matmul(16, 16, 235, 128, 1024, mkl, True, False)
verify_batch_matmul(16, 16, 235, 128, 1024, mkl, False, True)
verify_batch_matmul(16, 16, 235, 128, 1024, mkl, True, True)
verify_batch_matmul(16, 1, 235, 128, 1024, cblas)
verify_batch_matmul(1, 16, 235, 128, 1024, cblas)
verify_batch_matmul(16, 1, 235, 128, 1024, cblas, iterative=True)
verify_batch_matmul(1, 16, 235, 128, 1024, cblas, iterative=True)
verify_batch_matmul(16, 1, 235, 128, 1024, mkl)
verify_batch_matmul(1, 16, 235, 128, 1024, mkl)
verify_batch_matmul(16, 1, 235, 128, 1024, mkl, iterative=True)
verify_batch_matmul(1, 16, 235, 128, 1024, mkl, iterative=True)
verify_batch_matmul(1, 1, 1, 16, 3, cblas)
verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, False)
verify_batch_matmul(1, 1, 1, 16, 3, cblas, False, False)
verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, True)
verify_batch_matmul(1, 1, 1, 16, 3, cblas, iterative=True)
verify_batch_matmul(1, 1, 1, 16, 3, mkl)
verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, False)
verify_batch_matmul(1, 1, 1, 16, 3, mkl, False, False)
verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, True)
verify_batch_matmul(1, 1, 1, 16, 3, mkl, iterative=True)
if __name__ == "__main__":
test_matmul_add()
test_quantized_matmul_add()
test_batch_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_clml/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for CLML"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_clml/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
import pytest
from test_clml.infrastructure import Device
@pytest.fixture(scope="session")
def device():
return Device()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_clml/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
# from tvm.contrib.debugger import debug_runtime as graph_executor
from tvm.contrib import graph_executor
from tvm.relay.op.contrib import clml
from tvm.contrib import utils
from tvm import autotvm
from tvm.autotvm.measure import request_remote
from tvm.relay.expr_functor import ExprMutator, Call
class Device:
"""
Configuration for CLML tests.
    Check tests/python/contrib/test_clml/ for the presence of a test_config.json file.
    This file can be used to override the default configuration here, which will attempt to run
    the CLML runtime tests locally if the runtime is available. Changing the configuration
    allows these runtime tests to be offloaded to a remote device via a tracker, for example.
Notes
-----
    The test configuration will be loaded once when the class is created. If the configuration
changes between tests, any changes will not be picked up.
Parameters
----------
device : RPCSession
Allows tests to connect to and use remote device.
Attributes
----------
connection_type : str
Details the type of RPC connection to use. Options:
local - Use the local device,
tracker - Connect to a tracker to request a remote device,
remote - Connect to a remote device directly.
host : str
Specify IP address or hostname of remote target.
port : int
Specify port number of remote target.
target : str
The compilation target.
device_key : str
The device key of the remote target. Use when connecting to a remote device via a tracker.
cross_compile : str
Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
"""
connection_type = "tracker"
host = os.getenv("TVM_TRACKER_HOST", "localhost")
port = int(os.getenv("TVM_TRACKER_PORT", 9090))
target = "opencl"
target_host = "llvm -mtriple=aarch64-linux-gnu"
device_key = "android"
cross_compile = os.getenv("TVM_NDK_CC", "aarch64-linux-android-g++")
def __init__(self):
"""Keep remote device for lifetime of object."""
self.device = self._get_remote()
@classmethod
def _get_remote(cls):
"""Get a remote (or local) device to use for testing."""
if cls.connection_type == "tracker":
device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
elif cls.connection_type == "remote":
device = rpc.connect(cls.host, cls.port)
elif cls.connection_type == "local":
device = rpc.LocalSession()
else:
raise ValueError(
"connection_type in test_config.json should be one of: " "local, tracker, remote."
)
return device
def skip_codegen_test():
"""Skip test if it requires the CLML codegen and it's not present."""
if not tvm.get_global_func("relay.ext.clml", True):
print("Skip because CLML codegen is not available.")
return True
def build_module(mod, target, target_host, params=None, enable_clml=True, tune_log=""):
"""Build module with option to build for CLML."""
if isinstance(mod, tvm.relay.expr.Call):
mod = tvm.IRModule.from_expr(mod)
with autotvm.apply_history_best(tune_log):
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
if enable_clml:
mod = clml.partition_for_clml(mod, params)
relay.backend.te_compiler.get().clear()
return relay.build(mod, target=target, target_host=target_host, params=params)
def build_and_run(
mod, inputs, outputs, params, device, enable_clml=True, no_runs=1, config=None, tune_log=""
):
"""Build and run the relay module."""
if config is None:
config = {}
try:
libm = build_module(mod, device.target, device.target_host, params, enable_clml, tune_log)
clml_modules = extract_clml_modules(libm)
for mod in clml_modules:
source = mod.get_source("json")
codegen = json.loads(source)["nodes"]
# remove input and const names as these cannot be predetermined
for node in range(len(codegen)):
if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
codegen[node]["name"] = ""
codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
except Exception as e:
err_msg = "The module could not be built.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise Exception(err_msg)
lib = update_lib(libm, device.device, device.cross_compile)
gen_module = graph_executor.GraphModule(lib["default"](device.device.cl(0)))
gen_module.set_input(**inputs)
out = []
for _ in range(no_runs):
gen_module.run()
out.append([gen_module.get_output(i) for i in range(outputs)])
time_f = gen_module.module.time_evaluator("run", device.device.cl(0), number=1)
cost = time_f().mean
print("%g secs/iteration\n" % cost)
return out
def update_lib(lib, device, cross_compile):
"""Export the library to the remote/local device."""
lib_name = "mod.so"
temp = utils.tempdir()
lib_path = temp.relpath(lib_name)
if cross_compile:
lib.export_library(lib_path, cc=cross_compile)
else:
lib.export_library(lib_path)
device.upload(lib_path)
lib = device.load_module(lib_name)
return lib
def extract_clml_modules(module):
"""Get the CLML module(s) from llvm module."""
return list(filter(lambda mod: mod.type_key == "clml", module.get_lib().imported_modules))
def verify_codegen(
module,
known_good_codegen,
num_clml_modules=1,
tvm_ops=0,
target="llvm -mtriple=aarch64-linux-gnu",
):
"""Check clml codegen against a known good output."""
    # NOTE: build_module takes (mod, target, target_host, ...); the tvm_ops
    # argument is kept for API parity with other backends but is not consumed here.
    module = build_module(module, target, target_host=target)
clml_modules = extract_clml_modules(module)
assert len(clml_modules) == num_clml_modules, (
f"The number of CLML modules produced ({len(clml_modules)}) does not "
f"match the expected value ({num_clml_modules})."
)
for mod in clml_modules:
source = mod.get_source("json")
codegen = json.loads(source)["nodes"]
# remove input and const names as these cannot be predetermined
for node in range(len(codegen)):
if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
codegen[node]["name"] = ""
codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
assert codegen_str == known_good_codegen_str, (
f"The JSON produced by codegen does not match the expected result. \n"
f"Actual={codegen_str} \n"
f"Expected={known_good_codegen_str}"
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_clml/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""OpenCL ML network tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from test_clml.infrastructure import build_and_run, Device
import pytest
def _build_and_run_network(mod, params, inputs, data, device, atol, rtol, tvm_log=""):
"""Helper function to build and run a network."""
outputs = []
for clml in [True, False]:
outputs.append(
build_and_run(mod, data, 1, params, device, enable_clml=clml, tune_log=tvm_log)[0][0]
)
return outputs
def _get_keras_model(keras_model, inputs_dict, data):
"""Convert Keras graph to relay."""
inputs = {}
for name, (shape, _) in inputs_dict.items():
inputs[keras_model.input_names[0]] = shape
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
def get_bottom_top_model(model, layer_name):
layer = model.get_layer(layer_name)
bottom_input = model.layers[0].input
bottom_output = layer.output
bottom_model = Model(bottom_input, bottom_output)
return bottom_model
keras_model = get_bottom_top_model(keras_model, "predictions")
ref_output = keras_model.predict(data["input_1"].transpose(0, 2, 3, 1))
mod, params = relay.frontend.from_keras(keras_model, inputs, layout="NCHW")
return mod, params, ref_output
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_mobilenet(device, dtype):
def get_model():
from tensorflow.keras.applications import MobileNet
import tensorflow as tf
tf.keras.backend.clear_session()
mobilenet = MobileNet(
include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
)
inputs = {mobilenet.input_names[0]: ((1, 3, 224, 224), "float32")}
data = {}
np.random.seed(0)
for name, (shape, dtype) in inputs.items():
if dtype == "uint8":
low, high = 0, 1
else:
low, high = -1, 1
data[name] = np.random.uniform(low, high, shape).astype(dtype)
mod, params, ref_outputs = _get_keras_model(mobilenet, inputs, data)
return mod, params, inputs, data, ref_outputs
mod, params, inputs, input_data, ref_outputs = get_model()
outputs = _build_and_run_network(
mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
)
    # outputs[0] comes from the CLML path and outputs[1] from plain OpenCL
    # (see the [True, False] loop in _build_and_run_network)
    print("CLML:", outputs[0].asnumpy().shape)
    print("OpenCL:", outputs[1].asnumpy().shape)
opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_inception_v3(device, dtype):
def get_model():
from tensorflow.keras.applications import InceptionV3
import tensorflow as tf
tf.keras.backend.clear_session()
inceptionV3 = InceptionV3(
include_top=True, weights=None, input_shape=(299, 299, 3), classes=1000
)
inputs = {inceptionV3.input_names[0]: ((1, 3, 299, 299), "float16")}
data = {}
np.random.seed(0)
for name, (shape, dtype) in inputs.items():
if dtype == "uint8":
low, high = 0, 1
else:
low, high = -2, 1
data[name] = np.random.uniform(low, high, shape).astype(dtype)
mod, params, ref_outputs = _get_keras_model(inceptionV3, inputs, data)
return mod, params, inputs, data, ref_outputs
mod, params, inputs, input_data, ref_outputs = get_model()
outputs = _build_and_run_network(
mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
)
opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
tvm.testing.assert_allclose(opencl_sort[:5], clml_sort[:5], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_resnet50v2(device, dtype):
def get_model():
from tensorflow.keras.applications import ResNet50V2
import tensorflow as tf
tf.keras.backend.clear_session()
model = ResNet50V2(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)
inputs_dict = {model.input_names[0]: ((1, 3, 224, 224), "float32")}
data = {}
np.random.seed(0)
for name, (shape, dtype) in inputs_dict.items():
if dtype == "uint8":
low, high = 0, 1
else:
low, high = -1, 1
data[name] = np.random.uniform(low, high, shape).astype(dtype)
"""Convert Keras graph to relay."""
inputs = {}
for name, (shape, _) in inputs_dict.items():
inputs[model.input_names[0]] = shape
ref_outputs = model.predict(data["input_1"].transpose(0, 2, 3, 1))
mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
return mod, params, inputs, data, ref_outputs
mod, params, inputs, input_data, ref_outputs = get_model()
outputs = _build_and_run_network(
mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
)
    # outputs[0] comes from the CLML path and outputs[1] from plain OpenCL
    print("CLML:", outputs[0].asnumpy().shape)
    print("OpenCL:", outputs[1].asnumpy().shape)
opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_clml/test_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CLML integration conv2d tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.ir import IRModule
from tvm.contrib import utils
from test_clml.infrastructure import build_and_run, Device, skip_codegen_test
import pytest
def _get_conv_model(
shape,
kernel_h,
kernel_w,
padding,
strides,
dilation,
groups,
dtype,
channels,
var,
has_bias=False,
has_activation=False,
has_pad=False,
):
"""Return a model and any parameters it may have"""
a = relay.var(next(iter(var)), shape=shape, dtype=dtype)
input_arr = var[next(iter(var))]
if has_pad:
p = ((0, 0), (padding[0], padding[0]), (padding[1], padding[1]), (0, 0))
a = relay.nn.pad(a, pad_width=p)
padding = (0, 0, 0, 0)
else:
if len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
shape = (shape[0], shape[1], shape[2] + padding[0] * 2, shape[3] + padding[1] * 2)
is_depthwise = shape[1] == channels == groups
weight_format = "OIHW" if is_depthwise else "OIHW"
if weight_format == "IOHW":
weight_shape = (shape[1] // groups, channels, kernel_h, kernel_w)
else:
weight_shape = (channels, shape[1] // groups, kernel_h, kernel_w)
w = tvm.nd.array(np.random.uniform(-1, 1, weight_shape).astype(dtype))
weights = relay.const(w, dtype)
out = relay.nn.conv2d(
a,
weights,
kernel_size=(kernel_h, kernel_w),
data_layout="NCHW",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
padding=padding,
groups=groups,
channels=channels,
out_dtype=dtype,
)
params = {"w": w}
if has_bias:
bias_shape = weight_shape[2] if is_depthwise else weight_shape[0]
b = tvm.nd.array(np.random.uniform(-1, 1, bias_shape).astype(dtype))
biasc = relay.const(b, dtype)
out = relay.nn.bias_add(out, biasc, axis=1)
params["b"] = b
if has_activation:
out = relay.nn.relu(out)
print("Out:", out)
return out, params
@pytest.mark.parametrize("dtype", ["float32"])
@tvm.testing.requires_openclml
def test_conv2d(device, dtype):
trials = [
# Normal convolution
[3, 3, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, False, False)],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (15, 16, 12), (False, False, True)],
[3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, False)],
[3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, True)],
# Normal convolution
[2, 2, (1, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, False, False)],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (16, 12, 15), (False, False, True)],
[3, 3, (2, 1), (1, 1), (1, 1), 4, (14, 10, 10), (False, True, False)],
[3, 3, (1, 1), (1, 1), (1, 1), 16, (16, 12, 15), (False, False, False)],
[5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), (False, False, False)],
[1, 3, (1, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, True)],
[2, 2, (2, 2), (1, 1), (1, 1), 4, (20, 20, 20), (False, True, False)],
[5, 5, (1, 1), (2, 2), (1, 1), 4, (14, 10, 10), (False, False, False)],
[3, 3, (2, 1), (1, 1), (1, 1), 7, (20, 20, 20), (False, False, False)],
[3, 3, (1, 1), (2, 2), (1, 1), 16, (14, 10, 10), (False, True, True)],
]
for (
kernel_h,
kernel_w,
pad,
stride,
dilation,
out_channels,
shape,
composite,
) in trials:
shape = (1, *shape)
groups = 1
outputs = []
inputs = {
"a": tvm.nd.array(np.random.uniform(-1, 1, shape).astype(dtype)),
}
func, params = _get_conv_model(
shape,
kernel_h,
kernel_w,
pad,
stride,
dilation,
groups,
dtype,
out_channels,
inputs,
has_pad=composite[0],
has_bias=composite[1],
has_activation=composite[2],
)
opencl_out = build_and_run(func, inputs, 1, params, device, enable_clml=False)[0]
clml_out = build_and_run(func, inputs, 1, params, device, enable_clml=True)[0]
tvm.testing.assert_allclose(
clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-5, atol=1e-5
)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def _test_batchnorm(device, dtype):
in_shape = (1, 8, 64, 64)
channels = 8
input_arr = tvm.nd.array(np.random.uniform(-1, 1, in_shape).astype(dtype))
inp = relay.var("a", shape=in_shape, dtype=dtype)
gamma_arr = tvm.nd.array(np.random.uniform(-1, 1, (channels)).astype(dtype))
beta_arr = tvm.nd.array(np.random.uniform(-1, 1, (channels)).astype(dtype))
gamma = relay.const(gamma_arr, dtype)
beta = relay.const(beta_arr, dtype)
mean_arr = tvm.nd.array(np.mean(input_arr.asnumpy(), axis=(0, 2, 3), keepdims=False))
mean = relay.const(mean_arr)
variance_arr = tvm.nd.array(np.var(input_arr.asnumpy(), axis=(0, 2, 3), keepdims=False))
variance = relay.const(variance_arr)
params = {}
func = relay.nn.batch_norm(inp, gamma, beta, mean, variance, axis=1, epsilon=0.0001)[0]
mod = IRModule.from_expr(func)
inputs = {
"a": input_arr,
}
opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
tvm.testing.assert_allclose(
clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-5, atol=1e-5
)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_concat(device, dtype):
in_shape_1 = (1, 16, 16, 16)
in_shape_2 = (1, 16, 16, 16)
a = relay.var("input_1", shape=in_shape_1, dtype=dtype)
b = relay.var("input_2", shape=in_shape_2, dtype=dtype)
low, high = -1, 1
inputs = {
"input_1": tvm.nd.array(np.random.uniform(-1, 1, in_shape_1).astype(dtype)),
"input_2": tvm.nd.array(np.random.uniform(-1, 1, in_shape_2).astype(dtype)),
}
params = {}
func = relay.concatenate((a, b), axis=1)
mod = IRModule.from_expr(func)
opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
tvm.testing.assert_allclose(
clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_avgpool(device, dtype):
trials = [
        # input size          pool_size  stride  padding       pooling type
[(1, 64, 147, 147), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
[(1, 192, 71, 71), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
[(1, 288, 35, 35), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
[(1, 768, 17, 17), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
[(1, 2048, 17, 17), (3, 3), (2, 2), (0, 0, 0, 0), "max"],
[(1, 192, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
[(1, 256, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
[(1, 288, 35, 35), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
[(1, 768, 17, 17), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
[(1, 1280, 8, 8), (3, 3), (1, 1), (0, 0, 1, 1), "avg"],
]
params = {}
for (
input_shape,
pool_size,
stride,
padding,
pooling_type,
) in trials:
a = relay.var("input_1", shape=input_shape, dtype=dtype)
input_arr = tvm.nd.array(np.random.uniform(-1, 1, input_shape).astype(dtype))
inputs = {
"input_1": input_arr,
}
if pooling_type == "max":
func = relay.nn.max_pool2d(a, pool_size=pool_size, strides=stride, padding=padding)
else:
func = relay.nn.avg_pool2d(a, pool_size=pool_size, strides=stride, padding=padding)
mod = IRModule.from_expr(func)
opencl_out = build_and_run(mod, inputs, 1, params, device, enable_clml=False)[0]
clml_out = build_and_run(mod, inputs, 1, params, device, enable_clml=True)[0]
tvm.testing.assert_allclose(
clml_out[0].asnumpy(), opencl_out[0].asnumpy(), rtol=1e-3, atol=1e-3
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for CMSIS-NN"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_binary_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: binary ops"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import generate_ref_data, AOTTestModel, compile_and_run
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import (
skip_if_no_reference_system,
make_module,
make_qnn_relu,
get_range_for_dtype_str,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def generate_tensor_constant():
rng = np.random.default_rng(12321)
dtype = "int8"
shape = (1, 16, 16, 3)
values = tvm.nd.array(
rng.integers(np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=shape, dtype=dtype)
)
return relay.const(values, dtype)
def generate_scalar_constant():
dtype = "int8"
return relay.const(-30, dtype)
def generate_variable(name, dtype="int8"):
return relay.var(name, shape=(1, 16, 16, 3), dtype=dtype)
def make_model(
op,
input_0,
input_1,
input_0_scale,
input_0_zero_point,
input_1_scale,
input_1_zero_point,
relu_type="NONE",
out_scale=1.0 / 256,
out_zero_point=-128,
):
"""Create a Relay Function / network model"""
binary_op = op(
input_0,
input_1,
relay.const(input_0_scale, "float32"),
relay.const(input_0_zero_point, "int32"),
relay.const(input_1_scale, "float32"),
relay.const(input_1_zero_point, "int32"),
relay.const(out_scale, "float32"),
relay.const(out_zero_point, "int32"),
)
return make_qnn_relu(binary_op, relu_type, out_scale, out_zero_point, "int8")
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize(
[
"input_0_scale",
"input_0_zero_point",
"input_1_scale",
"input_1_zero_point",
],
[[0.256, 33, 0.256, 33], [0.0128, -64, 0.0128, -64], [0.0128, -64, 0.256, 33]],
)
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(
op,
relu_type,
input_0_scale,
input_0_zero_point,
input_1_scale,
input_1_zero_point,
compiler_cpu,
cpu_flags,
):
"""Tests QNN binary operator for CMSIS-NN"""
interface_api = "c"
use_unpacked_api = True
dtype = "int8"
shape = [1, 16, 16, 3]
model = make_model(
op,
generate_variable("input_0"),
generate_variable("input_1"),
input_0_scale,
input_0_zero_point,
input_1_scale,
input_1_zero_point,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
inputs = {
"input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
"input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
}
output_list = generate_ref_data(orig_mod["main"], inputs)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
output_tolerance=1,
),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
def test_same_input_to_binary_op(op, relu_type):
"""Tests QNN binary operator for CMSIS-NN where both inputs are the same"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
shape = [1, 16, 16, 3]
input_ = generate_variable("input")
input_scale = 0.256
input_zero_point = 33
model = make_model(
op,
input_,
input_,
input_scale,
input_zero_point,
input_scale,
input_zero_point,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
    # Check that the internal function has exactly 1 parameter
cmsisnn_global_func = cmsisnn_mod["tvmgen_default_cmsis_nn_main_0"]
assert (
isinstance(cmsisnn_global_func.body, tvm.relay.expr.Call)
and len(cmsisnn_global_func.body.args) == 1
), "Composite function for the binary op should have only 1 parameter."
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
inputs = {
"input": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
}
output_list = generate_ref_data(orig_mod["main"], inputs)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
def parameterize_for_constant_inputs(test):
"""Generates parameters in such a way so that at least one of the inputs is a constant,
both can't be variables, both can't be scalars.
"""
op = [relay.qnn.op.mul, relay.qnn.op.add]
input_0 = [generate_variable("input_0"), generate_tensor_constant(), generate_scalar_constant()]
input_1 = [generate_variable("input_1"), generate_tensor_constant(), generate_scalar_constant()]
all_combinations = itertools.product(op, input_0, input_1)
all_combinations = filter(
lambda parameters: not (
(
isinstance(parameters[1], tvm.relay.expr.Var)
and isinstance(parameters[2], tvm.relay.expr.Var)
)
or (
isinstance(parameters[1], tvm.relay.expr.Constant)
and isinstance(parameters[2], tvm.relay.expr.Constant)
and parameters[1].data.numpy().ndim == 0
and parameters[2].data.numpy().ndim == 0
)
),
all_combinations,
)
return pytest.mark.parametrize(
["op", "input_0", "input_1"],
all_combinations,
)(test)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@parameterize_for_constant_inputs
def test_constant_input_int8(op, input_0, input_1):
"""Tests binary ops where one of the operands is a constant"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
shape = [1, 16, 16, 3]
input_0_scale = 0.256
input_0_zero_point = 33
input_1_scale = 0.128
input_1_zero_point = -24
model = make_model(
op,
input_0,
input_1,
input_0_scale,
input_0_zero_point,
input_1_scale,
input_1_zero_point,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
inputs = {}
if isinstance(input_0, tvm.relay.expr.Var):
inputs.update({"input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)})
if isinstance(input_1, tvm.relay.expr.Var):
inputs.update({"input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)})
output_list = generate_ref_data(orig_mod["main"], inputs)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
def test_both_scalar_inputs_int8(
op,
):
"""Tests binary ops where both operands are scalars"""
input_scale = 0.256
input_zero_point = 33
model = make_model(
op,
generate_scalar_constant(),
generate_scalar_constant(),
input_scale,
input_zero_point,
input_scale,
input_zero_point,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.qnn.op.mul, relay.qnn.op.add])
@pytest.mark.parametrize(["input_dtype"], [["uint8"], ["int16"]])
def test_invalid_parameters(
op,
input_dtype,
):
"""Tests binary ops for non int8 dtypes"""
input_scale = 0.256
input_zero_point = 33
model = make_model(
op,
generate_variable("input_0", input_dtype),
generate_variable("input_1", input_dtype),
input_scale,
input_zero_point,
input_scale,
input_zero_point,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Conv2D"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
generate_ref_data,
AOTTestModel,
compile_models,
compile_and_run,
run_and_check,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_range_for_dtype_str,
get_same_padding,
get_conv2d_qnn_params,
get_kernel_bias_dtype,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
CheckForPadsWithinCompositeFunc,
)
def make_model(
shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
input_op=None,
):
"""Return a model and any parameters it may have"""
if input_op:
op = input_op
else:
op = relay.var("input", shape=shape, dtype=dtype)
h_index = kernel_layout.index("H")
w_index = kernel_layout.index("W")
kernel_h = kernel_shape[h_index]
kernel_w = kernel_shape[w_index]
p = (0, 0, 0, 0)
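    # Explicit padding is given as (top, left, bottom, right); for "SAME" it is derived from the kernel size, dilation and strides.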
if padding == "SAME":
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
rng = np.random.default_rng(12321)
kernel = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
size=kernel_shape,
dtype=kernel_dtype,
)
)
kernel_const = relay.const(kernel, kernel_dtype)
conv2d_kernel_sc = kernel_scale[0] if out_channels == 1 else kernel_scale
conv = relay.qnn.op.conv2d(
op,
kernel_const,
input_zero_point=relay.const(input_zero_point, "int32"),
kernel_zero_point=relay.const(kernel_zero_point, "int32"),
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(conv2d_kernel_sc, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=kernel_layout,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p,
out_dtype=bias_dtype,
)
bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype=bias_dtype))
bias_const = relay.const(bias, bias_dtype)
last_op = relay.nn.bias_add(conv, bias_const, axis=3) if enable_bias else conv
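    # The requantize input scale folds the input scale with the per-channel kernel scales;
    # it collapses to a scalar when there is a single output channel.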
requant_input_sc = [sc * input_scale for sc in kernel_scale]
requant_input_sc = requant_input_sc[0] if out_channels == 1 else requant_input_sc
last_op = relay.qnn.op.requantize(
last_op,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_scale, "float32"),
relay.const(output_zero_point, "int32"),
out_dtype=dtype,
)
last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
params = {"w": kernel, "b": bias}
return last_op, params
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale, out_channels",
[(10, 0.0128, [0.11, 0.22], 2)],
)
def test_conv2d_number_primfunc_args(
padding,
enable_bias,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
):
"""Tests number of arguments in Conv2D primfunc"""
interface_api = "c"
use_unpacked_api = True
ifm_shape = (1, 64, 100, 4)
kernel_size = (3, 3)
strides = (1, 1)
dilation = (1, 1)
dtype = "int8"
groups = 1
kernel_layout = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
relu_type = "RELU"
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# compile the model
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compiled_models = compile_models(
AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
interface_api,
use_unpacked_api,
)
# validate number of TIR primfunc args
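    # The primfunc presumably takes input, filter, per-channel multiplier, shift and output buffers, plus an optional bias.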
expected_num_params = 6 if enable_bias else 5
cmsisnn_tir_mod = None
for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
if target.kind.name == "cmsis-nn":
cmsisnn_tir_mod = mod
cmsisnn_func = cmsisnn_tir_mod["tvmgen_default_cmsis_nn_main_0"]
assert (
len(cmsisnn_func.params) == expected_num_params
), "Generated unexpected number of function arguments."
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int8", "int16"])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("relu_type", ["RELU"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale, out_channels",
[(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_conv2d_symmetric_padding(
dtype,
padding,
enable_bias,
relu_type,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
compiler_cpu,
cpu_flags,
):
"""Tests QNN Conv2D where the padding is symmetric on both sides of input"""
interface_api = "c"
use_unpacked_api = True
ifm_shape = (1, 64, 100, 4)
kernel_size = (3, 3)
strides = (1, 1)
dilation = (1, 1)
groups = 1
# input_zero_point is not handled by TFLM when int16
input_zero_point = input_zero_point if dtype == "int8" else 0
kernel_layout = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale, out_channels",
[(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
def test_conv2d_asymmetric_padding(
padding,
enable_bias,
relu_type,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
):
"""Tests QNN Conv2D where the padding is asymmetric on different sides of input"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
ifm_shape = (1, 25, 25, 12)
kernel_size = (5, 5)
strides = (2, 2)
dilation = (1, 1)
groups = 1
input_zero_point = input_zero_point if dtype == "int8" else 0
kernel_layout = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("ifm_shape", [(1, 25, 25, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize(
"pad_width",
[
((0, 0), (0, 1), (1, 2), (0, 0)),
((0, 0), (1, 1), (1, 1), (0, 0)),
((0, 0), (2, 2), (3, 4), (0, 0)),
],
)
def test_pad_conv2d_fusion_int8(
ifm_shape,
pad_width,
):
"""Tests QNN Conv2D where the padding is asymmetric on different sides of input"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
kernel_size = (5, 5)
strides = (2, 2)
dilation = (1, 1)
padding = "SAME"
dtype = "int8"
enable_bias = True
relu_type = "NONE"
input_zero_point = 10
input_scale = 0.0128
kernel_scale = [0.11, 0.22]
out_channels = 2
groups = 1
kernel_layout = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
)
invar = relay.var("input", shape=ifm_shape, dtype=dtype)
pad = relay.nn.pad(
invar,
pad_width=pad_width, # ((), (top, bottom), (left, right), ())
pad_value=input_zero_point,
pad_mode="constant",
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
input_op=pad,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod, False)
# check pad is not present inside CMSIS-NN partitioned function
cmsisnn_func = None
for var in cmsisnn_mod.get_global_vars():
if "cmsis_nn_main_0" in var.name_hint:
cmsisnn_func = cmsisnn_mod[var]
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(cmsisnn_func)
pad_verifier.assert_no_pads_within_func()
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(
"ifm_shape, pad_width, conv2d_padding",
[
[(1, 25, 25, 12), ((0, 0), (0, 2), (1, 2), (0, 0)), "SAME"],
[(1, 64, 100, 4), ((0, 0), (1, 3), (1, 1), (0, 0)), "VALID"],
[(1, 55, 55, 3), ((0, 0), (2, 1), (3, 5), (0, 0)), "SAME"],
],
)
def test_invalid_pad_conv2d_fusion_int8(
ifm_shape,
pad_width,
conv2d_padding,
):
"""Tests QNN Conv2D where the padding is asymmetric on different sides of input"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
kernel_size = (5, 5)
strides = (2, 2)
dilation = (1, 1)
dtype = "int8"
enable_bias = True
relu_type = "NONE"
input_zero_point = 10
input_scale = 0.0128
kernel_scale = [0.11, 0.22]
out_channels = 2
groups = 1
kernel_layout = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
)
invar = relay.var("input", shape=ifm_shape, dtype=dtype)
pad = relay.nn.pad(
invar,
pad_width=pad_width, # ((), (top, bottom), (left, right), ())
pad_value=input_zero_point,
pad_mode="constant",
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
conv2d_padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
input_op=pad,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# check pad is only present inside main function
cmsisnn_func = None
for var in cmsisnn_mod.get_global_vars():
if "cmsis_nn_main_0" in var.name_hint:
cmsisnn_func = cmsisnn_mod[var]
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(cmsisnn_func)
pad_verifier.assert_no_pads_within_func()
else:
main_func = cmsisnn_mod[var]
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(main_func)
pad_verifier.assert_pads_within_func()
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
# pylint: disable=import-outside-toplevel
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_conv2d_int8_tflite(ifm_shape, kernel_shape, strides, dilation, padding, activation):
"""Compares TVM output against TFLite output"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
from tvm.relay.testing.tflite import TFLiteModel
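    # Imported locally since TFLite is an optional dependency (see the pylint disable above).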
tfl_model = TFLiteModel(dtype)
conv2d_function = tfl_model.create_conv2d_single(
kernel_shape, strides, padding, dilation, activation
)
tfl_model.create_tflite_model(conv2d_function, [ifm_shape])
relay_mod, relay_params = tfl_model.convert_to_relay()
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(relay_mod, relay_params)
# validate pattern matching
assert_partitioned_function(relay_mod, cmsisnn_mod)
# validate CMSIS-NN output against TFLite output
input_map, output_map, output_tolerance = tfl_model.generate_reference_data()
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=input_map,
outputs=output_map,
params=relay_params,
output_tolerance=output_tolerance,
),
test_runner,
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("dtype", ["int8", "int16"])
@pytest.mark.parametrize("ifm_shape", [(1, 28, 28, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize("kernel_size", [(3, 3)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("relu_type", ["RELU"])
@pytest.mark.parametrize(
"depth_multiplier, enable_bias",
[(1, True), (3, True)],
)
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale, out_channels",
[(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_depthwise(
dtype,
ifm_shape,
kernel_size,
padding,
strides,
dilation,
enable_bias,
relu_type,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
depth_multiplier,
compiler_cpu,
cpu_flags,
):
"""Tests QNN Depthwise int8 op via CMSIS-NN"""
interface_api = "c"
use_unpacked_api = True
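    # Depthwise setup: groups equals the input channel count and the kernel layout is HWOI,
    # with depth_multiplier as the O dimension.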
    input_zero_point = input_zero_point if dtype == "int8" else 0
    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    kernel_zero_point = 0
    in_min, in_max = get_range_for_dtype_str(dtype)
    groups = ifm_shape[3]
    kernel_layout = "HWOI"
    kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
out_channels = ifm_shape[3] * depth_multiplier
ks_len = len(kernel_scale)
kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
is_depthwise=True,
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("relu_type", ["RELU", "NONE"])
@pytest.mark.parametrize("depth_multiplier", [1, 3])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale",
[
(
10,
0.0128,
[0.11, 0.22],
),
(
-64,
1,
[1, 0.0256, 1.37],
),
],
)
def test_relay_conv2d_cmsisnn_depthwise_int8(
padding,
strides,
dilation,
relu_type,
input_zero_point,
input_scale,
kernel_scale,
depth_multiplier,
):
"""Tests QNN Depthwise int8 op via CMSIS-NN"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
ifm_shape = (1, 24, 24, 1)
groups = ifm_shape[3]
input_zero_point = input_zero_point if dtype == "int8" else 0
kernel_layout = "HWIO"
(kernel_h, kernel_w) = (3, 3)
kernel_shape = (kernel_h, kernel_w, ifm_shape[3], depth_multiplier)
out_channels = ifm_shape[3] * depth_multiplier
enable_bias = True
ks_len = len(kernel_scale)
kernel_zero_point = 0
kernel_scale = [kernel_scale[i % ks_len] for i in range(out_channels)]
kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
input_dtype=dtype,
kernel_dtype=kernel_dtype,
output_dtype=dtype,
is_depthwise=True,
)
model, params = make_model(
ifm_shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
bias_dtype,
out_channels,
kernel_layout,
enable_bias,
relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# generate reference output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=ifm_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
# validate presence of depthwise convolution
compiled_models = compile_models(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
interface_api,
use_unpacked_api,
pass_config=test_runner.pass_config,
)
cmsisnn_tir_mod = None
for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
if target.kind.name == "cmsis-nn":
cmsisnn_tir_mod = mod
cmsisnn_func = cmsisnn_tir_mod["tvmgen_default_cmsis_nn_main_0"]
call_extern = None
    # A context buffer is initialized when depth_multiplier != 1, wrapping the extern call one level deeper
if isinstance(cmsisnn_func.body, tvm.tir.stmt.Evaluate):
call_extern = cmsisnn_func.body.value
else:
call_extern = cmsisnn_func.body.body.value
assert (
call_extern.args[0].value == "arm_depthwise_conv_wrapper_s8"
), "Relay Conv2D should be mapped to CMSIS-NN Depthwise Convolution."
# validate the output
run_and_check(
models=compiled_models,
runner=test_runner,
interface_api=interface_api,
)
def parameterize_for_invalid_model(test):
"""Generates non-int8 non-int16 inputs"""
in_dtype = ["uint8", "int8", "int16"]
kernel_dtype = ["uint8", "int8"]
kernel_zero_point = [-33, 10, 0]
input_zero_point = [64, 0]
all_combinations = itertools.product(
in_dtype, kernel_dtype, kernel_zero_point, input_zero_point
)
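    # Keep only invalid combinations; valid ones (int8 input, or int16 input with zero input_zp,
    # paired with an int8 kernel and kernel_zp == 0) are filtered out.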
all_combinations = filter(
lambda parameters: not (
(parameters[0] == "int8" or (parameters[0] == "int16" and parameters[3] == 0))
and parameters[1] == "int8"
and parameters[2] == 0
),
all_combinations,
)
return pytest.mark.parametrize(
["in_dtype", "kernel_dtype", "kernel_zero_point", "input_zero_point"],
all_combinations,
)(test)
@tvm.testing.requires_cmsisnn
@parameterize_for_invalid_model
def test_invalid_parameters(
in_dtype,
kernel_dtype,
kernel_zero_point,
input_zero_point,
):
"""Tests Depthwise op for non int8 inputs"""
ifm_shape = (1, 28, 28, 12)
out_channels = 2
input_scale = 1
kernel_scale = [0.11, 0.0237]
kernel_layout = "HWIO"
kernel_shape = [3, 3, ifm_shape[3], out_channels]
_, bias_dtype = get_kernel_bias_dtype(in_dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
in_dtype,
kernel_dtype,
in_dtype,
is_depthwise=False,
)
model, params = make_model(
shape=ifm_shape,
kernel_shape=kernel_shape,
input_zero_point=input_zero_point,
input_scale=input_scale,
kernel_zero_point=kernel_zero_point,
kernel_scale=kernel_scale,
output_zero_point=output_zero_point,
output_scale=output_scale,
padding="SAME",
strides=(1, 1),
dilation=(1, 1),
groups=1,
dtype=in_dtype,
kernel_dtype=kernel_dtype,
bias_dtype=bias_dtype,
out_channels=out_channels,
kernel_layout=kernel_layout,
enable_bias=True,
relu_type="NONE",
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_extract_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: extract_constants pass"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
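# Makes the C++ passes, e.g. ExtractConstantsFromPartitionedFunction, callable from this module.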
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
"""Provides methods to test number of constants present in a function"""
def __init__(self):
super().__init__()
self.num_constants_ = 0
def visit_call(self, call):
super().visit_call(call)
for arg in call.args:
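            # Scalar constants (ndim == 0) may stay inline; only tensor constants are counted.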
if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0:
self.num_constants_ += 1
def check_num_constants(self):
assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
def set_composite_func_attr(func, name):
func = func.with_attr("Composite", name)
return func
@tvm.testing.requires_cmsisnn
def test_external_function():
"""Tests the pass ExternConstants when the function is a global function"""
input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
input0 = relay.var("input0", shape=(8, 8))
input1_const = relay.const(input1_data, "float32")
binary_op = input0 + input1_const
extern_func = relay.Function([input0], binary_op, relay.TensorType((8, 8), "float32"))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
arg = relay.var("arg", shape=(8, 8))
call_extern_func = relay.Call(global_var, [arg])
main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
constant_verifier = CheckFunctionsForConstants()
constant_verifier.visit_function(mod[global_var])
constant_verifier.check_num_constants()
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_nested_function():
"""Tests the pass ExternConstants when a composite function
is present within global function
"""
input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
input0 = relay.var("input0", shape=(8, 8))
input1_const = relay.const(input1_data, "float32")
binary_op0 = input0 + input1_const
binary_op1 = binary_op0 * relay.const(5.0, "float32")
local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32"))
local_func = set_composite_func_attr(local_func, "cmsis-nn")
arg = relay.var("arg", shape=(8, 8))
call_local_func = relay.Call(local_func, [arg])
extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32"))
global_arg = relay.var("garg", shape=(8, 8))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [global_arg])
main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
constant_verifier = CheckFunctionsForConstants()
constant_verifier.visit_function(mod[global_var])
constant_verifier.check_num_constants()
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_internal_function_with_duplicate_arguments():
"""Tests the pass ExternConstants when a composite function
is present within global function with repeating arguments
to one of the binary ops.
"""
input0 = relay.var("input0", shape=(8, 8))
binary_op0 = input0 + input0
binary_op1 = binary_op0 * relay.const(5.0, "float32")
local_func = relay.Function([input0], binary_op1, relay.TensorType((8, 8), "float32"))
local_func = set_composite_func_attr(local_func, "cmsis-nn")
arg = relay.var("arg", shape=(8, 8))
call_local_func = relay.Call(local_func, [arg])
extern_func = relay.Function([arg], call_local_func, relay.TensorType((8, 8), "float32"))
global_arg = relay.var("global_var", shape=(8, 8))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [global_arg])
main_func = relay.Function([global_arg], call_extern_func, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
constant_verifier = CheckFunctionsForConstants()
constant_verifier.visit_function(mod[global_var])
constant_verifier.check_num_constants()
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_multiple_functions():
"""Tests the pass ExternConstants when global function
contains multiple composite functions inside it
"""
f0_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
f0_input0 = relay.var("f0_in0", shape=(8, 8))
f0_input1_const = relay.const(f0_input1_data, "float32")
f0_binary_op = f0_input0 + f0_input1_const
f0_func = relay.Function([f0_input0], f0_binary_op, relay.TensorType((8, 8), "float32"))
f0_func = set_composite_func_attr(f0_func, "cmsis-nn")
f1_input1_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
f1_input0 = relay.var("f1_in0", shape=(8, 8))
f1_input1_const = relay.const(f1_input1_data, "float32")
f1_binary_op = f1_input0 + f1_input1_const
f1_func = relay.Function([f1_input0], f1_binary_op, relay.TensorType((8, 8), "float32"))
f1_func = set_composite_func_attr(f1_func, "cmsis-nn")
arg0 = relay.var("arg0", shape=(8, 8))
call_local_func0 = relay.Call(f0_func, [arg0])
call_local_func1 = relay.Call(f1_func, [call_local_func0])
extern_func = relay.Function([arg0], call_local_func1, relay.TensorType((8, 8), "float32"))
input0 = relay.var("input0", shape=(8, 8))
global_var = relay.GlobalVar("cmsis-nn")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [input0])
main_func = relay.Function([input0], call_extern_func, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
constant_verifier = CheckFunctionsForConstants()
constant_verifier.visit_function(mod[global_var])
constant_verifier.check_num_constants()
relay.transform.InferType()(mod)
@tvm.testing.requires_cmsisnn
def test_main_function():
"""Tests the pass ExternConstants on main function"""
input0 = relay.var("input0", shape=(8, 8))
input1 = relay.var("input1", shape=(8, 8))
binary_op = input0 + input1
extern_func = relay.Function([input0, input1], binary_op, relay.TensorType((8, 8), "float32"))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
arg = relay.var("arg", shape=(8, 8))
input_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
input_const = relay.const(input_data, "float32")
binary_op = arg + input_const
call_extern_func = relay.Call(global_var, [arg, binary_op])
main_func = relay.Function([arg], call_extern_func, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[main_var].body)
    assert (
        check_for_constants.num_constants_ == 1
    ), "Constant argument in main() should have been left in place"
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("external_compiler", ["cmsis-nn", "other_compiler"])
def test_multiple_functions_non_cmsisnn_compiler(external_compiler):
"""Tests the pass ExternConstants on non CMSIS-NN targets"""
y20_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x20 = relay.var("x20", shape=(8, 8))
y20_const = relay.const(y20_data, "float32")
z20 = x20 + y20_const
f20 = relay.Function([x20], z20, relay.TensorType((8, 8), "float32"))
f20 = set_composite_func_attr(f20, "cmsis-nn.qnn_op_1")
x10 = relay.var("x10", shape=(8, 8))
call_local_func0 = relay.Call(f20, [x10])
extern_func0 = relay.Function([x10], call_local_func0, relay.TensorType((8, 8), "float32"))
y21_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x21 = relay.var("x21", shape=(8, 8))
y21_const = relay.const(y21_data, "float32")
z21 = x21 + y21_const
f21 = relay.Function([x21], z21, relay.TensorType((8, 8), "float32"))
f21 = set_composite_func_attr(f21, "cmsis-nn.qnn_op_2")
x11 = relay.var("x11", shape=(8, 8))
call_local_func1 = relay.Call(f21, [x11])
extern_func1 = relay.Function([x11], call_local_func1, relay.TensorType((8, 8), "float32"))
input0 = relay.var("input0", shape=(8, 8))
global_var0 = relay.GlobalVar("external_function_0")
extern_func0 = set_external_func_attr(extern_func0, external_compiler, global_var0.name_hint)
call_extern_func0 = relay.Call(global_var0, [input0])
global_var1 = relay.GlobalVar("external_function_1")
extern_func1 = set_external_func_attr(extern_func1, external_compiler, global_var1.name_hint)
call_extern_func1 = relay.Call(global_var1, [call_extern_func0])
main_func = relay.Function([input0], call_extern_func1, relay.TensorType((8, 8), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var0] = extern_func0
mod[global_var1] = extern_func1
mod[main_var] = main_func
mod = ExtractConstantsFromPartitionedFunction()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[main_var].body)
num_extracted_constants = 0
if external_compiler == "cmsis-nn":
num_extracted_constants = 2
    assert (
        check_for_constants.num_constants_ == num_extracted_constants
    ), "main() should hold the constants extracted from the partitioned functions"
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_fully_connected.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Fully Connected"""
import itertools
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import generate_ref_data, AOTTestModel, compile_and_run
from .utils import (
make_module,
get_range_for_dtype_str,
get_conv2d_qnn_params,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def make_model(
in_shape, # [batchsize, in_channels]
kernel_shape, # [out_channels, num_inputs]
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
output_zero_point,
output_scale,
dtype,
kernel_dtype,
out_channels,
enable_bias,
relu_type="NONE",
):
"""Return a model and any parameters it may have"""
input_ = relay.var("input", shape=in_shape, dtype=dtype)
rng = np.random.default_rng(12321)
weight = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
size=kernel_shape,
dtype=kernel_dtype,
)
)
weight_const = relay.const(weight, kernel_dtype)
dense = relay.qnn.op.dense(
input_,
weight_const,
input_zero_point=relay.const(input_zero_point, "int32"),
kernel_zero_point=relay.const(kernel_zero_point, "int32"),
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
units=out_channels,
out_dtype="int32",
)
bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype="int32"))
bias_const = relay.const(bias, "int32")
last_op = relay.nn.bias_add(dense, bias_const) if enable_bias else dense
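    # For a dense layer the requantize input scale is simply input_scale * kernel_scale.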
requant_input_sc = input_scale * kernel_scale
last_op = relay.qnn.op.requantize(
last_op,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_scale, "float32"),
relay.const(output_zero_point, "int32"),
out_dtype=dtype,
)
last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
params = {"w": weight, "b": bias}
return last_op, params
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("in_shape", [(2, 28), (1, 64)])
@pytest.mark.parametrize("out_channels", [12, 128])
@pytest.mark.parametrize("enable_bias", [False, True])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale",
[(10, 0.0128, 0.11), (-64, 0.0256, 1.37)],
)
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(
in_shape,
enable_bias,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
compiler_cpu,
cpu_flags,
):
"""Test QNN fully connected layer"""
interface_api = "c"
use_unpacked_api = True
dtype = "int8"
kernel_zero_point = 0
kernel_shape = [out_channels, in_shape[1]]
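    # Output qnn params are derived by treating the dense layer as a 1x1 convolution.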
conv2d_kernel_shape = (1, 1, kernel_shape[0], kernel_shape[1])
in_min, in_max = get_range_for_dtype_str(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
conv2d_kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
dtype,
)
model, params = make_model(
in_shape,
kernel_shape,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
output_zero_point,
output_scale,
dtype,
dtype,
out_channels,
enable_bias,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
rng = np.random.default_rng(12345)
inputs = {"input": rng.integers(in_min, high=in_max, size=in_shape, dtype=dtype)}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
def parameterize_for_invalid_model(test):
"""Generates parameters for non int8 inputs to fully connected layer"""
in_dtype = ["uint8", "int8"]
kernel_dtype = ["uint8", "int8"]
kernel_zero_point = [-33, 10, 0]
all_combinations = itertools.product(in_dtype, kernel_dtype, kernel_zero_point)
all_combinations = filter(
lambda parameters: not (
parameters[0] == "int8" and parameters[1] == "int8" and parameters[2] == 0
),
all_combinations,
)
return pytest.mark.parametrize(
["in_dtype", "kernel_dtype", "kernel_zero_point"],
all_combinations,
)(test)
@tvm.testing.requires_cmsisnn
@parameterize_for_invalid_model
def test_invalid_parameters(
in_dtype,
kernel_dtype,
kernel_zero_point,
):
"""Tests fully connected layer with non int8 inputs"""
in_shape = (2, 28)
out_channels = 2
input_scale = 1
input_zero_point = 24
kernel_scale = [0.11, 0.0237]
kernel_shape = [out_channels, in_shape[1]]
conv2d_kernel_shape = [1, 1, kernel_shape[0], kernel_shape[1]]
output_scale, output_zero_point = get_conv2d_qnn_params(
conv2d_kernel_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
in_dtype,
kernel_dtype,
in_dtype,
)
model, params = make_model(
in_shape=in_shape,
kernel_shape=kernel_shape,
input_zero_point=input_zero_point,
kernel_zero_point=kernel_zero_point,
input_scale=input_scale,
kernel_scale=kernel_scale,
output_zero_point=output_zero_point,
output_scale=output_scale,
dtype=in_dtype,
kernel_dtype=kernel_dtype,
out_channels=out_channels,
enable_bias=True,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate pattern matching
assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_fuse_pads.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: fuse_pads pass"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from .utils import CheckForPadsWithinCompositeFunc
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
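# Makes the CMSISNNFusePads pass callable from this module.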
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
def set_composite_func_attr(func, name):
func = func.with_attr("Composite", name)
return func
@pytest.mark.parametrize(
"ifm_shape, pad_width, conv2d_padding, ofm_shape",
[
[(1, 25, 25, 12), ((0, 0), (0, 2), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
[(1, 64, 100, 4), ((0, 0), (1, 3), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
[(1, 55, 55, 3), ((0, 0), (2, 1), (3, 5), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
],
)
def test_invalid_padding_for_fusion(ifm_shape, pad_width, conv2d_padding, ofm_shape):
"""Negative tests for pads preceding Conv2D that cannot be fused."""
dtype = "int8"
kernel_size = (3, 3)
ofm_channels = 2
local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
pad = relay.nn.pad(
local_input,
pad_width=pad_width, # ((), (top, bottom), (left, right), ())
pad_value=10,
pad_mode="constant",
)
rng = np.random.default_rng(12321)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
)
local_weight = relay.const(local_weight, dtype)
conv2d = relay.qnn.op.conv2d(
pad,
local_weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
data_layout="NHWC",
kernel_layout="OHWI",
channels=ofm_channels,
kernel_size=(3, 3),
padding=conv2d_padding,
out_dtype="int32",
)
requantize = relay.qnn.op.requantize(
conv2d,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=0,
out_dtype=dtype,
)
local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
mod = tvm.IRModule()
ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
call_local_func = relay.Call(local_func, [ext_input])
extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
extern_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
mod[extern_var] = extern_func
main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
call_extern_func = relay.Call(extern_var, [main_input])
main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
main_var = relay.GlobalVar("main")
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
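    # The pass should raise because the padding is too asymmetric to fold into the conv2d attributes.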
error_regex = r"Difference on each side of a dimension should be either 0 or 1"
with pytest.raises(tvm.TVMError, match=error_regex):
mod = CMSISNNFusePads()(mod)
@pytest.mark.parametrize(
"ifm_shape, pad_width, conv2d_padding, ofm_shape",
[
[(1, 25, 25, 12), ((0, 0), (0, 1), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
[(1, 64, 100, 4), ((0, 0), (1, 1), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
[(1, 55, 55, 3), ((0, 0), (2, 1), (3, 2), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
],
)
def test_pad_conv2d_fusion_noncmsisnn_target(ifm_shape, pad_width, conv2d_padding, ofm_shape):
"""Tests the pads and conv2d fusion for non-cmsisnn targets.
It is expected that pad will not be fused with Conv2D in this case.
"""
dtype = "int8"
kernel_size = (3, 3)
ofm_channels = 2
local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
pad = relay.nn.pad(
local_input,
pad_width=pad_width, # ((), (top, bottom), (left, right), ())
pad_value=10,
pad_mode="constant",
)
rng = np.random.default_rng(12321)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
)
local_weight = relay.const(local_weight, dtype)
conv2d = relay.qnn.op.conv2d(
pad,
local_weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
data_layout="NHWC",
kernel_layout="OHWI",
channels=ofm_channels,
kernel_size=(3, 3),
padding=conv2d_padding,
out_dtype="int32",
)
requantize = relay.qnn.op.requantize(
conv2d,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=0,
out_dtype=dtype,
)
local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
local_func = set_composite_func_attr(local_func, "noncmsis-nn.qnn_conv2d")
mod = tvm.IRModule()
ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
call_local_func = relay.Call(local_func, [ext_input])
extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
extern_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "noncmsis-nn", extern_var.name_hint)
mod[extern_var] = extern_func
main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
call_extern_func = relay.Call(extern_var, [main_input])
main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
main_var = relay.GlobalVar("main")
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = CMSISNNFusePads()(mod)
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(mod[extern_var])
pad_verifier.assert_pads_within_func()
@pytest.mark.parametrize(
"ifm_shape, pad_width, conv2d_padding, ofm_shape",
[
[(1, 25, 25, 12), ((0, 0), (0, 1), (1, 2), (0, 0)), (1, 1, 1, 1), (1, 26, 28, 2)],
[(1, 64, 100, 4), ((0, 0), (1, 1), (1, 1), (0, 0)), (0, 0, 0, 0), (1, 64, 100, 2)],
[(1, 55, 55, 3), ((0, 0), (2, 1), (3, 2), (0, 0)), (0, 0, 1, 1), (1, 57, 59, 2)],
],
)
def test_pad_conv2d_fusion(ifm_shape, pad_width, conv2d_padding, ofm_shape):
"""Tests the pads and conv2d fusion."""
dtype = "int8"
kernel_size = (3, 3)
ofm_channels = 2
local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
pad = relay.nn.pad(
local_input,
pad_width=pad_width, # ((), (top, bottom), (left, right), ())
pad_value=10,
pad_mode="constant",
)
rng = np.random.default_rng(12321)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
)
local_weight = relay.const(local_weight, dtype)
conv2d = relay.qnn.op.conv2d(
pad,
local_weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
data_layout="NHWC",
kernel_layout="OHWI",
channels=ofm_channels,
kernel_size=(3, 3),
padding=conv2d_padding,
out_dtype="int32",
)
requantize = relay.qnn.op.requantize(
conv2d,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=0,
out_dtype=dtype,
)
local_func = relay.Function(relay.analysis.free_vars(requantize), requantize)
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
mod = tvm.IRModule()
ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
call_local_func = relay.Call(local_func, [ext_input])
extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
extern_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
mod[extern_var] = extern_func
main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
call_extern_func = relay.Call(extern_var, [main_input])
main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
main_var = relay.GlobalVar("main")
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = CMSISNNFusePads()(mod)
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(mod[extern_var])
pad_verifier.assert_no_pads_within_func()
def test_without_preceding_pad():
"""Tests the pass FusePads when padding is not present before qnn.conv2d."""
dtype = "int8"
ifm_shape = (1, 56, 56, 64)
ofm_shape = (1, 56, 56, 64)
local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
rng = np.random.default_rng(12321)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
size=(64, 3, 3, 64),
dtype=dtype,
)
)
local_weight = relay.const(local_weight, dtype)
conv2d = relay.qnn.op.conv2d(
local_input,
local_weight,
relay.const(1, "int32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "float32"),
data_layout="NHWC",
kernel_layout="OHWI",
channels=64,
kernel_size=(3, 3),
padding=(1, 1, 1, 1),
out_dtype="int32",
)
requantize = relay.qnn.op.requantize(
conv2d,
relay.const(1, "float32"),
relay.const(1, "int32"),
relay.const(1, "float32"),
relay.const(1, "int32"),
axis=0,
out_dtype=dtype,
)
relu = relay.nn.relu(requantize)
local_func = relay.Function(relay.analysis.free_vars(relu), relu)
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_conv2d")
mod = tvm.IRModule()
ext_input = relay.var("ext_input", shape=ifm_shape, dtype=dtype)
call_local_func = relay.Call(local_func, [ext_input])
extern_func = relay.Function(relay.analysis.free_vars(call_local_func), call_local_func)
extern_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", extern_var.name_hint)
mod[extern_var] = extern_func
main_input = relay.var("main_input", shape=ifm_shape, dtype=dtype)
call_extern_func = relay.Call(extern_var, [main_input])
    main_func = relay.Function([main_input], call_extern_func, relay.TensorType(ofm_shape, dtype))
main_var = relay.GlobalVar("main")
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = CMSISNNFusePads()(mod)
pad_verifier = CheckForPadsWithinCompositeFunc()
pad_verifier.visit_function(mod[extern_var])
pad_verifier.assert_no_pads_within_func()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_generate_constants.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: generate_constants pass"""
import math
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from .utils import (
make_module,
get_same_padding,
get_conv2d_qnn_params,
make_qnn_relu,
)
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def quantize_scale(scale):
multiplier, shift = math.frexp(scale)
multiplier_q31 = round(multiplier * (1 << 31))
return multiplier_q31, shift
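# A worked example of quantize_scale (illustrative; exact because 0.5 is representable
# in binary): math.frexp(0.5) == (0.5, 0), so
#   quantize_scale(0.5) == (round(0.5 * 2**31), 0) == (1073741824, 0)
# i.e. the float scale becomes a Q31 fixed-point multiplier plus a power-of-two shift,
# with scale == multiplier / 2**31 * 2**shift.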
class CheckGeneratedConstants(tvm.relay.ExprVisitor):
"""Provides methods to compare against expected quantization parameters"""
def __init__(self, enable_bias, multiplier, shift):
super().__init__()
self.num_constant_args_ = 0
self.enable_bias_ = enable_bias
self.multiplier_ = multiplier
self.shift_ = shift
def visit_call(self, call):
"""Tests if the multiplier and shift constants required by CMSIS-NN API were generated"""
super().visit_call(call)
if isinstance(call.op, tvm.ir.expr.GlobalVar):
multiplier = call.args[2]
shift = call.args[6] if self.enable_bias_ else call.args[5]
assert isinstance(
multiplier, relay.expr.Constant
), "Expected quantized multiplier at argument#3"
assert isinstance(
shift, relay.expr.Constant
), "Expected a constant while looking for quantized shift"
multiplier = multiplier.data.numpy()
shift = shift.data.numpy()
tvm.testing.assert_allclose(multiplier, self.multiplier_, atol=100, rtol=1e-10)
tvm.testing.assert_allclose(shift, self.shift_, atol=1, rtol=1e-5)
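# The argument positions checked above assume the call signature produced for the
# CMSIS-NN partitioned function: the quantized multiplier at args[2] and the shift at
# args[5] (args[6] when bias is enabled).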
def make_model(
shape,
kernel_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
kernel_dtype,
out_channels,
weight_format,
enable_bias,
relu_type,
):
"""Return a model and any parameters it may have"""
h_index = weight_format.index("H")
w_index = weight_format.index("W")
kernel_h = kernel_shape[h_index]
kernel_w = kernel_shape[w_index]
a = relay.var("input", shape=shape, dtype=dtype)
p = (0, 0, 0, 0)
if padding == "SAME":
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
a = relay.nn.pad(
a,
pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
pad_value=input_zero_point,
pad_mode="constant",
)
shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
rng = np.random.default_rng(12321)
weight = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
size=weight_shape,
dtype=kernel_dtype,
)
)
weight_const = relay.const(weight, kernel_dtype)
conv = relay.qnn.op.conv2d(
a,
weight_const,
input_zero_point=relay.const(input_zero_point, "int32"),
kernel_zero_point=relay.const(kernel_zero_point, "int32"),
input_scale=relay.const(input_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p,
out_dtype="int32",
)
bias = tvm.nd.array(rng.integers(0, high=10, size=(out_channels,), dtype="int32"))
bias_const = relay.const(bias, "int32")
last_op = relay.nn.bias_add(conv, bias_const, axis=3) if enable_bias else conv
requant_input_sc = [sc * input_scale for sc in kernel_scale]
last_op = relay.qnn.op.requantize(
last_op,
relay.const(requant_input_sc, "float32"),
relay.const(0, "int32"),
relay.const(output_scale, "float32"),
relay.const(output_zero_point, "int32"),
out_dtype=dtype,
)
last_op = make_qnn_relu(last_op, relu_type, output_scale, output_zero_point, dtype)
params = {"w": weight, "b": bias}
return last_op, params
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("enable_bias", [True, False])
@pytest.mark.parametrize(
"input_zero_point, input_scale, kernel_scale, out_channels",
[(10, 0.0128, [0.11, 0.22], 2), (-64, 1, [1, 0.0256, 1.37], 3)],
)
def test_op_int8(
enable_bias,
input_zero_point,
input_scale,
kernel_scale,
out_channels,
):
"""Tests for CMSIS-NN constants when the dtype is int8"""
ifm_shape = (1, 28, 28, 3)
padding = "VALID"
strides = (1, 1)
dilation = (1, 1)
kernel_size = (3, 3)
kernel_zero_point = 0
groups = 1
weight_format = "HWIO"
kernel_h = kernel_size[0]
kernel_w = kernel_size[1]
dtype = "int8"
relu_type = "RELU"
weight_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
output_scale, output_zero_point = get_conv2d_qnn_params(
weight_shape,
input_scale,
input_zero_point,
kernel_scale,
kernel_zero_point,
dtype,
dtype,
dtype,
False,
)
model, params = make_model(
ifm_shape,
weight_shape,
input_zero_point,
input_scale,
kernel_zero_point,
kernel_scale,
output_zero_point,
output_scale,
padding,
strides,
dilation,
groups,
dtype,
dtype,
out_channels,
weight_format,
enable_bias,
relu_type,
)
mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(mod, params)
multiplier_array = []
shift_array = []
for i in range(out_channels):
multiplier, shift = quantize_scale(input_scale * kernel_scale[i] / output_scale)
multiplier_array.append(multiplier)
shift_array.append(shift)
CheckGeneratedConstants(enable_bias, multiplier_array, shift_array).visit_function(
cmsisnn_mod["main"]
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_invalid_graphs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Tests invalid graphs"""
import numpy as np
import tvm
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import (
skip_if_no_reference_system,
get_range_for_dtype_str,
)
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
def test_empty_function():
"""Test partitioned function without composite function"""
original_model = """
#[version = "0.0.5"]
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
add(%data, %data)
}
"""
cmsisnn_model = """
#[version = "0.0.5"]
def @tvmgen_default_cmsis_nn_main_1(%i1: Tensor[(16, 29), int8], Inline=1, Compiler="cmsis-nn", global_symbol="tvmgen_default_cmsis_nn_main_1", Primitive=1) -> Tensor[(16, 29), int8] {
add(%i1, %i1)
}
def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
%1 = @tvmgen_default_cmsis_nn_main_1(%data) /* ty=Tensor[(16, 29), int8] */;
%1
}
"""
orig_mod = tvm.parser.fromtext(original_model)
cmsisnn_mod = tvm.parser.fromtext(cmsisnn_model)
params = {}
# validate the output
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
rng = np.random.default_rng(12345)
inputs = {"data": rng.integers(in_min, high=in_max, size=(16, 29), dtype=dtype)}
outputs = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=outputs,
params=params,
output_tolerance=0,
),
test_runner,
interface_api,
use_unpacked_api,
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN: testing with networks"""
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import skip_if_no_reference_system, get_range_for_dtype_str
# pylint: disable=import-outside-toplevel
def _convert_to_relay(
tflite_model_buf,
input_data,
input_node,
):
"""Converts TFLite model to Relay module and params"""
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
try:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except ImportError:
raise ImportError("The tflite package must be installed")
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
shape_dict = {}
dtype_dict = {}
for i, name in enumerate(input_node):
shape_dict[name] = input_data[i].shape
dtype_dict[name] = input_data[i].dtype.name
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
)
return mod, params
@skip_if_no_reference_system
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("test_runner", [AOT_CORSTONE300_RUNNER, AOT_USMP_CORSTONE300_RUNNER])
def test_cnn_small(test_runner):
"""Download a small network and tests TVM via CMSIS-NN output against TFLite output"""
# download the model
base_url = (
"https://github.com/ARM-software/ML-zoo/raw/"
"48a22ee22325d15d2371a6df24eb7d67e21dcc97"
"/models/keyword_spotting/cnn_small/tflite_int8"
)
file_to_download = "cnn_s_quantized.tflite"
file_saved = "cnn_s_quantized_15Dec2021.tflite"
model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)
with open(model_file, "rb") as f:
tflite_model_buf = f.read()
input_shape = (1, 490)
dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
rng = np.random.default_rng(12345)
input_data = rng.integers(in_min, high=in_max, size=input_shape, dtype=dtype)
orig_mod, params = _convert_to_relay(tflite_model_buf, input_data, "input")
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod, params)
# validate CMSIS-NN output against CPU output
interface_api = "c"
use_unpacked_api = True
inputs = {"input": input_data}
params = {}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=params,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Pooling"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
generate_ref_data,
AOTTestModel,
compile_and_run,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_range_for_dtype_str,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def make_model(
pool_op,
shape=(1, 28, 28, 12),
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
dtype="int8",
scale=1,
zero_point=-33,
relu_type="RELU",
layout="NHWC",
input_op=None,
):
"""Return a model and any parameters it may have,
all parameters are defaulted to known good values
"""
if input_op:
op = input_op
else:
op = relay.var("input", shape=shape, dtype=dtype)
pad_ = (0, 0, 0, 0)
if padding == "SAME":
dilation = (1, 1)
pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation, strides)
op = relay.nn.pad(
op,
pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
pad_value=zero_point,
pad_mode="constant",
)
if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
op = relay.cast(op, "int32")
op = pool_op(
op, pool_size=pool_size, strides=strides, padding=pad_, ceil_mode=True, layout=layout
)
if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
op = relay.cast(op, dtype)
op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)
return op
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("in_shape", [(1, 28, 28, 12), (1, 64, 100, 4)])
@pytest.mark.parametrize(
"pool_size, strides, padding", [((3, 3), (2, 2), "SAME"), ((2, 2), (1, 1), "VALID")]
)
@pytest.mark.parametrize("relu_type", ["NONE", "RELU"])
@pytest.mark.parametrize("pool_type", [relay.nn.max_pool2d, relay.nn.avg_pool2d])
@pytest.mark.parametrize("zero_point, scale", [(-34, 0.0256)])
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(
in_shape,
pool_size,
strides,
padding,
relu_type,
pool_type,
zero_point,
scale,
compiler_cpu,
cpu_flags,
):
"""Tests QNN pooling op for int8 inputs"""
interface_api = "c"
use_unpacked_api = True
dtype = "int8"
model = make_model(
pool_op=pool_type,
shape=in_shape,
pool_size=pool_size,
strides=strides,
padding=padding,
scale=scale,
zero_point=zero_point,
relu_type=relu_type,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
np.random.seed(0)
inputs = {
"input": np.random.randint(in_min, high=in_max, size=in_shape, dtype="int8"),
}
output_list = generate_ref_data(orig_mod["main"], inputs)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=None,
output_tolerance=1,
),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(
"pool_size, strides, padding", [((3, 3), (2, 2), "SAME"), ((2, 2), (1, 1), "VALID")]
)
@pytest.mark.parametrize("relu_type", ["NONE", "RELU"])
def test_int8_pool_with_float32_input(
pool_size,
strides,
padding,
relu_type,
):
"""Tests QNN maxpool partitions with float32 input"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
in_shape = (1, 28, 28, 12)
zero_point, scale = (-34, 0.0256)
input_ = relay.var("input", shape=in_shape, dtype="float32")
op = relay.op.add(input_, input_)
op = relay.qnn.op.quantize(op, relay.const(scale), relay.const(zero_point), -1, "int8")
model = make_model(
pool_op=relay.nn.max_pool2d,
shape=in_shape,
pool_size=pool_size,
strides=strides,
padding=padding,
scale=scale,
zero_point=zero_point,
relu_type=relu_type,
input_op=op,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
np.random.seed(0)
inputs = {"input": np.random.uniform(0, 1, in_shape).astype("float32")}
output_list = generate_ref_data(orig_mod["main"], inputs)
compile_and_run(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=None,
output_tolerance=1,
),
test_runner,
interface_api,
use_unpacked_api,
)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_datatype(op):
"""Checks CMSIS-NN partitioning for non int8 dtype"""
model = make_model(pool_op=op, dtype="int64")
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_batch_size(op):
"""Checks CMSIS-NN partitioning when batch size is not 1"""
model = make_model(
pool_op=op,
shape=(2, 28, 28, 12),
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("op", [relay.nn.avg_pool2d, relay.nn.max_pool2d])
def test_invalid_layout(op):
"""Checks CMSIS-NN partitioning when layout is not NHWC"""
model = make_model(pool_op=op, layout="NCHW")
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_remove_reshapes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Reshape removal"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import (
generate_ref_data,
AOTTestModel,
compile_models,
run_and_check,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_range_for_dtype_str,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
)
def make_model(
pool_op,
shape=(1, 28, 28, 12),
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
dtype="int8",
scale=1,
zero_point=-33,
relu_type="RELU",
layout="NHWC",
input_op=None,
):
"""Return a model and any parameters it may have,
all parameters are defaulted to known good values
"""
if input_op:
op = input_op
else:
op = relay.var("input", shape=shape, dtype=dtype)
pad_ = (0, 0, 0, 0)
if padding == "SAME":
dilation = (1, 1)
pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation, strides)
op = relay.nn.pad(
op,
pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
pad_value=zero_point,
pad_mode="constant",
)
if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
op = relay.cast(op, "int32")
op = pool_op(
op, pool_size=pool_size, strides=strides, padding=pad_, ceil_mode=True, layout=layout
)
if pool_op.__name__ == relay.nn.avg_pool2d.__name__:
op = relay.cast(op, dtype)
op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)
return op
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
def test_reshape_removal(padding):
"""Tests reshape is removed from the network"""
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
in_shape = (1, 28, 28, 12)
pool_size = (3, 3)
strides = (2, 2)
relu_type = "NONE"
zero_point, scale = (-34, 0.0256)
max_pool = make_model(
pool_op=relay.nn.max_pool2d,
shape=in_shape,
pool_size=pool_size,
strides=strides,
padding=padding,
scale=scale,
zero_point=zero_point,
relu_type=relu_type,
)
new_shape = (1, 28, 28, 3) if padding == "VALID" else (1, 30, 30, 3)
reshape = relay.reshape(max_pool, newshape=new_shape)
model = make_model(
pool_op=relay.nn.avg_pool2d,
shape=new_shape,
pool_size=pool_size,
strides=strides,
padding=padding,
scale=scale,
zero_point=zero_point,
relu_type=relu_type,
input_op=reshape,
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# generate reference output
rng = np.random.default_rng(12345)
in_min, in_max = get_range_for_dtype_str("int8")
inputs = {"input": rng.integers(in_min, high=in_max, size=in_shape, dtype="int8")}
output_list = generate_ref_data(orig_mod["main"], inputs, params=None)
    # compile the model so the lowered IRModules can be inspected for leftover reshapes
compiled_models = compile_models(
AOTTestModel(
module=cmsisnn_mod,
inputs=inputs,
outputs=output_list,
params=None,
output_tolerance=1,
),
interface_api,
use_unpacked_api,
pass_config=test_runner.pass_config,
)
main_mod = None
for target, mod in compiled_models[0].executor_factory.lowered_ir_mods.items():
if target.kind.name == "c":
main_mod = mod
# when padding="SAME", extra padding is introduced which causes Reshape to be fused with the
# Pad. RemoveReshapes pass cannot remove a fused Reshape. Whereas padding="VALID" doesn't need
# an extra Pad layer. In this case, the pass removes the Reshape from the graph.
reshapes_present = any(["reshape" in gv.name_hint for gv in main_mod.get_global_vars()])
check_reshapes = reshapes_present if padding == "SAME" else not reshapes_present
expected_reshapes = "a" if padding == "SAME" else "No"
    assert check_reshapes, "Expecting {} reshape layer(s).".format(expected_reshapes)
# validate the output
run_and_check(
models=compiled_models,
runner=test_runner,
interface_api=interface_api,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_scalar_to_tensor_constant.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: scalar_to_tensor_constant pass"""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def generate_variable(name, shape, dtype="int8"):
return relay.var(name, shape=shape, dtype=dtype)
def make_binary_op(
op,
input_0,
input_1,
input_0_scale,
input_0_zero_point,
input_1_scale,
input_1_zero_point,
out_scale=1.0 / 256,
out_zero_point=-128,
):
"""Create a Relay Function / network model"""
return op(
input_0,
input_1,
relay.const(input_0_scale, "float32"),
relay.const(input_0_zero_point, "int32"),
relay.const(input_1_scale, "float32"),
relay.const(input_1_zero_point, "int32"),
relay.const(out_scale, "float32"),
relay.const(out_zero_point, "int32"),
)
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
"""Provides method to test number of scalar constants present in a function"""
def __init__(self):
super().__init__()
self.num_constants_ = 0
def visit_call(self, call):
super().visit_call(call)
for arg in call.args:
if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0:
self.num_constants_ += 1
def check_num_constants(self):
assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
def set_composite_func_attr(func, name):
func = func.with_attr("Composite", name)
return func
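# Note: the two helpers above hand-craft the "Composite", "Compiler" and "global_symbol"
# attributes that AnnotateTarget/PartitionGraph would normally attach, so that the pass
# under test can be exercised on manually built modules.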
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_0():
"""Tests conversion to tensor constant when first operand is a scalar"""
dtype = "int8"
shape = (8, 8)
operand0 = generate_variable("operand0", None, dtype)
operand1 = generate_variable("operand1", shape, dtype)
binary_op = make_binary_op(
relay.qnn.op.add,
operand0,
operand1,
input_0_scale=0.0128,
input_0_zero_point=32,
input_1_scale=0.256,
input_1_zero_point=-64,
)
local_func = relay.Function([operand0, operand1], binary_op, relay.TensorType(shape, dtype))
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
arg0 = relay.expr.const(3, dtype)
arg1 = relay.var("arg1", shape=shape, dtype=dtype)
call_local_func = relay.Call(local_func, [arg0, arg1])
extern_func = relay.Function([arg1], call_local_func, relay.TensorType(shape, dtype))
x = relay.var("x", shape=shape, dtype=dtype)
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [x])
main_func = relay.Function([x], call_extern_func, relay.TensorType(shape, dtype))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
mod = relay.transform.InferType()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[global_var].body)
assert (
check_for_constants.num_constants_ == 1
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_1():
"""Tests conversion to tensor constant when second operand is a scalar"""
dtype = "int8"
shape = (8, 8)
operand0 = generate_variable("operand0", shape, dtype)
operand1 = generate_variable("operand1", None, dtype)
binary_op = make_binary_op(
relay.qnn.op.add,
operand0,
operand1,
input_0_scale=0.0128,
input_0_zero_point=32,
input_1_scale=0.256,
input_1_zero_point=-64,
)
local_func = relay.Function([operand0, operand1], binary_op, relay.TensorType(shape, dtype))
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
arg0 = relay.var("arg0", shape=shape, dtype=dtype)
arg1 = relay.expr.const(3, dtype)
call_local_func = relay.Call(local_func, [arg0, arg1])
extern_func = relay.Function([arg0], call_local_func, relay.TensorType(shape, dtype))
x = relay.var("x", shape=shape, dtype=dtype)
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [x])
main_func = relay.Function([x], call_extern_func, relay.TensorType(shape, dtype))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
mod = relay.transform.InferType()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[global_var].body)
assert (
check_for_constants.num_constants_ == 1
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_primary_operands_all_scalars():
"""Tests conversion to tensor constants all operands are scalars"""
dtype = "int8"
shape = None
operand0 = generate_variable("operand0", None, dtype)
operand1 = generate_variable("operand1", None, dtype)
binary_op = make_binary_op(
relay.qnn.op.add,
operand0,
operand1,
input_0_scale=0.0128,
input_0_zero_point=32,
input_1_scale=0.256,
input_1_zero_point=-64,
)
local_func = relay.Function([operand0, operand1], binary_op, relay.TensorType(shape, dtype))
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
arg0 = relay.expr.const(7, dtype)
arg1 = relay.expr.const(3, dtype)
call_local_func = relay.Call(local_func, [arg0, arg1])
extern_func = relay.Function([], call_local_func, relay.TensorType(shape, dtype))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [])
main_func = relay.Function([], call_extern_func, relay.TensorType(shape, dtype))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
new_mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(mod[global_var].body, new_mod[global_var].body)
@tvm.testing.requires_cmsisnn
def test_all_primary_operands_tensor_constants():
"""Tests conversion to tensor constants all operands are tensors"""
dtype = "int8"
shape = (1, 3, 3, 32)
operand0 = generate_variable("operand0", shape, dtype)
operand1 = generate_variable("operand1", shape, dtype)
binary_op = make_binary_op(
relay.qnn.op.add,
operand0,
operand1,
input_0_scale=0.0128,
input_0_zero_point=32,
input_1_scale=0.256,
input_1_zero_point=-64,
)
local_func = relay.Function([operand0, operand1], binary_op, relay.TensorType(shape, dtype))
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
rng = np.random.default_rng(12345)
arg0 = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
arg1 = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
call_local_func = relay.Call(local_func, [arg0, arg1])
extern_func = relay.Function([], call_local_func, relay.TensorType(shape, dtype))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [])
main_func = relay.Function([], call_extern_func, relay.TensorType(shape, dtype))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
new_mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(mod[global_var].body, new_mod[global_var].body)
@tvm.testing.requires_cmsisnn
def test_duplicate_constant_arguments():
"""Tests the pass when repeating operands are arguments to the binary op"""
dtype = "int8"
shape = (1, 3, 3, 32)
operand0 = generate_variable("operand0", shape, dtype)
operand1 = generate_variable("operand1", shape, dtype)
binary_op = make_binary_op(
relay.qnn.op.add,
operand0,
operand0,
input_0_scale=0.0128,
input_0_zero_point=32,
input_1_scale=0.256,
input_1_zero_point=-64,
)
local_func = relay.Function([operand0, operand1], binary_op, relay.TensorType(shape, dtype))
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
rng = np.random.default_rng(12345)
arg0 = relay.const(rng.integers(-128, high=127, size=shape, dtype=dtype))
call_local_func = relay.Call(local_func, [arg0, arg0])
extern_func = relay.Function([], call_local_func, relay.TensorType(shape, dtype))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "cmsis-nn", global_var.name_hint)
call_extern_func = relay.Call(global_var, [])
main_func = relay.Function([], call_extern_func, relay.TensorType(shape, dtype))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
new_mod = relay.transform.InferType()(mod)
assert tvm.ir.structural_equal(mod[global_var].body, new_mod[global_var].body)
@tvm.testing.requires_cmsisnn
def test_non_cmsisnn_ext_func():
"""Non CMSISNN functions should not be altered."""
def get_mod():
operand1 = relay.var("operand1", shape=None)
operand2 = relay.var("operand2", shape=None)
binary_op = operand1 + operand2
local_func = relay.Function(
[operand1, operand2], binary_op, relay.TensorType((), "float32")
)
local_func = set_composite_func_attr(local_func, "cmsis-nn.qnn_add")
arg0 = relay.expr.const(5, "float32")
arg1 = relay.expr.const(3, "float32")
call_local_func = relay.Call(local_func, [arg0, arg1])
extern_func = relay.Function([], call_local_func, relay.TensorType((), "float32"))
global_var = relay.GlobalVar("external_function")
extern_func = set_external_func_attr(extern_func, "foo", global_var.name_hint)
call_extern_func = relay.Call(global_var, [])
main_func = relay.Function([], call_extern_func, relay.TensorType((), "float32"))
main_var = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[global_var] = extern_func
mod[main_var] = main_func
mod = relay.transform.InferType()(mod)
return mod
expected = get_mod()["external_function"].body
actual = ScalarToTensorConstants()(get_mod())["external_function"].body
assert tvm.ir.structural_equal(expected, actual)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/test_softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Softmax"""
import itertools
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from .utils import (
skip_if_no_reference_system,
make_module,
get_range_for_dtype_str,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
)
def make_model(
shape, in_dtype, out_dtype, in_zero_point, in_scale, out_zero_point=-128, out_scale=1.0 / 256
):
"""Create a Relay Function / network model"""
a = relay.var("in0", shape=shape, dtype=in_dtype)
dequantize = relay.qnn.op.dequantize(
a,
input_scale=relay.const(in_scale, "float32"),
input_zero_point=relay.const(in_zero_point, "int32"),
)
softmax = relay.nn.softmax(dequantize)
model = relay.qnn.op.quantize(
softmax,
output_scale=relay.const(out_scale, "float32"),
output_zero_point=relay.const(out_zero_point, "int32"),
out_dtype=out_dtype,
)
return model
@skip_if_no_reference_system
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize(["zero_point", "scale"], [[33, 0.256], [-64, 0.0128]])
@pytest.mark.parametrize(
"compiler_cpu, cpu_flags", [("cortex-m55", "+nomve"), ("cortex-m55", ""), ("cortex-m7", "")]
)
def test_op_int8(zero_point, scale, compiler_cpu, cpu_flags):
"""Tests int8 QNN Softmax for CMSIS-NN"""
interface_api = "c"
use_unpacked_api = True
dtype = "int8"
shape = [1, 16, 16, 3]
model = make_model(shape, dtype, dtype, zero_point, scale)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
assert_partitioned_function(orig_mod, cmsisnn_mod)
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
np.random.seed(0)
input_data = np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)
inputs = {"in0": input_data}
params = {}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
create_test_runner(compiler_cpu, cpu_flags),
interface_api,
use_unpacked_api,
)
def parameterize_for_invalid_model(test):
"""Generates parameters for non int8 input and output of Softmax"""
in_dtype = ["uint8", "int8"]
out_dtype = ["uint8", "int8"]
zero_point = [-128, 64]
scale = [1.0 / 256, 0.2]
out_zero_point = [-128, 33]
out_scale = [1.0 / 256, 0.2]
all_combinations = itertools.product(
in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale
)
all_combinations = filter(
lambda parameters: not (
parameters[0] == "int8"
and parameters[1] == "int8"
and parameters[4] == -128
and parameters[5] == 1.0 / 256
),
all_combinations,
)
return pytest.mark.parametrize(
["in_dtype", "out_dtype", "zero_point", "scale", "out_zero_point", "out_scale"],
all_combinations,
)(test)
@parameterize_for_invalid_model
@tvm.testing.requires_cmsisnn
def test_invalid_parameters(in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale):
"""Tests for non int8 input and output of Softmax"""
model = make_model(
[1, 16, 16, 3], in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
assert_no_external_function(cmsisnn_mod)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cmsisnn/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN functions for testing networks"""
import math
from typing import List, Union, Tuple
import numpy as np
import tvm
from tvm import relay
from tvm.testing.aot import AOTTestRunner
def skip_if_no_reference_system(func):
return tvm.testing.skip_if_32bit(reason="Reference system unavailable in i386 container")(func)
def count_num_calls(mod):
"""Counts number of CallNode(s) in the IRModule"""
class CallCounter(relay.ExprVisitor):
def __init__(self):
super().__init__()
self.count = 0
def visit_call(self, call):
if isinstance(call.op, tvm.ir.Op):
self.count += 1
super().visit_call(call)
counter = CallCounter()
for var in mod.get_global_vars():
counter.visit(mod[var.name_hint])
return counter.count
def assert_partitioned_function(orig_mod, cmsisnn_mod, expected_ops_unchanged=True):
"""
if KCompiler attribute is missing, this function raises an assertion.
Parameters
----------
orig_mod : IRModule
Pre-partitioning module
cmsisnn_mod : IRModule
Post-partitioning module
is_num_calls_same: bool
Are number of CallNode(s) before and after partitioning expected to be the same
"""
attrs = [
cmsisnn_mod[var.name_hint].attrs
for var in cmsisnn_mod.get_global_vars()
if cmsisnn_mod[var.name_hint].attrs
]
assert any(attrs), "At least one function with external attributes was expected."
compilers = [
key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
]
assert any(compilers), "Module does not contain function for cmsisnn target."
if expected_ops_unchanged:
assert count_num_calls(orig_mod) == count_num_calls(
cmsisnn_mod
), "Number of calls changed during partitioning"
def assert_no_external_function(mod):
attrs = [mod[var.name_hint].attrs for var in mod.get_global_vars() if mod[var.name_hint].attrs]
assert not any(attrs), "No function should have an external attribute."
def get_range_for_dtype_str(dtype):
"""
    Produces the min and max for a given data type.
Parameters
----------
dtype : str
a type string (e.g., int8)
Returns
-------
type_info.min : int
the minimum of the range
type_info.max : int
the maximum of the range
"""
try:
type_info = np.iinfo(dtype)
except ValueError:
type_info = np.finfo(dtype)
return type_info.min, type_info.max
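# For example, get_range_for_dtype_str("int8") returns (-128, 127); float dtype strings
# fall through to np.finfo.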
def make_module(func):
"""Creates IRModule from Function"""
func = relay.Function(relay.analysis.free_vars(func), func)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
return mod
def get_same_padding(in_shape, kernel, dilation, stride):
"""
Provides CMSIS-NN padding when output dim == input dim.
This is TFLu's "SAME" padding case.
"""
dilated_kernel_h = dilation[0] * (kernel[0] - 1) + 1
out = int(math.ceil(float(in_shape[0]) / float(stride[0])))
pad = max(0, (out - 1) * stride[0] + dilated_kernel_h - in_shape[0])
pad_top = pad // 2
pad_bottom = pad - pad_top
dilated_kernel_w = dilation[1] * (kernel[1] - 1) + 1
out = int(math.ceil(float(in_shape[1]) / float(stride[1])))
pad = max(0, (out - 1) * stride[1] + dilated_kernel_w - in_shape[1])
pad_left = pad // 2
pad_right = pad - pad_left
return [pad_top, pad_left, pad_bottom, pad_right]
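# A worked example (illustrative): for a 28x28 input, 3x3 kernel, dilation (1, 1) and
# stride (2, 2), the output dim is ceil(28 / 2) = 14 and the total pad per axis is
# max(0, (14 - 1) * 2 + 3 - 28) = 1, so this returns [0, 0, 1, 1] as
# [pad_top, pad_left, pad_bottom, pad_right].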
def get_kernel_bias_dtype(input_dtype):
"""
Returns (kernel_dtype, bias_dtype) based on input's dtype.
"""
# uint8 corresponds to an invalid case, so returning int types
# does not cause tests to break
if input_dtype in ("int8", "uint8"):
return ("int8", "int32")
elif input_dtype == "int16":
return ("int8", "int64")
raise ValueError("Invalid dtype provided to get_kernel_bias_dtype()")
def get_conv2d_qnn_params(
kernel_shape: List[int],
input_scale: float,
input_zp: int,
kernel_scale: Union[float, List[float]],
kernel_zp: int,
input_dtype: str = "int8",
kernel_dtype: str = "int8",
output_dtype: str = "int8",
is_depthwise: bool = False,
) -> Tuple[float, int]:
"""
Calculate the output quantization parameters for convolution based on the input and
    kernel quantization parameters and the data types.
Parameters
----------
kernel_shape : List[int]
shape of the kernel
input_scale : float
scale of the input tensor
input_zp : int
zero point of the input tensor
kernel_scale : Union[float, List[float]]
scale(s) of the kernel tensor
kernel_zp : int
zero point of the kernel tensor
is_depthwise : bool
whether it is a depthwise convolution
input_dtype : str
data type of the input tensor
kernel_dtype : str
data type of the kernel tensor
output_dtype : str
data type of the output tensor
Returns
-------
output_scale : float
scale of the output tensor
output_zp : int
zero point of the output tensor
"""
input_dtype_min, input_dtype_max = get_range_for_dtype_str(input_dtype)
input_max = input_scale * (input_dtype_max - input_zp)
input_min = input_scale * (input_dtype_min - input_zp)
kernel_dtype_min, kernel_dtype_max = get_range_for_dtype_str(kernel_dtype)
kernel_sc_max = np.max(kernel_scale)
kernel_max = kernel_sc_max * (kernel_dtype_max - kernel_zp)
kernel_sc_min = np.min(kernel_scale)
kernel_min = kernel_sc_min * (kernel_dtype_min - kernel_zp)
kernel_h = kernel_shape[1]
kernel_w = kernel_shape[2]
channels = kernel_shape[3]
num_elements = kernel_h * kernel_w * channels
# Adjust the result if it is a depthwise convolution
if is_depthwise:
num_elements = num_elements / channels
# The smallest and largest possible values in the unquantized output tensor
output_limits = [
kernel_max * input_max * num_elements,
kernel_min * input_max * num_elements,
kernel_min * input_min * num_elements,
kernel_max * input_min * num_elements,
]
output_max = max(output_limits)
output_min = min(output_limits)
output_dtype_min, output_dtype_max = get_range_for_dtype_str(output_dtype)
output_scale = (output_max - output_min) / (output_dtype_max - output_dtype_min)
output_zp = int(output_dtype_min - (output_min / output_scale))
return output_scale, output_zp
def make_qnn_relu(expr, fused_activation_fn, scale, zero_point, dtype):
"""Mimics convert_qnn_fused_activation_function from TFLite frontend"""
quantize = lambda x: float(int(round(x / scale)) + zero_point)
# Get min/max of the output dtype. This will be used to ensure that clip a_min/a_max are not
# beyond the dtype range.
qmin, qmax = get_range_for_dtype_str(dtype)
# The input expr is a quantized tensor with its scale and zero point. We calculate the
# suitable clip off points based on these scale and zero point.
if fused_activation_fn == "NONE":
return expr
if fused_activation_fn == "RELU6":
return tvm.relay.op.clip(expr, a_min=max(qmin, quantize(0)), a_max=min(qmax, quantize(6.0)))
if fused_activation_fn == "RELU_N1_TO_1":
return tvm.relay.op.clip(
expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0))
)
if fused_activation_fn == "RELU":
return tvm.relay.op.clip(expr, a_min=max(qmin, quantize(0.0)), a_max=qmax)
raise ValueError("Invalid argument provided with fused_activation_fn")
class CheckForPadsWithinCompositeFunc(tvm.relay.ExprVisitor):
"""Provides method to test number of pads present inside the function being visited."""
def __init__(self):
super().__init__()
self.num_pads_ = 0
def visit_call(self, call):
super().visit_call(call)
if (
isinstance(call, tvm.relay.Call)
and isinstance(call.op, tvm.ir.op.Op)
and call.op.name == "nn.pad"
):
self.num_pads_ += 1
def assert_no_pads_within_func(self):
assert self.num_pads_ == 0, "CMSIS-NN composite function should not have pads."
def assert_pads_within_func(self):
assert self.num_pads_ > 0, "Composite function should have pads within it."
def create_test_runner(compiler_cpu="cortex-m55", cpu_flags=""):
"""
Creates AOT test runner for CMSIS-NN tests.
Parameters
----------
compiler_cpu : str
Equivalent of gcc option mcpu
Options: cortex-m55, cortex-m7
cpu_flags: str
Disable Arm(R) Cortex(R)-M profile vector extension (mve)
Options:
          Arm(R) Cortex(R)-M55: when empty, +mve is set by default.
+nomve disables vector extensions.
Arm(R) Cortex(R)-M7 does not support mve.
"""
# cmsis_cpu is used to find out start up code inside CMSIS package
cmsis_cpu = "ARMCM7" if compiler_cpu == "cortex-m7" else "ARMCM55"
mfloat_abi = "soft" if compiler_cpu == "cortex-m7" else "hard"
return AOTTestRunner(
makefile="corstone300",
prologue="""
uart_init();
""",
includes=["uart.h"],
pass_config={
"relay.ext.cmsisnn.options": {
"mcpu": compiler_cpu + cpu_flags,
},
"tir.usmp.enable": True,
"tir.disable_storage_rewrite": True,
},
parameters={
"ARM_CPU": cmsis_cpu,
"MCPU": compiler_cpu,
"MCPU_FLAGS": cpu_flags,
"MFLOAT_ABI": mfloat_abi,
},
)
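# Typical usage (mirroring the parametrized tests above):
#   test_runner = create_test_runner("cortex-m55", "+nomve")
# which targets the Corstone-300 makefile with mcpu "cortex-m55+nomve" and USMP enabled.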
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_coreml_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from unittest import mock
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform
from tvm.contrib.target import coreml as _coreml
requires_coremltools = tvm.testing.requires_package("coremltools")
def _has_xcode():
try:
tvm.contrib.xcode.xcrun([])
return True
except FileNotFoundError:
pass
return False
def _create_graph():
shape = (10, 10)
mod = tvm.IRModule()
x = relay.var("x", shape=shape)
y = relay.var("y", shape=shape)
z = x + x
p = y * y
func = relay.Function([x, y], p - z)
mod["main"] = func
return mod
def _create_graph_annotated():
shape = (10, 10)
target = "coremlcompiler"
mod = tvm.IRModule()
# function 0
f0_i0 = relay.var(target + "_0_i0", shape=shape)
func0 = relay.Function([f0_i0], f0_i0 * f0_i0)
func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func0 = func0.with_attr("Compiler", target)
func0 = func0.with_attr("global_symbol", target + "_0")
gv0 = relay.GlobalVar(target + "_0")
mod[gv0] = func0
# function 2
f2_i0 = relay.var(target + "_2_i0", shape=shape)
func2 = relay.Function([f2_i0], f2_i0 + f2_i0)
func2 = func2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func2 = func2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func2 = func2.with_attr("Compiler", target)
func2 = func2.with_attr("global_symbol", target + "_2")
gv2 = relay.GlobalVar(target + "_2")
mod[gv2] = func2
mod = relay.transform.InferType()(mod)
# body
x = relay.var("x", shape=shape)
y = relay.var("y", shape=shape)
func = relay.Function([x, y], gv0(y) - gv2(x))
mod["main"] = func
mod = relay.transform.InferType()(mod)
return mod
@pytest.mark.xfail(
reason="Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901"
)
@tvm.testing.uses_gpu
@requires_coremltools
def test_annotate():
mod = _create_graph()
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)
expected = _create_graph_annotated()
assert tvm.ir.structural_equal(mod, expected, map_free_vars=True)
@pytest.mark.skipif(not _has_xcode(), reason="Xcode is not available")
@tvm.testing.uses_gpu
@requires_coremltools
def test_compile_and_run():
dev = tvm.cpu()
target = "llvm"
tol = 1e-3
with relay.build_config(opt_level=3):
lib = relay.build(_create_graph_annotated(), target=target)
m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
shape = (10, 10)
x_data = np.random.rand(*shape).astype("float32")
y_data = np.random.rand(*shape).astype("float32")
m.set_input("x", x_data)
m.set_input("y", y_data)
m.run()
out = tvm.nd.empty(shape, device=dev)
out = m.get_output(0, out)
expected = (y_data * y_data) - (x_data + x_data)
tvm.testing.assert_allclose(out.numpy(), expected, rtol=tol, atol=tol)
@mock.patch("tvm.contrib.coreml_runtime.create")
@mock.patch("tvm.contrib.xcode.compile_coreml")
def _construct_model(func, m1, m2):
mod = tvm.IRModule()
mod["main"] = func
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)
fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
for var, func in mod.functions.items():
if func.attrs and "Compiler" in func.attrs and func.attrs["Compiler"] == "coremlcompiler":
fcompile(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_add():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = x + x
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_multiply():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = x * x
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_clip():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.clip(x, a_min=0.0, a_max=1.0)
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_batch_flatten():
shape = (10, 10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.batch_flatten(x)
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_expand_dims():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.expand_dims(x, axis=0)
func = relay.Function([x], y)
_construct_model(func)
y = relay.expand_dims(x, axis=-1)
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_relu():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.relu(x)
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_softmax():
shape = (10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.softmax(x, axis=1)
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_conv2d():
x = relay.var("x", shape=(1, 3, 224, 224))
w = relay.const(np.zeros((16, 3, 3, 3), dtype="float32"))
y = relay.nn.conv2d(x, w, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
func = relay.Function([x], y)
_construct_model(func)
@tvm.testing.uses_gpu
@requires_coremltools
def test_global_avg_pool2d():
shape = (10, 10, 10, 10)
x = relay.var("x", shape=shape)
y = relay.nn.global_avg_pool2d(x)
func = relay.Function([x], y)
_construct_model(func)
if __name__ == "__main__":
test_annotate()
test_compile_and_run()
test_add()
test_multiply()
test_clip()
test_expand_dims()
test_relu()
test_batch_flatten()
test_softmax()
test_conv2d()
test_global_avg_pool2d()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_coreml_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import rpc
from tvm.contrib import utils, xcode, coreml_runtime
import pytest
import os
proxy_host = os.environ.get("TVM_IOS_RPC_PROXY_HOST", "127.0.0.1")
proxy_port = int(os.environ.get("TVM_IOS_RPC_PROXY_PORT", 9090))
destination = os.environ.get("TVM_IOS_RPC_DESTINATION", "")
key = "iphone"
@pytest.mark.skip("skip because coremltools is not available in CI")
def test_coreml_runtime():
import coremltools
from coremltools.models.neural_network import NeuralNetworkBuilder
def create_coreml_model():
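        # Build a two-input, two-output model: output0 = input0 + input1 (ADD)
        # and output1 = alpha * input0 (MULTIPLY).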
shape = (2,)
alpha = 2
inputs = [
("input0", coremltools.models.datatypes.Array(*shape)),
("input1", coremltools.models.datatypes.Array(*shape)),
]
outputs = [
("output0", coremltools.models.datatypes.Array(*shape)),
("output1", coremltools.models.datatypes.Array(*shape)),
]
builder = NeuralNetworkBuilder(inputs, outputs)
builder.add_elementwise(
name="Add", input_names=["input0", "input1"], output_name="output0", mode="ADD"
)
builder.add_elementwise(
name="Mul", alpha=alpha, input_names=["input0"], output_name="output1", mode="MULTIPLY"
)
return coremltools.models.MLModel(builder.spec)
def verify(coreml_model, model_path, dev):
out_spec = coreml_model.output_description._fd_spec
out_names = [spec.name for spec in out_spec]
# inference via coremltools
inputs = {}
for in_spec in coreml_model.input_description._fd_spec:
name = in_spec.name
shape = in_spec.type.multiArrayType.shape
inputs[name] = np.random.random_sample(shape)
coreml_outputs = [coreml_model.predict(inputs)[name] for name in out_names]
# inference via tvm coreml runtime
runtime = coreml_runtime.create("main", model_path, dev)
for name in inputs:
runtime.set_input(name, tvm.nd.array(inputs[name], dev))
runtime.invoke()
tvm_outputs = [runtime.get_output(i).numpy() for i in range(runtime.get_num_outputs())]
for c_out, t_out in zip(coreml_outputs, tvm_outputs):
np.testing.assert_almost_equal(c_out, t_out, 3)
def check_remote(coreml_model):
temp = utils.tempdir()
compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
xcode.popen_test_rpc(
proxy_host, proxy_port, key, destination=destination, libs=[compiled_model]
)
compiled_model = os.path.basename(compiled_model)
remote = rpc.connect(proxy_host, proxy_port, key=key)
dev = remote.cpu(0)
verify(coreml_model, compiled_model, dev)
def check_local(coreml_model):
temp = utils.tempdir()
compiled_model = xcode.compile_coreml(coreml_model, out_dir=temp.temp_dir)
dev = tvm.cpu(0)
verify(coreml_model, compiled_model, dev)
coreml_model = create_coreml_model()
check_remote(coreml_model)
check_local(coreml_model)
if __name__ == "__main__":
test_coreml_runtime()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cublas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
from tvm import relay
import numpy as np
from tvm.contrib import cublas
from tvm.contrib import cublaslt
from tvm.contrib import graph_executor
import tvm.testing
from tvm.relay.op.contrib import get_pattern_table
from tvm.relay.op.contrib.cublas import partition_for_cublas
def verify_matmul_add(in_dtype, out_dtype, rtol=1e-5):
n = 1024
l = 128
m = 236
A = te.placeholder((n, l), name="A", dtype=in_dtype)
B = te.placeholder((l, m), name="B", dtype=in_dtype)
C = cublas.matmul(A, B, dtype=out_dtype)
s = te.create_schedule(C.op)
def verify(target="cuda"):
if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("skip because extern function is not available")
return
dev = tvm.cuda(0)
f = tvm.build(s, [A, B, C], target)
a = tvm.nd.array(np.random.uniform(0, 128, size=(n, l)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(0, 128, size=(l, m)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), np.dot(a.numpy().astype(C.dtype), b.numpy().astype(C.dtype)), rtol=rtol
)
verify()
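# Round v up to the nearest multiple of d, e.g. roundoff(100, 32) == 128.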
def roundoff(v, d):
return int(np.floor((v + d - 1) / d) * d)
def verify_matmul_add_igemm(in_dtype, out_dtype, rtol=1e-5):
n = 1024
l = 1024
m = 1024
L = roundoff(l, 32)
N = roundoff(n, 8)
N_out = roundoff(n, 32)
A = te.placeholder((N, L), name="A", dtype=in_dtype)
B = te.placeholder((m, L), name="B", dtype=in_dtype)
# C has CUBLASLT_ORDER_COL32 layout, thus a different shape
C = cublaslt.matmul(A, B, False, True, m, N_out, dtype=out_dtype)
s = te.create_schedule(C.op)
def verify(target="cuda"):
if not tvm.get_global_func("tvm.contrib.cublaslt.matmul", True):
print("skip because extern function is not available")
return
dev = tvm.cuda(0)
f = tvm.build(s, [A, B, C], target)
a_old = np.random.uniform(0, 128, size=(n, l))
b_old = np.random.uniform(0, 128, size=(l, m))
# Transform a to become CUBLASLT_ORDER_COL4_4R2_8C layout
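        # (pad a to [N, L] with zeros, interleave even/odd row blocks, then tile
        # into 4x32 sub-blocks as required by the COL4_4R2_8C ordering)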
a_new = np.hstack((a_old.astype(A.dtype), np.zeros([n, L - l])))
a_new = np.vstack((a_new.astype(A.dtype), np.zeros([N - n, L])))
        a_even = np.vsplit(a_new[::2], N // 8)
        a_odd = np.vsplit(a_new[1::2], N // 8)
        a_new = [None] * (len(a_even) + len(a_odd))
        a_new[::2] = a_even
        a_new[1::2] = a_odd
        a_new = np.vstack(a_new)
        a_new = np.vstack(
            [
                np.vstack([np.vstack(np.hsplit(i, 8)).reshape([4, 32]) for i in np.vsplit(j, N // 4)])
                for j in np.hsplit(a_new, L // 32)
            ]
        )
a_new = a_new.reshape([N, L])
# Transform b to become CUBLASLT_ORDER_COL32 layout
        b_new = np.vstack(
            np.hsplit(np.hstack((b_old.T.astype(B.dtype), np.zeros([m, L - l]))), L // 32)
        )
b_new = b_new.reshape([m, L])
a = tvm.nd.array(a_new.astype(A.dtype), dev)
b = tvm.nd.array(b_new.astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((m, N_out), dtype=C.dtype), dev)
f(a, b, c)
# Transform output c from layout CUBLASLT_ORDER_COL32 to row major layout
c_out = c.numpy()
c_out = c_out.reshape([int(m * N_out / 32), 32])
c_out = np.hstack(np.vsplit(c_out, int(N_out / 32)))
c_out = c_out[:, :n]
c_out = c_out.T
tvm.testing.assert_allclose(
c_out, np.dot(a_old.astype(C.dtype), b_old.astype(C.dtype)), rtol=rtol
)
verify()
def verify_batch_matmul(Ashape, Bshape, Cshape, in_dtype, out_dtype, rtol=1e-5):
A = te.placeholder(Ashape, name="A", dtype=in_dtype)
B = te.placeholder(Bshape, name="B", dtype=in_dtype)
C = cublas.batch_matmul(A, B, dtype=out_dtype)
s = te.create_schedule(C.op)
dev = tvm.cuda(0)
f = tvm.build(s, [A, B, C], "cuda")
if "int" in in_dtype:
a = tvm.nd.array(np.random.uniform(1, 10, size=Ashape).astype(in_dtype), dev)
b = tvm.nd.array(np.random.uniform(1, 10, size=Bshape).astype(in_dtype), dev)
else:
a = tvm.nd.array(np.random.uniform(size=Ashape).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=Bshape).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(Cshape, dtype=C.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(),
np.matmul(a.numpy().astype(C.dtype), b.numpy().astype(C.dtype)).astype(C.dtype),
rtol=rtol,
)
@tvm.testing.requires_cuda
def test_matmul_add():
verify_matmul_add("float", "float", rtol=1e-3)
verify_matmul_add("float16", "float")
verify_matmul_add("float16", "float16", rtol=1e-2)
verify_matmul_add("int8", "int32")
@tvm.testing.requires_cuda
def test_matmul_add_igemm():
verify_matmul_add_igemm("int8", "int32")
@tvm.testing.requires_cuda
def test_batch_matmul():
if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("skip because extern function is not available")
return
verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float", "float")
verify_batch_matmul((16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float", "float")
verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float16", "float")
verify_batch_matmul((16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float16", "float")
verify_batch_matmul(
(16, 1024, 128), (16, 128, 236), (16, 1024, 236), "float16", "float16", rtol=1e-2
)
verify_batch_matmul(
(16, 1024, 128), (1, 128, 236), (16, 1024, 236), "float16", "float16", rtol=1e-2
)
verify_batch_matmul((16, 1024, 128), (16, 128, 236), (16, 1024, 236), "int8", "int32")
def _verify_cublas_relay(expr):
np.random.seed(42)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
func = mod["main"]
cublas_mod = partition_for_cublas(mod)
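    # Expect exactly two global functions after partitioning: "main" plus a single
    # cuBLAS-offloaded subgraph.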
assert len(cublas_mod.get_global_vars()) == 2
input_data = []
for param in func.params:
shape = [int(x) for x in param.checked_type.shape]
input_data.append(
(param.name_hint, np.random.uniform(0, 32, size=shape).astype(param.checked_type.dtype))
)
# Test against CPU reference
cuda_config = (tvm.target.cuda(), tvm.cuda(), cublas_mod)
cpu_config = (tvm.target.Target("llvm"), tvm.cpu(), mod)
outputs = []
for target, dev, test_mod in [cuda_config, cpu_config]:
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(test_mod, target=target, target_host=cpu_config[0])
module = graph_executor.GraphModule(lib["default"](dev))
for name, data in input_data:
module.set_input(name, tvm.nd.array(data, dev))
module.run()
out_type = func.body.checked_type
outputs.append(
module.get_output(0, tvm.nd.empty(out_type.shape, dtype=out_type.dtype)).numpy()
)
tvm.testing.assert_allclose(
outputs[0],
outputs[1],
rtol=1e-2,
)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,m,k,transpose_a,transpose_b",
[
(64, 128, 32, False, False),
(17, 32, 16, True, False),
(24, 17, 12, False, True),
(96, 4, 17, True, True),
],
)
@pytest.mark.parametrize(
"in_dtype,out_dtype",
[
("float32", "float32"),
("float16", "float16"),
("float16", "float32"),
("int8", "int32"),
("float64", "float64"),
("int8", "float32"),
],
)
def test_relay_cublas_matmul(n, m, k, in_dtype, out_dtype, transpose_a, transpose_b):
unsupported_configs = [
(17, 32, 16, "int8", "float32", True, False),
(96, 4, 17, "int8", "float32", True, True),
(17, 32, 16, "int8", "int32", True, False),
(96, 4, 17, "int8", "int32", True, True),
]
if (n, m, k, in_dtype, out_dtype, transpose_a, transpose_b) in unsupported_configs:
pytest.skip("Unsupported parameters.")
a_shape = (k, n) if transpose_a else (n, k)
b_shape = (m, k) if transpose_b else (k, m)
a = tvm.relay.var("A", tvm.relay.TensorType(a_shape, in_dtype))
b = tvm.relay.var("B", tvm.relay.TensorType(b_shape, in_dtype))
# Directly use matmul because nn.matmul sometimes defers to nn.dense
matmul = relay.op.nn._make.matmul(a, b, None, out_dtype, transpose_a, transpose_b)
_verify_cublas_relay(matmul)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,m,k",
[
(64, 128, 32),
(17, 32, 16),
(24, 17, 12),
(96, 4, 17),
],
)
@pytest.mark.parametrize(
"in_dtype,out_dtype",
[
("float32", "float32"),
("float16", "float16"),
("float16", "float32"),
("int8", "int32"),
("float64", "float64"),
("int8", "float32"),
],
)
def test_relay_cublas_dense(n, m, k, in_dtype, out_dtype):
unsupported_configs = [
(96, 4, 17, "int8", "float32"),
(96, 4, 17, "int8", "int32"),
]
if (n, m, k, in_dtype, out_dtype) in unsupported_configs:
pytest.skip("Unsupported parameters.")
data = tvm.relay.var("data", tvm.relay.TensorType((n, k), in_dtype))
weight = tvm.relay.var("weight", tvm.relay.TensorType((m, k), in_dtype))
dense = relay.op.nn.dense(data, weight, out_dtype=out_dtype)
_verify_cublas_relay(dense)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,m,k,batch_a,batch_b,transpose_a,transpose_b",
[
(64, 128, 32, 16, 16, False, False),
(17, 32, 16, 16, 1, True, False),
(24, 17, 12, 17, 17, False, True),
(96, 4, 17, 53, 1, True, True),
],
)
@pytest.mark.parametrize(
"in_dtype,out_dtype",
[
("float32", "float32"),
("float16", "float16"),
("float16", "float32"),
("int8", "int32"),
("float64", "float64"),
("int8", "float32"),
],
)
def test_relay_cublas_batch_matmul(
n, m, k, batch_a, batch_b, in_dtype, out_dtype, transpose_a, transpose_b
):
unsupported_configs = [
(17, 32, 16, 16, 1, "int8", "float32", True, False),
(96, 4, 17, 53, 1, "int8", "float32", True, True),
(17, 32, 16, 16, 1, "int8", "int32", True, False),
(96, 4, 17, 53, 1, "int8", "int32", True, True),
]
if (
n,
m,
k,
batch_a,
batch_b,
in_dtype,
out_dtype,
transpose_a,
transpose_b,
) in unsupported_configs:
pytest.skip("Unsupported parameters.")
a_shape = (batch_a, k, n) if transpose_a else (batch_a, n, k)
b_shape = (batch_b, m, k) if transpose_b else (batch_b, k, m)
a = tvm.relay.var("A", tvm.relay.TensorType(a_shape, in_dtype))
b = tvm.relay.var("B", tvm.relay.TensorType(b_shape, in_dtype))
batch_matmul = relay.op.nn.batch_matmul(a, b, out_dtype, transpose_a, transpose_b)
_verify_cublas_relay(batch_matmul)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cudnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.contrib import cudnn
from tvm.contrib.nvcc import have_fp16
from tvm.contrib import graph_executor
import numpy as np
import tvm.topi.testing
import tvm.testing
from tvm.relay.op.contrib.cudnn import partition_for_cudnn
requires_cudnn = pytest.mark.skipif(
tvm.get_global_func("tvm.contrib.cudnn.conv2d.forward", True) is None,
reason="CuDNN is not enabled",
)
def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):
in_channel = 4
out_channel = 16
filter_h = 3
filter_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
dilation_h = 1
dilation_w = 1
batch = 3
height = 32
width = 32
if data_dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
# schedule
if tensor_format == 0:
xshape = [batch, in_channel, height, width]
wshape = [out_channel, in_channel // groups, filter_h, filter_w]
else:
xshape = [batch, height, width, in_channel]
wshape = [out_channel, filter_h, filter_w, in_channel // groups]
X = te.placeholder(xshape, name="X", dtype=data_dtype)
W = te.placeholder(wshape, name="W", dtype=data_dtype)
Y = cudnn.conv_forward(
X,
W,
[pad_h, pad_w],
[stride_h, stride_w],
[dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
algo=-1,
groups=groups,
)
yshape = [x.value for x in Y.shape]
s = te.create_schedule(Y.op)
# validation
dev = tvm.cuda(0)
f = tvm.build(s, [X, W, Y], "cuda --host=llvm", name="conv2d")
x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
y_np = np.zeros(yshape).astype(data_dtype)
x = tvm.nd.array(x_np, dev)
w = tvm.nd.array(w_np, dev)
y = tvm.nd.array(y_np, dev)
if tensor_format == 0:
c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)
elif tensor_format == 1:
wt = w_np.transpose((1, 2, 3, 0)) # OHWI => HWIO
c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)
f(x, w, y)
tvm.testing.assert_allclose(y.numpy(), c_np, atol=1e-2, rtol=1e-2)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d():
verify_conv2d("float32", "float32", tensor_format=0)
verify_conv2d("float16", "float32", tensor_format=1)
verify_conv2d("float16", "float16", tensor_format=0)
verify_conv2d("float16", "float16", tensor_format=1)
verify_conv2d("int8", "int32", tensor_format=1)
verify_conv2d("float32", "float32", tensor_format=0, groups=2)
verify_conv2d("float16", "float32", tensor_format=1, groups=2)
verify_conv2d("float16", "float16", tensor_format=0, groups=2)
verify_conv2d("int8", "int32", tensor_format=1, groups=2)
def verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1):
in_channel = 4
out_channel = 16
filter_d = 3
filter_h = 3
filter_w = 3
pad_d = 1
pad_h = 1
pad_w = 1
stride_d = 1
stride_h = 1
stride_w = 1
dilation_d = 1
dilation_h = 1
dilation_w = 1
batch = 3
depth = 32
height = 32
width = 32
# schedule
xshape = [batch, in_channel, depth, height, width]
wshape = [out_channel, in_channel // groups, filter_d, filter_h, filter_w]
X = te.placeholder(xshape, name="X", dtype=data_dtype)
W = te.placeholder(wshape, name="W", dtype=data_dtype)
Y = cudnn.conv_forward(
X,
W,
[pad_d, pad_h, pad_w],
[stride_d, stride_h, stride_w],
[dilation_d, dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
algo=-1,
conv_dtype=conv_dtype,
groups=groups,
)
yshape = [x.value for x in Y.shape]
s = te.create_schedule(Y.op)
# validation
dev = tvm.cuda(0)
f = tvm.build(s, [X, W, Y], target="cuda --host=llvm", name="conv3d")
x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
y_np = np.zeros(yshape).astype(data_dtype)
x = tvm.nd.array(x_np, dev)
w = tvm.nd.array(w_np, dev)
y = tvm.nd.array(y_np, dev)
if tensor_format == 0:
c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)
else:
        raise AssertionError("For now, conv3d only supports tensor format 0 (NCDHW)")
f(x, w, y)
tvm.testing.assert_allclose(y.numpy(), c_np, atol=3e-5, rtol=1e-4)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv3d():
verify_conv3d("float32", "float32", tensor_format=0)
verify_conv3d("float32", "float32", tensor_format=0, groups=2)
def verify_softmax(shape, axis, dtype="float32", log_softmax=False):
cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax
testing_op = (
tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
)
A = te.placeholder(shape, dtype=dtype, name="A")
B = cudnn_op(A, axis)
s = te.create_schedule([B.op])
dev = tvm.cuda(0)
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = testing_op(a_np)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
def verify_softmax_4d(shape, dtype="float32", log_softmax=False):
cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax
testing_op = (
tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python
)
A = te.placeholder(shape, dtype=dtype, name="A")
B = cudnn_op(A, axis=1)
s = te.create_schedule([B.op])
dev = tvm.cuda(0)
n, c, h, w = shape
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = testing_op(a_np.transpose(0, 2, 3, 1).reshape(h * w, c))
b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
f = tvm.build(s, [A, B], target="cuda --host=llvm", name="softmax")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@requires_cudnn
def test_softmax():
verify_softmax((32, 10), -1)
verify_softmax((3, 4), -1)
verify_softmax((1, 5), -1, "float64")
verify_softmax_4d((1, 16, 256, 256))
verify_softmax_4d((1, 16, 256, 256), "float64")
verify_softmax((32, 10), -1, log_softmax=True)
verify_softmax((3, 4), -1, log_softmax=True)
verify_softmax((1, 5), -1, "float64", log_softmax=True)
verify_softmax_4d((1, 16, 256, 256), log_softmax=True)
verify_softmax_4d((1, 16, 256, 256), "float64", log_softmax=True)
def verify_conv2d_backward_data(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):
batch = 3
in_channel = 4
out_channel = 16
filter_h, filter_w = 3, 3
pad_h, pad_w = 1, 1
stride_h, stride_w = 1, 1
height, width = 32, 32
if tensor_format == 0:
xshape = [batch, in_channel, height, width]
wshape = [out_channel, in_channel, filter_h, filter_w]
        oshape = list(xshape)  # copy so that xshape is not mutated below
        oshape[1] = out_channel
ref_func = tvm.topi.testing.conv2d_transpose_nchw_python
else:
xshape = [batch, height, width, in_channel]
wshape = [out_channel, filter_h, filter_w, in_channel]
        oshape = list(xshape)  # copy so that xshape is not mutated below
        oshape[3] = out_channel
ref_func = lambda dy_np, w_np, strides, padding, out_pad: tvm.topi.testing.conv2d_transpose_nhwc_python(
dy_np, np.transpose(w_np, [1, 2, 3, 0]), "HWOI", strides, padding, out_pad
)
dy_np = np.random.uniform(-1, 1, oshape).astype(data_dtype)
w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
if data_dtype == "float16":
dx_np = ref_func(
dy_np.astype("float32"),
w_np.astype("float32"),
(stride_h, stride_w),
(pad_h, pad_w),
(0, 0),
)
dx_np = dx_np.astype("float16")
else:
dx_np = ref_func(dy_np, w_np, (stride_h, stride_w), (pad_h, pad_w), (0, 0))
dy = te.placeholder(oshape, name="dy", dtype=data_dtype)
w = te.placeholder(wshape, name="dw", dtype=data_dtype)
dx = cudnn.conv_backward_data(
dy,
w,
[pad_h, pad_w],
[stride_h, stride_w],
[1, 1],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
groups=1,
)
s = te.create_schedule(dx.op)
dev = tvm.cuda(0)
f = tvm.build(s, [dy, w, dx], "cuda --host=llvm", name="conv2d_backward_data")
dy = tvm.nd.array(dy_np, dev)
w = tvm.nd.array(w_np, dev)
dx = tvm.nd.array(dx_np, dev)
f(dy, w, dx)
tvm.testing.assert_allclose(dx.numpy(), dx_np, atol=tol, rtol=tol)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d_backward_data():
verify_conv2d_backward_data("float32", "float32", tensor_format=0, tol=1e-5)
verify_conv2d_backward_data("float32", "float32", tensor_format=1, tol=1e-2)
# The scipy convolve function does not support fp16, so the reference will be computed with
# fp32. Use larger tolerance to be on the safe side (1e-2 also seems mostly ok).
verify_conv2d_backward_data("float16", "float16", tensor_format=1, tol=1e-1)
def verify_conv2d_backward_filter(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):
batch = 3
in_channel = 4
out_channel = 16
filter_h, filter_w = 3, 3
pad_h, pad_w = 1, 1
stride_h, stride_w = 1, 1
height, width = 32, 32
if tensor_format == 0:
x_shape = [batch, in_channel, height, width]
dy_shape = [batch, out_channel, height, width]
else:
x_shape = [batch, height, width, in_channel]
dy_shape = [batch, height, width, out_channel]
x_np = np.random.uniform(-1, 1, x_shape).astype(data_dtype)
dy_np = np.random.uniform(-1, 1, dy_shape).astype(data_dtype)
dw_np = tvm.topi.testing.conv2d_backward_weight_python(
dy_np,
x_np,
(filter_h, filter_w),
(stride_h, stride_w),
(pad_h, pad_w),
"NCHW" if tensor_format == 0 else "NHWC",
)
x = te.placeholder(x_shape, name="x", dtype=data_dtype)
dy = te.placeholder(dy_shape, name="dy", dtype=data_dtype)
dw = cudnn.conv_backward_filter(
dy,
x,
(filter_h, filter_w),
[pad_h, pad_w],
[stride_h, stride_w],
[1, 1],
conv_mode=1,
tensor_format=tensor_format,
conv_dtype=conv_dtype,
)
s = te.create_schedule(dw.op)
dev = tvm.cuda(0)
f = tvm.build(s, [dy, x, dw], "cuda --host=llvm", name="conv2d_backward_filter")
x = tvm.nd.array(x_np, dev)
dy = tvm.nd.array(dy_np, dev)
dw = tvm.nd.array(dw_np, dev)
f(dy, x, dw)
tvm.testing.assert_allclose(dw.numpy(), dw_np, atol=tol, rtol=tol)
@tvm.testing.requires_gpu
@requires_cudnn
def test_conv2d_backward_filter():
verify_conv2d_backward_filter("float32", "float32", tensor_format=0, tol=1e-2)
verify_conv2d_backward_filter("float32", "float32", tensor_format=1, tol=1e-2)
test_kwargs_default_2d = {
"tensor_format": 0,
"pad": [1, 1],
"stride": [1, 1],
"dilation": [1, 1],
"x_shape": [16, 4, 32, 32],
"w_shape": [8, 4, 3, 3],
"groups": 1,
"conv_dtype": "float32",
"data_dtype": "float32",
}
test_kwargs_default_3d = {
"tensor_format": 0,
"pad": [1, 1, 1],
"stride": [1, 1, 1],
"dilation": [1, 1, 1],
"x_shape": [16, 4, 32, 32, 32],
"w_shape": [8, 4, 3, 3, 3],
"groups": 1,
"conv_dtype": "float32",
"data_dtype": "float32",
}
conv_output_shape_conditions = {
"2d_small": test_kwargs_default_2d,
"2d_large": {
**test_kwargs_default_2d,
"x_shape": [16, 32, 512, 1024],
"w_shape": [8, 32, 5, 5],
},
"2d_pad": {**test_kwargs_default_2d, "pad": [2, 3]},
"2d_stride": {**test_kwargs_default_2d, "stride": [2, 3]},
"2d_dilation": {**test_kwargs_default_2d, "dilation": [2, 3]},
"2d_groups": {**test_kwargs_default_2d, "groups": 4, "w_shape": [8, 1, 3, 3]},
"2d_NHWC": {
**test_kwargs_default_2d,
"tensor_format": 1,
"x_shape": [16, 32, 32, 4],
"w_shape": [8, 3, 3, 4],
},
"2d_NCHW_VECT_C": {
**test_kwargs_default_2d,
"tensor_format": 2,
"w_shape": [8, 16, 3, 3],
"data_dtype": "int8x4",
},
"3d_small": test_kwargs_default_3d,
"3d_large": {
**test_kwargs_default_3d,
"x_shape": [16, 32, 64, 128, 256],
"w_shape": [8, 32, 5, 5, 5],
},
"3d_pad": {**test_kwargs_default_3d, "pad": [2, 3, 4]},
"3d_stride": {**test_kwargs_default_3d, "stride": [2, 3, 4]},
"3d_dilation": {**test_kwargs_default_3d, "dilation": [2, 3, 4]},
"3d_groups": {**test_kwargs_default_3d, "groups": 4, "w_shape": [8, 1, 3, 3, 3]},
"3d_NCHW_VECT_C": {
**test_kwargs_default_3d,
"tensor_format": 2,
"w_shape": [8, 16, 3, 3, 3],
"data_dtype": "int8x4",
},
}
@pytest.fixture(
params=[pytest.param(kwargs, id=name) for name, kwargs in conv_output_shape_conditions.items()]
)
def conv_output_shape_kwargs(request):
return request.param
def _verify_cudnn_relay(expr):
np.random.seed(42)
mod = tvm.IRModule.from_expr(expr)
mod = relay.transform.InferType()(mod)
func = mod["main"]
cudnn_mod = partition_for_cudnn(mod)
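    # Expect exactly two global functions after partitioning: "main" plus a single
    # cuDNN-offloaded subgraph.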
assert len(cudnn_mod.get_global_vars()) == 2
input_data = []
for param in func.params:
shape = [int(x) for x in param.checked_type.shape]
input_data.append(
(
param.name_hint,
np.random.uniform(-32, 32, size=shape).astype(param.checked_type.dtype),
)
)
cuda_config = (tvm.target.cuda(), tvm.cuda(), cudnn_mod)
cpu_config = (tvm.target.Target("llvm"), tvm.cpu(), mod)
outputs = []
for target, dev, test_mod in [cuda_config, cpu_config]:
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(test_mod, target=target, target_host=cpu_config[0])
module = graph_executor.GraphModule(lib["default"](dev))
for name, data in input_data:
module.set_input(name, tvm.nd.array(data, dev))
module.run()
out_type = func.body.checked_type
outputs.append(
module.get_output(0, tvm.nd.empty(out_type.shape, dtype=out_type.dtype)).numpy()
)
tvm.testing.assert_allclose(
outputs[0],
outputs[1],
rtol=1e-3,
atol=30,
)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"shape,axis",
[
((200,), 0),
((13, 27), 0),
((44, 12, 67), 1),
((1, 16, 16, 8), 2),
((2, 4, 6, 8, 10), 3),
],
)
@pytest.mark.parametrize(
"dtype",
[
"float32",
"float16",
"float64",
],
)
def test_relay_cudnn_softmax(shape, axis, dtype):
x = tvm.relay.var("x", tvm.relay.TensorType(shape, dtype))
softmax = relay.op.nn.softmax(x, axis=axis)
_verify_cudnn_relay(softmax)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"shape,axis",
[
((32, 16), -1),
((13, 27), 1),
],
)
@pytest.mark.parametrize(
"dtype",
[
"float32",
"float16",
"float64",
],
)
def test_relay_cudnn_log_softmax(shape, axis, dtype):
x = tvm.relay.var("x", tvm.relay.TensorType(shape, dtype))
log_softmax = relay.op.nn.log_softmax(x, axis=axis)
_verify_cudnn_relay(log_softmax)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,h,w,ci,co,groups",
[
(1, 16, 20, 8, 16, 1),
(10, 17, 19, 16, 8, 4),
],
)
@pytest.mark.parametrize(
"kh,kw,padding",
[
(1, 1, (3, 1, 3, 1)),
(3, 3, (1, 2)),
(7, 2, (0, 0)),
],
)
@pytest.mark.parametrize(
"strides,dilation,dtype",
[
((1, 1), (1, 1), "float32"),
((2, 1), (2, 2), "float16"),
((3, 3), (1, 2), "float64"),
],
)
def test_relay_cudnn_conv2d(n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype):
data = tvm.relay.var("data", tvm.relay.TensorType((n, ci, h, w), dtype))
weight = tvm.relay.var("weight", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))
conv2d = relay.op.nn.conv2d(
data,
weight,
groups=groups,
channels=co,
kernel_size=(kh, kw),
strides=strides,
dilation=dilation,
padding=padding,
data_layout="NCHW",
kernel_layout="OIHW",
)
_verify_cudnn_relay(conv2d)
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
"n,h,w,ci,co,groups",
[
(1, 16, 20, 8, 16, 1),
(10, 17, 19, 16, 8, 4),
],
)
@pytest.mark.parametrize(
"kh,kw,padding,strides,dilation,dtype",
[
(1, 1, (3, 1, 3, 1), (1, 1), (1, 1), "float32"),
(3, 3, (1, 2), (2, 1), (2, 2), "float16"),
(7, 2, (0, 0), (3, 3), (1, 2), "float64"),
],
)
@pytest.mark.parametrize("activation", [True, False])
def test_relay_cudnn_conv2d_bias_act(
n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype, activation
):
data = tvm.relay.var("data", tvm.relay.TensorType((n, ci, h, w), dtype))
weight = tvm.relay.var("weight", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))
bias = relay.var("bias", relay.TensorType((co,), dtype))
conv2d = relay.op.nn.conv2d(
data,
weight,
groups=groups,
channels=co,
kernel_size=(kh, kw),
strides=strides,
dilation=dilation,
padding=padding,
data_layout="NCHW",
kernel_layout="OIHW",
)
out = relay.op.nn.bias_add(conv2d, bias)
if activation:
out = relay.op.nn.relu(out)
_verify_cudnn_relay(out)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_cutlass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import math
import tvm
from tvm import relay
from tvm.contrib.cudnn import conv_output_shape
import numpy as np
from tvm.runtime.vm import VirtualMachine
from tvm.relay.op.contrib.cutlass import partition_for_cutlass
from tvm.relay.transform import FirstOrderGradient, ToMixedPrecision, InferType
from tvm.contrib.cutlass import (
has_cutlass,
num_cutlass_partitions,
finalize_modules,
finalize_modules_vm,
)
import tvm.testing
logging.basicConfig(level=logging.INFO)
def has_cublas():
    return tvm.get_global_func("tvm.contrib.cublas.matmul", True) is not None
def get_ref_rt_mod(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
dev = tvm.device(target, 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev
def get_ref_vm(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target=target, params=params)
code, lib = vm_exec.save()
dev = tvm.device(target, 0)
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
return VirtualMachine(vm_exec, dev), dev
def get_output(rt_mod, names, inputs):
for name, inp in zip(names, inputs):
rt_mod.set_input(name, inp)
rt_mod.run()
    return rt_mod.get_output(0).numpy()
def get_output_vm(vm, names, inputs):
params = dict(zip(names, inputs))
return vm.invoke("main", **params).numpy()
def get_dense_with_shape(
data_shape, weight_shape, out_dtype="float16", data_dtype="float16", weight_dtype="float16"
):
data = relay.var("data", shape=data_shape, dtype=data_dtype)
weight = relay.var("weight", shape=weight_shape, dtype=weight_dtype)
return relay.nn.dense(data, weight, out_dtype=out_dtype)
def get_dense(M, N, K, out_dtype="float16", data_dtype="float16", weight_dtype="float16"):
return get_dense_with_shape((M, K), (N, K), out_dtype, data_dtype, weight_dtype)
def get_dense_bias(M, N, K, out_dtype="float16"):
dense = get_dense(M, N, K, out_dtype=out_dtype)
bias = relay.var("bias", shape=(N,), dtype=out_dtype)
return relay.nn.bias_add(dense, bias)
def get_dense_bias_relu(M, N, K, out_dtype="float16"):
return relay.nn.relu(get_dense_bias(M, N, K, out_dtype=out_dtype))
def get_dense_bias_gelu(M, N, K, out_dtype="float16"):
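    # GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))); computed below in the expanded
    # form (0.5 * erf(x / sqrt(2)) + 0.5) * x.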
bias_add = get_dense_bias(M, N, K, out_dtype)
mul = bias_add * relay.const((1.0 / math.sqrt(2.0)), dtype=out_dtype)
if out_dtype == "float16":
erf = relay.cast(relay.op.erf(relay.cast(mul, "float32")), "float16")
else:
erf = relay.op.erf(mul)
mul_half = erf * relay.const(0.5, dtype=out_dtype)
add = mul_half + relay.const(0.5, dtype=out_dtype)
return add * bias_add
def get_batch_matmul_with_shape(x_shape, y_shape, out_dtype="float16"):
x = relay.var("x", shape=x_shape, dtype="float16")
y = relay.var("y", shape=y_shape, dtype="float16")
return relay.nn.batch_matmul(x, y, out_dtype=out_dtype)
def get_batch_matmul(batch, M, N, K, out_dtype="float16"):
    return get_batch_matmul_with_shape((batch, M, K), (batch, N, K), out_dtype=out_dtype)
def get_conv2d_nchw(
d_shape,
w_shape,
padding,
strides=(1, 1),
out_dtype="float16",
data_dtype="float16",
weight_dtype="float16",
):
data = relay.var("data", shape=d_shape, dtype=data_dtype)
weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
out_channel = w_shape[0]
return relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=out_dtype,
)
def get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype="float16"):
conv2d = get_conv2d_nchw(d_shape, w_shape, padding, out_dtype=out_dtype)
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
return relay.nn.bias_add(conv2d, bias)
def silu(x):
return x * relay.sigmoid(x)
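# HardSwish(x) = x * clip(x + 3, 0, 6) / 6.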
def hardswish(x, out_dtype="float16"):
return x * (
relay.clip(x + relay.const(3, dtype=out_dtype), a_min=0, a_max=6)
/ relay.const(6, dtype=out_dtype)
)
def get_conv2d_nchw_bias_relu(d_shape, w_shape, padding, out_dtype="float16"):
return relay.nn.relu(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16"):
return relay.sigmoid(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return silu(conv_out)
def get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return hardswish(conv_out, out_dtype)
def get_conv2d_nchw_bias_residual(d_shape, w_shape, padding, out_dtype="float16"):
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
out_channel = w_shape[0]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
out_dtype=out_dtype,
)
bias_add = relay.nn.bias_add(conv2d, bias)
return bias_add, data
def get_conv2d_transpose_nchw(
d_shape,
w_shape,
padding,
output_padding,
strides,
out_dtype="float32",
data_dtype="float32",
weight_dtype="float32",
):
data = relay.var("data", shape=d_shape, dtype=data_dtype)
weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
out_channel = w_shape[1]
return relay.nn.conv2d_transpose(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
output_padding=output_padding,
strides=strides,
out_dtype=out_dtype,
)
def get_conv2d_backward_weight(
d_shape,
w_shape,
o_shape,
padding,
strides,
out_dtype="float32",
data_dtype="float32",
weight_dtype="float32",
):
grad = relay.var("grad", shape=o_shape, dtype=weight_dtype)
data = relay.var("data", shape=d_shape, dtype=data_dtype)
out_channel = o_shape[1]
return relay.nn.conv2d_backward_weight(
grad=grad,
data=data,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
strides=strides,
out_dtype=out_dtype,
)
def convert_conv2d_layout(mod, desired_layouts):
with tvm.transform.PassContext(opt_level=3):
seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
return seq(mod)
def get_random_ndarray(shape, dtype):
if dtype == "int8":
return np.random.randint(-128, 128, shape).astype(dtype)
elif dtype == "uint8":
return np.random.randint(0, 256, shape).astype(dtype)
return np.random.uniform(-1, 1, shape).astype(dtype)
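# Partition the module for CUTLASS, generate and profile kernels for the given SM
# architecture, and build a graph-executor module. With find_first_valid=True the
# profiler stops at the first applicable kernel instead of trying all candidates.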
def profile_and_build(
mod,
params,
sm,
split_k_slices=[1],
tmp_dir="./tmp",
use_fast_math=False,
use_3xtf32=True,
):
logging.info("before partitioning:\n%s", mod)
mod = partition_for_cutlass(mod)
logging.info("after partitioning:\n%s", mod)
num_cutlass_partition = num_cutlass_partitions(mod)
host = tvm.target.Target("llvm")
cuda = tvm.target.Target("cuda", host=host)
cutlass = tvm.target.Target(
{
"kind": "cutlass",
"sm": sm,
"use_3xtf32": use_3xtf32,
"split_k_slices": split_k_slices,
"profile_all_alignments": False,
"find_first_valid": True,
"use_multiprocessing": True,
"use_fast_math": use_fast_math,
"tmp_dir": tmp_dir,
},
host=host,
)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=[cuda, cutlass], params=params)
lib = finalize_modules(lib, "compile.so", tmp_dir)
dev = tvm.device("cuda", 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev, num_cutlass_partition
def profile_and_build_vm(
mod,
params,
sm,
split_k_slices=[1],
tmp_dir="./tmp",
use_fast_math=False,
use_3xtf32=True,
):
mod = partition_for_cutlass(mod)
num_cutlass_partition = num_cutlass_partitions(mod)
host = tvm.target.Target("llvm")
cuda = tvm.target.Target("cuda", host=host)
cutlass = tvm.target.Target(
{
"kind": "cutlass",
"sm": sm,
"use_3xtf32": use_3xtf32,
"split_k_slices": split_k_slices,
"profile_all_alignments": False,
"find_first_valid": True,
"use_multiprocessing": True,
"use_fast_math": use_fast_math,
"tmp_dir": tmp_dir,
},
host=host,
)
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target=[cuda, cutlass], params=params)
vm_exec = finalize_modules_vm(vm_exec, "compile.so", tmp_dir)
dev = tvm.device("cuda", 0)
return VirtualMachine(vm_exec, dev), dev, num_cutlass_partition
def verify_dense(
func,
M,
N,
K,
ref_target="cuda",
sm=80,
atol=1e-5,
rtol=1e-5,
run_benchmark=False,
data_dtype="float16",
weight_dtype="float16",
use_3xtf32=True,
):
assert has_cutlass()
if sm < 80 and data_dtype == "float32":
return
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
out_dtype = typ.dtype
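    # Fall back to the Relay VM when any dimension is dynamic (tvm.tir.Any).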
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
np_data = get_random_ndarray((M, K), data_dtype)
np_weight = get_random_ndarray((N, K), weight_dtype)
np_bias = get_random_ndarray((N,), out_dtype)
params = {"weight": np_weight, "bias": np_bias}
if use_vm:
if ref_target == "cuda" and out_dtype == "float16":
# Uncomment "return" below to see the accuracy difference of static vs dynamic TVM native fp16 dense
# The static one can use a tensorcore schedule, but the dynamic one cannot
rt_mod, dev = get_ref_vm(tvm.IRModule.from_expr(get_dense(M, N, K)), params)
num_partition = 1
logging.warning(
"The reference fp16 dense with dynamic shape using fp16 accumulation has accuracy issues."
)
return
else:
rt_mod, dev, num_partition = profile_and_build_vm(
mod, params, sm, use_3xtf32=use_3xtf32
)
rt_mod_ref, dev = get_ref_vm(mod, params, target=ref_target)
x = tvm.nd.array(np_data, device=dev)
out = get_output_vm(rt_mod, ["data"], [x])
ref_out = get_output_vm(rt_mod_ref, ["data"], [x])
else:
rt_mod_ref, dev = get_ref_rt_mod(mod, params, target=ref_target)
rt_mod, dev, num_partition = profile_and_build(mod, params, sm, use_3xtf32=use_3xtf32)
x = tvm.nd.array(np_data, device=dev)
out = get_output(rt_mod, ["data"], [x])
ref_out = get_output(rt_mod_ref, ["data"], [x])
assert num_partition > 0
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM with target %s:" % ref_target, rt_mod_ref.benchmark(dev, number=1, repeat=600))
def verify_batch_matmul(
func, batch, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
assert has_cutlass()
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
x_np = np.random.uniform(-1, 1, (batch, M, K)).astype("float16")
y_np = np.random.uniform(-1, 1, (batch, N, K)).astype("float16")
if use_vm:
rt_mod, dev, num_partition = profile_and_build_vm(mod, {}, sm)
rt_mod_ref, dev = get_ref_vm(mod, {}, target=ref_target)
assert num_partition > 0
x = tvm.nd.array(x_np, device=dev)
y = tvm.nd.array(y_np, device=dev)
out = get_output_vm(rt_mod, ["x", "y"], [x, y])
ref_out = get_output_vm(rt_mod_ref, ["x", "y"], [x, y])
else:
rt_mod, dev, num_partition = profile_and_build(mod, {}, sm)
rt_mod_ref, dev = get_ref_rt_mod(mod, {})
assert num_partition > 0
x = tvm.nd.array(x_np, device=dev)
y = tvm.nd.array(y_np, device=dev)
out = get_output(rt_mod, ["x", "y"], [x, y])
ref_out = get_output(rt_mod_ref, ["x", "y"], [x, y])
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM Tensorcore (no tuning):", rt_mod_ref.benchmark(dev, number=1, repeat=600))
M = 96
N = 64
K = 64
@tvm.testing.requires_cutlass
def test_dense():
verify_dense(get_dense(M, N, K), M, N, K)
verify_dense(get_dense(M, N, K, out_dtype="float32"), M, N, K)
# Test align1 case
verify_dense(get_dense_bias(M, N + 1, K), M, N + 1, K)
# int8
verify_dense(
get_dense(M, N, K, "int32", "int8", "int8"), M, N, K, data_dtype="int8", weight_dtype="int8"
)
dense_fp32 = get_dense(M, N, K, "float32", "float32", "float32")
# tf32
verify_dense(
dense_fp32,
M,
N,
K,
data_dtype="float32",
weight_dtype="float32",
use_3xtf32=False,
atol=1e-2,
rtol=1e-2,
)
# 3xtf32
verify_dense(
dense_fp32,
M,
N,
K,
data_dtype="float32",
weight_dtype="float32",
)
@tvm.testing.requires_cutlass
def test_dense_bias():
verify_dense(get_dense_bias(M, N, K), M, N, K)
verify_dense(get_dense_bias(M, N, K, out_dtype="float32"), M, N, K)
@tvm.testing.requires_cutlass
def test_dense_bias_relu():
verify_dense(get_dense_bias_relu(M, N, K), M, N, K)
verify_dense(get_dense_bias_relu(M, N, K, out_dtype="float32"), M, N, K)
@tvm.testing.requires_cutlass
def test_dense_bias_gelu():
verify_dense(get_dense_bias_gelu(M, N, K), M, N, K, atol=1e-3, rtol=1e-3)
verify_dense(get_dense_bias_gelu(M, N, K, out_dtype="float32"), M, N, K, atol=1e-3, rtol=1e-3)
@tvm.testing.requires_cutlass
def test_dense_dynamic():
data_shape = (relay.Any(), K)
weight_shape = (relay.Any(), K)
if has_cublas():
# TVM native fp16 dense (without tensorcore), using fp16 accum, seems to have accuracy issues
# Use cublas as a reference
verify_dense(
get_dense_with_shape(data_shape, weight_shape),
M,
N,
K,
ref_target="cuda -libs=cublas",
)
verify_dense(
get_dense_with_shape(data_shape, weight_shape, out_dtype="float32"),
M,
N,
K,
atol=1e-4,
rtol=1e-4,
)
@tvm.testing.requires_cutlass
def test_batch_matmul():
batch = 8
verify_batch_matmul(get_batch_matmul(batch, M, N, K), batch, M, N, K)
verify_batch_matmul(get_batch_matmul(batch, M, N, K, out_dtype="float32"), batch, M, N, K)
if has_cublas():
# Test dynamic shape batch_matmul
# AutoTVM does not seem to support it
x_shape = (relay.Any(), relay.Any(), K)
y_shape = (relay.Any(), relay.Any(), K)
verify_batch_matmul(
get_batch_matmul_with_shape(x_shape, y_shape),
batch,
M,
N,
K,
ref_target="cuda -libs=cublas",
)
def verify_conv2d_common(
expr_nchw, # can be dynamic batch
expr_ref, # always static batch
input_names,
inputs,
params,
sm=80,
split_k_slices=[1],
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=False,
run_benchmark=False,
use_fast_math=False,
ref_target="cuda",
use_vm=False,
):
assert has_cutlass()
if sm < 80 and inputs[0].dtype == "float32":
return
mod_nchw = tvm.IRModule.from_expr(expr_nchw)
mod_ref = tvm.IRModule.from_expr(expr_ref)
if use_vm:
profile_and_build_func = profile_and_build_vm
get_output_func = get_output_vm
ref_build_func = get_ref_vm
else:
profile_and_build_func = profile_and_build
get_output_func = get_output
ref_build_func = get_ref_rt_mod
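    # CUTLASS conv kernels operate on NHWC activations; convert each conv-like op
    # to an NHWC data layout with the matching weight layout before partitioning.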
mod_weight_ohwi = convert_conv2d_layout(
mod_nchw,
{
"nn.conv2d": ["NHWC", "OHWI"],
"nn.conv2d_transpose": ["NHWC", "IHWO"],
"nn.conv2d_backward_weight": ["NHWC", "OHWI"],
},
)
rt_mod, _, num_cutlass_partition = profile_and_build_func(
mod_weight_ohwi, params, sm, split_k_slices, use_fast_math=use_fast_math
)
out = get_output_func(rt_mod, input_names, inputs)
assert num_cutlass_partition > 0
if use_cudnn_ref:
rt_mod_ref, dev = ref_build_func(
convert_conv2d_layout(mod_ref, {"nn.conv2d": ["NHWC", "OHWI"]}),
params,
target="cuda -libs=cudnn",
)
else:
rt_mod_ref, dev = ref_build_func(
convert_conv2d_layout(mod_ref, {"nn.conv2d": ["NHWC", "HWIO"]}),
params,
target=ref_target,
)
ref_out = get_output_func(rt_mod_ref, input_names, inputs)
if run_benchmark:
print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
print("TVM Tensorcore (no tuning):", rt_mod_ref.benchmark(dev, number=1, repeat=600))
np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
def verify_conv2d(
expr_nchw, # can be dynamic batch
expr_ref, # always static batch
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=False,
run_benchmark=False,
use_fast_math=False,
data_dtype="float16",
weight_dtype="float16",
ref_target="cuda",
use_vm=False,
):
mod_nchw = tvm.IRModule.from_expr(expr_nchw)
typ = relay.transform.InferType()(mod_nchw)["main"].body.checked_type
use_vm = use_vm or any(isinstance(s, tvm.tir.Any) for s in typ.shape)
np_data = get_random_ndarray(d_shape, data_dtype)
np_weight = get_random_ndarray(w_shape, weight_dtype)
np_bias = get_random_ndarray((w_shape[0],), typ.dtype)
params = {"weight": np_weight, "bias": np_bias}
split_k_slices = [1]
return verify_conv2d_common(
expr_nchw,
expr_ref,
["data"],
[np_data],
params,
sm,
split_k_slices,
atol,
rtol,
use_cudnn_ref,
run_benchmark,
use_fast_math,
ref_target,
use_vm,
)
def verify_conv2d_backward_weight(
expr_nchw, # can be dynamic batch
expr_ref, # always static batch
grad_shape,
data_shape,
sm=80,
split_k_slices=[1],
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=False,
use_fast_math=False,
grad_dtype="float16",
data_dtype="float16",
ref_target="cuda",
use_vm=False,
):
np_grad = get_random_ndarray(grad_shape, grad_dtype)
np_data = get_random_ndarray(data_shape, data_dtype)
params = {}
input_names = ["grad", "data"]
return verify_conv2d_common(
expr_nchw,
expr_ref,
input_names,
[np_grad, np_data],
params,
sm,
split_k_slices,
atol,
rtol,
use_cudnn_ref,
False,
use_fast_math,
ref_target,
use_vm,
)
@tvm.testing.requires_cutlass
def test_conv2d():
padding = (1, 1)
for IC in [3, 16]:
d_shape = (16, IC, 32, 32)
w_shape = (32, IC, 3, 3)
mod_nchw = get_conv2d_nchw(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw,
mod_nchw,
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
use_cudnn_ref=(IC == 3), # The autotvm kernel has an accuracy issue with IC == 3 case
run_benchmark=False,
)
d_shape = (16, 16, 32, 32)
w_shape = (32, 16, 3, 3)
padding = (1, 1)
dyn_batch_shape = (relay.Any(),) + d_shape[1:]
mod_nchw = get_conv2d_nchw(d_shape, w_shape, padding)
mod_dyn = get_conv2d_nchw(dyn_batch_shape, w_shape, padding)
verify_conv2d(
mod_dyn, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
for data_dtype, weight_dtype, out_dtype in [
("float32", "float32", "float32"), # 3xtf32
("int8", "int8", "int32"),
("uint8", "int8", "int32"),
]:
expr = get_conv2d_nchw(
d_shape,
w_shape,
padding,
out_dtype=out_dtype,
data_dtype=data_dtype,
weight_dtype=weight_dtype,
)
verify_conv2d(
expr,
expr,
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
run_benchmark=False,
data_dtype=data_dtype,
weight_dtype=weight_dtype,
ref_target="llvm",
)
# align1 + int8 case
d_shape = (16, 3, 32, 32)
w_shape = (32, 3, 3, 3)
mod_nchw = get_conv2d_nchw(
d_shape, w_shape, padding, out_dtype="int32", data_dtype="uint8", weight_dtype="int8"
)
verify_conv2d(
mod_nchw,
mod_nchw,
d_shape,
w_shape,
sm=80,
atol=1e-5,
rtol=1e-5,
ref_target="llvm",
data_dtype="uint8",
weight_dtype="int8",
)
@tvm.testing.requires_cutlass
def test_conv2d_fusion():
d_shape = (16, 16, 32, 32)
w_shape = (32, 16, 3, 3)
padding = (1, 1)
mod_nchw = get_conv2d_nchw_bias(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_relu(d_shape, w_shape, padding)
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
verify_conv2d(
mod_nchw,
mod_nchw,
d_shape,
w_shape,
sm=80,
atol=1e-3,
rtol=1e-3,
run_benchmark=False,
use_fast_math=True,
)
mod_nchw = get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float32")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float32")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
mod_nchw = get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16")
verify_conv2d(
mod_nchw, mod_nchw, d_shape, w_shape, sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
)
@tvm.testing.requires_cutlass
def test_conv2d_residual_block():
d_shape = (16, 16, 32, 32)
w_shape = (16, 16, 3, 3)
padding = (1, 1)
bias_add, residual_input = get_conv2d_nchw_bias_residual(d_shape, w_shape, padding)
for func, tol in [
(relay.nn.relu(bias_add + residual_input), 1e-5),
(relay.nn.relu(bias_add) + residual_input, 1e-5),
(relay.sigmoid(bias_add) * residual_input, 1e-5),
(relay.nn.relu(silu(bias_add) * residual_input), 1e-5),
        # HardSwish requires a higher tolerance due to vectorization of the residual
        # block epilogue in CUTLASS.
        # TODO(masahi): Investigate this issue
(relay.nn.relu(hardswish(bias_add) + residual_input), 1e-3),
]:
verify_conv2d(func, func, d_shape, w_shape, sm=80, atol=tol, rtol=tol, run_benchmark=False)
@tvm.testing.requires_cutlass
def test_conv2d_transpose():
OC = 8
IC = 16
d_shape = (16, IC, 32, 32)
w_shape = (OC, IC, 3, 3)
padding = (1, 1)
dtype = "float32"
for strides in [(1, 1), (2, 2)]:
o_shape = conv_output_shape(
0, padding, strides, (1, 1), d_shape, (OC, IC, 3, 3), "float32", "float32"
)
output_padding = (1, 1) if strides[0] > 1 else (0, 0)
mod_nchw = get_conv2d_transpose_nchw(
o_shape,
w_shape,
padding,
output_padding,
strides,
out_dtype=dtype,
data_dtype=dtype,
weight_dtype=dtype,
)
verify_conv2d(
mod_nchw,
mod_nchw,
o_shape,
w_shape,
sm=80,
atol=1e-3,
rtol=1e-3,
use_cudnn_ref=False,
run_benchmark=False,
data_dtype=dtype,
weight_dtype=dtype,
)
@tvm.testing.requires_cutlass
def test_conv2d_backward_weight():
OC = 8
IC = 16
d_shape = (16, IC, 32, 32)
w_shape = (OC, IC, 3, 3)
dtype = "float16"
for strides in [(1, 1), (2, 2)]:
o_shape = (16, OC, 32 // strides[0], 32 // strides[1])
padding = (1, 1)
mod_nchw = get_conv2d_backward_weight(
d_shape,
w_shape,
o_shape,
padding,
strides,
out_dtype="float32",
data_dtype=dtype,
weight_dtype=dtype,
)
for split_k_slices in [1, 8]:
verify_conv2d_backward_weight(
mod_nchw,
mod_nchw,
o_shape,
d_shape,
sm=80,
split_k_slices=[split_k_slices],
atol=1e-3,
rtol=1e-3,
use_cudnn_ref=False,
grad_dtype=dtype,
data_dtype=dtype,
)
@tvm.testing.requires_cutlass
def test_conv2d_bwd():
IC = 16
OC = 8
dshape = (16, IC, 32, 32)
wshape = (OC, IC, 3, 3)
padding = (0, 0)
strides = (1, 1)
conv = get_conv2d_nchw(
dshape,
wshape,
padding,
strides=strides,
out_dtype="float32",
data_dtype="float32",
weight_dtype="float32",
)
fwd_mod = InferType()(tvm.IRModule.from_expr(conv))
    # Note: there is a large difference between TVM and CUTLASS Wgrad results if fp16 is used,
    # since CUTLASS Wgrad uses fp32 accumulation even if the output is fp16.
use_fp16 = False
verify_dgrad = False # False to verify wgrad
tol = 1e-5 if verify_dgrad else 1e-4 # Wgrad slightly less accurate
if use_fp16:
fwd_mod = ToMixedPrecision("float16")(fwd_mod)
fwd_bwd_func = FirstOrderGradient()(fwd_mod)["main"]
bwd_func = relay.Function(
fwd_bwd_func.params,
relay.TupleGetItem(relay.TupleGetItem(fwd_bwd_func.body, 1), 0 if verify_dgrad else 1),
)
verify_conv2d(
bwd_func,
bwd_func,
dshape,
wshape,
sm=80,
atol=1e-2 if use_fp16 else tol,
rtol=1e-2 if use_fp16 else tol,
use_cudnn_ref=False,
data_dtype="float32",
weight_dtype="float32",
use_vm=True,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib.dlpack import to_pytorch_func
def verify_torch_dlpack():
a = np.random.randn(1337)
tvm_a = tvm.nd.array(a)
np.testing.assert_equal(tvm.nd.from_dlpack(tvm_a.to_dlpack()).numpy(), a)
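    # DLPack conversions share the underlying buffer (zero-copy) rather than copying data.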
try:
import torch
import torch.utils.dlpack
x = torch.rand(56, 56)
tvm_x = tvm.nd.from_dlpack(torch.utils.dlpack.to_dlpack(x))
np.testing.assert_equal(x.numpy(), tvm_x.numpy())
y = tvm.nd.from_dlpack(tvm_x)
np.testing.assert_equal(y.numpy(), tvm_x.numpy())
np.testing.assert_equal(
torch.utils.dlpack.from_dlpack(y.to_dlpack()).numpy(), tvm_x.numpy()
)
n = tvm.runtime.convert(137)
xx = torch.rand(137, 137)
yy = torch.rand(137, 137)
zz = xx.mm(yy)
XX = te.placeholder((n, n), name="X")
YY = te.placeholder((n, n), name="Y")
k = te.reduce_axis((0, n), name="k")
ZZ = te.compute((n, n), lambda i, j: te.sum(XX[i, k] * YY[k, j], axis=k))
s = te.create_schedule(ZZ.op)
        # No need to specify target_host if it's llvm;
        # otherwise you will need to specify both the target and target_host.
f = tvm.build(s, [XX, YY, ZZ], name="f")
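        # to_pytorch_func wraps the compiled TVM function so it can be called
        # directly on torch tensors; data is shared via DLPack rather than copied.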
f_pytorch = to_pytorch_func(f)
zz2 = torch.empty(137, 137)
f_pytorch(xx, yy, zz2)
tvm.testing.assert_allclose(zz.numpy(), zz2.numpy(), rtol=1e-4, atol=1e-4)
except ImportError:
pass
def test_torch_dlpack():
# Run dlpack interoperability test a few times to make sure it's stable.
for i in range(5):
verify_torch_dlpack()
if __name__ == "__main__":
test_torch_dlpack()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_dnnl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import itertools
import numpy as np
import sys
import subprocess
import math
import collections
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.op.contrib import dnnl
import tvm.testing
has_dnnl_codegen = pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True), reason="DNNL codegen not available"
)
run_module = tvm.testing.parameter(
pytest.param(False, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm.marks()]),
pytest.param(True, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm.marks()]),
ids=["compile", "run"],
)
_bf16_supported = None
def bf16_supported():
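    # The bf16 path needs AVX512 support on the host CPU; probe once and cache.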
global _bf16_supported
if _bf16_supported is None:
_bf16_supported = False
if sys.platform.startswith("darwin"):
cpu_info = subprocess.check_output("sysctl -a", shell=True).strip().decode()
for line in cpu_info.split("\n"):
if line.startswith("hw.optional.avx512f"):
_bf16_supported = bool(int(line.split(":", 1)[1]))
        elif sys.platform.startswith("linux"):
            with open("/proc/cpuinfo", "r") as cpuinfo:
                _bf16_supported = "avx512" in cpuinfo.read()
return _bf16_supported
def partition_for_dnnl(mod, params=None, alter_layout=True, prune_subgraphs=True):
"""Partition the graph greedily offloading supported operators to DNNL.
Parameters
----------
mod : Module
The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    alter_layout : bool
        Whether to run the layout-altering passes for DNNL.
    prune_subgraphs : bool
        Whether to prune DNNL subgraphs that contain no compute-intensive ops.
Returns
-------
mod : Module
Annotated and partitioned module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
with TempOpAttr("nn.conv2d", "FTVMLegalize", dnnl.legalize_group_conv):
with TempOpAttr("nn.conv2d_transpose", "FTVMLegalize", dnnl.legalize_group_conv):
seq = tvm.transform.Sequential(
[
transform.CanonicalizeOps(),
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
# fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
transform.SimplifyExpr(),
transform.FoldConstant(),
                    # alter group conv / conv_transpose layout to `GOIHW` / `GIOHW`
transform.Legalize(),
transform.FoldConstant(),
]
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
if alter_layout:
with TempOpAttr("nn.conv1d", "FTVMAlterOpLayout", dnnl.alter_conv):
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", dnnl.alter_conv):
with TempOpAttr("nn.conv3d", "FTVMAlterOpLayout", dnnl.alter_conv):
with TempOpAttr(
"nn.conv2d_transpose", "FTVMAlterOpLayout", dnnl.alter_conv_transpose
):
with TempOpAttr(
"nn.conv3d_transpose", "FTVMAlterOpLayout", dnnl.alter_conv_transpose
):
alter_layout_seq = tvm.transform.Sequential(
[
transform.AlterOpLayout(),
transform.FoldConstant(),
]
)
with tvm.transform.PassContext(opt_level=3):
mod = alter_layout_seq(mod)
mod = dnnl.rewrite_layer_norm(mod)
mod = dnnl.rewrite_dense_bias_gelu_reshape_last(mod)
mod = dnnl.legalize_qnn_for_dnnl(mod)
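    # Standard BYOC flow: fuse supported patterns into composite functions,
    # annotate them for the "dnnl" target, merge adjacent regions, then split
    # them out into separate compiler-specific functions.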
byoc_seq = tvm.transform.Sequential(
[
transform.MergeComposite(dnnl.pattern_table()),
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3):
mod = byoc_seq(mod)
if prune_subgraphs:
mod = dnnl.prune_dnnl_subgraphs(mod)
return mod
def vmobj_to_list(o):
if isinstance(o, tvm.nd.NDArray):
o_np = o.numpy()
if o_np.dtype == np.uint16:
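            # bf16 values come back as uint16 bit patterns; shifting them into
            # the upper half of a uint32 reinterprets them as float32.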
o_np = np.left_shift(o_np.astype("uint32"), 16).view("<f4")
return [o_np]
elif isinstance(o, tvm.runtime.container.ADT) or isinstance(o, list):
return [vmobj_to_list(f) for f in o]
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def assert_result_dict_holds(result_dict):
for k1, k2 in itertools.combinations(result_dict, 2):
res1 = vmobj_to_list(result_dict[k1])
res2 = vmobj_to_list(result_dict[k2])
for r1, r2 in zip(res1, res2):
            # only compare results of the same precision (both bf16 or both fp32)
if ("bf16" in k1) == ("bf16" in k2):
tvm.testing.assert_allclose(r1, r2, rtol=1e-3, atol=1e-3)
def check_dnnl_used(mod, subgraph_num=None):
    num_dnnl_subgraphs = sum(1 for gv in mod.get_global_vars() if "dnnl" in gv.name_hint)
if subgraph_num:
assert num_dnnl_subgraphs == subgraph_num
else:
assert num_dnnl_subgraphs >= 1
def run_and_verify(mod, input, params, target, run_module, subgraph_num=None, test_bf16=True):
dev = tvm.cpu()
result_dict = dict()
for mode in ["graph", "vm"]:
configs = [
(False, False, False),
(True, False, False),
(True, True, False),
]
if test_bf16 and bf16_supported():
configs += [(True, False, True), (True, True, True)]
for use_dnnl, alter_layout, use_bf16 in configs:
result_key = (
mode
+ ("_dnnl" if use_dnnl else "")
+ ("_layout" if alter_layout else "")
+ ("_bf16" if use_bf16 else "_fp32")
)
processed_mod = mod
if use_bf16:
processed_mod = relay.transform.ToMixedPrecision("bfloat16")(processed_mod)
if tvm.ir.structural_equal(processed_mod, mod):
print("can not convert to bfloat16, skipping...")
continue
if use_dnnl:
processed_mod = partition_for_dnnl(processed_mod, params, alter_layout)
check_dnnl_used(processed_mod)
with tvm.transform.PassContext(opt_level=3):
func = relay.create_executor(
mode, mod=processed_mod, device=dev, target=target
).evaluate()
if run_module:
if isinstance(input, dict):
result_dict[result_key] = func(**input, **params)
else:
result_dict[result_key] = func(input, **params)
if run_module:
assert_result_dict_holds(result_dict)
def run_and_verify_func(
config, run_module, subgraph_num=None, target="llvm", dtype="float32", test_bf16=True
):
"""Test a Relay func by compiling, running, and comparing TVM and DNNL outputs.
Parameters
----------
config : Tuple[relay.Function, Dict[str, NDArray], List[str]]
A tuple containing 1) The function to test, 2) A dictionary of var names to input shapes and
3) A list of which vars should be considered params.
run_module: bool
If True, the built module will be run after being compiled.
"""
f, input_shapes, is_param = config
params = {x: np.random.uniform(-1, 1, input_shapes[x]).astype(dtype) for x in is_param}
input_dict = {
k: np.random.uniform(-1, 1, v).astype(dtype)
for k, v in input_shapes.items()
if k not in is_param
}
run_and_verify(
f,
input_dict,
params,
subgraph_num=subgraph_num,
target=target,
run_module=run_module,
test_bf16=test_bf16,
)
def add_activation(activation, out, dic, param_lst):
if activation == "relu":
return relay.nn.relu(out), dic, param_lst
elif activation == "tanh":
return relay.tanh(out), dic, param_lst
elif activation == "sigmoid":
return relay.sigmoid(out), dic, param_lst
elif activation == "clip":
return relay.clip(out, 0.0, 6.0), dic, param_lst
elif activation == "swish":
sig_out = relay.sigmoid(out)
out = relay.multiply(out, sig_out)
return out, dic, param_lst
elif activation == "gelu":
out = gelu_helper(out)
return out, dic, param_lst
elif activation == "mish":
exp = relay.exp(out)
add = relay.add(exp, relay.const(1.0))
log = relay.log(add)
tanh = relay.tanh(log)
out = relay.multiply(out, tanh)
return out, dic, param_lst
else:
return out, dic, param_lst
def get_conv1d(
    x_shape=(1, 3, 224),
k_shape=(16, 3, 3),
groups=1,
padding=(1, 1),
    strides=1,
    dilation=1,
channels=None,
activation=None,
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
out = relay.nn.conv1d(
x,
kernel,
kernel_size=k_shape[2:3],
groups=groups,
padding=padding,
strides=strides,
dilation=dilation,
channels=k_shape[0],
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return add_activation(activation, out, dic, param_lst)
def get_conv1d_bias(x_shape=(1, 3, 224), k_shape=(10, 3, 3), activation=None, dtype="float32"):
conv, dic, param_lst = get_conv1d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
out = relay.nn.bias_add(conv, bias)
dic["bias"] = (k_shape[0],)
param_lst += ["bias"]
return add_activation(activation, out, dic, param_lst)
def get_conv1d_bias_bn_relu(x_shape=(1, 3, 224), k_shape=(10, 3, 3), dtype="float32"):
conv1d_bias, dic, param_lst = get_conv1d_bias(x_shape, k_shape, dtype=dtype)
beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
conv1d_bias_bn, _, _ = relay.nn.batch_norm(
conv1d_bias,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
axis=1,
center=True,
scale=True,
epsilon=1e-5,
)
return relay.nn.relu(conv1d_bias_bn), dic, param_lst
def get_conv2d(
x_shape=(1, 32, 8, 8),
k_shape=(16, 32, 3, 3),
groups=1,
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
activation=None,
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
out = relay.nn.conv2d(
x,
kernel,
kernel_size=k_shape[2:4],
groups=groups,
padding=padding,
strides=strides,
dilation=dilation,
channels=k_shape[0],
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return add_activation(activation, out, dic, param_lst)
def get_conv2d_transpose(
x_shape=(1, 32, 8, 8),
k_shape=(32, 16, 3, 3),
groups=1,
padding=(0, 0),
strides=(1, 1),
activation=None,
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
out = relay.nn.conv2d_transpose(
x,
kernel,
channels=k_shape[1] * groups,
kernel_size=k_shape[2:4],
groups=groups,
padding=padding,
strides=strides,
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return add_activation(activation, out, dic, param_lst)
def get_conv2d_weights_const(
x_shape=(1, 32, 8, 8),
k_shape=(16, 32, 3, 3),
groups=1,
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
out = relay.nn.conv2d(
x,
kernel,
channels=k_shape[0],
kernel_size=k_shape[2:4],
groups=groups,
padding=padding,
strides=strides,
dilation=dilation,
)
dic = {"x": x_shape}
param_lst = []
return out, dic, param_lst
def get_conv2d_bias(
x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), activation=None, dtype="float32"
):
conv, dic, param_lst = get_conv2d_weights_const(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
out = relay.nn.bias_add(conv, bias)
dic["bias"] = (k_shape[0],)
param_lst += ["bias"]
return add_activation(activation, out, dic, param_lst)
def get_conv2d_transpose_bias(
x_shape=(1, 32, 8, 8), k_shape=(32, 16, 3, 3), activation=None, dtype="float32"
):
conv, dic, param_lst = get_conv2d_transpose(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
bias = relay.var("bias", shape=(k_shape[1],), dtype=dtype)
out = relay.nn.bias_add(conv, bias)
dic["bias"] = (k_shape[1],)
param_lst += ["bias"]
return add_activation(activation, out, dic, param_lst)
def get_conv2d_bias_bn_relu(x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), dtype="float32"):
conv2d_bias, dic, param_lst = get_conv2d_bias(x_shape, k_shape, dtype=dtype)
beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
conv2d_bias_bn, _, _ = relay.nn.batch_norm(
conv2d_bias,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
axis=1,
center=True,
scale=True,
epsilon=1e-5,
)
return relay.nn.relu(conv2d_bias_bn), dic, param_lst
def get_layer_norm(x_shape=(1, 49, 64), dtype="float32"):
dic = {"input": x_shape}
param_lst = []
input = relay.var("input", shape=x_shape)
beta = relay.const(np.zeros(x_shape[2]).astype(dtype))
gamma = relay.const(np.ones(x_shape[2]).astype(dtype))
out = relay.nn.layer_norm(input, gamma=gamma, beta=beta)
return out, dic, param_lst
def get_conv3d(
x_shape=(1, 32, 8, 8, 8),
k_shape=(16, 32, 3, 3, 3),
groups=1,
padding=(0, 0, 0),
strides=(1, 1, 1),
dilation=(1, 1, 1),
activation=None,
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
out = relay.nn.conv3d(
x,
kernel,
channels=k_shape[0],
kernel_size=k_shape[2:],
groups=groups,
padding=padding,
strides=strides,
dilation=dilation,
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return add_activation(activation, out, dic, param_lst)
def get_conv3d_transpose(
x_shape=(1, 32, 8, 8, 8),
k_shape=(32, 16, 3, 3, 3),
groups=1,
padding=(0, 0, 0),
strides=(1, 1, 1),
output_padding=(0, 0, 0),
activation=None,
dtype="float32",
data_layout="NCDHW",
kernel_layout="OIDHW",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
out = relay.nn.conv3d_transpose(
x,
kernel,
channels=k_shape[1],
kernel_size=k_shape[2:5],
groups=groups,
padding=padding,
strides=strides,
output_padding=output_padding,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return add_activation(activation, out, dic, param_lst)
def get_conv3d_bias(
x_shape=(1, 32, 8, 8, 8), k_shape=(16, 32, 3, 3, 3), activation=None, dtype="float32"
):
conv, dic, param_lst = get_conv3d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
out = relay.nn.bias_add(conv, bias)
dic["bias"] = (k_shape[0],)
param_lst += ["bias"]
return add_activation(activation, out, dic, param_lst)
def get_conv3d_transpose_bias(
x_shape=(1, 32, 8, 8, 8), k_shape=(32, 16, 3, 3, 3), activation=None, dtype="float32"
):
conv, dic, param_lst = get_conv3d_transpose(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
bias = relay.var("bias", shape=(k_shape[1],), dtype=dtype)
out = relay.nn.bias_add(conv, bias)
dic["bias"] = (k_shape[1],)
param_lst += ["bias"]
return add_activation(activation, out, dic, param_lst)
def gelu_helper(data):
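    # Exact (erf-based) GELU: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))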
const1 = relay.const(math.sqrt(2.0))
const2 = relay.const(1.0)
const3 = relay.const(0.5)
divisor = relay.op.divide(data, const1)
val_erf = relay.op.erf(divisor)
added_erf = relay.op.add(val_erf, const2)
mul1 = relay.op.multiply(data, added_erf)
out = relay.op.multiply(mul1, const3)
return out
def get_dense(
x_shape=(1, 16), k_shape=(32, 16), activation=None, has_reshape=False, dtype="float32"
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
out = relay.nn.dense(x, kernel, units=k_shape[0])
if has_reshape:
out = relay.reshape(out, newshape=(1, x_shape[0], k_shape[0]))
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
if activation == "gelu":
out = gelu_helper(out)
return out, dic, param_lst
def get_bmm(
x_shape=(1, 16, 8), k_shape=(1, 4, 8), dtype="float32", transpose_a=False, transpose_b=True
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
out = relay.nn.batch_matmul(
x, kernel, out_dtype=dtype, transpose_a=transpose_a, transpose_b=transpose_b
)
dic = {"x": x_shape, "kernel": k_shape}
param_lst = ["kernel"]
return out, dic, param_lst
def test_bmm(run_module, dtype="float32"):
x_shape = (1, 2, 4)
k_shape = (1, 3, 4)
dense, dic, param_lst = get_bmm(x_shape, k_shape, dtype=dtype)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
k_shape_t = (1, 4, 3)
dense, dic, param_lst = get_bmm(x_shape, k_shape_t, dtype=dtype, transpose_b=False)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def get_dense_bias(
x_shape=(1, 16),
k_shape=(32, 16),
activation=None,
has_reshape=False,
use_add=False,
dtype="float32",
):
dense, dic, param_lst = get_dense(
x_shape=x_shape, k_shape=k_shape, has_reshape=has_reshape, dtype=dtype
)
bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
if use_add:
out = relay.add(dense, bias)
else:
out = relay.nn.bias_add(dense, bias)
dic["bias"] = (k_shape[0],)
param_lst += ["bias"]
if activation == "gelu":
out = gelu_helper(out)
return out, dic, param_lst
def test_dnnl_not_compatible(run_module, target="llvm", dtype="float32"):
xshape = (1, 32, 14, 14)
x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
x = relay.var("x", shape=(xshape), dtype=dtype)
y = relay.add(x, x)
z = relay.cast(relay.cast(y, "int32"), "float32")
out = relay.nn.relu(z)
f = relay.Function([x], out)
mod = tvm.IRModule()
mod["main"] = f
mod = partition_for_dnnl(mod)
for mode in ["graph", "vm"]:
with tvm.transform.PassContext(opt_level=3):
func = relay.create_executor(mode, mod=mod, device=tvm.cpu(0), target=target).evaluate()
if run_module:
results = func(x_data)
def test_multiple_outputs(run_module, dtype="float32"):
def get_graph():
x = relay.var("x", shape=(1, 3), dtype=dtype)
y = relay.var("y", shape=(1, 3), dtype=dtype)
z = relay.add(x, y)
w = relay.add(z, y)
out = relay.Tuple((z, w))
f = tvm.IRModule.from_expr(out)
return f, {"x": (1, 3), "y": (1, 3)}, []
run_and_verify_func(get_graph(), run_module=run_module, dtype=dtype)
def test_elementwise(run_module, dtype="float32"):
def get_graph(op, x_shape=(1, 8, 3, 3)):
x = relay.var("x", shape=(x_shape), dtype=dtype)
out = op(x)
f = tvm.IRModule.from_expr(out)
return f, {"x": x_shape}, []
for op in [
relay.abs,
relay.exp,
relay.log,
relay.sqrt,
relay.nn.relu,
relay.tanh,
relay.sigmoid,
]:
run_and_verify_func(get_graph(op), run_module=run_module)
def test_clip(run_module, dtype="float32"):
def get_graph(x_shape=(1, 8, 3, 3)):
x = relay.var("x", shape=(x_shape), dtype=dtype)
out = relay.clip(x, a_min=-0.2, a_max=0.4)
f = tvm.IRModule.from_expr(out)
return f, {"x": x_shape}, []
run_and_verify_func(get_graph(), run_module=run_module)
def test_leaky_relu(run_module, dtype="float32"):
def get_graph(x_shape=(1, 8, 3, 3)):
x = relay.var("x", shape=(x_shape), dtype=dtype)
out = relay.nn.leaky_relu(x, alpha=0.1)
f = tvm.IRModule.from_expr(out)
return f, {"x": x_shape}, []
run_and_verify_func(get_graph(), run_module=run_module)
def test_softmax(run_module, dtype="float32"):
def get_graph(x_shape, axis):
x = relay.var("x", shape=(x_shape), dtype=dtype)
out = relay.nn.softmax(x, axis=axis)
f = tvm.IRModule.from_expr(out)
return f, {"x": x_shape}, []
run_and_verify_func(get_graph((1, 1000), axis=1), run_module=run_module)
run_and_verify_func(get_graph((1, 1000), axis=-1), run_module=run_module)
run_and_verify_func(get_graph((1, 3, 4), axis=-2), run_module=run_module)
run_and_verify_func(get_graph((1, 3, 4), axis=1), run_module=run_module)
def test_conv1d(run_module, dtype="float32"):
conv1d, dic, param_lst = get_conv1d(channels=16, dtype=dtype)
conv1d = tvm.IRModule.from_expr(conv1d)
config = conv1d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
x_shape = (1, 32, 224)
k_shape = (16, 32, 3)
conv1d_bias, dic, param_lst = get_conv1d(x_shape, k_shape, dtype=dtype)
conv1d_bias = tvm.IRModule.from_expr(conv1d_bias)
config = conv1d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv1d_pattern(run_module, dtype="float32"):
x_shape = (1, 3, 224)
k_shape = (16, 3, 3)
activation_lst = [None, "relu", "tanh", "sigmoid"]
for a in activation_lst:
conv1d, dic, param_lst = get_conv1d(x_shape, k_shape, activation=a, dtype=dtype)
conv1d = tvm.IRModule.from_expr(conv1d)
config = conv1d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv1d_bias, dic, param_lst = get_conv1d_bias(x_shape, k_shape, activation=a, dtype=dtype)
conv1d_bias = tvm.IRModule.from_expr(conv1d_bias)
config = conv1d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d(run_module, dtype="float32"):
x_shape = (1, 32, 8, 8)
for k_shape, groups in [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32), ((32, 2, 3, 3), 16)]:
for padding in [(0, 0), (1, 1)]:
for strides in [(1, 1), (2, 2)]:
for dilation in [(1, 1), (2, 2)]:
conv2d, dic, param_lst = get_conv2d(
x_shape=x_shape,
k_shape=k_shape,
groups=groups,
padding=padding,
strides=strides,
dilation=dilation,
dtype=dtype,
)
conv2d = tvm.IRModule.from_expr(conv2d)
config = conv2d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_weights_const(run_module, dtype="float32"):
x_shape = (1, 32, 8, 8)
k_shape = (16, 32, 3, 3)
conv2d, dic, param_lst = get_conv2d_weights_const(x_shape, k_shape, dtype=dtype)
conv2d = tvm.IRModule.from_expr(conv2d)
config = conv2d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
x_shape = (1, 3, 8, 8)
k_shape = (16, 3, 3, 3)
conv2d, dic, param_lst = get_conv2d_weights_const(x_shape, k_shape, dtype=dtype)
conv2d = tvm.IRModule.from_expr(conv2d)
config = conv2d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_pattern(run_module, dtype="float32"):
x_shape = (1, 32, 8, 8)
k_shape = (16, 32, 3, 3)
activation_lst = [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]
for a in activation_lst:
conv2d, dic, param_lst = get_conv2d(x_shape, k_shape, activation=a, dtype=dtype)
conv2d = tvm.IRModule.from_expr(conv2d)
config = conv2d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv2d_bias, dic, param_lst = get_conv2d_bias(x_shape, k_shape, activation=a, dtype=dtype)
conv2d_bias = tvm.IRModule.from_expr(conv2d_bias)
config = conv2d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv2d_bias_bn_relu, dic, param_lst = get_conv2d_bias_bn_relu(x_shape, k_shape, dtype=dtype)
conv2d_bias_bn_relu = tvm.IRModule.from_expr(conv2d_bias_bn_relu)
config = conv2d_bias_bn_relu, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv2d_bias_bn_relu, dic, param_lst = get_conv2d_bias_bn_relu(x_shape, k_shape, dtype=dtype)
conv2d_bias_bn_relu = tvm.IRModule.from_expr(conv2d_bias_bn_relu)
config = conv2d_bias_bn_relu, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_bias_sum_relu(run_module, dtype="float32"):
x_shape = (1, 32, 8, 8)
k_shape = (16, 32, 3, 3)
def get_conv2d_bn_sum_relu(x_shape, k_shape, dtype="float32"):
out, dic, param_lst = get_conv2d_bias(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
out, _, _ = relay.nn.batch_norm(
out,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
axis=1,
center=True,
scale=True,
epsilon=1e-5,
)
sum_in = relay.var("sum_in", shape=x_shape, dtype=dtype)
kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
conv_sum = relay.nn.conv2d(
sum_in,
kernel,
channels=k_shape[0],
kernel_size=k_shape[2:4],
groups=1,
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
)
# sum over two conv2d outputs to meet inplace condition
out = relay.add(out, conv_sum)
dic["sum_in"] = x_shape
return relay.nn.relu(out), dic, param_lst
conv2d_bn_sum_relu, dic, param_lst = get_conv2d_bn_sum_relu(x_shape, k_shape, dtype=dtype)
conv2d_bn_sum_relu = tvm.IRModule.from_expr(conv2d_bn_sum_relu)
config = conv2d_bn_sum_relu, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_transpose(run_module, dtype="float32"):
x_shape = (1, 32, 8, 8)
for k_shape, groups in [((32, 16, 3, 3), 1), ((32, 1, 3, 3), 32), ((32, 4, 3, 3), 16)]:
for padding in [(0, 0), (1, 1)]:
for strides in [(1, 1), (2, 2)]:
conv2d_transpose, dic, param_lst = get_conv2d_transpose(
x_shape=x_shape,
k_shape=k_shape,
groups=groups,
padding=padding,
strides=strides,
dtype=dtype,
)
conv2d_transpose = tvm.IRModule.from_expr(conv2d_transpose)
config = conv2d_transpose, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_transpose_pattern(run_module, dtype="float32"):
activation_lst = [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]
for a in activation_lst:
conv2d, dic, param_lst = get_conv2d_transpose(activation=a, dtype=dtype)
conv2d = tvm.IRModule.from_expr(conv2d)
config = conv2d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv2d_bias, dic, param_lst = get_conv2d_transpose_bias(activation=a, dtype=dtype)
conv2d_bias = tvm.IRModule.from_expr(conv2d_bias)
config = conv2d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv3d(run_module, dtype="float32"):
conv3d, dic, param_lst = get_conv3d(dtype=dtype)
conv3d = tvm.IRModule.from_expr(conv3d)
config = conv3d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d, dic, param_lst = get_conv3d(padding=(0, 0, 0, 1, 1, 1), dtype=dtype)
conv3d = tvm.IRModule.from_expr(conv3d)
config = conv3d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d, dic, param_lst = get_conv3d(
x_shape=(1, 3, 8, 8, 8), k_shape=(16, 3, 3, 3, 3), dtype=dtype
)
conv3d = tvm.IRModule.from_expr(conv3d)
config = conv3d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv3d_pattern(run_module, dtype="float32"):
activation_lst = [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]
for a in activation_lst:
conv3d, dic, param_lst = get_conv3d(activation=a, dtype=dtype)
conv3d = tvm.IRModule.from_expr(conv3d)
config = conv3d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d_bias, dic, param_lst = get_conv3d_bias(activation=a, dtype=dtype)
conv3d_bias = tvm.IRModule.from_expr(conv3d_bias)
config = conv3d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv3d_transpose(run_module, dtype="float32"):
conv3d_transpose, dic, param_lst = get_conv3d_transpose(dtype=dtype)
conv3d_transpose = tvm.IRModule.from_expr(conv3d_transpose)
config = conv3d_transpose, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d_transpose, dic, param_lst = get_conv3d_transpose(strides=(2, 2, 2), dtype=dtype)
conv3d_transpose = tvm.IRModule.from_expr(conv3d_transpose)
config = conv3d_transpose, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d_transpose, dic, param_lst = get_conv3d_transpose(
strides=(2, 2, 2), output_padding=(1, 1, 1), dtype=dtype
)
conv3d_transpose = tvm.IRModule.from_expr(conv3d_transpose)
config = conv3d_transpose, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv3d_transpose_pattern(run_module, dtype="float32"):
activation_lst = [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]
for a in activation_lst:
conv3d, dic, param_lst = get_conv3d_transpose(activation=a, dtype=dtype)
conv3d = tvm.IRModule.from_expr(conv3d)
config = conv3d, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
conv3d_bias, dic, param_lst = get_conv3d_transpose_bias(activation=a, dtype=dtype)
conv3d_bias = tvm.IRModule.from_expr(conv3d_bias)
config = conv3d_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_dense(run_module, dtype="float32"):
x_shape = (1, 16)
k_shape = (32, 16)
dense, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
dense, dic, param_lst = get_dense(x_shape, k_shape=(1, 16), dtype=dtype)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
dense, dic, param_lst = get_dense(x_shape, k_shape, activation="gelu", dtype=dtype)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_dense_pattern(run_module, dtype="float32"):
x_shape = (1, 16)
k_shape = (32, 16)
dense, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
dense = tvm.IRModule.from_expr(dense)
config = dense, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
dense_bias, dic, param_lst = get_dense_bias(x_shape, k_shape, dtype=dtype)
dense_bias = tvm.IRModule.from_expr(dense_bias)
config = dense_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
dense_bias, dic, param_lst = get_dense_bias(x_shape, k_shape, activation="gelu", dtype=dtype)
dense_bias = tvm.IRModule.from_expr(dense_bias)
config = dense_bias, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_pool2d(run_module, dtype="float32"):
def get_graph(
op,
x_shape=(1, 3, 32, 32),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
count_include_pad=None,
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
if count_include_pad is not None:
out = op(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
else:
out = op(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
)
out = tvm.IRModule.from_expr(out)
return out, {"x": x_shape}, []
for pool_size in [(2, 2), (3, 3)]:
for strides in [(1, 1), (2, 2)]:
for padding in [(0, 0), (1, 1), (0, 0, 1, 1)]:
for ceil_mode in [False]:
# Skip "the padding size is larger than or equal to the filter size for exclusive-counting pooling"
if pool_size == (2, 2) and padding == (0, 0, 1, 1):
continue
for count_include_pad in [False, True]:
# Skip "inclusive-counted blended or average pooling is not supported in combination with asymmetric padding"
if count_include_pad and (padding == (0, 0, 1, 1) or strides == (2, 2)):
continue
run_and_verify_func(
get_graph(
relay.nn.avg_pool2d,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
),
run_module=run_module,
)
run_and_verify_func(
get_graph(
relay.nn.max_pool2d,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
),
run_module=run_module,
)
def test_global_avg_pooling2d(run_module, dtype="float32"):
x_shape = (1, 3, 32, 32)
x = relay.var("x", shape=(x_shape), dtype=dtype)
out = relay.nn.global_avg_pool2d(x)
out = tvm.IRModule.from_expr(out)
config = out, {"x": x_shape}, []
run_and_verify_func(config, run_module=run_module)
def test_pool3d(run_module, dtype="float32"):
def get_graph(
op,
x_shape=(1, 3, 8, 32, 32),
pool_size=(2, 2, 2),
strides=(2, 2, 2),
padding=(0, 0, 0),
ceil_mode=False,
count_include_pad=None,
dtype="float32",
):
x = relay.var("x", shape=(x_shape), dtype=dtype)
if count_include_pad is not None:
out = op(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
else:
out = op(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
)
out = tvm.IRModule.from_expr(out)
return out, {"x": x_shape}, []
run_and_verify_func(get_graph(relay.nn.avg_pool3d), run_module=run_module)
run_and_verify_func(get_graph(relay.nn.max_pool3d), run_module=run_module)
run_and_verify_func(
get_graph(relay.nn.max_pool3d, padding=(0, 0, 0, 1, 1, 1)), run_module=run_module
)
run_and_verify_func(get_graph(relay.nn.max_pool3d, strides=(1, 1, 1)), run_module=run_module)
def test_prune_dnnl_subgraph(run_module):
"""In this test, OP "add" should be offloaded from dnnl codegen."""
def get_graph():
x1 = relay.var("x1", shape=(1, 32, 56, 56))
x2 = relay.var("x2", shape=(1, 32, 56, 56))
bias = relay.var("bias", shape=(32,))
weight = relay.var("weight", shape=(32, 32, 3, 3))
y = relay.nn.conv2d(
x1,
weight,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.nn.bias_add(y, bias)
y = relay.nn.relu(y)
y = relay.nn.global_max_pool2d(y)
y = relay.add(y, x2)
dic = {
"x1": (1, 32, 56, 56),
"x2": (1, 32, 56, 56),
"weight": (32, 32, 3, 3),
"bias": (32,),
}
param_lst = ["weight", "bias"]
out = tvm.IRModule.from_expr(y)
return out, dic, param_lst
run_and_verify_func(get_graph(), subgraph_num=1, run_module=run_module, test_bf16=False)
def test_layer_norm(run_module, dtype="float32"):
x_shape = (1, 49, 64)
ln, dic, param_lst = get_layer_norm(x_shape, dtype=dtype)
ln = tvm.IRModule.from_expr(ln)
config = ln, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_rewrite_dense_bias_gelu_reshape_last(run_module, dtype="float32"):
def get_graph(act=None):
x_shape = (1, 16)
k_shape = (32, 16)
dense_bias, dic, param_lst = get_dense_bias(
x_shape, k_shape, activation=act, has_reshape=True, use_add=True, dtype=dtype
)
dense_bias = tvm.IRModule.from_expr(dense_bias)
processed_dense_bias = partition_for_dnnl(
dense_bias, params=None, alter_layout=False, prune_subgraphs=False
)
check_dnnl_used(processed_dense_bias, 1)
return dense_bias, dic, param_lst
run_and_verify_func(
get_graph("gelu"), subgraph_num=1, run_module=run_module, dtype=dtype, test_bf16=False
)
run_and_verify_func(
get_graph(), subgraph_num=1, run_module=run_module, dtype=dtype, test_bf16=False
)
def test_resnetv1_rewrite(run_module, dtype="float32"):
def get_graph():
data_shape = (1, 256, 56, 56)
w_shapes = [
(64, 256, 1, 1),
(64, 64, 3, 3),
(256, 64, 1, 1),
(128, 256, 1, 1),
(128, 128, 3, 3),
(512, 128, 1, 1),
(512, 256, 1, 1),
]
x = relay.var("x", shape=data_shape, dtype=dtype)
        weights = [relay.const(np.random.randint(0, 1, w).astype(dtype)) for w in w_shapes]
biases = [relay.const(np.random.randint(0, 1, w[0]).astype(dtype)) for w in w_shapes]
conv1 = relay.nn.conv2d(
x,
            weights[0],
channels=w_shapes[0][0],
kernel_size=w_shapes[0][2:4],
padding=(w_shapes[0][2] // 2, w_shapes[0][3] // 2),
)
conv1 = relay.nn.bias_add(conv1, biases[0])
conv1 = relay.nn.relu(conv1)
conv2 = relay.nn.conv2d(
conv1,
            weights[1],
channels=w_shapes[1][0],
kernel_size=w_shapes[1][2:4],
padding=(w_shapes[1][2] // 2, w_shapes[1][3] // 2),
)
conv2 = relay.nn.bias_add(conv2, biases[1])
conv2 = relay.nn.relu(conv2)
conv3 = relay.nn.conv2d(
conv2,
            weights[2],
channels=w_shapes[2][0],
kernel_size=w_shapes[2][2:4],
padding=(w_shapes[2][2] // 2, w_shapes[2][3] // 2),
)
conv3 = relay.nn.bias_add(conv3, biases[2])
conv3 = relay.add(conv3, x)
conv3 = relay.nn.relu(conv3)
left_conv4 = relay.nn.conv2d(
conv3,
            weights[3],
channels=w_shapes[3][0],
strides=(2, 2),
kernel_size=w_shapes[3][2:4],
padding=(w_shapes[3][2] // 2, w_shapes[3][3] // 2),
)
left_conv4 = relay.nn.bias_add(left_conv4, biases[3])
left_conv4 = relay.nn.relu(left_conv4)
left_conv5 = relay.nn.conv2d(
left_conv4,
            weights[4],
channels=w_shapes[4][0],
kernel_size=w_shapes[4][2:4],
padding=(w_shapes[4][2] // 2, w_shapes[4][3] // 2),
)
left_conv5 = relay.nn.bias_add(left_conv5, biases[4])
left_conv5 = relay.nn.relu(left_conv5)
left_conv6 = relay.nn.conv2d(
left_conv5,
            weights[5],
channels=w_shapes[5][0],
kernel_size=w_shapes[5][2:4],
padding=(w_shapes[5][2] // 2, w_shapes[5][3] // 2),
)
left_conv6 = relay.nn.bias_add(left_conv6, biases[5])
right_conv7 = relay.nn.conv2d(
conv3,
            weights[6],
channels=w_shapes[6][0],
strides=(2, 2),
kernel_size=w_shapes[6][2:4],
padding=(w_shapes[6][2] // 2, w_shapes[6][3] // 2),
)
right_conv7 = relay.nn.bias_add(right_conv7, biases[6])
out = relay.add(left_conv6, right_conv7)
out = relay.nn.relu(out)
dic = {"x": data_shape}
param_lst = []
return out, dic, param_lst
net, dic, param_lst = get_graph()
net = tvm.IRModule.from_expr(net)
config = net, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_fuse_pad_avg_pool(run_module, dtype="float32"):
def get_graph():
data_shape = (1, 768, 17, 17)
x = relay.var("x", shape=data_shape, dtype=dtype)
out = relay.nn.pad(x, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]])
out = relay.nn.avg_pool2d(out, pool_size=[3, 3])
dic = {"x": data_shape}
param_lst = []
return out, dic, param_lst
net, dic, param_lst = get_graph()
net = tvm.IRModule.from_expr(net)
config = net, dic, param_lst
run_and_verify_func(config, run_module=run_module, dtype=dtype)
def permute_shape(shape, l_from="", l_to=""):
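    # Reorder `shape` from layout `l_from` to `l_to`,
    # e.g. permute_shape([1, 8, 5, 5], "NCHW", "NHWC") -> [1, 5, 5, 8].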
res_shape = []
for label in l_to:
pos = l_from.find(label)
res_shape.append(shape[pos])
return res_shape
def expand_dim(shape, rank=0):
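    # Pad a rank-1 shape with trailing unit dims, e.g. ([16], rank=3) -> [16, 1, 1].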
assert len(shape) == 1
return shape + [1] * (rank - 1)
def filler_uni(low=0, high=1):
def filler_func(shape):
return np.random.uniform(low, high, shape)
return filler_func
class QnnBuilder:
def __init__(self, qnn_profile=None):
self._args = {}
self._args_op = []
self._qp = qnn_profile
def arg(self, shape=[], dtype="float32", filler=filler_uni(), is_const=True):
if isinstance(filler, (int, float)):
value = np.full(shape, filler).astype(dtype)
else:
value = filler(shape).astype(dtype)
if is_const:
res = relay.const(value, dtype=dtype)
else:
name = f"in_{len(self._args)}"
res = relay.var(name, shape=shape, dtype=dtype)
self._args[name] = value
self._args_op.append(res)
return res
def make_zp(self, mean_val, num_ch=1, dispersion=0.2):
if num_ch == 1:
return self.arg(shape=[], dtype="int32", filler=mean_val)
else:
low = int(mean_val * (1 - dispersion))
high = int(mean_val * (1 + dispersion))
return self.arg(shape=[num_ch], dtype="int32", filler=filler_uni(low, high))
def make_scl(self, mean_val, num_ch=1, dispersion=0.2):
if num_ch == 1:
return self.arg(shape=[], dtype="float32", filler=mean_val)
else:
low = mean_val * (1 - dispersion)
high = mean_val * (1 + dispersion)
return self.arg(shape=[num_ch], dtype="float32", filler=filler_uni(low, high))
def make_zp_and_scl(self, name, num_ch=1, dispersion=0.2):
is_per_channel = getattr(self._qp, f"{name}_pc")
zp_val = getattr(self._qp, f"{name}_zp")
scl_val = getattr(self._qp, f"{name}_scl")
zp = self.make_zp(zp_val, num_ch if is_per_channel else 1, dispersion)
scl = self.make_scl(scl_val, num_ch if is_per_channel else 1, dispersion)
return zp, scl
def finalize(self, op):
func = relay.Function(self._args_op, op)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
return mod, self._args
def check_fully_annotated(mod, desired_compiler):
matched_ops = []
other_ops = []
def _visit(node):
if isinstance(node, tvm.relay.Call):
op = node.op
if isinstance(op, relay.GlobalVar):
func = mod[op]
if "Compiler" in func.attrs and func.attrs["Compiler"] == desired_compiler:
matched_ops.append(op)
return
else:
other_ops.append(op)
tvm.relay.analysis.post_order_visit(mod["main"].body, _visit)
assert len(other_ops) == 0 and len(matched_ops) != 0, "Model is not fully DNNL compiled"
def check_result(
mod,
ref_mod,
map_inputs,
tol=1e-5,
target="llvm",
device=tvm.cpu(),
params=None,
ref_result=None,
atol=None,
desired_compiler="dnnl",
):
if atol is None:
atol = tol
if desired_compiler is not None:
check_fully_annotated(mod, desired_compiler)
if ref_result is None:
# Run the reference result
relay.backend.te_compiler.get().clear()
with tvm.transform.PassContext(opt_level=3):
ref_lib = relay.build(ref_mod, target=target, params=params)
ref_rt_mod = tvm.contrib.graph_executor.GraphModule(ref_lib["default"](device))
for name, data in map_inputs.items():
ref_rt_mod.set_input(name, data)
ref_rt_mod.run()
out = ref_rt_mod.get_output(0)
ref_result = out.numpy()
def check_vm_result():
relay.backend.te_compiler.get().clear()
with tvm.transform.PassContext(opt_level=3):
exe = relay.vm.compile(mod, target=target, params=params)
code, lib = exe.save()
exe = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = tvm.runtime.vm.VirtualMachine(exe, device)
output = vm.run(**map_inputs)
tvm.testing.assert_allclose(output.numpy(), ref_result, rtol=tol, atol=atol)
def check_graph_executor_result():
relay.backend.te_compiler.get().clear()
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
rt_mod.run(**map_inputs)
output = rt_mod.get_output(0)
tvm.testing.assert_allclose(output.numpy(), ref_result, rtol=tol, atol=atol)
check_vm_result()
check_graph_executor_result()
ConvProfile = collections.namedtuple(
"ConvProfile",
[
"SHAPE",
"KER",
"STR",
"PAD",
"DEL",
"OC",
"GR",
"D_LAYOUT",
"K_LAYOUT",
],
)
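# ConvProfile fields: SHAPE = input shape (NCHW order), KER = kernel HxW,
# STR = strides, PAD = padding, DEL = dilation, OC = output channels,
# GR = groups, D_LAYOUT / K_LAYOUT = data / kernel layouts.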
base_conv = ConvProfile(
SHAPE=[1, 8, 5, 5],
KER=[3, 3],
STR=[1, 1],
PAD=[1, 1],
DEL=[1, 1],
OC=16,
GR=1,
D_LAYOUT="NCHW",
K_LAYOUT="OIHW",
)
base_conv_nhwc = base_conv._replace(D_LAYOUT="NHWC", K_LAYOUT="HWIO")
base_conv_dilated = base_conv._replace(PAD=[2, 2], DEL=[2, 2])
base_conv_no_pad = base_conv._replace(PAD=[0, 0])
base_conv_no_pad_nhwc = base_conv_no_pad._replace(D_LAYOUT="NHWC", K_LAYOUT="HWIO")
base_conv_group_no_pad = base_conv_no_pad._replace(GR=2)
base_conv_dw_no_pad = base_conv_no_pad._replace(SHAPE=[1, 16, 5, 5], GR=16)
DenseProfile = collections.namedtuple("DenseProfile", ["N", "IC", "OC"])
base_dense_profile = DenseProfile(N=2, IC=10, OC=16)
ArgConstConfig = collections.namedtuple("ArgConstConfig", ["Data", "Weights", "Bias", "Sum"])
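# For each argument: None = absent, True = passed as a Relay constant,
# False = passed as a runtime input.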
acp_regular = ArgConstConfig(Data=False, Weights=True, Bias=True, Sum=None)
acp_no_bias = ArgConstConfig(Data=False, Weights=True, Bias=None, Sum=None)
acp_with_sum = ArgConstConfig(Data=False, Weights=True, Bias=True, Sum=False)
acp_no_bias_with_sum = ArgConstConfig(Data=False, Weights=True, Bias=None, Sum=False)
QuantizationConfig = collections.namedtuple(
"QuantizationConfig",
[
"d_zp",
"d_scl",
"d_pc",
"k_zp",
"k_scl",
"k_pc",
"rq_zp",
"rq_scl",
"rq_pc",
"sum_zp",
"sum_scl",
"sum_pc",
"o_zp",
"o_scl",
"o_pc",
],
)
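# Name scheme: prefixes d/k/rq/sum/o = data, kernel, requantize, sum input,
# output; suffixes zp/scl/pc = zero point, scale, per-channel flag.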
qp_regular = QuantizationConfig(
d_zp=0,
d_scl=0.2,
d_pc=False,
k_zp=0,
k_scl=0.1,
k_pc=False,
rq_zp=30,
rq_scl=0.2,
rq_pc=False,
sum_zp=15,
sum_scl=0.3,
sum_pc=False,
o_zp=5,
o_scl=0.2,
o_pc=False,
)
qp_asymmetric_data = qp_regular._replace(
d_zp=3, rq_zp=10, rq_scl=0.1, sum_zp=15, sum_scl=0.3, o_zp=4
)
qnn_conv_profiles = tvm.testing.parameter(
by_dict={
# Pattern qnn.conv2d + qnn.requantize
"Base": (base_conv, acp_regular, qp_regular),
"NHWC": (base_conv_nhwc, acp_regular, qp_regular),
# Asymmetric input. NOTE: No pad! Input ZP is not compatible with padding
"Group": (base_conv_group_no_pad, acp_regular, qp_asymmetric_data),
"DW": (base_conv_dw_no_pad, acp_regular, qp_asymmetric_data),
"NoBias": (base_conv, acp_no_bias, qp_regular),
"AsymmetricInput": (base_conv_no_pad, acp_regular, qp_asymmetric_data),
"AsymmetricInput_NHWC": (base_conv_no_pad_nhwc, acp_regular, qp_asymmetric_data),
# Pattern Conv2d + Requantize + Sum
"WithSum": (base_conv_no_pad, acp_with_sum, qp_asymmetric_data),
"WithSum_NHWC": (base_conv_no_pad_nhwc, acp_with_sum, qp_asymmetric_data),
"WithSum_NoBias": (base_conv_no_pad, acp_no_bias_with_sum, qp_asymmetric_data),
}
)
@has_dnnl_codegen
def test_qnn_conv2d(qnn_conv_profiles):
def generate_model(p, c, q):
np.random.seed(0)
N, IC, IH, IW = p.SHAPE
d_shape = p.SHAPE
w_shape = [p.OC, IC, *p.KER]
b_shape = [p.OC]
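        # Output spatial dims follow the usual convolution arithmetic:
        # out = (in + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1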
s_shape = [
p.SHAPE[0],
p.OC,
(IH + 2 * p.PAD[0] - (p.KER[0] - 1) * p.DEL[0] - 1) // p.STR[0] + 1,
(IW + 2 * p.PAD[1] - (p.KER[1] - 1) * p.DEL[1] - 1) // p.STR[1] + 1,
]
if p.GR != 1:
w_shape[1] //= p.GR
d_shape = permute_shape(d_shape, l_from="NCHW", l_to=p.D_LAYOUT)
s_shape = permute_shape(s_shape, l_from="NCHW", l_to=p.D_LAYOUT)
w_shape = permute_shape(w_shape, l_from="OIHW", l_to=p.K_LAYOUT)
c_dim = p.D_LAYOUT.find("C")
b_shape = expand_dim(b_shape, rank=len(p.D_LAYOUT) - c_dim)
bld = QnnBuilder(qnn_profile=q)
        # Start building the test graph
data = bld.arg(shape=d_shape, dtype="uint8", is_const=c.Data, filler=filler_uni(0, 20))
d_zp, d_scl = bld.make_zp_and_scl("d", IC)
# Convolution
wgh = bld.arg(shape=w_shape, dtype="int8", is_const=c.Weights, filler=filler_uni(-20, 20))
w_zp, w_scl = bld.make_zp_and_scl("k")
op = tvm.relay.qnn.op.conv2d(
data,
wgh,
d_zp,
w_zp,
d_scl,
w_scl,
kernel_size=p.KER,
padding=p.PAD,
strides=p.STR,
dilation=p.DEL,
groups=p.GR,
channels=p.OC,
out_dtype="int32",
data_layout=p.D_LAYOUT,
kernel_layout=p.K_LAYOUT,
)
# Optional bias
if c.Bias is not None:
bias = bld.arg(
shape=b_shape, dtype="int32", is_const=c.Bias, filler=filler_uni(-50, 50)
)
op = tvm.relay.add(op, bias)
# Re-quantization
rq_in_zp = bld.make_zp(0)
        rq_in_scl = bld.make_scl(q.d_scl * q.k_scl)  # in real cases this should be a per-channel vector
rq_out_zp, rq_out_scl = bld.make_zp_and_scl("rq")
op = tvm.relay.qnn.op.requantize(
op, rq_in_scl, rq_in_zp, rq_out_scl, rq_out_zp, out_dtype="int32"
)
        op = tvm.relay.clip(
            op, a_min=0.0, a_max=255.0
        )  # PyTorch frontend specific; likely redundant here
op = tvm.relay.cast(op, dtype="uint8")
# Optional sum (ResNet like)
if c.Sum is not None:
sum_in = bld.arg(dtype="uint8", shape=s_shape, filler=filler_uni(0, 10), is_const=c.Sum)
lhs_zp, lhs_scl = bld.make_zp_and_scl("rq")
rhs_zp, rhs_scl = bld.make_zp_and_scl("sum")
out_zp, out_scl = bld.make_zp_and_scl("o")
op = tvm.relay.qnn.op.add(op, sum_in, lhs_scl, lhs_zp, rhs_scl, rhs_zp, out_scl, out_zp)
op = tvm.relay.clip(op, a_min=0.0, a_max=255.0)
return bld.finalize(op)
conv_p, arg_p, quant_p = qnn_conv_profiles
ref_mod, args = generate_model(conv_p, arg_p, quant_p)
mod = partition_for_dnnl(ref_mod)
# atol=1 means int values should match with +-1 quantum value tolerance
check_result(mod, ref_mod, args, tol=1e-10, atol=1, desired_compiler="dnnl")
conv_profiles = tvm.testing.parameter(
by_dict={
"Base": (base_conv, acp_regular),
"NHWC": (base_conv_nhwc, acp_regular),
"Group": (base_conv_group_no_pad, acp_regular),
"DW": (base_conv_dw_no_pad, acp_regular),
"Dilated": (base_conv_dilated, acp_regular),
}
)
@has_dnnl_codegen
def test_conv2d_plus(conv_profiles):
def generate_model(p, c):
np.random.seed(0)
N, IC, IH, IW = p.SHAPE
d_shape = p.SHAPE
w_shape = [p.OC, IC, *p.KER]
b_shape = [p.OC]
s_shape = [
p.SHAPE[0],
p.OC,
(IH + 2 * p.PAD[0] - (p.KER[0] - 1) * p.DEL[0] - 1) // p.STR[0] + 1,
(IW + 2 * p.PAD[1] - (p.KER[1] - 1) * p.DEL[1] - 1) // p.STR[1] + 1,
]
if p.GR != 1:
w_shape[1] //= p.GR
d_shape = permute_shape(d_shape, l_from="NCHW", l_to=p.D_LAYOUT)
s_shape = permute_shape(s_shape, l_from="NCHW", l_to=p.D_LAYOUT)
w_shape = permute_shape(w_shape, l_from="OIHW", l_to=p.K_LAYOUT)
c_dim = p.D_LAYOUT.find("C")
bld = QnnBuilder()
op = bld.arg(shape=d_shape, dtype="float32", is_const=c.Data)
wgh = bld.arg(shape=w_shape, dtype="float32", is_const=c.Weights)
op = tvm.relay.nn.conv2d(
op,
wgh,
kernel_size=p.KER,
padding=p.PAD,
strides=p.STR,
dilation=p.DEL,
groups=p.GR,
channels=p.OC,
out_dtype="float32",
data_layout=p.D_LAYOUT,
kernel_layout=p.K_LAYOUT,
)
if c.Bias is not None:
bias = bld.arg(shape=b_shape, dtype="float32", is_const=c.Bias)
op = tvm.relay.nn.bias_add(op, bias, axis=c_dim)
if c.Sum is not None:
sum_in = bld.arg(shape=s_shape, dtype="float32", is_const=c.Sum)
op = tvm.relay.op.add(op, sum_in)
return bld.finalize(op)
conv_p, arg_p = conv_profiles
ref_mod, args = generate_model(conv_p, arg_p)
mod = partition_for_dnnl(ref_mod, alter_layout=False)
check_result(mod, ref_mod, args, tol=1e-5, desired_compiler="dnnl")
qnn_dense_profiles = tvm.testing.parameter(
by_dict={
# Pattern Dense + Requantize
"Base": (base_dense_profile, acp_regular, qp_regular),
"AsymmetricInput": (base_dense_profile, acp_regular, qp_asymmetric_data),
# Pattern Dense + Requantize + Sum
"AsymmetricInput_Sum": (base_dense_profile, acp_with_sum, qp_asymmetric_data),
}
)
@has_dnnl_codegen
def test_qnn_dense(qnn_dense_profiles):
def generate_model(p, c, q):
np.random.seed(0)
d_shape = [p.N, p.IC]
w_shape = [p.OC, p.IC]
b_shape = [p.OC]
s_shape = [p.N, p.OC]
bld = QnnBuilder(qnn_profile=q)
        # Start building the test graph
data = bld.arg(shape=d_shape, dtype="uint8", is_const=c.Data, filler=filler_uni(0, 20))
d_zp, d_scl = bld.make_zp_and_scl("d", p.IC)
        # Dense
wgh = bld.arg(shape=w_shape, dtype="int8", is_const=c.Weights, filler=filler_uni(-20, 20))
w_zp, w_scl = bld.make_zp_and_scl("k")
op = tvm.relay.qnn.op.dense(
data, wgh, d_zp, w_zp, d_scl, w_scl, units=p.OC, out_dtype="int32"
)
# Optional bias
if c.Bias is not None:
bias = bld.arg(
shape=b_shape, dtype="int32", is_const=c.Bias, filler=filler_uni(-50, 50)
)
op = tvm.relay.add(op, bias)
# Re-quantization
rq_in_zp = bld.make_zp(0)
        rq_in_scl = bld.make_scl(q.d_scl * q.k_scl)  # in real cases this should be a per-channel vector
rq_out_zp, rq_out_scl = bld.make_zp_and_scl("rq")
op = tvm.relay.qnn.op.requantize(
op, rq_in_scl, rq_in_zp, rq_out_scl, rq_out_zp, out_dtype="int32"
)
        op = tvm.relay.clip(
            op, a_min=0.0, a_max=255.0
        )  # PyTorch frontend specific; likely redundant here
op = tvm.relay.cast(op, dtype="uint8")
# Optional sum (ResNet like)
if c.Sum is not None:
sum_in = bld.arg(dtype="uint8", shape=s_shape, filler=filler_uni(0, 10), is_const=c.Sum)
lhs_zp, lhs_scl = bld.make_zp_and_scl("rq")
rhs_zp, rhs_scl = bld.make_zp_and_scl("sum")
out_zp, out_scl = bld.make_zp_and_scl("o")
op = tvm.relay.qnn.op.add(op, sum_in, lhs_scl, lhs_zp, rhs_scl, rhs_zp, out_scl, out_zp)
op = tvm.relay.clip(op, a_min=0.0, a_max=255.0)
return bld.finalize(op)
conv_p, arg_p, quant_p = qnn_dense_profiles
ref_mod, args = generate_model(conv_p, arg_p, quant_p)
mod = partition_for_dnnl(ref_mod)
# atol=1 means int values should match with +-1 quantum value tolerance
check_result(mod, ref_mod, args, tol=1e-10, atol=1, desired_compiler="dnnl")
dense_profiles = tvm.testing.parameter(
by_dict={
"Base": (base_dense_profile, acp_regular),
"WithSum": (base_dense_profile, acp_with_sum),
}
)
@has_dnnl_codegen
def test_dense_plus(dense_profiles):
def generate_model(p, c):
np.random.seed(0)
d_shape = [p.N, p.IC]
w_shape = [p.OC, p.IC]
b_shape = [p.OC]
s_shape = [p.N, p.OC]
c_dim = 1
bld = QnnBuilder()
op = bld.arg(shape=d_shape, dtype="float32", is_const=c.Data)
wgh = bld.arg(shape=w_shape, dtype="float32", is_const=c.Weights)
op = tvm.relay.nn.dense(op, wgh, out_dtype="float32")
if c.Bias is not None:
bias = bld.arg(shape=b_shape, dtype="float32", is_const=c.Bias)
op = tvm.relay.nn.bias_add(op, bias, axis=c_dim)
if c.Sum is not None:
sum_in = bld.arg(shape=s_shape, dtype="float32", is_const=c.Sum)
op = tvm.relay.op.add(op, sum_in)
return bld.finalize(op)
dense_p, arg_p = dense_profiles
ref_mod, args = generate_model(dense_p, arg_p)
mod = partition_for_dnnl(ref_mod)
check_result(mod, ref_mod, args, tol=1e-5, desired_compiler="dnnl")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_edgetpu_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
import numpy as np
from tvm import rpc
from tvm.contrib import utils, tflite_runtime
# NOTE: This script was tested on tensorflow/tflite (v2.4.1)
def skipped_test_tflite_runtime():
    # Import here so the module loads even when tflite_runtime is not installed.
    import tflite_runtime.interpreter as tflite
def get_tflite_model_path(target_edgetpu):
# Return a path to the model
edgetpu_path = os.getenv("EDGETPU_PATH", "/home/mendel/edgetpu")
# Obtain mobilenet model from the edgetpu repo path
if target_edgetpu:
model_path = os.path.join(
edgetpu_path, "test_data/mobilenet_v1_1.0_224_quant_edgetpu.tflite"
)
else:
model_path = os.path.join(edgetpu_path, "test_data/mobilenet_v1_1.0_224_quant.tflite")
return model_path
def init_interpreter(model_path, target_edgetpu):
# Initialize interpreter
if target_edgetpu:
edgetpu_path = os.getenv("EDGETPU_PATH", "/home/mendel/edgetpu")
libedgetpu = os.path.join(edgetpu_path, "libedgetpu/direct/aarch64/libedgetpu.so.1")
interpreter = tflite.Interpreter(
model_path=model_path, experimental_delegates=[tflite.load_delegate(libedgetpu)]
)
else:
interpreter = tflite.Interpreter(model_path=model_path)
return interpreter
def check_remote(server, target_edgetpu=False):
tflite_model_path = get_tflite_model_path(target_edgetpu)
# inference via tflite interpreter python apis
interpreter = init_interpreter(tflite_model_path, target_edgetpu)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]["shape"]
tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.uint8)
interpreter.set_tensor(input_details[0]["index"], tflite_input)
interpreter.invoke()
tflite_output = interpreter.get_tensor(output_details[0]["index"])
# inference via remote tvm tflite runtime
remote = rpc.connect(server.host, server.port)
dev = remote.cpu(0)
if target_edgetpu:
runtime_target = "edge_tpu"
else:
runtime_target = "cpu"
with open(tflite_model_path, "rb") as model_fin:
runtime = tflite_runtime.create(model_fin.read(), dev, runtime_target)
runtime.set_input(0, tvm.nd.array(tflite_input, dev))
runtime.invoke()
out = runtime.get_output(0)
np.testing.assert_equal(out.numpy(), tflite_output)
# Target CPU on coral board
check_remote(rpc.Server("127.0.0.1"))
# Target EdgeTPU on coral board
check_remote(rpc.Server("127.0.0.1"), target_edgetpu=True)
if __name__ == "__main__":
# skipped_test_tflite_runtime()
pass
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for Arm(R) Ethos(TM)-N"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/_infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expose test functions to the Python front end"""
import tvm._ffi
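# Expose the C++ test helpers registered under the "relay.ethos-n.test.infra"
# prefix as attributes of this module.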
tvm._ffi._init_api("relay.ethos-n.test.infra", __name__)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/infrastructure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N test functions"""
from __future__ import absolute_import, print_function
from hashlib import md5
from itertools import zip_longest, combinations
import os
from typing import Tuple
import math
import numpy as np
from PIL import Image
import tvm
from tvm import relay
from tvm.contrib import utils, graph_executor, download
from tvm.relay.op.contrib import partition_for_ethosn
from tvm.driver.tvmc.target import parse_target
from . import _infrastructure
def get_real_image(im_height, im_width):
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download.download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype("uint8")
data = np.reshape(x, (1, im_height, im_width, 3))
return data
def assert_lib_hash(lib, golden):
"""Check that the Ethos-N runtime modules in a library hash to the same values
as given by the golden hash(es).
    If there is only one Ethos-N module, the golden hash may be provided as a str.
    If there are multiple, a set of golden hashes should be provided, one
    corresponding to each Ethos-N module that is expected.
This function is used to ensure that no change is made which alters the output
    of a compilation. If such a change is made deliberately (e.g. to fix a bug) then
the golden hash should be updated after verifying on hardware that the behaviour
is still correct.
This method is used because of the lack of hardware availability in upstream CI.
"""
# Convert str hash into a set of hashes
if isinstance(golden, str):
golden = {golden}
temp = utils.tempdir()
path = temp.relpath("lib.cmm")
hash_set = set()
for mod in lib.imported_modules:
if mod.type_key == "ethos-n":
mod.save(path)
with open(path, "rb") as compiled_model:
lib_hash = md5(compiled_model.read()).hexdigest()
hash_set.add(lib_hash)
assert hash_set == golden, "Expected hash: {} Got hash: {}".format(golden, hash_set)
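# A minimal usage sketch (the hash below is illustrative, not a real golden
# value): after building a module containing a single Ethos-N partition,
#   lib = build(mod, params, npu=True)
#   assert_lib_hash(lib, "0123456789abcdef0123456789abcdef")
# For multiple partitions, pass a set of md5 hex digests instead of a str.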
def make_module(func, params):
func = relay.Function(relay.analysis.free_vars(func), func)
if params:
        func = relay.build_module.bind_params_by_name(func, params)
mod = tvm.IRModule.from_expr(func)
return relay.transform.InferType()(mod)
def make_ethosn_composite(ethosn_expr, name):
variables = relay.analysis.free_vars(ethosn_expr)
inner_vars = [relay.Var(v.name_hint, v.type_annotation) for v in variables]
func = relay.Function(inner_vars, ethosn_expr)
func = func.with_attr("Composite", name)
call = relay.Call(func, variables)
return call
def make_ethosn_partition(ethosn_expr):
"""Make an Ethos(TM)-N partition."""
# Create an Ethos-N global function
mod = tvm.IRModule({})
variables = relay.analysis.free_vars(ethosn_expr)
    # NB: it is illegal to reuse variables inside and outside a scope in Relay;
    # if you want to duplicate types and names, you must re-allocate them.
fresh_vars = [relay.Var(v.name_hint, v.type_annotation) for v in variables]
binds = {}
for var, fresh_var in zip(variables, fresh_vars):
binds[var] = fresh_var
ethosn_expr_fresh = relay.bind(ethosn_expr, binds)
func = relay.Function(fresh_vars, ethosn_expr_fresh)
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", "ethos-n")
func = func.with_attr("global_symbol", "ethos-n_0")
global_var = relay.GlobalVar("ethos-n_0")
mod[global_var] = func
mod = relay.transform.InferType()(mod)
# These are the vars to call the Ethos-N partition with
more_vars = relay.analysis.free_vars(ethosn_expr)
# Call the Ethos-N partition in main
call_fn1 = global_var(*more_vars)
mod["main"] = relay.Function(more_vars, call_fn1)
return relay.transform.InferType()(mod)
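# The module returned above is laid out roughly as follows (sketch):
#   def @ethos-n_0(%x) { <ethosn_expr> }  # Compiler="ethos-n", Primitive=1
#   def @main(%x) { @ethos-n_0(%x) }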
def get_host_op_count(mod):
"""Return the number of host operators."""
class Counter(tvm.relay.ExprVisitor):
def __init__(self):
super().__init__()
self.count = 0
def visit_call(self, call):
if isinstance(call.op, tvm.ir.Op):
self.count += 1
super().visit_call(call)
c = Counter()
c.visit(mod["main"])
return c.count
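# For example (sketch), a module whose main body is conv2d -> bias_add ->
# requantize with nothing partitioned for the NPU would report 3 host operators.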
def build(
mod, params, npu=True, expected_host_ops=0, npu_partitions=1, additional_config_args=None
):
"""Build a network with or without Ethos-N offloading.
Parameters
----------
mod : IRModule
The Relay module to build.
params : dict of str to NDArray
The weights to build with.
npu : bool, optional
Whether to build with Ethos-N offloading.
expected_host_ops : int, optional
The number of ops expected to remain on the host.
npu_partitions : int, optional
The number of Ethos-N partitions expected.
additional_config_args : dict, optional
Additional compiler config options for the NPU.
"""
relay.backend.te_compiler.get().clear()
if not additional_config_args:
additional_config_args = {}
npu_config = {**get_ethosn_device_options(), **additional_config_args}
with tvm.transform.PassContext(opt_level=3, config={"relay.ext.ethos-n.options": npu_config}):
with tvm.target.Target("llvm"):
if npu:
mod = partition_for_ethosn(mod, params)
host_op_count = get_host_op_count(mod)
assert (
host_op_count == expected_host_ops
), "Got {} host operators, expected {}".format(host_op_count, expected_host_ops)
attrs = [
mod[var.name_hint].attrs
for var in mod.get_global_vars()
if mod[var.name_hint].attrs
]
partition_count = sum(
[
key == "Compiler" and value == "ethos-n"
for attr in attrs
for key, value in attr.items()
]
)
assert (
npu_partitions == partition_count
), "Got {} ethos-n partitions, expected {}".format(partition_count, npu_partitions)
return relay.build(mod, params=params)
def run(lib, inputs, outputs, npu=True):
"""Run a module with specified inputs.
Parameters
----------
lib : runtime.Module
The runtime module.
inputs : dict of str to NDArray
The input dictionary.
outputs : int
The expected number of outputs.
npu : bool
Whether or not any part of the lib is offloaded to Ethos-N.
If it's false (i.e. it's all running on the CPU), we set
the mocked result equal to the output so that a subsequent
mocked run on the NPU returns the same value.
Returns
-------
out : list of NDArray
The results.
"""
# Export and load lib to confirm this works
lib_name = "mod.so"
temp = utils.tempdir()
lib_path = temp.relpath(lib_name)
lib.export_library(lib_path)
lib = tvm.runtime.load_module(lib_path)
module = graph_executor.GraphModule(lib["default"](tvm.cpu()))
module.set_input(**inputs)
module.run()
out = [module.get_output(i) for i in range(outputs)]
if not npu:
inference_result(out)
return out
def build_and_run(
mod,
inputs,
outputs,
params,
npu=True,
expected_host_ops=0,
npu_partitions=1,
additional_config_args=None,
):
"""
    Convenience wrapper for building and running a module on the NPU.
"""
lib = build(mod, params, npu, expected_host_ops, npu_partitions, additional_config_args)
return run(lib, inputs, outputs, npu)
def verify(answers, dtype, atol, rtol=1e-07, verify_saturation=True):
"""Compare the array of answers. Each entry is a list of outputs"""
if len(answers) < 2:
print("No results to compare: expected at least two, found ", len(answers))
for answer in zip_longest(*answers):
for outs in combinations(answer, 2):
if verify_saturation:
assert (
np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).max)
< 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
assert (
np.count_nonzero(outs[0].numpy() == np.iinfo(dtype).min)
< 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
def inference_result(outputs):
"""Set the expected results of an Ethos inference, if the testing
infrastructure is available. This assumes that the entire graph
was offloaded to the neural processor."""
if tvm.get_global_func("relay.ethos-n.test.infra.inference_result", True):
return _infrastructure.inference_result(*outputs)
return False
def test_error(mod, params, err_msg):
"""Test an operator error message."""
caught = None
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.ethos-n.options": get_ethosn_device_options()}
):
with tvm.target.Target("llvm"):
try:
mod = relay.transform.InferType()(mod)
relay.build(mod, params=params)
except tvm.error.TVMError as error:
caught = error.args[0]
finally:
relay.backend.te_compiler.get().clear()
assert caught is not None
assert err_msg in caught, caught
def get_conv2d(var, shape, dtype):
"""Standard convolution to test activation functions"""
weight_shape = (1, 1, shape[3], 1)
weights_array = tvm.nd.array(np.ones(weight_shape, dtype))
weights = relay.const(weights_array, dtype)
conv = relay.qnn.op.conv2d(
var,
weights,
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(1.0, "float32"),
kernel_scale=relay.const(1.0, "float32"),
kernel_size=(1, 1),
channels=1,
data_layout="NHWC",
kernel_layout="HWIO",
)
b = tvm.nd.array(np.zeros((shape[0],), "int32"))
biasc = relay.const(b, "int32")
bias = relay.nn.bias_add(conv, biasc, axis=0)
req = relay.qnn.op.requantize(
bias,
relay.const(1.0, "float32"), # input zero scale
relay.const(0, "int32"), # input zero point
relay.const(1.1, "float32"), # output zero scale
relay.const(0, "int32"), # output zero point
out_dtype=dtype,
)
params = {"w": weights_array, "b": b}
return req, params
def get_conv2d_qnn_params(
dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels
):
"""Return Conv2D QNN params."""
kernel_sc = (
kernel_sc.numpy() if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray) else [kernel_sc]
)
dtype_min = np.iinfo(dtype).min
dtype_max = np.iinfo(dtype).max
input_max = input_sc * (dtype_max - input_zp)
input_min = input_sc * (dtype_min - input_zp)
kernel_max = max(kernel_sc) * (dtype_max - kernel_zp)
kernel_min = min(kernel_sc) * (dtype_min - kernel_zp)
output_limits = [
kernel_max * kernel_h * kernel_w * channels * input_max,
kernel_min * kernel_h * kernel_w * channels * input_max,
kernel_min * kernel_h * kernel_w * channels * input_min,
kernel_max * kernel_h * kernel_w * channels * input_min,
]
output_max = max(output_limits)
output_min = min(output_limits)
output_sc = (output_max - output_min) / (dtype_max - dtype_min)
output_zp = int(dtype_min - (output_min / output_sc))
return output_zp, output_sc
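# Worked example (assumed values): for dtype="int8" with input_zp=0,
# input_sc=0.5, kernel_zp=0, kernel_sc=0.25, a 1x1 kernel and 4 channels,
# input_max = 63.5, input_min = -64.0, kernel_max = 31.75, kernel_min = -32.0,
# so the output limits span [-8128.0, 8192.0], giving
# output_sc = (8192.0 - -8128.0) / 255 = 64.0 and
# output_zp = int(-128 - (-8128.0 / 64.0)) = -1.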
def get_same_padding(
data: Tuple[int, int],
kernel: Tuple[int, int],
dilation: Tuple[int, int],
stride: Tuple[int, int],
) -> Tuple[int, int, int, int]:
"""
Get the padding values required for 'SAME' padding.
Parameters
----------
data : Tuple[int, int]
The height and width of the data respectively.
kernel : Tuple[int, int]
The height and width of the kernel respectively.
dilation : Tuple[int, int]
The dilation of the kernel.
stride : Tuple[int, int]
The stride of the kernel.
Returns
-------
Tuple[int, int, int, int]
The padding values for top, left, bottom and right respectively.
"""
dilated_kernel_h = dilation[0] * (kernel[0] - 1) + 1
dilated_kernel_w = dilation[1] * (kernel[1] - 1) + 1
out = int(math.ceil(float(data[0]) / float(stride[0])))
pad = max(0, (out - 1) * stride[0] + dilated_kernel_h - data[0])
pad_top = pad // 2
pad_bottom = pad - pad_top
out = int(math.ceil(float(data[1]) / float(stride[1])))
pad = max(0, (out - 1) * stride[1] + dilated_kernel_w - data[1])
pad_left = pad // 2
pad_right = pad - pad_left
return (pad_top, pad_left, pad_bottom, pad_right)
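# Worked example (assumed values): for a 224x224 input, a 3x3 kernel,
# dilation (1, 1) and stride (2, 2), out = ceil(224 / 2) = 112 and
# pad = max(0, 111 * 2 + 3 - 224) = 1 in each spatial dimension, so
#   get_same_padding((224, 224), (3, 3), (1, 1), (2, 2)) == (0, 0, 1, 1)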
def get_ethosn_device_options():
"""Determine the NPU configuration used for testing."""
default_target_string = "ethos-n -variant=n78 -tops=1 -ple_ratio=2"
target_string = os.getenv("ETHOSN_TEST_TARGET_CONFIG", default_target_string)
target = parse_target(target_string)
return target[0]["opts"]
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_addition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration addition tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
lhs_shape,
rhs_shape,
lhs_zp,
lhs_sc,
rhs_zp,
rhs_sc,
out_zp,
out_sc,
dtype,
lhs_is_constant=False,
rhs_is_constant=False,
constant_data=None,
):
"""Return a model and any parameters it may have"""
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
if lhs_is_constant:
a_data = np.array(constant_data, dtype=dtype).reshape(lhs_shape)
a = relay.const(a_data, dtype=dtype)
else:
a = relay.var("a", shape=lhs_shape, dtype=dtype)
if rhs_is_constant:
        b_data = np.array(constant_data, dtype=dtype).reshape(rhs_shape)
        b = relay.const(b_data, dtype=dtype)
else:
b = relay.var("b", shape=rhs_shape, dtype=dtype)
model = relay.qnn.op.add(
lhs=a,
rhs=b,
lhs_scale=relay.const(lhs_sc, "float32"),
lhs_zero_point=relay.const(lhs_zp, "int32"),
rhs_scale=relay.const(rhs_sc, "float32"),
rhs_zero_point=relay.const(rhs_zp, "int32"),
output_scale=relay.const(out_sc, "float32"),
output_zero_point=relay.const(out_zp, "int32"),
)
return model
def _get_addition_qnn_params(dtype):
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp = np.random.randint(data_min, data_max)
lhs_sc = np.random.random() * 2
rhs_zp = np.random.randint(data_min, data_max)
rhs_sc = np.random.random() * 2
input1_max = lhs_sc * (255 - lhs_zp)
input1_min = -lhs_sc * lhs_zp
input2_max = rhs_sc * (255 - rhs_zp)
input2_min = -rhs_sc * rhs_zp
output_max = input1_max + input2_max
output_min = input1_min + input2_min
output_sc = (output_max - output_min) / 255
output_zp = -int(output_min / output_sc)
return lhs_zp, lhs_sc, rhs_zp, rhs_sc, output_zp, output_sc
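# Worked example (assumed values): with lhs_zp=10, lhs_sc=0.5, rhs_zp=20 and
# rhs_sc=0.25, the input ranges are [-5.0, 122.5] and [-5.0, 58.75], so the
# sum spans [-10.0, 181.25], giving output_sc = 191.25 / 255 = 0.75 and
# output_zp = -int(-10.0 / 0.75) = 13.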
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 22, 9, 9), (1, 27, 21, 16)])
def test_addition(dtype, shape):
"""Compare Addition output with TVM."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype)),
"b": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype)),
}
model = _get_model(shape, shape, lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc, dtype)
for npu in [False, True]:
mod = tei.make_module(model, [])
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
[
((1, 4, 4, 8), False, (1, 1, 1, 8), True),
((4,), True, (1, 16, 12, 4), False),
((1, 1, 1, 8), True, (1, 4, 4, 8), False),
((1, 16, 12, 4), False, (4,), True),
],
)
def test_addition_to_depthwise(dtype, lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant):
"""Compare addition to depthwise with TVM."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
constant_shape = lhs_shape if lhs_is_constant else rhs_shape
constant_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
model = _get_model(
lhs_shape,
rhs_shape,
lhs_zp,
lhs_sc,
rhs_zp,
rhs_sc,
out_zp,
out_sc,
dtype,
lhs_is_constant=lhs_is_constant,
rhs_is_constant=rhs_is_constant,
constant_data=constant_data,
)
input_shape = rhs_shape if lhs_is_constant else lhs_shape
input_name = "b" if lhs_is_constant else "a"
inputs = {
input_name: tvm.nd.array(
np.random.randint(data_min, data_max + 1, size=input_shape, dtype=dtype)
)
}
outputs = []
for npu in [False, True]:
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"lhs_shape,lhs_is_constant,rhs_shape,rhs_is_constant",
[
((1, 2, 8, 4), False, None, True),
((1, 5, 6, 7), False, (1, 1, 1, 1), True),
(None, True, (1, 2, 8, 4), False),
((1, 1, 1, 1), True, (1, 5, 6, 7), False),
],
)
def test_addition_to_reinterpret_quantize(lhs_shape, lhs_is_constant, rhs_shape, rhs_is_constant):
"""Compare addition to depthwise with TVM."""
np.random.seed(0)
dtype = "uint8"
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
# Add can only be offloaded as a reinterpret quantize operation if
# it is an identity operation. We must choose the quantization and
    # constant data carefully to make sure that this is the case.
if lhs_is_constant:
rhs_zp = 128
rhs_sc = 0.0078125
lhs_zp = 0
lhs_sc = 0.003921568859368563
else:
lhs_zp = 128
lhs_sc = 0.0078125
rhs_zp = 0
rhs_sc = 0.003921568859368563
out_zp = 0
out_sc = 0.007814894430339336
constant_data = 255
model = _get_model(
lhs_shape,
rhs_shape,
lhs_zp,
lhs_sc,
rhs_zp,
rhs_sc,
out_zp,
out_sc,
dtype,
lhs_is_constant=lhs_is_constant,
rhs_is_constant=rhs_is_constant,
constant_data=constant_data,
)
input_shape = rhs_shape if lhs_is_constant else lhs_shape
input_name = "b" if lhs_is_constant else "a"
inputs = {
input_name: tvm.nd.array(
np.random.randint(data_min, data_max + 1, size=input_shape, dtype=dtype)
)
}
outputs = []
for npu in [False, True]:
mod = tei.make_module(model, {})
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"dtype,shape,err_msg",
[
(
"uint8",
(2, 4, 4, 4),
"batch size=2, batch size must = 1; batch size=2, batch size must = 1",
),
(
"int16",
(1, 4, 4, 4),
"dtype='int16', dtype must be either uint8, int8 or int32; dtype='int16', "
"dtype must be either uint8, int8 or int32",
),
],
)
def test_addition_failure(dtype, shape, err_msg):
"""Check addition error messages."""
np.random.seed(0)
lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
model = _get_model(shape, shape, lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc, dtype)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_add")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""NPU codegen tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
@requires_ethosn
def test_compile_with_unsupported_variant():
"""Test compilation with unsupported variant."""
dtype = "int8"
input_shape = (1, 2, 2, 2)
x = relay.var("x", shape=input_shape, dtype=dtype)
y = relay.reshape(x, newshape=(1, 1, 1, 8))
mod = tei.make_ethosn_partition(y)
additional_config_args = {
"variant": "foo",
"inline_non_compute_intensive_partitions": False,
}
inputs = {
"x": np.random.randint(
low=np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=input_shape, dtype=dtype
)
}
with pytest.raises(tvm.TVMError, match=r"Unknown NPU type"):
tei.build_and_run(mod, inputs, 1, {}, True, additional_config_args=additional_config_args)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_concatenate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Concatenate tests for Arm(R) Ethos(TM)-N"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_inputs(shapes, dtype):
inputs = {}
for i, shape in enumerate(shapes):
inputs["in" + str(i)] = tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
)
return inputs
def _get_model(shapes, dtype, axis):
tup = []
for i, shape in enumerate(shapes):
a = relay.var("in" + str(i), shape=shape, dtype=dtype)
tup.append(a)
zeroi = relay.const(1, "int32")
zerof = relay.const(0.5, "float32")
con = relay.qnn.op.concatenate(
tup,
input_scales=[zerof] * len(shapes),
input_zero_points=[zeroi] * len(shapes),
output_scale=zerof,
output_zero_point=zeroi,
axis=axis,
)
return con
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shapes,axis",
[
([(1, 4), (1, 6)], 1),
([(1, 16, 4), (1, 16, 4)], 1),
([(1, 25, 4, 16)] * 3, 3),
([(1, 25, 4, 16), (1, 25, 5, 16), (1, 25, 6, 16)], 2),
([(1, 4), (1, 6)], -1),
([(1, 16, 4), (1, 16, 4)], -2),
],
)
def test_concatenate(dtype, shapes, axis):
"""Compare Concatenate output with TVM."""
np.random.seed(0)
outputs = []
inputs = _get_inputs(shapes, dtype)
for npu in [False, True]:
model = _get_model(shapes, dtype, axis)
mod = tei.make_module(model, {})
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize(
"shapes,dtype,axis,err_msg",
[
([(1, 4, 4, 4, 4), (1, 4, 4, 4, 4)], "uint8", 1, "dimensions=5, dimensions must be <= 4;"),
(
[(1, 4, 4, 4), (1, 4, 4, 4)],
"uint8",
3,
"Concatenation along the channels dimension (axis 3) "
"requires input tensors with a multiple of 16 channels;",
),
(
[(1, 4, 4, 4), (1, 4, 4, 4)],
"int16",
2,
"dtype='int16', dtype must be either uint8, int8 or int32; dtype='int16', "
"dtype must be either uint8, int8 or int32;",
),
(
[(2, 4, 4, 4), (2, 4, 4, 4)],
"uint8",
2,
"batch size=2, batch size must = 1; batch size=2, batch size must = 1;",
),
(
[(1, 4, 4, 4)],
"uint8",
0,
"Concatenation cannot be performed along batch axis (axis 0);",
),
],
)
def test_concatenate_failure(shapes, dtype, axis, err_msg):
"""Check Concatenate error messages."""
model = _get_model(shapes, dtype, axis)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_constant_duplication.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test that constants aren't duplicated for Arm(R) Ethos(TM)-N"""
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model():
"""Return a model and any parameters it may have"""
shape = (1, 4, 4, 4)
kernel_h = 3
kernel_w = 3
out_channels = 8
a = relay.var("a", shape=shape, dtype="uint8")
add_const_value = tvm.nd.array(np.random.randint(0, high=10, size=shape, dtype="uint8"))
add_const = relay.const(add_const_value, "uint8")
a = relay.add(a, add_const)
weight_shape = (kernel_h, kernel_w, shape[3], out_channels)
weights_array = tvm.nd.array(
np.random.randint(low=0, high=255, size=weight_shape, dtype="uint8")
)
weights = relay.const(weights_array, "uint8")
conv = relay.qnn.op.conv2d(
a,
weights,
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(0.3, "float32"),
kernel_scale=relay.const(0.4, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout="HWIO",
dilation=(1, 1),
strides=(1, 1),
groups=1,
channels=out_channels,
padding=(0, 0, 0, 0),
out_dtype="int32",
)
b = tvm.nd.array(np.random.randint(0, high=10, size=(out_channels,), dtype="int32"))
biasc = relay.const(b, "int32")
bias = relay.nn.bias_add(conv, biasc, axis=3)
req = relay.qnn.op.requantize(
bias,
        relay.const(0.3 * 0.4, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(0.4, "float32"),  # output scale
        relay.const(0, "int32"),  # output zero point
out_dtype="uint8",
)
params = {"w": weights_array, "b": b}
return req, params
@requires_ethosn
def test_constant_duplication():
"""Test that constants are not duplicated."""
np.random.seed(0)
model, params = _get_model()
mod = tei.make_module(model, params)
res = tei.build(mod, params, npu=True, expected_host_ops=1)
for key, value in res.params.items():
assert key == "p0"
assert value.numpy().size == 64
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration conv2d tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
shape,
kernel_h,
kernel_w,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
pad,
strides,
dilation,
groups,
dtype,
out_channels,
weight_format,
):
"""Return a model and any parameters it may have"""
a = relay.var("a", shape=shape, dtype=dtype)
if pad in ("op", "both"):
p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
a = relay.nn.pad(
a,
pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
pad_value=input_zp,
pad_mode="constant",
)
shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])
p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)
if weight_format == "HWIO":
weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
else:
weight_shape = (kernel_h, kernel_w, out_channels, 1)
weights_array = tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=weight_shape, dtype=dtype
)
)
weights = relay.const(weights_array, dtype)
conv = relay.qnn.op.conv2d(
a,
weights,
input_zero_point=relay.const(input_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_scale=relay.const(kernel_sc, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout=weight_format,
dilation=dilation,
strides=strides,
groups=groups,
channels=out_channels,
padding=p if pad in ("attr", "both") else (0, 0, 0, 0),
out_dtype="int32",
)
bias_data = tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=(out_channels,), dtype="int32"
)
)
biasc = relay.const(bias_data, "int32")
bias = relay.nn.bias_add(conv, biasc, axis=3)
if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]
else:
req_input_sc = input_sc * kernel_sc
req = relay.qnn.op.requantize(
bias,
        relay.const(req_input_sc, "float32"),  # input scale
        relay.const(0, "int32"),  # input zero point
        relay.const(output_sc, "float32"),  # output scale
        relay.const(output_zp, "int32"),  # output zero point
out_dtype=dtype,
)
params = {"w": weights_array, "b": bias_data}
return req, params
@requires_ethosn
@pytest.mark.parametrize(
"dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
)
@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
@pytest.mark.parametrize(
"shape,out_channels,kernel_size",
[
[(1, 17, 20, 26), 4, (3, 1)],
[(1, 9, 20, 30), 7, (1, 5)],
[(1, 21, 21, 22), 8, (2, 2)],
],
)
def test_conv2d(
dtype,
shape,
out_channels,
kernel_size,
pad,
stride,
qnn_per_channel,
):
"""Compare Conv2D output with TVM."""
np.random.seed(0)
dilation = (1, 1)
groups = 1
weight_format = "HWIO"
outputs = []
inputs = {
"a": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min,
np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
input_sc = np.random.random() * 2
if qnn_per_channel:
kernel_sc = tvm.nd.array(
np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
)
else:
kernel_sc = np.random.random() * 2
kernel_zp = (
0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
)
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
)
model, params = _get_model(
shape,
kernel_size[0],
kernel_size[1],
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
pad,
stride,
dilation,
groups,
dtype,
out_channels,
weight_format,
)
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
)
@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
@pytest.mark.parametrize(
"shape,kernel_size",
[
[(1, 17, 20, 28), (3, 3)],
[(1, 9, 20, 30), (5, 5)],
[(1, 21, 21, 22), (2, 2)],
],
)
def test_conv2d_depthwise(
dtype,
shape,
kernel_size,
pad,
stride,
qnn_per_channel,
):
"""Compare Conv2D output with TVM."""
np.random.seed(0)
dilation = (1, 1)
out_channels = shape[3]
groups = out_channels
weight_format = "HWOI"
outputs = []
inputs = {
"a": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min,
np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
input_sc = np.random.random() * 2
if qnn_per_channel:
kernel_sc = tvm.nd.array(
np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
)
else:
kernel_sc = np.random.random() * 2
kernel_zp = (
0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
)
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
)
model, params = _get_model(
shape,
kernel_size[0],
kernel_size[1],
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
pad,
stride,
dilation,
groups,
dtype,
out_channels,
weight_format,
)
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,pad,stride,dilation,err_msg",
[
(
(1, 4, 4, 4),
"both",
(1, 1),
(1, 1),
"both op and attr padding exist, must be either op/attr only or no padding",
),
(
(1, 4, 4, 4),
"none",
(1, 1, 1),
(1, 1),
"stride size=3, stride size must = 2",
),
(
(1, 4, 4, 4),
"none",
(1, 1),
(2, 1),
"dilation=[2, 1], dilation must = [1, 1]",
),
(
(2, 4, 4, 4),
"none",
(1, 1),
(1, 1),
"batch size=2, batch size must = 1",
),
],
)
def test_conv2d_failure(shape, pad, stride, dilation, err_msg):
"""Check Conv2D error messages."""
np.random.seed(0)
kernel_size = (2, 2)
groups = 1
dtype = "uint8"
out_channels = 8
weight_format = "HWIO"
model, _ = _get_model(
shape,
kernel_size[0],
kernel_size[1],
0,
1,
0,
1,
0,
1,
pad,
stride,
dilation,
groups,
dtype,
out_channels,
weight_format,
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
@requires_ethosn
def test_conv2d_out_of_range_scale():
"""Check Conv2D scale out of range error."""
np.random.seed(0)
input_sc = 1024
kernel_sc = 1024
output_sc = 1
model, _ = _get_model(
(1, 4, 4, 4),
1,
1,
0,
input_sc,
0,
kernel_sc,
0,
output_sc,
"none",
(1, 1),
(1, 1),
1,
"uint8",
8,
"HWIO",
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
mod = tei.make_ethosn_partition(model)
expected_err_msg = (
"Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
)
tei.test_error(mod, {}, expected_err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration conv2d tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
shape,
kernel_h,
kernel_w,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
stride,
dilation,
groups,
kernel_layout,
dtype,
out_channels,
bias,
):
"""Return a model and any parameters it may have"""
a = relay.var("a", shape=shape, dtype=dtype)
p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, stride)
weight_shape = (shape[3], out_channels // groups, kernel_h, kernel_w)
weight_data = tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min,
high=(np.iinfo(dtype).max + 1),
size=weight_shape,
dtype=dtype,
)
)
weights = relay.const(weight_data, dtype)
op = relay.qnn.op.conv2d_transpose(
a,
weights,
input_zero_point=relay.const(input_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
kernel_scale=relay.const(kernel_sc, "float32"),
kernel_size=(kernel_h, kernel_w),
padding=p,
strides=stride,
dilation=dilation,
data_layout="NHWC",
kernel_layout=kernel_layout,
out_dtype="int32",
channels=out_channels,
groups=groups,
)
if bias:
bias_data = tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=(out_channels,),
dtype="int32",
)
)
biasc = relay.const(bias_data, "int32")
op = relay.nn.bias_add(op, biasc, axis=3)
if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]
else:
req_input_sc = input_sc * kernel_sc
op = relay.qnn.op.requantize(
op,
input_zero_point=relay.const(input_zp, "int32"),
input_scale=relay.const(req_input_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
axis=3,
rounding="UPWARD",
out_dtype=dtype,
)
params = {"w": weight_data}
if bias:
params["b"] = bias_data
return op, params
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"ifm_shape,strides,kernel_size,out_channels,bias",
[
((1, 2, 2, 1), (2, 2), (1, 1), 1, False),
((1, 2, 2, 5), (2, 2), (3, 5), 4, False),
((1, 7, 7, 4), (2, 2), (7, 9), 8, True),
],
)
def test_conv2d_transpose(ifm_shape, strides, kernel_size, out_channels, dtype, bias):
"""Check transpose convolution output with TVM."""
np.random.seed(0)
kernel_layout = "IOHW"
dilation = (1, 1)
groups = 1
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
kernel_zp = np.random.randint(data_min, data_max)
kernel_sc = np.random.random() * 4
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, kernel_zp, kernel_sc, ifm_shape[1], ifm_shape[2], ifm_shape[3]
)
model, params = _get_model(
shape=ifm_shape,
kernel_h=kernel_size[0],
kernel_w=kernel_size[1],
input_zp=input_zp,
input_sc=input_sc,
kernel_zp=kernel_zp,
kernel_sc=kernel_sc,
output_zp=output_zp,
output_sc=output_sc,
stride=strides,
dilation=dilation,
groups=groups,
kernel_layout=kernel_layout,
dtype=dtype,
out_channels=out_channels,
bias=bias,
)
outputs = []
inputs = {
"a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=ifm_shape, dtype=dtype))
}
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape, stride, dilation, groups, err_msg",
[
(
(1, 4, 4, 4),
(1, 1, 1),
(1, 1),
1,
"stride size=3, stride size must = 2",
),
(
(1, 4, 4, 4),
(2, 2),
(2, 2),
2,
"dilation=[2, 2], dilation must = [1, 1]",
),
(
(2, 4, 4, 4),
(1, 1),
(1, 1),
1,
"batch size=2, batch size must = 1",
),
],
)
def test_conv2d_transpose_failure(
shape,
stride,
dilation,
groups,
err_msg,
dtype,
):
"""
    Test conv2d_transpose error messages.
"""
np.random.seed(0)
out_channels = 8
model, _ = _get_model(
shape=shape,
kernel_h=1,
kernel_w=1,
input_zp=0,
input_sc=1,
kernel_zp=0,
kernel_sc=1,
output_zp=0,
output_sc=1,
stride=stride,
dilation=dilation,
groups=groups,
kernel_layout="IOHW",
dtype=dtype,
out_channels=out_channels,
bias=False,
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d_transpose")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_convert_equivalents.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for the convert equivalents pass."""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import ConvertEquivalents
from tvm.relay import ExprVisitor
from . import infrastructure as tei
from .test_addition import _get_addition_qnn_params
def _assert_structural_equal(a, b):
"""Check structural equality of two Relay expressions."""
reason = (
"Actual and expected relay functions are not equal. "
"ConvertEquivalents is not correctly transforming the input "
"graph."
)
assert tvm.ir.structural_equal(a, b), reason
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape,channels", [((1, 4, 4, 8), 8), ((1, 16, 12, 4), 4)])
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_multiply_to_depthwise(dtype, shape, channels, reverse_inputs):
"""Check that multiply is correctly converted to a depthwise operation."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
input2_zp = np.random.randint(data_min, data_max)
input2_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
)
x = relay.var("x", shape=shape, dtype=dtype)
constant_shape = (1, 1, 1, channels)
y_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.mul(
y if reverse_inputs else x,
x if reverse_inputs else y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_depthwise")
return tei.make_ethosn_partition(composite)
def expected():
constant_shape_hwoi = (1, 1, channels, 1)
y_data_hwoi = y_data.reshape(constant_shape_hwoi)
y_hwoi = relay.const(y_data_hwoi, dtype=dtype)
expr = relay.qnn.op.conv2d(
x,
y_hwoi,
relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
(1, 1),
channels,
(1, 1),
(0, 0),
(1, 1),
channels,
"NHWC",
"HWOI",
"NHWC",
"int32",
)
expr = relay.nn.bias_add(expr, relay.const(np.zeros((channels,), dtype="int32")), axis=3)
expr = relay.qnn.op.requantize(
expr,
relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
out_dtype=dtype,
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_conv2d")
return tei.make_ethosn_partition(composite)
mod = before()
mod = ConvertEquivalents()(mod)
expected_mod = expected()
_assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
"dtype,shape,constant_shape",
[("int8", (1, 4, 4), (4,)), ("int16", (1, 16, 12, 4), (1, 1, 1, 4))],
)
def test_unsupported_multiply_to_depthwise(dtype, shape, constant_shape):
"""Check that unsupported variants of multiply to depthwise are not converted."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
input2_zp = np.random.randint(data_min, data_max)
input2_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[-1]
)
x = relay.var("x", shape=shape, dtype=dtype)
y_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.mul(
x,
y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_depthwise")
return tei.make_ethosn_partition(composite)
mod = before()
error_regex = (
r'Operation "ethos-n.qnn_mul_to_depthwise" was marked '
r"as having a valid conversion, but it could not be converted."
)
with pytest.raises(tvm.TVMError, match=error_regex):
mod = ConvertEquivalents()(mod)
@requires_ethosn
@pytest.mark.parametrize(
"shape,constant_shape",
[((1, 4, 4, 8), (1, 1, 1, 1)), ((1, 16, 12, 4), None)],
)
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_multiply_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
"""Check that multiply is correctly converted to a reinterpret quantize operation."""
np.random.seed(0)
dtype = "uint8"
# Multiply can only be offloaded as a reinterpret quantize operation if
    # it is an identity operation. We must choose the quantization and constant
# data carefully to make sure that this is the case.
input_zp = 0
input_sc = 0.007814894430339336
input2_zp = 0
input2_sc = 0.5
output_zp = 0
output_sc = 0.9963990449905396
constant_data = 255
x = relay.var("x", shape=shape, dtype=dtype)
y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.mul(
y if reverse_inputs else x,
x if reverse_inputs else y,
relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
def expected():
expr = relay.qnn.op.requantize(
x,
relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
out_dtype=dtype,
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
mod = before()
mod = ConvertEquivalents()(mod)
expected_mod = expected()
_assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
"dtype,shape,constant_shape",
[("int16", (1, 16, 12, 4), None)],
)
def test_unsupported_multiply_to_reinterpret_quantize(dtype, shape, constant_shape):
"""
Check that unsupported variants of multiply conversion to reinterpret
quantize are not converted.
"""
np.random.seed(0)
# Multiply can only be offloaded as a reinterpret quantize operation if
    # it is an identity operation. We must choose the quantization and constant
# data carefully to make sure that this is the case.
input_zp = 0
input_sc = 0.007814894430339336
input2_zp = 0
input2_sc = 0.5
output_zp = 0
output_sc = 0.9963990449905396
constant_data = 255
x = relay.var("x", shape=shape, dtype=dtype)
y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.mul(
x,
y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_mul_to_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
mod = before()
error_regex = (
r'Operation "ethos-n.qnn_mul_to_reinterpret_quantize" was marked '
r"as having a valid conversion, but it could not be converted."
)
with pytest.raises(tvm.TVMError, match=error_regex):
mod = ConvertEquivalents()(mod)
@requires_ethosn
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_add_to_depthwise(reverse_inputs):
"""
Check that add is converted correctly.
"""
dtype = "uint8"
lhs_shape = (1, 2, 4, 8)
rhs_shape = (1, 1, 1, 8)
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
x = relay.var("x", shape=lhs_shape, dtype=dtype)
y_data = np.random.randint(data_min, data_max + 1, size=rhs_shape, dtype=dtype)
def before():
y = relay.const(y_data)
expr = relay.qnn.op.add(
lhs=y if reverse_inputs else x,
rhs=x if reverse_inputs else y,
lhs_scale=relay.const(lhs_sc, "float32"),
lhs_zero_point=relay.const(lhs_zp, "int32"),
rhs_scale=relay.const(rhs_sc, "float32"),
rhs_zero_point=relay.const(rhs_zp, "int32"),
output_scale=relay.const(out_sc, "float32"),
output_zero_point=relay.const(out_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_depthwise")
return tei.make_ethosn_partition(composite)
class ConversionChecker(ExprVisitor):
"""
Pass to check the new composite function is in the expected format.
"""
sequence = ["qnn.conv2d", "nn.bias_add", "qnn.requantize"]
# pylint: disable=invalid-name
def visit_function(self, fn):
composite_name = fn.attrs["Composite"]
expected = "ethos-n.qnn_conv2d"
assert (
composite_name == expected
), f"Expected Composite attribute {expected} but got {composite_name}"
super().visit_function(fn)
def visit_call(self, call):
op_name = call.op.name
expected_name = self.sequence.pop()
assert op_name == expected_name, f"Got operator {op_name} but expected {expected_name}"
super().visit_call(call)
mod = before()
mod = ConvertEquivalents()(mod)
    ConversionChecker().visit(mod["ethos-n_0"].body.op)
@requires_ethosn
@pytest.mark.parametrize(
"dtype,lhs_shape,rhs_shape", [("uint8", (1, 4, 4), (1, 1, 4)), ("int16", (1, 4, 4, 4), (4,))]
)
def test_unsupported_add_to_depthwise(dtype, lhs_shape, rhs_shape):
"""Check that unsupported variants of add are not converted."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
lhs_zp, lhs_sc, rhs_zp, rhs_sc, out_zp, out_sc = _get_addition_qnn_params(dtype)
x = relay.var("x", shape=lhs_shape, dtype=dtype)
y_data = np.random.randint(data_min, data_max + 1, size=rhs_shape, dtype=dtype)
def before():
y = relay.const(y_data)
expr = relay.qnn.op.add(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_sc, "float32"),
lhs_zero_point=relay.const(lhs_zp, "int32"),
rhs_scale=relay.const(rhs_sc, "float32"),
rhs_zero_point=relay.const(rhs_zp, "int32"),
output_scale=relay.const(out_sc, "float32"),
output_zero_point=relay.const(out_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_depthwise")
return tei.make_ethosn_partition(composite)
mod = before()
error_regex = (
r'Operation "ethos-n.qnn_add_to_depthwise" was marked '
r"as having a valid conversion, but it could not be converted."
)
with pytest.raises(tvm.TVMError, match=error_regex):
mod = ConvertEquivalents()(mod)
@requires_ethosn
@pytest.mark.parametrize(
"shape,constant_shape",
[
((1, 4, 4, 8), (1, 1, 1, 1)),
((1, 16, 12, 4), None),
],
)
@pytest.mark.parametrize("reverse_inputs", [True, False])
def test_add_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
"""Check that add is correctly converted to a reinterpret quantize operation."""
np.random.seed(0)
dtype = "uint8"
# Add can only be offloaded as a reinterpret quantize operation if
    # it is an identity operation. We must choose the quantization and constant
# data carefully to make sure that this is the case.
input_zp = 128
input_sc = 0.0078125
input2_zp = 0
input2_sc = 0.003921568859368563
output_zp = 0
output_sc = 0.007814894430339336
constant_data = 255
x = relay.var("x", shape=shape, dtype=dtype)
y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.add(
y if reverse_inputs else x,
x if reverse_inputs else y,
relay.const(input2_sc if reverse_inputs else input_sc, "float32"),
relay.const(input2_zp if reverse_inputs else input_zp, "int32"),
relay.const(input_sc if reverse_inputs else input2_sc, "float32"),
relay.const(input_zp if reverse_inputs else input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
def expected():
expr = relay.qnn.op.requantize(
x,
relay.const(input_sc, "float32"),
            relay.const(input_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
out_dtype=dtype,
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
mod = before()
mod = ConvertEquivalents()(mod)
expected_mod = expected()
_assert_structural_equal(mod["ethos-n_0"], expected_mod["ethos-n_0"])
@requires_ethosn
@pytest.mark.parametrize(
"dtype,shape,constant_shape",
[
("int16", (1, 16, 12, 4), None),
],
)
def test_unsupported_add_to_reinterpret_quantize(dtype, shape, constant_shape):
"""Check that unsupported variants of add to reinterpret quantize are not converted."""
np.random.seed(0)
# Add can only be offloaded as a reinterpret quantize operation if
    # it is an identity operation. We must choose the quantization and constant
# data carefully to make sure that this is the case.
input_zp = 128
input_sc = 0.0078125
input2_zp = 0
input2_sc = 0.003921568859368563
output_zp = 0
output_sc = 0.007814894430339336
constant_data = 255
x = relay.var("x", shape=shape, dtype=dtype)
y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
def before():
y = relay.const(y_data, dtype=dtype)
expr = relay.qnn.op.add(
x,
y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
composite = tei.make_ethosn_composite(expr, "ethos-n.qnn_add_to_reinterpret_quantize")
return tei.make_ethosn_partition(composite)
mod = before()
error_regex = (
r'Operation "ethos-n.qnn_add_to_reinterpret_quantize" was marked '
r"as having a valid conversion, but it could not be converted."
)
with pytest.raises(tvm.TVMError, match=error_regex):
mod = ConvertEquivalents()(mod)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration depth-to-space tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, block, dtype, layout):
a = relay.var("a", shape=shape, dtype=dtype)
depth = relay.nn.depth_to_space(a, layout=layout, block_size=block)
return depth
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape",
[
(1, 16, 16, 16),
(1, 64, 32, 16),
],
)
def test_depth_to_space(dtype, shape):
"""Compare Depth To Space output with TVM."""
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
)
}
outputs = []
for npu in [False, True]:
model = _get_model(shape, 2, dtype, "NHWC")
mod = tei.make_module(model, {})
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,block,dtype,layout,err_msg",
[
((2, 16, 16, 16), 2, "uint8", "NHWC", "batch size=2, batch size must = 1"),
(
(1, 16, 16, 16),
2,
"int16",
"NHWC",
"dtype='int16', dtype must be either uint8, int8 or int32;",
),
((1, 16, 16, 16), 4, "uint8", "NHWC", "Only block size of 2 is supported"),
((1, 16, 16, 16), 2, "uint8", "NCHW", "Input layer must be NHWC or NHWCB"),
],
)
def test_depth_to_space_failure(shape, block, dtype, layout, err_msg):
"""Check Depth To Space error messages."""
model = _get_model(shape, block, dtype, layout)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_fullyconnected.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration fully connected tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
shape, weight_shape, input_zp, input_sc, kernel_zp, kernel_sc, output_zp, output_sc, dtype
):
"""Return a model an any parameters it may have"""
a = relay.var("a", shape=shape, dtype=dtype)
weights_array = tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=weight_shape, dtype=dtype
)
)
weights = relay.const(weights_array, dtype)
dense = relay.qnn.op.dense(
a,
weights,
input_zero_point=relay.const(input_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(input_sc, "float32"),
kernel_scale=relay.const(kernel_sc, "float32"),
units=weight_shape[0],
out_dtype="int32",
)
b = tvm.nd.array(np.random.randint(0, high=255, size=(weight_shape[0],), dtype="int32"))
biasc = relay.const(b, "int32")
bias = relay.nn.bias_add(dense, biasc)
req = relay.qnn.op.requantize(
bias,
        relay.const(input_sc * kernel_sc, "float32"),  # input scale
relay.const(input_zp * kernel_zp, "int32"), # input zero point
        relay.const(output_sc, "float32"),  # output scale
relay.const(output_zp, "int32"), # output zero point
out_dtype=dtype,
)
params = {"w": weights_array, "b": b}
return req, params
@requires_ethosn
@pytest.mark.parametrize(
"shape,out_channels",
[
((1, 1024), 64),
((1, 16384), 1),
((1, 1280), 1000),
],
)
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_fullyconnected(shape, out_channels, dtype):
"""Compare Fully Connected output with TVM."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
inputs = {
"a": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype)),
}
outputs = []
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
kernel_zp = np.random.randint(data_min, data_max)
kernel_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype,
input_zp,
input_sc,
kernel_zp,
kernel_sc,
shape[0],
shape[1],
1,
)
model, params = _get_model(
shape,
(out_channels, shape[1]),
input_zp,
input_sc,
kernel_zp,
kernel_sc,
output_zp,
output_sc,
dtype,
)
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,weight_shape,err_msg",
[
(
(1, 1, 1, 64),
(1, 64),
"Weights tensor must have I dimension equal to the number"
" of channels of the input tensor.;",
),
((1024, 64), (1, 64), "batch size=1024, batch size must = 1;"),
],
)
def test_fullyconnected_failure(shape, weight_shape, err_msg):
"""Check Fully Connected error messages."""
np.random.seed(0)
dtype = "uint8"
model, _ = _get_model(
shape,
weight_shape,
0,
1,
0,
1,
0,
1,
dtype,
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
@requires_ethosn
def test_fullyconnected_scale_out_of_range():
"""Check Fully Connected out of range scale error message."""
np.random.seed(0)
input_sc = 1024
kernel_sc = 1024
output_sc = 1
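    # The overall scale is input_sc * kernel_sc / output_sc = 1024 * 1024 / 1 = 2^20,
    # well above the supported upper bound of 65536 (2^16) from the error message below.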
model, _ = _get_model(
(1, 64),
(1, 64),
0,
input_sc,
0,
kernel_sc,
0,
output_sc,
"uint8",
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
mod = tei.make_ethosn_partition(model)
expected_error_msg = (
"Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
)
tei.test_error(mod, {}, expected_error_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_inline_partitions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests for the 'InlineNonComputeIntensivePartitions' pass.
"""
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import InlineNonComputeIntensivePartitions
from . import infrastructure as tei
def _assert_structural_equal(a, b):
"""Check structural equality of two Relay expressions."""
reason = (
"Actual and expected relay functions are not equal. "
"InlineNonComputeIntensiveSubgraphs is not correctly "
"transforming the input graph."
)
assert tvm.ir.structural_equal(a, b, map_free_vars=True), reason
@requires_ethosn
def test_single_reshape():
"""Check that a single reshape is inlined correctly."""
def get_reshape():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
return relay.reshape(x, newshape=(2, 2, 4))
def before():
reshape = get_reshape()
return tei.make_ethosn_partition(reshape)
def expected():
reshape = get_reshape()
mod = tvm.IRModule.from_expr(reshape)
return relay.transform.InferType()(mod)
mod = before()
mod = InlineNonComputeIntensivePartitions()(mod)
expected_mod = expected()
_assert_structural_equal(mod, expected_mod)
@requires_ethosn
def test_multiple_non_compute_intensive_ops():
"""
Check that a partitioned function is correctly inlined
when it contains multiple non-compute intensive operations.
"""
def get_graph():
x = relay.var("x", shape=(2, 2, 4), dtype="int8")
x = relay.reshape(x, newshape=(1, 2, 2, 4))
x = relay.clip(x, 0.0, 1.0)
x = relay.reshape(x, newshape=(2, 2, 4))
return relay.clip(x, 0.0, 1.0)
def before():
func = get_graph()
return tei.make_ethosn_partition(func)
def expected():
func = get_graph()
mod = tvm.IRModule.from_expr(func)
return relay.transform.InferType()(mod)
mod = before()
mod = InlineNonComputeIntensivePartitions()(mod)
expected_mod = expected()
_assert_structural_equal(mod, expected_mod)
@requires_ethosn
def test_compute_intensive_ops():
"""
Check that a partitioned function that is considered
compute intensive is not inlined.
"""
def before():
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
x = relay.nn.max_pool2d(x, layout="NHWC")
x = relay.reshape(x, newshape=(2, 2, 4))
return tei.make_ethosn_partition(x)
mod = before()
transformed_mod = InlineNonComputeIntensivePartitions()(mod)
for global_var in mod.get_global_vars():
_assert_structural_equal(mod[global_var], transformed_mod[global_var])
@requires_ethosn
def test_multiple_partitioned_functions():
"""
Tests the pass on a number of partitioned functions.
"""
def before():
composite_func_name = "ethos-n_0"
inp = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
# partitioned func 1 (non compute intensive)
x = relay.reshape(inp, newshape=(1, 2, 2, 4))
partitioned_func_1 = tei.make_ethosn_partition(x)[composite_func_name]
gv_1 = relay.GlobalVar("ethos-n_0")
# partitioned func 2 (compute intensive)
x = relay.nn.max_pool2d(inp, layout="NHWC")
partitioned_func_2 = tei.make_ethosn_partition(x)[composite_func_name]
gv_2 = relay.GlobalVar("ethos-n_1")
# partitioned func 3 (non compute intensive)
x = relay.clip(inp, 0.0, 1.0)
partitioned_func_3 = tei.make_ethosn_partition(x)[composite_func_name]
gv_3 = relay.GlobalVar("ethos-n_2")
mod = tvm.IRModule({})
mod[gv_1] = partitioned_func_1
mod[gv_2] = partitioned_func_2
mod[gv_3] = partitioned_func_3
main_expr = relay.Call(gv_1, [inp])
main_expr = relay.Call(gv_2, [main_expr])
main_expr = relay.Call(gv_3, [main_expr])
mod["main"] = relay.Function([inp], main_expr)
return relay.transform.InferType()(mod)
def expected():
composite_func_name = "ethos-n_0"
inp = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
# partitioned func 2 (compute intensive)
x = relay.nn.max_pool2d(inp, layout="NHWC")
partitioned_func_2 = tei.make_ethosn_partition(x)[composite_func_name]
gv_2 = relay.GlobalVar("ethos-n_1")
mod = tvm.IRModule({})
mod[gv_2] = partitioned_func_2
main_expr = relay.reshape(inp, newshape=(1, 2, 2, 4))
main_expr = relay.Call(gv_2, [main_expr])
main_expr = relay.clip(main_expr, 0.0, 1.0)
mod["main"] = relay.Function([inp], main_expr)
return relay.transform.InferType()(mod)
mod = before()
mod = InlineNonComputeIntensivePartitions()(mod)
expected_mod = expected()
for global_var in mod.get_global_vars():
_assert_structural_equal(mod[global_var.name_hint], expected_mod[global_var.name_hint])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_leaky_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration tests for Leaky ReLU"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype, alpha):
x = relay.var("x", shape=shape, dtype=dtype)
x = relay.qnn.op.dequantize(
x,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
)
x = relay.nn.leaky_relu(x, alpha=alpha)
return relay.qnn.op.quantize(
x,
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3), (1, 3, 8, 2)])
@pytest.mark.parametrize("alpha", [0.001, 0.5678])
def test_leaky_relu(dtype, shape, alpha):
"""Compare Leaky ReLU output with TVM."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
zp_min = iinfo.min
zp_max = iinfo.max
input_zp = zp_min + 120
input_sc = 0.0068132
output_zp = zp_min + 128
output_sc = 0.0078125
inputs = {"x": tvm.nd.array(np.random.randint(zp_min, high=zp_max, size=shape, dtype=dtype))}
outputs = []
for npu in [False, True]:
model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype, alpha)
mod = tei.make_module(model, [])
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["int8"])
@pytest.mark.parametrize("shape", [(1, 14, 14, 2)])
@pytest.mark.parametrize("alpha", [-1.34, 2.32, 1, 0])
def test_leaky_relu_unsupported_alpha(dtype, shape, alpha):
"""Test unsupported values of alpha (<= 0, >= 1) in Leaky ReLU."""
iinfo = np.iinfo(dtype)
zp_min = iinfo.min
err_msg = f"leaky relu alpha must be less than 1 and greater than 0, but was {alpha}"
model = _get_model(shape, zp_min + 120, 0.0068132, zp_min + 128, 0.0078125, dtype, alpha)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_leaky_relu")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_mean.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration mean tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, axis, keepdims, input_zp, input_sc, output_zp, output_sc, dtype):
a = relay.var("a", shape=shape, dtype=dtype)
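    # Accumulate in int32 so the intermediate sum of the mean cannot overflow the
    # quantized dtype; the requantize below converts the result back to `dtype`.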
casted = relay.op.cast(a, "int32")
mean = relay.mean(casted, axis, keepdims)
model = relay.qnn.op.requantize(
mean,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 7, 7, 2048), (1, 8, 8)])
def test_mean(dtype, shape):
"""Compare Mean output with TVM."""
np.random.seed(0)
zp_min = np.iinfo(dtype).min
zp_max = np.iinfo(dtype).max
inputs = {
"a": tvm.nd.array(np.random.randint(zp_min, high=zp_max + 1, size=shape, dtype=dtype)),
}
outputs = []
for npu in [False, True]:
model = _get_model(
shape, [1, 2], True, zp_min + 128, 0.0784314, zp_min + 128, 0.0784314, dtype=dtype
)
mod = tei.make_module(model, [])
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
def test_mean_non_equal_quantization(dtype):
"""Test mean is not offloaded when quantization is not equal."""
np.random.seed(0)
shape = (1, 7, 7, 2048)
zp_min = np.iinfo(dtype).min
model = _get_model(shape, [1, 2], True, zp_min + 120, 0.0068132, zp_min + 128, 0.0078125, dtype)
mod = tei.make_module(model, [])
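    # With mismatched input/output quantization the mean is kept on the host: the
    # cast, mean and requantize remain as 3 host operations and no NPU partition
    # is created.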
tei.build(mod, {}, npu=True, expected_host_ops=3, npu_partitions=0)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration tests for Multiply."""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
shape,
constant_shape,
input_zp,
input_sc,
input2_zp,
input2_sc,
output_zp,
output_sc,
dtype,
reverse_inputs=False,
constant_data=None,
):
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
x = relay.var("x", shape=shape, dtype=dtype)
if constant_data:
y_data = np.array(constant_data, dtype=dtype).reshape(constant_shape)
else:
y_data = np.random.randint(data_min, data_max + 1, size=constant_shape, dtype=dtype)
y = relay.const(y_data, dtype=dtype)
out = relay.qnn.op.mul(
y if reverse_inputs else x,
x if reverse_inputs else y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
params = {"y": y_data}
return out, params
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape,constant_shape",
[((1, 4, 4, 8), (1, 1, 1, 8)), ((1, 16, 12, 4), (4,))],
)
@pytest.mark.parametrize("reverse_inputs", [False, True])
def test_multiply_to_depthwise(dtype, shape, constant_shape, reverse_inputs):
"""Compare Multiply -> Depthwise conversion output with TVM."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
input2_zp = np.random.randint(data_min, data_max)
input2_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
)
model, params = _get_model(
shape,
constant_shape,
input_zp,
input_sc,
input2_zp,
input2_sc,
output_zp,
output_sc,
dtype,
reverse_inputs,
)
inputs = {"x": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype))}
outputs = []
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,constant_shape", [((1, 4, 5, 8), (1, 1, 1, 1)), ((1, 3, 7, 10), None)]
)
@pytest.mark.parametrize("reverse_inputs", [False, True])
def test_multiply_to_reinterpret_quantize(shape, constant_shape, reverse_inputs):
"""Compare Multiply -> Reinterpret Quantize conversion output with TVM."""
np.random.seed(0)
dtype = "uint8"
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
# Multiply can only be offloaded as a reinterpret quantize operation if
    # it is an identity operation. We must choose the quantization and constant
# data carefully to make sure that this is the case.
input_zp = 0
input_sc = 0.007814894430339336
input2_zp = 0
input2_sc = 0.5
output_zp = 0
output_sc = 0.9963990449905396
constant_data = 255
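    # Illustrative arithmetic behind the identity (values taken from above): the
    # constant dequantizes to (255 - 0) * 0.5 = 127.5, and
    # input_sc * 127.5 = 0.99639904..., which closely matches output_sc, so the
    # quantized values pass through the multiply essentially unchanged.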
model, params = _get_model(
shape,
constant_shape,
input_zp,
input_sc,
input2_zp,
input2_sc,
output_zp,
output_sc,
dtype,
reverse_inputs,
constant_data,
)
inputs = {"x": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=shape, dtype=dtype))}
outputs = []
for npu in [False, True]:
mod = tei.make_module(model, params)
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
params,
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
def test_multiply_multiple_inputs_unsupported():
"""Check multiply operator with two inputs is not offloaded."""
np.random.seed(0)
shape = (1, 4, 5, 6)
dtype = "int8"
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
input2_zp = np.random.randint(data_min, data_max)
input2_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[3]
)
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.var("y", shape=shape, dtype=dtype)
model = relay.qnn.op.mul(
x,
y,
relay.const(input_sc, "float32"),
relay.const(input_zp, "int32"),
relay.const(input2_sc, "float32"),
relay.const(input2_zp, "int32"),
relay.const(output_sc, "float32"),
relay.const(output_zp, "int32"),
)
expected_host_ops = 1
npu_partitions = 0
for npu in [False, True]:
mod = tei.make_module(model, {})
tei.build(
mod,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
)
@requires_ethosn
@pytest.mark.parametrize(
"dtype,shape,constant_shape",
[
("int16", (1, 4, 5, 6), (1, 1, 1, 6)),
("int8", (1, 1, 3), (1, 1, 1, 3)),
("int8", (1, 2, 4, 8), (1, 2, 4, 8)),
],
)
def test_multiply_unsupported(dtype, shape, constant_shape):
"""Check multiply operator with unsupported attributes is not offloaded."""
np.random.seed(0)
iinfo = np.iinfo(dtype)
data_min = iinfo.min
data_max = iinfo.max
input_zp = np.random.randint(data_min, data_max)
input_sc = np.random.random() * 2
input2_zp = np.random.randint(data_min, data_max)
input2_sc = np.random.random() * 2
output_zp, output_sc = tei.get_conv2d_qnn_params(
dtype, input_zp, input_sc, input2_zp, input2_sc, 1, 1, shape[-1]
)
model, params = _get_model(
shape,
constant_shape,
input_zp,
input_sc,
input2_zp,
input2_sc,
output_zp,
output_sc,
dtype,
reverse_inputs=False,
constant_data=False,
)
expected_host_ops = 1
npu_partitions = 0
for npu in [False, True]:
mod = tei.make_module(model, {})
tei.build(
mod,
params,
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position, wrong-import-order
"""Arm(R) Ethos(TM)-N integration end-to-end network tests"""
import pytest
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
import tflite.Model
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.contrib import download
import tvm.relay.testing.tf as tf_testing
from . import infrastructure as tei
def _get_tflite_model(tflite_model_path, inputs_dict, dtype):
with open(tflite_model_path, "rb") as f:
tflite_model_buffer = f.read()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
shape_dict = {}
dtype_dict = {}
for value in inputs_dict:
input_shape = inputs_dict[value]
shape_dict[value] = input_shape
dtype_dict[value] = dtype
return relay.frontend.from_tflite(
tflite_model,
shape_dict=shape_dict,
dtype_dict=dtype_dict,
)
def _test_image_network(
model_url,
model_sub_path,
input_dict,
compile_hash,
output_count,
host_ops=0,
npu_partitions=1,
run=False,
):
"""Test an image network.
Parameters
----------
model_url : str
The URL to the model.
model_sub_path : str
The name of the model file.
input_dict : dict
The input dict.
compile_hash : str, set
The compile hash(es) to check the compilation output against.
output_count : int
The expected number of outputs.
host_ops : int
The expected number of host operators.
npu_partitions : int
The expected number of Ethos-N partitions.
run : bool
Whether or not to try running the network. If hardware isn't
available, the run will still take place but with a mocked
inference function, so the results will be incorrect. This is
        therefore just to test that the runtime flow is working rather than
to check the correctness/accuracy.
"""
def get_model():
if model_url[-3:] in ("tgz", "zip"):
model_path = tf_testing.get_workload_official(
model_url,
model_sub_path,
)
else:
model_path = download.download_testdata(
model_url,
model_sub_path,
)
return _get_tflite_model(model_path, input_dict, "uint8")
inputs = {}
for input_name in input_dict:
input_shape = input_dict[input_name]
inputs[input_name] = tei.get_real_image(input_shape[1], input_shape[2])
mod, params = get_model()
m = tei.build(mod, params, npu=True, expected_host_ops=host_ops, npu_partitions=npu_partitions)
tei.assert_lib_hash(m.get_lib(), compile_hash)
if run:
tei.run(m, inputs, output_count, npu=True)
@requires_ethosn
def test_mobilenet_v1():
"""Compare compile hashes for mobilenetv1 with an expected result."""
# If this test is failing due to a hash mismatch, please notify @lhutton1 and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"c37fec1f214c7f93ce49ee4e3b587969"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
model_sub_path="mobilenet_v1_1.0_224_quant.tflite",
input_dict={"input": (1, 224, 224, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=3,
npu_partitions=1,
run=True,
)
@requires_ethosn
def test_resnet_50_int8():
"""Compare compile hashes for resnet50 with an expected result."""
# If this test is failing due to a hash mismatch, please notify @lhutton1 and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {
"f16dc9caa8e696bc5da8a5c6a644eb72",
"41acecca37b2735bd580f6ec38d8c2e0",
}
_test_image_network(
model_url="https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/"
"models/Quantized/resnet_50_quantized.tflite",
model_sub_path="resnet_50_quantized.tflite",
input_dict={"input": (1, 224, 224, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=10,
npu_partitions=2,
)
@requires_ethosn
def test_inception_v3():
"""Compare compile hashes for inceptionv3 with an expected result."""
# If this test is failing due to a hash mismatch, please notify @lhutton1 and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"cff892eb15944756f22dad4b83c756d2"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/tflite_11_05_08/inception_v3_quant.tgz",
model_sub_path="inception_v3_quant.tflite",
input_dict={"input": (1, 299, 299, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=0,
npu_partitions=1,
)
@requires_ethosn
def test_inception_v4():
"""Compare compile hashes for inceptionv4 with an expected result."""
# If this test is failing due to a hash mismatch, please notify @lhutton1 and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"c00c119506b34c8e87f81aa009b42431"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/inception_v4_299_quant_20181026.tgz",
model_sub_path="inception_v4_299_quant.tflite",
input_dict={"input": (1, 299, 299, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=3,
npu_partitions=1,
)
@requires_ethosn
def test_ssd_mobilenet_v1():
"""Compare compile hashes for ssdmobilenetv1 with an expected result."""
# If this test is failing due to a hash mismatch, please notify @lhutton1 and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"ec2b78852192058f88b64d45c26620d5", "f68cbeaaba03874ea735ce3f5eab9227"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
model_sub_path="detect.tflite",
input_dict={"normalized_input_image_tensor": (1, 300, 300, 3)},
compile_hash=_compile_hash,
output_count=4,
host_ops=27,
npu_partitions=2,
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration pooling tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, typef, sizes, strides, pads, layout, dtype):
"""Return a model and any parameters it may have"""
req = relay.var("a", shape=shape, dtype=dtype)
if typef is relay.nn.avg_pool2d:
req = relay.cast(req, "int32")
req = typef(req, pool_size=sizes, strides=strides, padding=pads, ceil_mode=True, layout=layout)
if typef is relay.nn.avg_pool2d:
req = relay.cast(req, dtype)
return req
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape,typef,size,stride,pad",
[
((1, 8, 8, 8), relay.nn.max_pool2d, (2, 2), (2, 2), (0, 0, 0, 0)),
((1, 9, 9, 9), relay.nn.max_pool2d, (3, 3), (2, 2), (0, 0, 0, 0)),
((1, 8, 8, 8), relay.nn.avg_pool2d, (3, 3), (1, 1), (1, 1, 1, 1)),
],
)
def test_pooling(dtype, shape, typef, size, stride, pad):
"""Compare Pooling output with TVM."""
np.random.seed(0)
layout = "NHWC"
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=shape, dtype=dtype
)
),
}
outputs = []
model = _get_model(shape, typef, size, stride, pad, layout, dtype)
for npu in [False, True]:
mod = tei.make_module(model, {})
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,size,stride,layout,dtype,err_msg",
[
(
(2, 8, 8, 8),
(2, 2),
(2, 2),
"NHWC",
"uint8",
"batch size=2, batch size must = 1",
),
(
(1, 8, 8, 8),
(2, 2),
(2, 2),
"NHWC",
"int16",
"dtype='int16', dtype must be either uint8, int8 or int32",
),
(
(1, 8, 8, 8),
(2, 2),
(2, 2),
"NCHW",
"uint8",
"data format=NCHW, data format must = NHWC",
),
(
(1, 8, 8, 8),
(2, 2),
(2, 2, 2),
"NHWC",
"uint8",
"stride size=3, stride size must = 2",
),
(
(1, 8, 8, 8),
(2, 2, 2),
(2, 2),
"NHWC",
"uint8",
"dimensions=3, dimensions must = 2",
),
],
)
def test_pooling_failure(shape, size, stride, layout, dtype, err_msg):
"""Check Pooling error messages."""
typef = relay.nn.max_pool2d
pad = (0, 0, 0, 0)
model = _get_model(shape, typef, size, stride, pad, layout, dtype)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration relu tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, a_min, a_max):
assert a_min >= np.iinfo(dtype).min and a_max <= np.iinfo(dtype).max
a = relay.var("a", shape=shape, dtype=dtype)
relu = relay.clip(a, a_min=a_min, a_max=a_max)
return relu
@requires_ethosn
@pytest.mark.parametrize(
"shape,a_min,a_max,dtype",
[
((1, 4, 4, 4), 65, 178, "uint8"),
((1, 8, 4, 2), 1, 254, "uint8"),
((1, 8, 4, 2), -100, 100, "int8"),
((1, 16), -120, -20, "int8"),
],
)
def test_relu(dtype, shape, a_min, a_max):
"""Compare Relu output with TVM."""
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=shape,
dtype=dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
mod = tei.make_module(model, {})
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,dtype,a_min,a_max,err_msg",
[
((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
],
)
def test_relu_failure(shape, dtype, a_min, a_max, err_msg):
"""Check Relu error messages."""
model = _get_model(shape, dtype, a_min, a_max)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_requantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration requantize tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, in_dtype, out_dtype):
a = relay.var("a", shape=shape, dtype=in_dtype)
model = relay.qnn.op.requantize(
data=a,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=out_dtype,
)
return model
@requires_ethosn
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
@pytest.mark.parametrize("out_dtype", ["int8", "uint8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3)])
def test_requantize(in_dtype, out_dtype, shape):
"""Compare Requantize output with TVM."""
np.random.seed(0)
low = 0 if in_dtype == "uint8" else -5
high = low + 10
    input_zp = (high + low) // 2
inputs = {
"a": tvm.nd.array(np.random.randint(low=low, high=high, size=shape, dtype=in_dtype)),
}
outputs = []
for npu in [False, True]:
model = _get_model(
shape=shape,
input_zp=input_zp,
input_sc=0.002,
output_zp=10,
output_sc=0.008,
in_dtype=in_dtype,
out_dtype=out_dtype,
)
mod = tei.make_module(model, [])
x = tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
outputs.append(x)
tei.verify(outputs, out_dtype, 1)
@requires_ethosn
def test_requantize_mixed_precision_with_following_op():
"""
Checks a requantize operation that changes precision from uint8 to int8 with a
following add op.
"""
np.random.seed(0)
shape = (1, 4, 6, 8)
in_sc = 0.012566
in_zp = 131
out_sc = 0.012566
out_zp = 3
in_dtype = "uint8"
out_dtype = "int8"
def get_model():
a = relay.var("a", shape=shape, dtype=in_dtype)
b = relay.var("b", shape=shape, dtype=out_dtype)
req = relay.qnn.op.requantize(
data=a,
input_scale=relay.const(in_sc, "float32"),
input_zero_point=relay.const(in_zp, "int32"),
output_scale=relay.const(out_sc, "float32"),
output_zero_point=relay.const(out_zp, "int32"),
out_dtype=out_dtype,
)
req = relay.qnn.op.add(
req,
b,
lhs_scale=relay.const(out_sc, "float32"),
lhs_zero_point=relay.const(out_zp, "int32"),
rhs_scale=relay.const(out_sc, "float32"),
rhs_zero_point=relay.const(out_zp, "int32"),
output_scale=relay.const(out_sc, "float32"),
output_zero_point=relay.const(out_zp, "int32"),
)
return req
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(in_dtype).min, high=np.iinfo(in_dtype).max, size=shape, dtype=in_dtype
)
),
"b": tvm.nd.array(
np.random.randint(
low=np.iinfo(out_dtype).min,
high=np.iinfo(out_dtype).max,
size=shape,
dtype=out_dtype,
)
),
}
outputs = []
for npu in [False, True]:
model = get_model()
mod = tei.make_module(model, {})
x = tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
outputs.append(x)
tei.verify(outputs, out_dtype, 1)
@requires_ethosn
def test_requantize_failure():
"""Check Requantize error messages."""
input_sc = 0.8
output_sc = (input_sc / 128) - 0.0001
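    # input_sc / 128 = 0.00625, so this output scale (0.00615) sits deliberately
    # just below the supported minimum.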
model = _get_model(
shape=(1, 52, 52, 3),
input_zp=0,
input_sc=input_sc,
output_zp=0,
output_sc=output_sc,
in_dtype="int8",
out_dtype="int8",
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_requantize")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, "Output scale must be bigger than input scale / 128")
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_reshape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration reshape tests"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(input_shape, output_shape, dtype):
"""Return a model and any parameters it may have"""
a = relay.var("a", shape=input_shape, dtype=dtype)
req = relay.reshape(a, output_shape)
return req, {}
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"input_shape, output_shape",
[
((1, 15, 4, 1), (1, 60)),
((1, 15, 4, 1), (1, 30, 2)),
((1, 15, 4, 1), (1, 4, 15, 1)),
((1, 15, 4, 1), (1, 12, 5, 1)),
((1, 15, 4, 1), (1, 0, 2, 2)),
((1, 15, 4, 1), (1, -1, 2, 1)),
((1, 15, 4, 1), (1, -2)),
((1, 15, 4, 1), (1, -3, 1, 1)),
((1, 15, 4, 1), (1, -4, 3, 5, 4)),
((1, 15, 4, 1), (0, -1, -2)),
((1, 15, 4, 1), (0, -1, -3, 1)),
((1, 15, 4, 1), (1, -4, -1, 5, 4)),
],
)
def test_reshape(dtype, input_shape, output_shape):
"""Compare Reshape output with TVM."""
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(
low=np.iinfo(dtype).min,
high=np.iinfo(dtype).max + 1,
size=input_shape,
dtype=dtype,
)
)
}
outputs = []
for npu in [False, True]:
model, params = _get_model(input_shape, output_shape, dtype)
mod = tei.make_module(model, params)
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
params,
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"input_shape, output_shape",
[
(
(1, 13, 13, 255),
(1, 13, 13, 3, 85),
),
],
)
def test_reshape_failure(input_shape, output_shape):
"""Check Resize is not offloaded."""
model, params = _get_model(input_shape, output_shape, "int8")
mod = tei.make_module(model, params)
tei.build(
mod,
params,
expected_host_ops=1,
npu_partitions=0,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_resize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration resize tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(
shape,
dtype,
size,
input_zp,
input_sc,
output_zp,
output_sc,
coordinate_transformation_mode,
rounding_method,
):
x = relay.var("x", shape=shape, dtype=dtype)
resize = relay.image.resize2d(
data=x,
size=size,
layout="NHWC",
method="nearest_neighbor",
coordinate_transformation_mode=coordinate_transformation_mode,
rounding_method=rounding_method,
)
model = relay.qnn.op.requantize(
resize,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape, size, coordinate_transformation_mode, rounding_method",
[
((1, 4, 4, 2), (8, 8), "half_pixel", "round_prefer_ceil"),
((1, 4, 4, 2), (7, 7), "asymmetric", "floor"),
((1, 4, 8, 3), (8, 16), "half_pixel", "round_prefer_ceil"),
((1, 4, 8, 3), (7, 15), "asymmetric", "floor"),
],
)
def test_resize(dtype, shape, size, coordinate_transformation_mode, rounding_method):
"""Compare Resize output with TVM."""
np.random.seed(0)
zp_min = np.iinfo(dtype).min
zp_max = np.iinfo(dtype).max
inputs = {
"x": tvm.nd.array(np.random.randint(zp_min, high=zp_max + 1, size=shape, dtype=dtype)),
}
outputs = []
for npu in [False, True]:
model = _get_model(
shape=shape,
dtype=dtype,
size=size,
input_zp=zp_min + 128,
input_sc=0.0784314,
output_zp=zp_min + 128,
output_sc=0.0784314,
coordinate_transformation_mode=coordinate_transformation_mode,
rounding_method=rounding_method,
)
mod = tei.make_module(model, {})
x = tei.build_and_run(mod, inputs, 1, {}, npu=npu)
outputs.append(x)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"size,err_msg",
[
(
(30, 20),
"Requested height isn't supported",
),
(
(20, 30),
"Requested width isn't supported",
),
],
)
def test_resize_failure(size, err_msg):
"""Check Resize error messages."""
dtype = "int8"
zp_min = np.iinfo(dtype).min
model = _get_model(
shape=(1, 10, 10, 1),
dtype=dtype,
size=size,
input_zp=zp_min + 128,
input_sc=0.0784314,
output_zp=zp_min + 128,
output_sc=0.0784314,
coordinate_transformation_mode="half_pixel",
rounding_method="round_prefer_ceil",
)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_resize")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_sigmoid.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration sigmoid tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
a = relay.var("a", shape=shape, dtype=dtype)
dequantize = relay.qnn.op.dequantize(
a,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
)
sigmoid = relay.sigmoid(dequantize)
model = relay.qnn.op.quantize(
sigmoid,
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape",
[
(1, 16, 16, 16),
(1, 8, 8),
],
)
def test_sigmoid(dtype, shape):
"""Compare Sigmoid output with TVM."""
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
),
}
outputs = []
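    # Sigmoid outputs lie in [0, 1], so the output quantization is fixed at scale
    # 1/256 with the zero point at the bottom of the dtype range: 0 for uint8 and
    # -128 for int8, hence the per-dtype zero points below.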
    for npu in [False, True]:
        if dtype == "uint8":
            input_zp = 0
            output_zp = 0
        else:
            input_zp = 127
            output_zp = -128
        model = _get_model(shape, input_zp, 0.02, output_zp, 1.0 / 256.0, dtype)
        mod = tei.make_module(model, [])
        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize(
"shape,input_zp,input_sc,output_zp,output_sc,err_msg",
[
((2, 4, 4, 4), 64, 0.2, 0, 1 / 256, "batch size=2, batch size must = 1"),
(
(1, 4, 4, 4),
64,
0.2,
3,
1,
"output quantization params=(3, 1), must = (0, 1/256)",
),
],
)
def test_sigmoid_failure(shape, input_zp, input_sc, output_zp, output_sc, err_msg):
"""Check Sigmoid error messages."""
dtype = "uint8"
model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_sigmoid")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_split.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Split tests for Arm(R) Ethos(TM)-N"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, dtype, splits, axis):
a = relay.var("a", shape=shape, dtype=dtype)
split = relay.op.split(a, indices_or_sections=splits, axis=axis)
return split.astuple()
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape,splits,axis",
[
((1, 16, 16, 32), (2, 7, 10), 2),
((1, 12, 8, 16), 3, 1),
],
)
def test_split(dtype, shape, splits, axis):
"""Compare Split output with TVM."""
np.random.seed(0)
outputs = []
inputs = {
"a": tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
)
}
for npu in [False, True]:
model = _get_model(shape, dtype, splits, axis)
mod = tei.make_module(model, {})
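        # relay.split produces `splits` outputs when given an integer and
        # len(splits) + 1 sections when given a tuple of split indices.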
output_count = splits if isinstance(splits, int) else len(splits) + 1
outputs.append(
tei.build_and_run(
mod,
inputs,
output_count,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize(
"shape,dtype,splits,axis,err_msg",
[
((1, 4, 4, 4, 4), "uint8", 4, 2, "dimensions=5, dimensions must be <= 4;"),
((1, 4, 4, 4), "int16", 4, 2, "dtype='int16', dtype must be either uint8, int8 or int32;"),
((2, 4, 4, 4), "uint8", 4, 2, "batch size=2, batch size must = 1;"),
((1, 4, 4, 4), "uint8", 1, 0, "Split cannot be performed along batch axis (axis 0);"),
(
(1, 4, 4, 4),
"uint8",
4,
3,
"Split along the channels dimension (axis 3) requires all output sizes "
"(specified in splitInfo.m_Sizes) to be multiples of 16;",
),
],
)
def test_split_failure(shape, dtype, splits, axis, err_msg):
"""Check Split error messages."""
model = _get_model(shape, dtype, splits, axis)
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_tanh.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N NPU integration tanh tests"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
a = relay.var("a", shape=shape, dtype=dtype)
dequantize = relay.qnn.op.dequantize(
a,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
)
tanh = relay.tanh(dequantize)
model = relay.qnn.op.quantize(
tanh,
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize("shape", [(1, 52, 52, 3)])
def test_tanh(dtype, shape):
"""Compare Tanh output with TVM."""
zp_min = np.iinfo(dtype).min
zp_max = np.iinfo(dtype).max
np.random.seed(0)
inputs = {
"a": tvm.nd.array(np.random.randint(zp_min, high=zp_max, size=shape, dtype=dtype)),
}
outputs = []
for npu in [False, True]:
model = _get_model(shape, zp_min + 120, 0.0250629, zp_min + 128, 0.0078125, dtype)
mod = tei.make_module(model, [])
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape, input_zp, input_sc, output_zp, output_sc, err_msg",
[
(
(1, 16, 16, 16),
120,
0.0250629,
64,
0.0078125,
"output quantization params=(64, 0.0078125), must = ({test_zp}, 1/256);",
)
],
)
def test_tanh_failure(shape, input_zp, input_sc, output_zp, output_sc, err_msg, dtype):
"""Check Tanh error messages."""
test_zp = 0 if dtype == "int8" else 128
model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
model = tei.make_ethosn_composite(model, "ethos-n.qnn_tanh")
mod = tei.make_ethosn_partition(model)
tei.test_error(mod, {}, err_msg.format(test_zp=test_zp))
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosn/test_topologies.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N tests for complex network topologies."""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import Available, ethosn_available
from . import infrastructure as tei
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_split_add_concat(dtype):
"""Test a model with split, add and contatenate."""
def get_model(input_shape, dtype, var_names):
"""Return a model"""
a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
split_scale = relay.const(0.25, "float32")
split_zp = relay.const(100, "int32")
add_scale = relay.const(0.75, "float32")
add_zp = relay.const(120, "int32")
axis = 2
split = relay.split(a, indices_or_sections=4, axis=axis)
b = relay.qnn.op.add(
split[0],
split[1],
lhs_scale=split_scale,
lhs_zero_point=split_zp,
rhs_scale=split_scale,
rhs_zero_point=split_zp,
output_scale=add_scale,
output_zero_point=add_zp,
)
conc = relay.qnn.op.concatenate(
[b, split[2], split[3]],
input_scales=(add_scale, split_scale, split_scale),
input_zero_points=(add_zp, split_zp, split_zp),
output_scale=add_scale,
output_zero_point=add_zp,
axis=axis,
)
return conc
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 16, 16, 4), dtype=dtype
)
),
}
outputs = []
for npu in [False, True]:
model = get_model(inputs["a"].shape, dtype, iter(inputs))
mod = tei.make_module(model, [])
expected_host_ops = 0
npu_partitions = 1
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
if outputs:
tei.verify(outputs, dtype, 2)
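# For reference, our reading of the qnn.add semantics exercised above (not
# executed by the test): both operands are dequantized with their own scale
# and zero point, added in real space, then requantized, i.e.
#   out_q = round((lhs_sc * (lhs_q - lhs_zp) + rhs_sc * (rhs_q - rhs_zp)) / out_sc) + out_zp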
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_multiple_command_streams(dtype):
"""Check that multiple Ethos-N partitions are correctly handled.
If there's more than one Ethos-N graph partition, more than one command
stream will be created. This should be handled correctly by both the
Ethos-N codegen and Ethos-N runtime module. This test checks against a
simple graph which creates two Ethos-N partitions and checks the result
against an 'all-CPU' run through TVM.
"""
def get_model(dtype):
"""
max_pool2d
|
abs
|
max_pool2d
"""
x = relay.var("x", shape=(1, 4, 4, 4), dtype=dtype)
out = relay.nn.max_pool2d(x, (2, 2), (2, 2), layout="NHWC") # supported
out = relay.op.abs(out) # not supported
out = relay.nn.max_pool2d(out, (2, 2), (2, 2), layout="NHWC") # supported
return out
np.random.seed(0)
inputs = {
"x": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 4, 4, 4), dtype=dtype
)
)
}
model = get_model(dtype)
mod = tei.make_module(model, {})
# Mock inference is only supported when the whole graph is offloaded to the NPU
if ethosn_available() == Available.SW_ONLY:
tei.build(mod, {}, npu=True, expected_host_ops=1, npu_partitions=2)
else:
tei.build_and_run(mod, inputs, 1, {}, npu=True, expected_host_ops=1, npu_partitions=2)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_order(dtype):
"""Test the output order."""
def get_model(input_shape, dtype, var_names):
"""Return a model"""
min_value = np.iinfo(dtype).min
max_value = np.iinfo(dtype).max
a = relay.var(next(var_names), shape=input_shape, dtype=dtype)
op_z = relay.op.clip(a, min_value, max_value)
op_b = relay.op.clip(op_z, min_value, min_value + 15)
op_c = relay.op.clip(op_z, min_value + 16, min_value + 31)
op_d = relay.op.clip(op_z, min_value + 32, min_value + 47)
op_e = relay.op.clip(op_z, min_value + 48, min_value + 63)
op_f = relay.op.clip(op_z, min_value + 64, min_value + 79)
op_g = relay.op.clip(op_z, min_value + 80, min_value + 95)
op_h = relay.op.clip(op_z, min_value + 96, min_value + 111)
op_i = relay.op.clip(op_z, min_value + 112, max_value)
return relay.Tuple((op_d, op_c, op_e, op_f, op_i, op_b, op_h, op_g))
np.random.seed(0)
inputs = {
"a": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 16, 16, 4), dtype=dtype
)
),
}
outputs = []
for npu in [False, True]:
model = get_model(inputs["a"].shape, dtype, iter(inputs))
mod = tei.make_module(model, [])
outputs.append(
tei.build_and_run(
mod,
inputs,
8,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_order_different_sizes(dtype):
"""
Test the output order when there are multiple outputs of different sizes.
"""
np.random.seed(0)
input_name = "a"
input_shape = (1, 8, 8, 4)
dtype_min = np.iinfo(dtype).min
dtype_max = np.iinfo(dtype).max
def get_model():
var = relay.var(input_name, shape=input_shape, dtype=dtype)
clip = relay.op.clip(var, dtype_min, dtype_max)
max_pool = relay.nn.max_pool2d(clip, (2, 2), (2, 2), ceil_mode=True, layout="NHWC")
mean = relay.op.cast(clip, "int32")
mean = relay.mean(mean, axis=[1, 2], keepdims=True)
mean = relay.qnn.op.requantize(
mean,
input_scale=relay.const(0.0784314, "float32"),
input_zero_point=relay.const(dtype_min + 128, "int32"),
output_scale=relay.const(0.0784314, "float32"),
output_zero_point=relay.const(dtype_min + 128, "int32"),
out_dtype=dtype,
)
return relay.Tuple((mean, max_pool, clip))
inputs = {
input_name: tvm.nd.array(
np.random.randint(dtype_min, dtype_max + 1, size=input_shape, dtype=dtype)
),
}
outputs = []
for npu in [False, True]:
model = get_model()
mod = tei.make_module(model, [])
outputs.append(
tei.build_and_run(mod, inputs, 3, {}, npu=npu, expected_host_ops=0, npu_partitions=1)
)
tei.verify(outputs, dtype, 1)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
"shape,splits,axis",
[
((1, 16, 16, 32), (2, 7, 10), 2),
],
)
def test_split_with_asym_concats(dtype, shape, splits, axis):
"""Test a model with split and contatenates."""
np.random.seed(0)
def get_model(shape, dtype, splits, axis):
a = relay.var("a", shape=shape, dtype=dtype)
split = relay.op.split(a, indices_or_sections=splits, axis=axis)
zeroi = relay.const(1, "int32")
zerof = relay.const(0.5, "float32")
con1 = relay.qnn.op.concatenate(
[split[0], split[1]],
input_scales=[zerof] * 2,
input_zero_points=[zeroi] * 2,
output_scale=zerof,
output_zero_point=zeroi,
axis=axis,
)
con2 = relay.qnn.op.concatenate(
[split[2], split[3]],
input_scales=[zerof] * 2,
input_zero_points=[zeroi] * 2,
output_scale=zerof,
output_zero_point=zeroi,
axis=axis,
)
return relay.Tuple((con2, con1))
outputs = []
inputs = {
"a": tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
)
}
for npu in [False, True]:
model = get_model(shape, dtype, splits, axis)
mod = tei.make_module(model, {})
expected_host_ops = 0
npu_partitions = 1
# Mock inference is only supported when the whole graph is offloaded to the NPU
if ethosn_available() == Available.SW_ONLY:
tei.build(
mod,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
else:
outputs.append(
tei.build_and_run(
mod,
inputs,
2,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
if outputs:
tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_output_tuple_propagation(dtype):
"""This tests the case where the output tuple must be inferred
as having dummy tensor information."""
def get_model(dtype):
a = relay.var("a", shape=(1, 4, 4, 16), dtype=dtype)
split = relay.op.split(a, indices_or_sections=4, axis=2)
return relay.Tuple((split[0], split[1], split[2], split[3]))
np.random.seed(0)
outputs = []
inputs = {
"a": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 4, 4, 16), dtype=dtype
)
)
}
for npu in [False, True]:
model = get_model(dtype)
mod = tei.make_module(model, {})
outputs.append(
tei.build_and_run(
mod,
inputs,
4,
{},
npu=npu,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
)
tei.verify(outputs, dtype, 0)
@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
def test_input_tuples(dtype):
"""Test a model with a tuple as input."""
def get_model(shapes, dtype, axis):
tup = []
for i, shape in enumerate(shapes):
a = relay.var("in" + str(i), shape=shape, dtype=dtype)
tup.append(a)
zeroi = relay.const(1, "int32")
zerof = relay.const(0.5, "float32")
con = relay.qnn.op.concatenate(
tup,
input_scales=[zerof] * len(shapes),
input_zero_points=[zeroi] * len(shapes),
output_scale=zerof,
output_zero_point=zeroi,
axis=axis,
)
return con
np.random.seed(0)
inputs = {
"in0": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 4), dtype=dtype
)
),
"in1": tvm.nd.array(
np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=(1, 6), dtype=dtype
)
),
}
outputs = []
for npu in [False, True]:
model = get_model([(1, 4), (1, 6)], dtype, 1)
if not npu:
mod = tei.make_module(model, {})
else:
mod = tei.make_ethosn_partition(model)
lib = tei.build(
mod,
{},
npu=False,
additional_config_args={"inline_non_compute_intensive_partitions": False},
)
outputs.append(tei.run(lib, inputs, 1, npu=npu))
tei.verify(outputs, dtype, 0)
@requires_ethosn
def test_inline_non_compute_intensive_operations():
"""Tests the case when a subgraph is unpartitioned."""
np.random.seed(0)
dtype = "int8"
shape = (1, 2, 2, 4)
inp = relay.var("x", shape=shape, dtype=dtype)
reshape = relay.reshape(inp, newshape=(1, 1, 4, 4))
inputs = {
"x": tvm.nd.array(
np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
),
}
outputs = []
for npu in [False, True]:
mod = tei.make_module(reshape, {})
outputs.append(
tei.build_and_run(mod, inputs, 1, {}, npu=npu, expected_host_ops=1, npu_partitions=0)
)
tei.verify(outputs, dtype, 0)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test infrastructure for Arm(R) Ethos(TM)-U NPU related tests"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test infrastructure for the NPU cascader"""
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
ethosu_enabled = True
try:
import ethosu.vela
except ImportError:
ethosu_enabled = False
import tvm.contrib.ethosu.cascader as cs
@pytest.fixture
def FLASH():
return cs.MemoryRegion(
name="FLASH",
size=10**7,
read_bandwidth=4,
write_bandwidth=4,
read_latency=0,
write_latency=0,
burst_length=1,
)
@pytest.fixture
def DRAM():
return cs.MemoryRegion(
name="DRAM",
size=10**9,
read_bandwidth=8,
write_bandwidth=8,
read_latency=0,
write_latency=0,
burst_length=1,
)
@pytest.fixture
def SRAM():
return cs.MemoryRegion(
name="SRAM",
size=10**6,
read_bandwidth=16,
write_bandwidth=16,
read_latency=0,
write_latency=0,
burst_length=1,
)
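# The three fixtures above model a simple memory hierarchy (fast/small SRAM,
# slower/larger DRAM, read-mostly FLASH). A test can request them by name in
# the usual pytest way, for example:
#   def test_bandwidths(SRAM, DRAM):
#       assert SRAM.read_bandwidth > DRAM.read_bandwidth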
if ethosu_enabled:
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from .infra import create_te_graph
from ..infra import (
make_ethosu_conv2d,
make_ethosu_depthwise_conv2d,
make_ethosu_binary_elementwise,
)
def make_TwoConv2DTE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 12, 12, 8), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=8,
ofm_channels=32,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHCWB16",
)
conv2 = make_ethosu_conv2d(
ifm=conv1,
ifm_channels=32,
ofm_channels=16,
kernel_shape=(3, 3),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHCWB16",
ofm_layout="NHWC",
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def TwoConv2DTE():
return make_TwoConv2DTE()
@pytest.fixture
def TwoConv2DGraph():
_, te_graph, const_dict = make_TwoConv2DTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_TwoConv2DWithSliceTE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 12, 12, 8), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=8,
ofm_channels=64,
kernel_shape=(1, 1),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
)
strided_slice = relay.strided_slice(conv1, [0, 0, 0, 0], [1, 6, 6, 128])
conv2 = make_ethosu_conv2d(
ifm=strided_slice,
ifm_channels=64,
ofm_channels=16,
kernel_shape=(3, 3),
padding=(1, 1),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHCWB16",
)
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def TwoConv2DWithSliceTE():
return make_TwoConv2DWithSliceTE()
@pytest.fixture
def TwoConv2DWithSliceGraph():
_, te_graph, const_dict = make_TwoConv2DWithSliceTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_MobileNetv2DiamondTE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 56, 56, 96), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=96,
ofm_channels=24,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=conv1,
ifm_channels=24,
ofm_channels=144,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth1 = make_ethosu_depthwise_conv2d(
ifm=conv2,
channels=144,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv3 = make_ethosu_conv2d(
ifm=depth1,
ifm_channels=144,
ofm_channels=24,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
add1 = make_ethosu_binary_elementwise(
ifm=conv1,
ifm2=conv3,
ifm_channels=24,
ifm2_channels=24,
operator_type="ADD",
ofm_dtype="int8",
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv2DiamondTE():
return make_MobileNetv2DiamondTE()
@pytest.fixture
def MobileNetv2DiamondGraph():
_, te_graph, const_dict = make_MobileNetv2DiamondTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_BinaryTE():
def _get_func():
ifm_a = relay.var("ifm_a", shape=(1, 8, 8, 8), dtype="int8")
ifm_b = relay.var("ifm_b", shape=(1, 8, 8, 8), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm_a,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=ifm_b,
ifm_channels=8,
ofm_channels=8,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
add1 = make_ethosu_binary_elementwise(
ifm=conv1,
ifm2=conv2,
ifm_channels=8,
ifm2_channels=8,
operator_type="ADD",
ofm_dtype="int8",
)
func = relay.Function(relay.analysis.free_vars(add1), add1)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def BinaryTE():
return make_BinaryTE()
@pytest.fixture
def BinaryGraph():
_, te_graph, const_dict = make_BinaryTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_MobileNetv1StartTE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 224, 224, 3), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=3,
ofm_channels=32,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
depth1 = make_ethosu_depthwise_conv2d(
ifm=conv1,
channels=32,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=depth1,
ifm_channels=32,
ofm_channels=64,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth2 = make_ethosu_depthwise_conv2d(
ifm=conv2,
channels=64,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv3 = make_ethosu_conv2d(
ifm=depth2,
ifm_channels=64,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth3 = make_ethosu_depthwise_conv2d(
ifm=conv3,
channels=128,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv4 = make_ethosu_conv2d(
ifm=depth3,
ifm_channels=128,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth4 = make_ethosu_depthwise_conv2d(
ifm=conv4,
channels=128,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
func = relay.Function(relay.analysis.free_vars(depth4), depth4)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv1StartTE():
return make_MobileNetv1StartTE()
@pytest.fixture
def MobileNetv1StartGraph():
_, te_graph, const_dict = make_MobileNetv1StartTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_MobileNetv1TE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 224, 224, 3), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=3,
ofm_channels=32,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
depth1 = make_ethosu_depthwise_conv2d(
ifm=conv1,
channels=32,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=depth1,
ifm_channels=32,
ofm_channels=64,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth2 = make_ethosu_depthwise_conv2d(
ifm=conv2,
channels=64,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv3 = make_ethosu_conv2d(
ifm=depth2,
ifm_channels=64,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth3 = make_ethosu_depthwise_conv2d(
ifm=conv3,
channels=128,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv4 = make_ethosu_conv2d(
ifm=depth3,
ifm_channels=128,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth4 = make_ethosu_depthwise_conv2d(
ifm=conv4,
channels=128,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv5 = make_ethosu_conv2d(
ifm=depth4,
ifm_channels=128,
ofm_channels=256,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth5 = make_ethosu_depthwise_conv2d(
ifm=conv5,
channels=256,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv6 = make_ethosu_conv2d(
ifm=depth5,
ifm_channels=256,
ofm_channels=256,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth6 = make_ethosu_depthwise_conv2d(
ifm=conv6,
channels=256,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv7 = make_ethosu_conv2d(
ifm=depth6,
ifm_channels=256,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth7 = make_ethosu_depthwise_conv2d(
ifm=conv7,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv8 = make_ethosu_conv2d(
ifm=depth7,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth8 = make_ethosu_depthwise_conv2d(
ifm=conv8,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv9 = make_ethosu_conv2d(
ifm=depth8,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth9 = make_ethosu_depthwise_conv2d(
ifm=conv9,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv10 = make_ethosu_conv2d(
ifm=depth9,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth10 = make_ethosu_depthwise_conv2d(
ifm=conv10,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv11 = make_ethosu_conv2d(
ifm=depth10,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth11 = make_ethosu_depthwise_conv2d(
ifm=conv11,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv12 = make_ethosu_conv2d(
ifm=depth11,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth12 = make_ethosu_depthwise_conv2d(
ifm=conv12,
channels=512,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv13 = make_ethosu_conv2d(
ifm=depth12,
ifm_channels=512,
ofm_channels=1024,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth13 = make_ethosu_depthwise_conv2d(
ifm=conv13,
channels=1024,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv14 = make_ethosu_conv2d(
ifm=depth13,
ifm_channels=1024,
ofm_channels=1024,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv14), conv14)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv1TE():
return make_MobileNetv1TE()
@pytest.fixture
def MobileNetv1Graph():
_, te_graph, const_dict = make_MobileNetv1TE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/infra.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
ethosu_enabled = True
try:
import ethosu.vela
except ImportError:
ethosu_enabled = False
import tvm
from tvm import relay
import tvm.contrib.ethosu.cascader as cs
import numpy as np
def make_options(
cascade_region: cs.MemoryRegion,
max_proposals: int = 1,
stripe_factors: int = 1,
max_plan_size: int = 1,
max_open_plans: int = 8,
max_closed_plans: int = 32,
always_copy_size: int = 1024,
disable_pareto_plans: bool = False,
disable_pareto_proposals: bool = False,
enable_striping: bool = True,
):
return cs.CascaderOptions(
cascade_region=cascade_region,
max_proposals=max_proposals,
stripe_factors=stripe_factors,
max_plan_size=max_plan_size,
max_open_plans=max_open_plans,
max_closed_plans=max_closed_plans,
always_copy_size=always_copy_size,
disable_pareto_plans=disable_pareto_plans,
disable_pareto_proposals=disable_pareto_proposals,
enable_striping=enable_striping,
)
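# Example usage (illustrative; `region` stands for any cs.MemoryRegion, such
# as the SRAM fixture in conftest.py):
#   options = make_options(cascade_region=region, max_plan_size=10, stripe_factors=4)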
def make_simple_home_map(graph, var_region, const_region):
home_map = {}
for tensor in graph.tensor_order:
if tensor.is_constant:
home_map[tensor] = [const_region]
else:
home_map[tensor] = [var_region]
return home_map
if ethosu_enabled:
from tvm.relay.backend.contrib.ethosu.tir.compiler import extract_constants, lower_to_te
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
def create_te_graph(func):
func, consts = extract_constants(func)
mod = tvm.IRModule.from_expr(func)
func = relay.transform.InferType()(mod)["main"]
te_graph = lower_to_te(func)
return te_graph, consts
def make_matrices(
op_type,
kernel,
stride,
padding,
ifm_layout,
ofm_layout,
dilation=(1, 1),
ifm_channels=1,
ofm_channels=1,
):
kernel_h, kernel_w = kernel
stride_h, stride_w = stride
dilation_h, dilation_w = dilation
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
if op_type == "ethosu_conv2d":
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
weight_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, kernel_h],
[0, 0, 0, 0, kernel_w],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
elif op_type == "ethosu_depthwise_conv2d":
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
weight_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, kernel_h],
[0, 0, 0, 0, kernel_w],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
]
elif op_type == "ethosu_pooling":
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
weight_matrix = [
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
scale_bias_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 10],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
weight_matrix = np.matmul(weight_matrix, nhcwb16_to_nhwc).tolist()
scale_bias_matrix = np.matmul(scale_bias_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_offset = (
[0, -padding[0], -padding[1], 0]
if ifm_layout == "NHWC"
else [0, -padding[0], 0, -padding[1], 0]
)
weight_offset = [0, 0, 0, 0]
scale_bias_offset = [0, 0]
return (
ifm_matrix,
ifm_offset,
weight_matrix,
weight_offset,
scale_bias_matrix,
scale_bias_offset,
)
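    # Worked example (illustrative): for op_type="ethosu_conv2d" with a 3x3
    # kernel, stride (1, 1), dilation (1, 1) and NHWC layouts, the IFM matrix
    # above maps an output stripe of size (1, h, w, oc, 1) to an input stripe
    # of size (1, h + 2, w + 2, ifm_channels, 1): each spatial dimension grows
    # by dilated_kernel - stride and the channel dimension covers the full IFM
    # depth.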
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_calculate_memory_pressure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position
"""
Test memory pressure is calculated correctly from used memory annotations.
"""
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import _calculate_memory_pressure
from tvm.contrib.ethosu.cascader.scheduler import extract_memory_info
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _npu_and_non_npu_functions():
mod = tvm.IRModule({})
# NPU function 1
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
max_pool = relay.nn.max_pool2d(x)
composite_func = relay.Function([x], max_pool)
composite_func = composite_func.with_attr("Composite", "ethos-u.pooling")
inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [32])
npu_compiler_func1 = compiler_func.with_attr("Compiler", "ethos-u")
g1 = relay.GlobalVar("g1")
mod[g1] = npu_compiler_func1
# Non-NPU function
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
max_pool = relay.abs(x)
composite_func = relay.Function([x], max_pool)
composite_func = composite_func.with_attr("Composite", "foo.unary_elementwise")
inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [32])
non_npu_compiler_func = compiler_func.with_attr("Compiler", "foo")
g2 = relay.GlobalVar("g2")
mod[g2] = non_npu_compiler_func
# NPU function 2
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
max_pool = relay.abs(x)
composite_func = relay.Function([x], max_pool)
composite_func = composite_func.with_attr("Composite", "ethos-u.unary_elementwise")
inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [32])
npu_compiler_func2 = compiler_func.with_attr("Compiler", "ethos-u")
g3 = relay.GlobalVar("g3")
mod[g3] = npu_compiler_func2
# Main
inp = relay.var("main_input", shape=(1, 2, 2, 4), dtype="int8")
call1 = relay.Call(g1, [inp])
call2 = relay.Call(g2, [call1])
call3 = relay.Call(g3, [call2])
main_func = relay.Function([inp], call3)
main_func = main_func.with_attr("io_used_memory", 32)
mod["main"] = main_func
return mod
def _parallel_npu_functions():
mod = tvm.IRModule({})
# NPU function 1
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
max_pool = relay.nn.max_pool2d(x)
composite_func = relay.Function([x], max_pool)
composite_func = composite_func.with_attr("Composite", "ethos-u.pooling")
inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [32])
npu_compiler_func1 = compiler_func.with_attr("Compiler", "ethos-u")
g1 = relay.GlobalVar("g1")
mod[g1] = npu_compiler_func1
# NPU function 2
x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
abs_op = relay.abs(x)
composite_func = relay.Function([x], abs_op)
composite_func = composite_func.with_attr("Composite", "ethos-u.unary_elementwise")
inp = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [32 + 16])
npu_compiler_func2 = compiler_func.with_attr("Compiler", "ethos-u")
g2 = relay.GlobalVar("g2")
mod[g2] = npu_compiler_func2
# Main
inp = relay.var("main_input", shape=(1, 2, 2, 4), dtype="int8")
call1 = relay.Call(g1, [inp])
call2 = relay.Call(g2, [inp])
concat = relay.concatenate([call1, call2], axis=3)
main_func = relay.Function([inp], concat)
main_func = main_func.with_attr("io_used_memory", 32)
mod["main"] = main_func
return mod
def _full_offload():
mod = tvm.IRModule({})
# NPU function
x = relay.var("x", shape=(1, 4, 4, 16), dtype="int8")
max_pool = relay.nn.max_pool2d(x)
composite_func = relay.Function([x], max_pool)
composite_func = composite_func.with_attr("Composite", "ethos-u.pooling")
inp = relay.var("input", shape=(1, 4, 4, 16), dtype="int8")
compiler_func = relay.Function([inp], composite_func)
compiler_func = compiler_func.with_attr("used_memory", [256 + 256])
npu_compiler_func = compiler_func.with_attr("Compiler", "ethos-u")
g1 = relay.GlobalVar("g1")
mod[g1] = npu_compiler_func
# Main
inp = relay.var("main_input", shape=(1, 4, 4, 16), dtype="int8")
call = relay.Call(g1, [inp])
main_func = relay.Function([inp], call)
main_func = main_func.with_attr("io_used_memory", 256 + 256)
mod["main"] = main_func
return mod
@pytest.mark.parametrize(
"model_func,use_workspace_io,expected_memory_pressure",
[
(_npu_and_non_npu_functions, True, (16 + 16) + (16 + 16)),
(_npu_and_non_npu_functions, False, (16 + 16) + (16 + 16) - (16 + 16)),
(_parallel_npu_functions, True, (16 + 16) + (16 + 16 + 16)),
(_parallel_npu_functions, False, (16 + 16) + (16 + 16 + 16) - (16 + 16)),
(_full_offload, True, (256 + 256)),
(_full_offload, False, (256 + 256) - (256 + 256)),
],
)
def test_calculate_memory_pressure_pass(model_func, use_workspace_io, expected_memory_pressure):
"""
Test that memory pressure is correctly calculated for NPU external functions.
"""
mod = model_func()
with tvm.transform.PassContext(config={"tir.usmp.use_workspace_io": use_workspace_io}):
memory_pressure = _calculate_memory_pressure(mod)
assert memory_pressure == expected_memory_pressure
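# Note on the expected values: by our reading of the cases above, each NPU
# external function contributes its "used_memory" annotation, and when
# "tir.usmp.use_workspace_io" is disabled the I/O tensors ("io_used_memory")
# are excluded again, hence the trailing subtraction in the False cases.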
def test_extract_memory_info():
"""
    Test that the memory pressure value correctly reduces the workspace size.
"""
initial_pool_size = 2000
memory_pressure = 500
memory_pool = WorkspacePoolInfo(
"SRAM",
[tvm.target.Target("c"), tvm.target.Target("ethos-u")],
PoolInfoProperties(
size_hint_bytes=initial_pool_size,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={tvm.target.Target("ethos-u"): 1},
),
)
sram = extract_memory_info(memory_pool, memory_pressure)
assert sram.size == initial_pool_size - memory_pressure
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_binary_elementwise_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.binary_elementwise import (
match_ethosu_binary_elementwise,
binary_elementwise_compute,
)
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
def _make_matrices(broadcast, ifm_layout, ifm2_layout, ofm_layout, ofm_channels):
broadcast_h, broadcast_w, broadcast_c = broadcast
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm2_matrix = [
[1, 0, 0, 0, 0],
[0, (1 - broadcast_h), 0, 0, broadcast_h],
[0, 0, (1 - broadcast_w), 0, broadcast_w],
[0, 0, 0, (1 - broadcast_c), broadcast_c],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
if ifm2_layout == "NHCWB16":
ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
return (ifm_matrix, ifm2_matrix)
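# For example, with broadcast_h = 1 the height row of ifm2_matrix above
# becomes [0, 0, 0, 0, 1], so the propagated IFM2 stripe always has height 1
# regardless of the output stripe height.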
@pytest.mark.parametrize(
"ofm_shape",
[
[1, 12, 15, 128],
[1, 16, 16, 16],
[1, 1, 1, 1024],
[1, 73, 51, 20],
[1, 124, 172, 5],
],
)
@pytest.mark.parametrize("ifm2_broadcast", [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ifm2_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("op_type", ["MUL", "ADD", "MIN"])
def test_ethosu_binary_elementwise_matcher(
ofm_shape, ifm2_broadcast, ifm_layout, ifm2_layout, ofm_layout, op_type
):
ifm_shape = ofm_shape.copy()
ifm2_shape = [1] + [1 if (b == 1) else a for a, b in zip(ofm_shape[1:], ifm2_broadcast)]
ifm_channels = ifm_shape[3]
ifm2_channels = ifm2_shape[3]
ofm_channels = ofm_shape[3]
nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
broadcast = [1 if a == 1 else 0 for a in ifm2_shape[1:]]
if ifm_layout == "NHCWB16":
ifm_shape = [
int(math.ceil(n))
for n in np.matmul(
nhwc_to_nhcwb16,
ifm_shape
+ [
1,
],
).tolist()[:-1]
]
if ifm2_layout == "NHCWB16":
ifm2_shape = [
int(math.ceil(n))
for n in np.matmul(
nhwc_to_nhcwb16,
ifm2_shape
+ [
1,
],
).tolist()[:-1]
]
if ofm_layout == "NHCWB16":
ofm_shape = [
int(math.ceil(n))
for n in np.matmul(
nhwc_to_nhcwb16,
ofm_shape
+ [
1,
],
).tolist()[:-1]
]
order = [1, 2, 4, 3, 0]
else:
order = [1, 2, 3, 4]
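    # e.g. an NHWC OFM shape of (1, 12, 15, 128) becomes (1, 12, 8, 15, 16) in
    # NHCWB16: the channel dimension is split into ceil(128 / 16) = 8 bricks of
    # 16 channels each.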
ifm = te.placeholder(ifm_shape, dtype="int8")
ifm2 = te.placeholder(ifm2_shape, dtype="int8")
lut = te.placeholder((), dtype="uint8")
out = binary_elementwise_compute(
ifm=ifm,
ifm2=ifm2,
lut=lut,
operator_type=op_type,
ifm_scale=1,
ifm_zero_point=0,
ifm2_scale=1,
ifm2_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ifm_channels=ifm_channels,
ifm2_channels=ifm2_channels,
reversed_operands=False,
activation="NONE",
clip_min=0,
clip_max=0,
rounding_mode="TFL",
ifm_layout=ifm_layout,
ifm2_layout=ifm2_layout,
ofm_layout=ofm_layout,
ofm_dtype="int8",
)
ifm_propagator = out.op.attrs["ifm_propagator"]
ifm2_propagator = out.op.attrs["ifm2_propagator"]
offset = [0] * len(ofm_shape)
stripes = [0] * len(ofm_shape)
output_stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
(ifm_transform, ifm2_transform) = _make_matrices(
broadcast, ifm_layout, ifm2_layout, ofm_layout, ofm_channels
)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_binary_elementwise(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 2
assert part.propagators[0].transform == ifm_transform
assert part.propagators[1].transform == ifm2_transform
propagated_ifm = ifm_propagator.propagate(output_stripe_config).shape
propagated_ifm2 = ifm2_propagator.propagate(output_stripe_config).shape
    # Because the layout transform matrices embed the exact number of output
    # channels, no information about the channel count is lost in propagation
assert ifm_shape == propagated_ifm
assert ifm2_shape == propagated_ifm2
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_block_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
import tvm
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
from .infra import make_matrices
@pytest.mark.parametrize(
"test_id, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape",
[
# Conv2D
(
0,
"ethosu_conv2d",
"NONE",
(34, 19),
(2, 2),
(1, 1),
(0, 0, 0, 0),
(1, 266, 111, 15),
(1, 117, 47, 15),
),
(
1,
"ethosu_conv2d",
"NONE",
(14, 14),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 125, 63, 64),
(1, 112, 50, 128),
),
(
2,
"ethosu_conv2d",
"NONE",
(7, 1),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 4, 12),
(1, 4, 4, 511),
),
(
3,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 16),
),
(
4,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 1),
),
(
5,
"ethosu_conv2d",
"NONE",
(3, 3),
(1, 1),
(2, 2),
(0, 0, 0, 0),
(1, 62, 94, 32),
(1, 58, 90, 16),
),
# Depthwise Conv2D
(
6,
"ethosu_depthwise_conv2d",
"NONE",
(3, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 77, 23, 18),
(1, 75, 19, 18),
),
(
7,
"ethosu_depthwise_conv2d",
"NONE",
(3, 3),
(2, 2),
(1, 1),
(1, 1, 1, 1),
(1, 25, 10, 276),
(1, 13, 5, 276),
),
# Pooling
(
8,
"ethosu_pooling",
"NONE",
(13, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 5, 276),
(1, 1, 1, 276),
),
(
9,
"ethosu_pooling",
"NONE",
(7, 3),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 317, 14, 21),
(1, 156, 12, 21),
),
],
)
@pytest.mark.parametrize(
"layouts",
[
("NHWC", "NHWC"),
("NHCWB16", "NHCWB16"),
("NHWC", "NHCWB16"),
("NHCWB16", "NHWC"),
],
)
@pytest.mark.parametrize(
"acc_config, expected_block_configs",
[
(
"ethos-u55-32",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 96), (1, 4, 6, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 4), (1, 5, 1, 12, 4), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 4, 1, 12, 16)),
((1, 8, 5, 16), (1, 6, 1, 5, 16)),
# Pooling
((1, 1, 1, 128), (1, 1, 4, 1, 16)),
((1, 9, 6, 16), (1, 8, 1, 4, 16)),
],
),
(
"ethos-u55-64",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 96), (1, 4, 6, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 8), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 4, 1, 12, 16)),
((1, 8, 5, 16), (1, 6, 1, 5, 16)),
# Pooling
((1, 1, 1, 128), (1, 1, 4, 1, 16)),
((1, 9, 6, 16), (1, 8, 1, 4, 16)),
],
),
(
"ethos-u55-128",
[
# Conv2D
((1, 7, 6, 16), (1, 7, 1, 6, 16)),
((1, 5, 8, 16), (1, 5, 1, 8, 16)),
((1, 4, 4, 128), (1, 4, 8, 4, 16)),
((1, 16, 4, 16), (1, 16, 1, 4, 16)),
((1, 8, 12, 8), (1, 10, 1, 6, 16)),
((1, 10, 6, 16), (1, 10, 1, 6, 16), (1, 6, 1, 6, 16)),
# Depthwise Conv2D
((1, 7, 10, 16), (1, 7, 1, 10, 16), (1, 6, 1, 10, 16)),
((1, 10, 6, 16), (1, 10, 1, 6, 16), (1, 6, 1, 6, 16)),
# Pooling
# ((1, 1, 2, 16), (1, 1, 1, 2, 16)),
((1, 1, 2, 128), (1, 1, 4, 2, 16)),
((1, 10, 6, 16), (1, 9, 1, 6, 16)),
],
),
(
"ethos-u55-256",
[
# Conv2D
((1, 14, 8, 16), (1, 14, 1, 8, 16)),
((1, 16, 8, 16), (1, 16, 1, 8, 16)),
((1, 4, 4, 128), (1, 4, 8, 4, 16)),
((1, 32, 4, 16), (1, 10, 12, 16), (1, 32, 1, 4, 16), (1, 10, 1, 12, 16)),
((1, 20, 12, 8), (1, 10, 1, 12, 16)),
((1, 12, 10, 16), (1, 12, 1, 10, 16)),
# Depthwise Conv2D
((1, 8, 20, 16), (1, 6, 1, 20, 16), (1, 6, 2, 20, 16)),
((1, 14, 6, 16), (1, 12, 1, 6, 16)),
# Pooling
# ((1, 2, 2, 16), (1, 2, 1, 2, 16)),
((1, 2, 2, 128), (1, 2, 6, 2, 16)),
((1, 10, 12, 16), (1, 10, 1, 12, 16)),
],
),
],
)
def test_best_block_config(
test_id,
op_type,
activation,
kernel,
stride,
dilation,
padding,
in_shape,
out_shape,
layouts,
acc_config,
expected_block_configs,
):
ofm_channels = out_shape[3]
ifm_channels = in_shape[3]
nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
op_type,
kernel,
stride,
padding,
layouts[0],
layouts[1],
dilation,
ifm_channels,
ofm_channels,
)
if layouts[0] == "NHCWB16":
in_shape = [
int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, in_shape + (1,)).tolist()[:-1]
]
if layouts[1] == "NHCWB16":
out_shape = [
int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, out_shape + (1,)).tolist()[:-1]
]
propagator = cs.Propagator(ifm_matrix, ifm_offset)
weight_propagator = cs.Propagator(weight_matrix, weight_offset)
subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)
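    # The hardware decomposes kernels into 8x8 subkernels, e.g. a 34x19 kernel
    # gives ceil(34 / 8) * ceil(19 / 8) = 5 * 3 = 15 subkernels.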
op_attrs = {
"op": op_type,
"activation": activation,
"stride_h": stride[0],
"stride_w": stride[1],
"dilation_h": dilation[0],
"dilation_w": dilation[1],
}
device_config = cs.EthosuDeviceConfig(acc_config)
block_configs = device_config.get_valid_block_configs(
propagator,
op_attrs,
out_shape,
ofm_channels,
ifm_channels,
layouts[1],
layouts[0],
"int8",
"int8",
kernel[0],
kernel[1],
)
output_quantum = [1, 1, 2, 8]
if layouts[1] == "NHCWB16":
output_quantum = [1, 1, 1, 2, 8]
# Create EthosUPart
te_subgraph = cs.TESubgraph([], None)
part = cs.EthosuPart(
te_subgraph,
[propagator, weight_propagator],
output_quantum,
subkernels,
block_configs,
1,
)
# Add tensors
input_tensor = cs.Tensor(in_shape, "int8")
part.set_input(0, input_tensor)
if op_type == "ethosu_conv2d":
weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], ifm_channels], "int8")
part.set_input(1, weight_tensor)
elif op_type == "ethosu_depthwise_conv2d":
weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], 1], "int8")
part.set_input(1, weight_tensor)
output_tensor = cs.Tensor(out_shape, "int8")
part.set_output(output_tensor)
    # NHCWB16 outputs are 5-dimensional, so they take the 5-element order
    order = [1, 2, 4, 3, 0] if layouts[1] == "NHCWB16" else [1, 2, 3, 4]
stripes = [1] * len(output_quantum)
offset = [0] * len(output_quantum)
stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)
block = part.get_block_config(stripe_config)
block_shape = tuple(int(a) for a in block.output_shape)
assert block_shape in expected_block_configs[test_id]
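# In the two tests below, the forced block config string is parsed as "HxWxC";
# for NHCWB16 outputs the channels are packed into 16-wide bricks, so (by our
# reading of the expected shapes) "4x4x24" yields a block shape of
# [1, 4, ceil(24 / 16), 4, 16] = [1, 4, 2, 4, 16].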
@pytest.mark.parametrize(
"ofm_layout, block_config_str, expected_block_shape",
[
("NHWC", "4x4x8", [1, 4, 4, 8]),
("NHCWB16", "4x4x8", [1, 4, 1, 4, 16]),
("NHCWB16", "4x4x24", [1, 4, 2, 4, 16]),
],
)
def test_force_block_config_kernelwise(ofm_layout, block_config_str, expected_block_shape):
op_type = "ethosu_pooling"
activation = "NONE"
kernel = (2, 2)
stride = (2, 2)
padding = (0, 0)
dilation = (1, 1)
ifm_channels = 32
out_shape = (1, 8, 10, 16)
ifm_matrix, ifm_offset, _, _, _, _ = make_matrices(
op_type, kernel, stride, padding, "NHWC", ofm_layout, dilation, ifm_channels
)
ofm_channels = out_shape[3]
propagator = cs.Propagator(ifm_matrix, ifm_offset)
op_attrs = {
"op": op_type,
"activation": activation,
"stride_h": stride[0],
"stride_w": stride[1],
"dilation_h": dilation[0],
"dilation_w": dilation[1],
}
config = {
"enable_cascader": True,
"dev_force_block_config": block_config_str,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
device_config = cs.EthosuDeviceConfig("ethos-u55-128")
block_configs = device_config.get_valid_block_configs(
propagator,
op_attrs,
out_shape,
ofm_channels,
ifm_channels,
ofm_layout,
"NHWC",
"int8",
"int8",
kernel[0],
kernel[1],
)
assert len(block_configs) == 1
assert block_configs[0].output_shape == expected_block_shape
@pytest.mark.parametrize(
"ofm_layout, block_config_str, expected_block_shape",
[
("NHWC", "4x4x8", [1, 4, 4, 8]),
("NHCWB16", "4x4x8", [1, 4, 1, 4, 16]),
("NHCWB16", "4x4x24", [1, 4, 2, 4, 16]),
],
)
def test_force_block_config_elementwise(ofm_layout, block_config_str, expected_block_shape):
op_type = "ethosu_elementwise_unary"
op_str = "ABS"
activation = "NONE"
ofm_shape = (1, 8, 10, 16)
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm_offset = [0, 0, 0, 0]
propagator = cs.Propagator(ifm_matrix, ifm_offset)
op_attrs = {
"op": op_type,
"operator_type": op_str,
"activation": activation,
"clip_min": 0,
"clip_max": 0,
"rounding_mode": "TFL",
}
config = {
"enable_cascader": True,
"dev_force_block_config": block_config_str,
}
with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
device_config = cs.EthosuDeviceConfig("ethos-u55-128")
block_configs = device_config.get_elementwise_block_config(
propagator,
None,
op_attrs,
ofm_shape,
ofm_layout,
"NWHC",
None,
"int8",
"int8",
)
assert len(block_configs) == 1
assert block_configs[0].output_shape == expected_block_shape
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_conv2d_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.convolution import match_ethosu_conv2d, conv2d_compute
from .infra import make_matrices
@pytest.mark.parametrize("kernel", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("dilation", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_channels", [8, 57])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_conv2d_matcher(
kernel, stride, dilation, padding, ifm_channels, ifm_layout, ofm_layout
):
if ifm_layout == "NHWC":
ifm_shape = (1, 12, 15, ifm_channels)
else:
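        # NHCWB16 packs channels into 16-wide bricks: 1 + (c - 1) // 16 == ceil(c / 16)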
ifm_shape = (1, 12, 1 + ((ifm_channels - 1) // 16), 15, 16)
ofm_channels = 8
kernel_h, kernel_w = kernel
ifm = te.placeholder(ifm_shape, dtype="int8")
weight = te.placeholder((ofm_channels, kernel_h, kernel_w, ifm_channels), dtype="int8")
scale_bias = te.placeholder((ofm_channels, 10), dtype="uint8")
lut = te.placeholder((), dtype="uint8")
out = conv2d_compute(
ifm=ifm,
weight=weight,
scale_bias=scale_bias,
lut=lut,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
weight_zero_point=0,
strides=stride,
padding=padding,
dilation=dilation,
activation="NONE",
clip_min=0,
clip_max=0,
upscale="NONE",
rounding_mode="TFL",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
(
ifm_transform,
ifm_offset,
weight_transform,
weight_offset,
scale_bias_transform,
scale_bias_offset,
) = make_matrices(
"ethosu_conv2d",
kernel,
stride,
padding,
ifm_layout,
ofm_layout,
dilation,
ifm_channels,
ofm_channels,
)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_conv2d(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 3
assert part.propagators[0].transform == ifm_transform
assert part.propagators[0].offset == ifm_offset
assert part.propagators[1].transform == weight_transform
assert part.propagators[1].offset == weight_offset
assert part.propagators[2].transform == scale_bias_transform
assert part.propagators[2].offset == scale_bias_offset
@pytest.mark.parametrize(
"ifm_layout, ofm_layout, ifm_channels, expected_cycles",
[
("NHWC", "NHWC", 24, 2304),
("NHCWB16", "NHWC", 12, 2352),
("NHWC", "NHCWB16", 38, 7056),
("NHCWB16", "NHCWB16", 55, 4608),
],
)
def test_ethosu_conv2d_block_config_from_matcher(
ifm_layout, ofm_layout, ifm_channels, expected_cycles
):
ofm_channels = 10
ifm_height = 123
ifm_width = 155
ifm_shape = (
(1, ifm_height, ifm_width, ifm_channels)
if ifm_layout == "NHWC"
else (1, ifm_height, 1 + ((ifm_channels - 1) // 16), ifm_width, 16)
)
weight_shape = (ofm_channels, 3, 3, ifm_channels)
scale_bias_shape = (ofm_channels, 10)
ifm = te.placeholder(ifm_shape, dtype="int8")
weight = te.placeholder(weight_shape, dtype="int8")
scale_bias = te.placeholder(scale_bias_shape, dtype="uint8")
lut = te.placeholder((), dtype="uint8")
out = conv2d_compute(
ifm=ifm,
weight=weight,
scale_bias=scale_bias,
lut=lut,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
weight_zero_point=0,
strides=(1, 1),
padding=(0, 0, 0, 0),
dilation=(1, 1),
activation="NONE",
clip_min=0,
clip_max=0,
upscale="NONE",
rounding_mode="TFL",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_conv2d(out, device_config)
ofm_shape = [int(i) for i in part.subgraph.output_tensor.shape]
# Add inputs and outputs to the part
input_tensor = cs.Tensor(ifm_shape, "int8")
part.set_input(0, input_tensor)
weight_tensor = cs.Tensor(weight_shape, "int8")
part.set_input(1, weight_tensor)
scale_bias_tensor = cs.Tensor(scale_bias_shape, "int8")
part.set_input(2, scale_bias_tensor)
output_tensor = cs.Tensor(ofm_shape, "int8")
part.set_output(output_tensor)
# Create a stripe of a size of the output tensor
order = [1, 2, 3, 4] if ofm_layout == "NHWC" else [1, 2, 4, 3, 0]
stripes = [1] * len(order)
offset = [0] * len(order)
stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
block = part.get_block_config(stripe_config)
    # Since we don't know the values of the variables passed to get_valid_block_configs
    # inside the matcher, the best we can do is verify the compute cycle count, since
    # the channel counts have a significant effect on it.
assert block.compute_cycles == expected_cycles
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_depthwise2d_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.depthwise import (
match_ethosu_depthwise_conv2d,
depthwise_conv2d_compute,
)
from .infra import make_matrices
@pytest.mark.parametrize("kernel", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("dilation", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_depthwise2d_matcher(kernel, stride, dilation, padding, ifm_layout, ofm_layout):
ofm_channels = 57
if ifm_layout == "NHWC":
ifm_shape = (1, 12, 15, ofm_channels)
else:
ifm_shape = (1, 12, 1 + ((ofm_channels - 1) // 16), 15, 16)
kernel_h, kernel_w = kernel
ifm = te.placeholder(ifm_shape, dtype="int8")
weight = te.placeholder((ofm_channels, kernel_h, kernel_w, 1), dtype="int8")
scale_bias = te.placeholder((ofm_channels, 10), dtype="uint8")
lut = te.placeholder((), dtype="uint8")
out = depthwise_conv2d_compute(
ifm=ifm,
weight=weight,
scale_bias=scale_bias,
lut=lut,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
weight_zero_point=0,
strides=stride,
padding=padding,
dilation=dilation,
activation="NONE",
clip_min=0,
clip_max=0,
rounding_mode="TFL",
upscale="NONE",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
ofm_dtype=ifm.dtype,
)
(
ifm_transform,
ifm_offset,
weight_transform,
weight_offset,
scale_bias_transform,
scale_bias_offset,
) = make_matrices(
"ethosu_depthwise_conv2d",
kernel,
stride,
padding,
ifm_layout,
ofm_layout,
dilation,
ofm_channels=ofm_channels,
)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_depthwise_conv2d(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 3
assert part.propagators[0].transform == ifm_transform
assert part.propagators[0].offset == ifm_offset
assert part.propagators[1].transform == weight_transform
assert part.propagators[1].offset == weight_offset
assert part.propagators[2].transform == scale_bias_transform
assert part.propagators[2].offset == scale_bias_offset
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_identity_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.identity import match_ethosu_identity, identity_compute
from .infra import make_matrices
def test_ethosu_identity_matcher():
ofm_channels = 21
ifm_shape = (1, 12, 15, ofm_channels)
ifm = te.placeholder(ifm_shape, dtype="int8")
lut = te.placeholder((), dtype="uint8")
out = identity_compute(
ifm=ifm,
lut=lut,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
activation="NONE",
)
length = len(ifm.shape)
ifm_transform = np.identity(length + 1).tolist()
ifm_offset = np.zeros(length, dtype="int64").tolist()
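# Identity performs no data movement, so the expected propagator is the
# (length + 1) x (length + 1) identity affine matrix with a zero offset.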
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_identity(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 1
assert part.propagators[0].transform == ifm_transform
assert part.propagators[0].offset == ifm_offset
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_inline_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import te
from tvm.topi.transform import reshape
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.inline import match_ethosu_inline
def test_ethosu_inline_matcher():
ifm_shape = (2, 5, 6)
new_shape = (2, 30)
ifm = te.placeholder(ifm_shape, dtype="int8")
out = reshape(ifm, new_shape)
ifm_transform = [
[0, 0, ifm_shape[0]],
[0, 0, ifm_shape[1]],
[0, 0, ifm_shape[2]],
[0, 0, 1],
]
ifm_offset = [0, 0, 0]
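# Each transform row has the form [0, 0, dim]: whatever the output stripe, the
# full input extent is required, reflecting that a reshape cannot be striped.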
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_inline(out, device_config)
assert isinstance(part, cs.InlinePart)
assert len(part.propagators) == 1
assert part.propagators[0].transform == ifm_transform
assert part.propagators[0].offset == ifm_offset
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_part.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
from tvm.contrib.ethosu.cascader.graph import BufferMode
from tvm.contrib.ethosu.cascader.parts import EthosuPart
def test_ethosu_part():
te_subgraph = cs.TESubgraph([], None)
output_quantum = [1, 2, 2, 8]
propagator = cs.Propagator(
[[1, 0, 0, 0, 2], [0, 1, 0, 0, 2], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[0, 0, 0, 0],
)
stripe_config = cs.StripeConfig(
[1, 4, 4, 16], [1, 64, 72, 96], [1, 4, 4, 16], [1, 2, 3, 4], [1, 16, 13, 6], [0, 0, 0, 0]
)
subkernels = 3
valid_block_configs = [cs.BlockConfig([1, 2, 4, 16], [1, 2, 4, 16], 15000, 7500)]
part = EthosuPart(
te_subgraph,
[propagator],
output_quantum,
subkernels,
valid_block_configs,
1,
)
input_tensor = cs.Tensor(shape=[1, 66, 74, 16], dtype="int8")
part.set_input(0, input_tensor)
output_tensor = cs.Tensor(shape=[1, 66, 74, 16], dtype="int8")
part.set_output(output_tensor)
assert part.get_stripe_align_hint() == output_quantum
# Check that the performance model runs; the output values are not verified
part.get_performance_info(stripe_config, BufferMode.ROLLING)
part.get_performance_info(stripe_config, BufferMode.RECOMPUTE)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_part_performance.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from functools import reduce
import numpy as np
import math
import tvm.contrib.ethosu.cascader as cs
from tvm.contrib.ethosu.cascader.device_config import _Shape
from .infra import make_matrices
@pytest.mark.parametrize(
"acc_config, expected",
[
("ethos-u55-256", (1, 0.125, 0.75, 0.375, 0.75)),
("ethos-u55-128", (1, 0.25, 1.5, 0.75, 0.75)),
("ethos-u55-64", (1, 0.5, 3, 1.5, 1.5)),
("ethos-u55-32", (2, 1, 6, 3, 3)),
],
)
def test_device_config_cycles(acc_config, expected):
device_config = cs.EthosuDeviceConfig(acc_config)
conv_type = "ethosu_conv2d"
conv_str = None
conv_ifm_dtype = "int8"
conv_ofm_dtype = "int8"
conv_activation = "LUT"
conv_cycles = device_config._get_output_cycles(
conv_type, conv_str, conv_ifm_dtype, conv_ofm_dtype, conv_activation
)
assert conv_cycles == expected[0]
pool_type = "ethosu_pooling"
pool_str = "MAX"
pool_ifm_dtype = "int8"
pool_ofm_dtype = "int8"
pool_activation = "NONE"
pool_cycles = device_config._get_output_cycles(
pool_type, pool_str, pool_ifm_dtype, pool_ofm_dtype, pool_activation
)
assert pool_cycles == expected[1]
add_type = "ethosu_binary_elementwise"
add_str = "ADD"
add_ifm_dtype = "int8"
add_ofm_dtype = "int8"
add_activation = "NONE"
add_cycles = device_config._get_output_cycles(
add_type, add_str, add_ifm_dtype, add_ofm_dtype, add_activation
)
assert add_cycles == expected[2]
mul_type = "ethosu_binary_elementwise"
mul_str = "MUL"
mul_ifm_dtype = "int8"
mul_ofm_dtype = "int8"
mul_activation = "NONE"
mul_cycles = device_config._get_output_cycles(
mul_type, mul_str, mul_ifm_dtype, mul_ofm_dtype, mul_activation
)
assert mul_cycles == expected[3]
mul_32_type = "ethosu_binary_elementwise"
mul_32_str = "MUL"
mul_32_ifm_dtype = "int8"
mul_32_ofm_dtype = "int32"
mul_32_activation = "NONE"
mul_32_cycles = device_config._get_output_cycles(
mul_32_type, mul_32_str, mul_32_ifm_dtype, mul_32_ofm_dtype, mul_32_activation
)
assert mul_32_cycles == expected[4]
@pytest.mark.parametrize(
"accelerator, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape, block_shape, input_block_shape, expected",
[
(
"ethos-u55-128",
"ethosu_conv2d",
"NONE",
(3, 3),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 16, 16, 96),
(1, 16, 16, 96),
(1, 8, 8, 16),
(1, 10, 10, 32),
167733,
),
(
"ethos-u55-128",
"ethosu_conv2d",
"NONE",
(10, 4),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 58, 13, 1),
(1, 25, 10, 276),
(1, 6, 10, 32),
(1, 18, 14, 8),
174105,
),
(
"ethos-u55-128",
"ethosu_depthwise_conv2d",
"NONE",
(3, 3),
(2, 2),
(1, 1),
(1, 1, 1, 1),
(1, 25, 10, 276),
(1, 13, 5, 276),
(1, 7, 6, 16),
(1, 15, 14, 16),
17590,
),
(
"ethos-u55-128",
"ethosu_depthwise_conv2d",
"NONE",
(4, 9),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 28, 81, 42),
(1, 25, 73, 41),
(1, 4, 16, 16),
(1, 7, 24, 16),
173414,
),
],
)
def test_conv_performance(
accelerator,
op_type,
activation,
kernel,
stride,
dilation,
padding,
in_shape,
out_shape,
block_shape,
input_block_shape,
expected,
):
ifm_channels = in_shape[3]
ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
op_type,
kernel,
stride,
padding,
"NHWC",
"NHWC",
dilation,
ifm_channels,
)
propagator = cs.Propagator(ifm_matrix, ifm_offset)
weight_propagator = cs.Propagator(weight_matrix, weight_offset)
subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)
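# The NPU decomposes a kernel into subkernels of at most 8x8, so the subkernel
# count is the product of ceiling divisions by 8 in each spatial dimension.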
device_config = cs.EthosuDeviceConfig(accelerator)
output_cycles = device_config._get_output_cycles(op_type, "", "int8", "int8", activation)
output_cycles *= reduce(lambda a, b: a * b, block_shape, 1)
is_partkernel = device_config.is_partkernel(
op_type, ifm_channels, "int8", kernel[0] * kernel[1]
)
compute_cycles = device_config._estimate_compute_cycles_per_block(
op_type,
_Shape(block_shape),
_Shape(input_block_shape),
kernel[0],
kernel[1],
ifm_channels,
"int8",
is_partkernel,
)
block_configs = [
cs.BlockConfig(input_block_shape, block_shape, compute_cycles, int(output_cycles))
]
output_quantum = [1, 1, 2, 8]
te_subgraph = cs.TESubgraph([], None)
part = cs.EthosuPart(
te_subgraph,
[propagator, weight_propagator],
output_quantum,
subkernels,
block_configs,
1,
)
part.set_input(0, cs.Tensor(in_shape, "int8"))
part.set_input(1, cs.Tensor([ifm_channels, kernel[0], kernel[1], out_shape[-1]], "int8"))
part.set_output(cs.Tensor(out_shape, "int8"))
stripes = [1] * len(output_quantum)
offset = [0] * len(output_quantum)
order = [1, 2, 3, 4]
stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)
compute_cycles = part.get_performance_info(stripe_config, cs.BufferMode.ROLLING).compute_cycles
tolerance = expected * 0.1
assert expected - tolerance <= compute_cycles <= expected + tolerance
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_pooling_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.pooling import match_ethosu_pooling, pooling_compute
from .infra import make_matrices
@pytest.mark.parametrize("pool_shape", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_pooling_matcher(pool_shape, stride, padding, ifm_layout, ofm_layout):
ofm_channels = 21
if ifm_layout == "NHWC":
ifm_shape = (1, 12, 15, ofm_channels)
else:
ifm_shape = (1, 12, 1 + ((ofm_channels - 1) // 16), 15, 16)
ifm = te.placeholder(ifm_shape, dtype="int8")
lut = te.placeholder((), dtype="uint8")
out = pooling_compute(
ifm=ifm,
lut=lut,
pooling_type="MAX",
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
pool_shape=pool_shape,
ofm_channels=ofm_channels,
strides=stride,
padding=padding,
activation="NONE",
clip_min=0,
clip_max=0,
rounding_mode="TFL",
upscale="NONE",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
(ifm_transform, ifm_offset, _, _, _, _) = make_matrices(
"ethosu_pooling",
pool_shape,
stride,
padding,
ifm_layout,
ofm_layout,
ofm_channels=ofm_channels,
)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_pooling(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 1
assert part.propagators[0].transform == ifm_transform
assert part.propagators[0].offset == ifm_offset
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_ethosu_unary_elementwise_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.unary_elementwise import (
match_ethosu_unary_elementwise,
unary_elementwise_compute,
)
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
def _make_matrices(ifm_layout, ofm_layout, ofm_channels):
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
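# Compose the identity mapping with the layout transform matrices: an NHCWB16
# OFM stripe is first mapped back to NHWC coordinates, and an NHCWB16 IFM then
# maps those NHWC coordinates into the brick layout.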
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
return ifm_matrix
@pytest.mark.parametrize(
"ofm_shape",
[
[1, 12, 15, 128],
[1, 16, 16, 16],
[1, 1, 1, 1024],
[1, 53, 91, 7],
[1, 182, 12, 72],
],
)
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("op_type", ["ABS", "CLZ"])
def test_ethosu_unary_elementwise_matcher(ofm_shape, ifm_layout, ofm_layout, op_type):
ifm_shape = ofm_shape.copy()
ofm_channels = ofm_shape[3]
nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
if ifm_layout == "NHCWB16":
ifm_shape = [
int(math.ceil(n))
for n in np.matmul(
nhwc_to_nhcwb16,
ifm_shape
+ [
1,
],
).tolist()[:-1]
]
if ofm_layout == "NHCWB16":
ofm_shape = [
int(math.ceil(n))
for n in np.matmul(
nhwc_to_nhcwb16,
ofm_shape
+ [
1,
],
).tolist()[:-1]
]
order = [1, 2, 4, 3, 0]
else:
order = [1, 2, 3, 4]
ifm = te.placeholder(ifm_shape, dtype="int8")
lut = te.placeholder((), dtype="uint8")
out = unary_elementwise_compute(
ifm=ifm,
lut=lut,
operator_type=op_type,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ofm_channels=ofm_channels,
activation="NONE",
clip_min=0,
clip_max=0,
rounding_mode="TFL",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
ifm_propagator = out.op.attrs["ifm_propagator"]
offset = [0] * len(ofm_shape)
stripes = [0] * len(ofm_shape)
output_stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
ifm_transform = _make_matrices(ifm_layout, ofm_layout, ofm_channels)
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
part = match_ethosu_unary_elementwise(out, device_config)
assert isinstance(part, cs.EthosuPart)
assert len(part.propagators) == 1
assert part.propagators[0].transform == ifm_transform
propagated_ifm = ifm_propagator.propagate(output_stripe_config).shape
# Layout transforms constructed with the exact number of output channels lose
# no information about the channel count, so the propagated IFM shape should match
assert ifm_shape == propagated_ifm
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
def test_tensor():
shape = [1, 2, 3]
dtype = "uint8"
is_constant = True
compression_ratio = 0.5
size = 6
tensor = cs.Tensor(shape, dtype, is_constant, compression_ratio)
assert tensor.shape == shape
assert tensor.dtype == dtype
assert tensor.is_constant == is_constant
assert tensor.compression_ratio == compression_ratio
assert tensor.size == size
def test_inline_part():
subgraph = cs.TESubgraph([], None)
part = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[0, 1, 0], [1, 0, 0], [0, 0, 1]],
[0, 0],
),
],
)
output_stripe_config = cs.StripeConfig([2, 4], [8, 8], [2, 4], [1, 2], [4, 2], [0, 0])
input_stripe_config = cs.StripeConfig([4, 2], [8, 8], [4, 2], [2, 1], [2, 4], [0, 0])
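# The propagator swaps the two spatial axes, so an output stripe of shape
# [2, 4] is expected to map to an input stripe of shape [4, 2], as asserted
# via calculate_input_stripe_configs below.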
assert part.input_tensors == [None]
assert part.output_tensor is None
assert len(part.propagators) == 1
assert part.in_line
assert part.get_stripe_align_hint() == [1, 1]
performance_info = part.get_performance_info(output_stripe_config, cs.BufferMode.RECOMPUTE)
assert performance_info.compute_cycles == 0
assert performance_info.read_bytes == [0]
assert performance_info.write_bytes == 0
input_stripe_configs = part.calculate_input_stripe_configs(output_stripe_config)
assert len(input_stripe_configs) == 1
assert input_stripe_configs[0] == input_stripe_config
def test_small_graph():
subgraph = cs.TESubgraph([], None)
part_a = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[0, 1, 0], [1, 0, 0], [0, 0, 1]],
[-1, -1],
),
],
)
part_b = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([10, 10], "uint8")
tensor_2 = cs.Tensor([9, 9], "uint8")
tensor_3 = cs.Tensor([10, 10], "uint8")
tensor_4 = cs.Tensor([10, 10], "uint8")
part_a.set_input(0, tensor_1)
part_a.set_input(1, tensor_2)
part_a.set_output(tensor_3)
tensor_1.add_consumer(part_a)
tensor_2.add_consumer(part_a)
tensor_3.add_producer(part_a)
part_b.set_input(0, tensor_3)
part_b.set_output(tensor_4)
tensor_3.add_consumer(part_b)
tensor_4.add_producer(part_b)
assert part_a.input_tensors == [tensor_1, tensor_2]
assert part_a.output_tensor == tensor_3
assert part_b.input_tensors == [tensor_3]
assert part_b.output_tensor == tensor_4
assert tensor_1.producers == []
assert tensor_1.consumers == [part_a]
assert tensor_2.producers == []
assert tensor_2.consumers == [part_a]
assert tensor_3.producers == [part_a]
assert tensor_3.consumers == [part_b]
assert tensor_4.producers == [part_b]
assert tensor_4.consumers == []
graph = cs.CascaderGraph([tensor_1, tensor_2], [tensor_4])
assert graph.input_tensors == [tensor_1, tensor_2]
assert graph.output_tensors == [tensor_4]
assert graph.part_order == [part_b, part_a]
for i, part in enumerate(graph.part_order):
assert graph.get_part_id(part) == i
def test_create_cascader_graph(TwoConv2DWithSliceTE):
_, te_graph, const_dict = TwoConv2DWithSliceTE
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
graph = cs.create_cascader_graph(te_graph, const_dict, device_config)
output_tensor = graph.output_tensors[0]
assert output_tensor.shape == [1, 6, 1, 6, 16]
assert len(output_tensor.producers) == 1
assert not output_tensor.is_constant
conv2_part = output_tensor.producers[0]
assert isinstance(conv2_part, cs.EthosuPart)
assert len(conv2_part.input_tensors) == 3
assert conv2_part.input_tensors[0].shape == [1, 6, 6, 64]
assert len(conv2_part.input_tensors[0].producers) == 1
assert not conv2_part.input_tensors[0].is_constant
assert conv2_part.input_tensors[1].shape == [16, 3, 3, 64]
assert len(conv2_part.input_tensors[1].producers) == 0
assert conv2_part.input_tensors[1].is_constant
assert conv2_part.input_tensors[2].shape == [16, 10]
assert len(conv2_part.input_tensors[2].producers) == 0
assert conv2_part.input_tensors[2].is_constant
slice_part = conv2_part.input_tensors[0].producers[0]
assert isinstance(slice_part, cs.InlinePart)
assert len(slice_part.input_tensors) == 1
assert slice_part.input_tensors[0].shape == [1, 12, 12, 64]
assert len(slice_part.input_tensors[0].producers) == 1
assert not slice_part.input_tensors[0].is_constant
conv1_part = slice_part.input_tensors[0].producers[0]
assert isinstance(conv1_part, cs.EthosuPart)
assert len(conv1_part.input_tensors) == 3
assert conv1_part.input_tensors[0].shape == [1, 12, 12, 8]
assert len(conv1_part.input_tensors[0].producers) == 0
assert not conv1_part.input_tensors[0].is_constant
assert conv1_part.input_tensors[1].shape == [64, 1, 1, 8]
assert len(conv1_part.input_tensors[1].producers) == 0
assert conv1_part.input_tensors[1].is_constant
assert conv1_part.input_tensors[2].shape == [64, 10]
assert len(conv1_part.input_tensors[2].producers) == 0
assert conv1_part.input_tensors[2].is_constant
def test_create_diamond_graph(MobileNetv2DiamondTE):
_, te_graph, const_dict = MobileNetv2DiamondTE
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
graph = cs.create_cascader_graph(te_graph, const_dict, device_config)
output_tensor = graph.output_tensors[0]
assert output_tensor.shape == [1, 56, 56, 24]
assert len(output_tensor.producers) == 1
assert not output_tensor.is_constant
add1_part = output_tensor.producers[0]
assert isinstance(add1_part, cs.EthosuPart)
assert len(add1_part.input_tensors) == 2
assert graph.get_part_id(add1_part) == 0
assert add1_part.input_tensors[0].shape == [1, 56, 56, 24]
assert len(add1_part.input_tensors[0].producers) == 1
assert not add1_part.input_tensors[0].is_constant
assert add1_part.input_tensors[1].shape == [1, 56, 56, 24]
assert len(add1_part.input_tensors[1].producers) == 1
assert not add1_part.input_tensors[1].is_constant
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,invalid-name
"""
Test the cascader in the compilation flow.
"""
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import _create_cascader
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig
from .. import infra as test_infra
from . import infra as cascader_test_infra
def _ethos_u55_cascader():
sram = MemoryRegion(
name="SRAM",
size=10**6,
read_bandwidth=16,
write_bandwidth=16,
read_latency=0,
write_latency=0,
burst_length=1,
)
flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
device_config = EthosuDeviceConfig("ethos-u55-256")
cascader_options = cascader_test_infra.make_options(
cascade_region=sram,
max_proposals=64,
stripe_factors=4,
max_plan_size=10,
max_open_plans=8,
max_closed_plans=32,
always_copy_size=1024,
disable_pareto_plans=False,
disable_pareto_proposals=False,
enable_striping=False,
)
return _create_cascader(
options=cascader_options,
io_region=sram,
constant_region=flash,
working_regions=[sram],
device_config=device_config,
)
def _compile_model(relay_function):
mod = tvm.IRModule()
mod["main"] = relay_function
mod = relay.transform.InferType()(mod)
tir_mod = _lower_to_tir(mod["main"], _ethos_u55_cascader())[0]
return tir_mod["main"]
def _create_single_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv1), conv1)
return func
def _create_double_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = test_infra.make_ethosu_conv2d(conv1, 4, 4, (1, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
return func
def _create_scalar_add():
ifm = relay.var("x", shape=(1, 5, 4, 3), dtype="int8")
ifm2 = relay.const(np.ones((1, 1, 1, 1)), dtype="int8")
add = test_infra.make_ethosu_binary_elementwise(
ifm, ifm2, ifm_channels=3, ifm2_channels=1, operator_type="ADD", ofm_dtype="int8"
)
func = relay.Function(relay.analysis.free_vars(add), add)
return func
def test_single_conv_compute_cycles_hint():
"""
Check that the "compute_cycles_hint" annotation is preserved through the
lowering flow for a single convolution.
"""
primfunc = _compile_model(_create_single_conv2d())
ops = primfunc.body.body.seq
compute_cycles_hints = [2944, 320]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_double_conv_compute_cycles_hint():
"""
Check that the "compute_cycles_hint" annotation is preserved through the
lowering flow for a double convolution.
"""
primfunc = _compile_model(_create_double_conv2d())
ops = primfunc.body.body.body.body.seq
compute_cycles_hints = [2944, 1408, 320, 240]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_scalar_add_compute_cycles_hint():
"""
Check that the "compute_cycles_hint" annotation is preserved through the
lowering flow for an add with scalar values.
"""
primfunc = _compile_model(_create_scalar_add())
ops = primfunc.body.body.seq
compute_cycles_hints = [16, 24]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_memory_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tensorflow as tf
import tflite.Model
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.micro import model_library_format as mlf
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
import tvm
from tvm import WorkspaceMemoryPools, WorkspacePoolInfo, PoolInfoProperties
from tvm.relay.backend.contrib.ethosu.codegen import extract_memory_info
from .. import infra
def _get_compilation_config(accel_type, enable_cascader, enable_striping):
enable_usmp = True
target = tvm.target.Target("c")
ethosu_target = tvm.target.Target("ethos-u")
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": 16,
"interface-api": "c",
"unpacked-api": True,
},
)
pass_config = {
"tir.disable_vectorize": True,
"relay.ext.ethos-u.options": {
"accelerator_config": accel_type,
"enable_cascader": enable_cascader,
"enable_striping": enable_striping,
},
"tir.usmp.enable": enable_usmp,
"tir.usmp.algorithm": "hill_climb",
"tir.disable_storage_rewrite": enable_usmp,
}
return target, ethosu_target, runtime, executor, pass_config
def _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader, enable_striping
):
target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
accel_type, enable_cascader, enable_striping
)
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"SRAM",
[target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=pool_size,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
),
]
)
with tvm.transform.PassContext(opt_level=3, config=pass_config):
lib = tvm.relay.build(
mod,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
params=params,
)
mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
return mlf_memory_map["main"][0]["workspace_size_bytes"]
@pytest.mark.parametrize(
"accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping",
[
("ethos-u55-256", 1067520, 14208),
("ethos-u55-128", 1067520, 4080),
("ethos-u55-64", 1067520, 4080),
("ethos-u55-32", 1067504, 4064),
],
)
def test_double_conv2d(
accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping
):
np.random.seed(1)
ifm_shape = (1, 321, 212, 6)
@tf.function
def tf_graph(x):
ofm_channels = 10
conv2d = tf.nn.conv2d(
x,
filters=tf.constant(
np.random.uniform(size=[3, 2, ifm_shape[3], ofm_channels]), # HWIO
dtype=tf.float32,
),
strides=(1, 1),
padding="VALID",
dilations=(2, 1),
)
conv2d = tf.nn.conv2d(
conv2d,
filters=tf.constant(
np.random.uniform(size=(1, 1, ofm_channels, 3)), # HWIO
dtype=tf.float32,
),
strides=(3, 2),
padding="SAME",
dilations=(1, 1),
)
return conv2d
_, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
# Run the graph without the cascader, with lots of memory
pool_size = 2000000
workspace_size_cascader_disabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=False, enable_striping=False
)
workspace_size_cascader_enabled_striping_disabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=False
)
# If striping is not done, the workspace size should be the same as with the cascader disabled
assert workspace_size_cascader_disabled == workspace_size_cascader_enabled_striping_disabled
# Run the same graph with the cascader, giving it less memory to persuade the cascader to cascade
pool_size = 600000
workspace_size_cascader_enabled_striping_enabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=True
)
assert workspace_size_cascader_disabled == expected_ws_size_without_striping
assert workspace_size_cascader_enabled_striping_enabled == expected_ws_size_with_striping
@pytest.mark.parametrize(
"accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping",
[
("ethos-u55-256", 180288, 15312),
("ethos-u55-128", 180288, 15312),
("ethos-u55-64", 180288, 14544),
("ethos-u55-32", 180272, 14544),
],
)
def test_depthwise2d_conv2d_pooling(
accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping
):
np.random.seed(2)
ifm_shape = (1, 80, 75, 3)
@tf.function
def tf_graph(x):
# This graph will execute as one cascade
ofm_channels = 7
conv2d = tf.nn.conv2d(
x,
filters=tf.constant(
np.random.uniform(size=[3, 2, ifm_shape[3], ofm_channels]), # HWIO
dtype=tf.float32,
),
strides=(1, 1),
padding="VALID",
dilations=(1, 1),
)
depthwise2d = tf.nn.depthwise_conv2d(
conv2d,
tf.constant(np.random.uniform(size=(3, 3, ofm_channels, 1)), dtype=tf.float32), # HWC1
strides=(1, 1, 1, 1),
padding="VALID",
dilations=(1, 1),
)
relu = tf.nn.relu(depthwise2d)
conv2d = tf.nn.conv2d(
relu,
filters=tf.constant(
np.random.uniform(size=[3, 2, ofm_channels, 2]), # HWIO
dtype=tf.float32,
),
strides=(1, 1),
padding="SAME",
dilations=(1, 1),
)
max_pool = tf.nn.max_pool(conv2d, (3, 3), (1, 1), "SAME")
return max_pool
_, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
# Run the graph without the cascader, with lots of memory
pool_size = 10**6
workspace_size_cascader_disabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=False, enable_striping=False
)
workspace_size_cascader_enabled_striping_disabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=False
)
# If striping is not done, the workspace size should be the same as with the cascader disabled
assert workspace_size_cascader_disabled == workspace_size_cascader_enabled_striping_disabled
# Run the same graph with the cascader, giving it less memory to persuade the cascader to cascade
pool_size = 50000
workspace_size_cascader_enabled_striping_enabled = _get_ethosu_workspace_size(
mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=True
)
assert workspace_size_cascader_disabled == expected_ws_size_without_striping
assert workspace_size_cascader_enabled_striping_enabled == expected_ws_size_with_striping
def test_multiple_memory_pools():
"""
The cascader does not support multiple workspace memory
pools. Check the correct error is thrown.
"""
np.random.seed(2)
ifm_shape = (1, 80, 75, 3)
target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
"ethos-u55-256", True, True
)
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"SRAM",
[target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=1,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
),
WorkspacePoolInfo(
"SRAM",
[target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=1,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
),
]
)
@tf.function
def tf_graph(x):
return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")
_, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
with pytest.raises(ValueError) as e:
with tvm.transform.PassContext(opt_level=3, config=pass_config):
tvm.relay.build(
mod,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
params=params,
)
expected_reason = "Exactly one workspace pool needs to be provided for the U55 cascader"
on_error = "A ValueError was caught but its reason is not the expected one."
assert expected_reason in str(e.value), on_error
def test_missing_memory_pools():
"""
The cascader requires memory pools to be present; check that the correct
error is thrown when there aren't any.
"""
np.random.seed(2)
ifm_shape = (1, 80, 75, 3)
target, _, runtime, executor, pass_config = _get_compilation_config("ethos-u55-256", True, True)
@tf.function
def tf_graph(x):
return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")
_, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
with pytest.raises(ValueError) as e:
with tvm.transform.PassContext(opt_level=3, config=pass_config):
tvm.relay.build(
mod,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=None,
params=params,
)
expected_reason = "Workspace memory pool needs to be provided for the U55 cascader"
on_error = "A ValueError was caught but its reason is not the expected one."
assert expected_reason in str(e.value), on_error
def test_invalid_accelerator():
"""
Check that an error is thrown when an unsupported accelerator configuration
is used.
"""
np.random.seed(2)
ifm_shape = (1, 80, 75, 3)
target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
"ethos-u65-256", True, True
)
workspace_memory_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
"SRAM",
[target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=1,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
),
]
)
@tf.function
def tf_graph(x):
return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")
_, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
with pytest.raises(ValueError) as e:
with tvm.transform.PassContext(opt_level=3, config=pass_config):
tvm.relay.build(
mod,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
params=params,
)
expected_reason = "Cascading is not supported for the U65 accelerator"
on_error = "A ValueError was caught but its reason is not the expected one."
assert expected_reason in str(e.value), on_error
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_pareto.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.tir import IntImm
from tvm.contrib.ethosu.cascader.pareto import (
_get_pareto_frontier,
_thin_vector,
_pareto_cull_plans,
)
from tvm.contrib.ethosu.cascader import (
Plan,
StripeConfig,
TensorConfig,
TensorConfigState,
BufferMode,
Tensor,
)
import pytest
import numpy as np
def _ref_get_pareto_frontier(costs):
is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(
costs[is_efficient] < c, axis=1
) # Keep any point with a lower cost
is_efficient[i] = True # And keep self
return is_efficient
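# For intuition, a small worked example (not part of the original test): with
# costs [[1, 4], [2, 2], [3, 3], [4, 1]], the point [3, 3] is dominated by
# [2, 2] (lower in both dimensions), so the frontier mask is
# [True, True, False, True].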
def _ref_thin_vector(vec, max_size):
if max_size < 1:
return []
if len(vec) <= max_size or len(vec) == 0:
return vec
if max_size == 1:
return [vec[0]]
samples = np.linspace(0, len(vec), max_size - 1, endpoint=False).astype(int)
samples = np.append(samples, len(vec) - 1)
return vec[samples]
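# For example, thinning a vector of length 10 to max_size 4 samples indices
# np.linspace(0, 10, 3, endpoint=False).astype(int) == [0, 3, 6] and then
# appends the final index 9, keeping the elements at [0, 3, 6, 9].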
def _ref_pareto_cull_plans(plans, points):
if len(plans) <= points:
return plans
plans = np.array(sorted(plans, key=lambda x: x.memory_usage))
costs = []
for plan in plans:
costs.append(np.array([plan.memory_usage, plan.cycles]))
is_efficient = _ref_get_pareto_frontier(np.array(costs))
culled_plans = plans[is_efficient]
thinned_plans = (
culled_plans
if len(culled_plans) <= points
else _ref_thin_vector(np.array(culled_plans), points)
)
return thinned_plans
@pytest.mark.parametrize("num_costs", [1, 10, 30, 100, 300, 1000])
def test_get_pareto_frontier(num_costs):
cost_low = 1
cost_high = 100
dims = 2
costs = []
for i in range(num_costs):
costs.append(list(np.random.randint(cost_low, cost_high, size=(dims,))))
reference = list(_ref_get_pareto_frontier(np.array(costs)))
result = _get_pareto_frontier(costs)
assert result == reference
@pytest.mark.parametrize("vec_length", [0, 1, 10, 25, 100])
@pytest.mark.parametrize("max_size", [0, 1, 2, 5, 11, 51])
def test_thin_vector(vec_length, max_size):
def _make_vector(length):
vector = []
for i in range(length):
obj = IntImm("int32", i)
vector.append(obj)
return vector
vector = _make_vector(vec_length)
reference = list(_ref_thin_vector(np.array(vector), max_size))
result = _thin_vector(vector, max_size)
assert result == reference
@pytest.mark.parametrize("num_plans", [0, 1, 10, 25, 100])
@pytest.mark.parametrize("max_plans", [0, 1, 2, 5, 11, 51])
def test_pareto_cull_plans(num_plans, max_plans, SRAM):
memory_usage_low = 1
memory_usage_high = 1000
cycles_low = 100
cycles_high = 10000
def _make_plan(memory_usage, cycles):
output_config = TensorConfig(
tensor=Tensor([1], "int8"),
home_region=SRAM,
state=TensorConfigState.BOUNDARY,
buffer_mode=BufferMode.RECOMPUTE,
stripe_configs=[StripeConfig([1], [1], [1], [1], [1], [0])],
)
return Plan(
tensor_configs={},
open_configs=[],
output_config=output_config,
part_group=[],
interior_region=SRAM,
memory_usage=memory_usage,
cycles=cycles,
)
def _make_plans(num):
plans = []
for _ in range(num):
memory_usage = np.random.randint(memory_usage_low, memory_usage_high)
cycles = np.random.randint(cycles_low, cycles_high)
plan = _make_plan(memory_usage, cycles)
plans.append(plan)
return plans
plans = _make_plans(num_plans)
reference = list(_ref_pareto_cull_plans(plans, max_plans))
result = _pareto_cull_plans(plans, max_plans, False)
assert result == reference
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_plan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm.contrib.ethosu.cascader as cs
import pytest
def test_plan(DRAM, SRAM):
subgraph = cs.TESubgraph([], None)
part = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([10, 10], "uint8")
tensor_2 = cs.Tensor([10, 10], "uint8")
part.set_input(0, tensor_1)
part.set_output(tensor_2)
tensor_1.add_consumer(part)
tensor_2.add_producer(part)
output_stripe_config = cs.StripeConfig(
shape=[5, 5],
extent=[10, 10],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[0, 0],
)
tensor_config_out = cs.TensorConfig(
tensor=tensor_2,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[output_stripe_config],
copy_tensor=False,
)
input_stripe_config = part.calculate_input_stripe_configs(output_stripe_config)[0]
tensor_config_in = cs.TensorConfig(
tensor=tensor_1,
home_region=DRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_config],
copy_tensor=False,
)
tensor_configs = {tensor_1: tensor_config_in, tensor_2: tensor_config_out}
open_configs = frozenset([tensor_config_in])
part_group = frozenset([part])
interior_region = SRAM
memory_usage = 100
cycles = 20
plan = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_out,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
assert plan.tensor_configs == tensor_configs
assert plan.open_configs == open_configs
assert plan.output_config == tensor_config_out
assert plan.part_group == part_group
assert plan.interior_region == interior_region
assert plan.memory_usage == memory_usage
assert plan.cycles == cycles
def test_plan_merge(DRAM, SRAM):
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
part_2 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[0, 0, 6], [0, 0, 6], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[1, 0], [0, 1]],
[0],
),
],
)
tensor_1 = cs.Tensor([20, 20], "uint8")
tensor_2 = cs.Tensor([10, 10], "uint8")
tensor_3 = cs.Tensor([6, 6], "uint8")
tensor_4 = cs.Tensor([10], "uint8")
tensor_5 = cs.Tensor([10, 10], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
part_2.set_input(0, tensor_2)
part_2.set_input(1, tensor_3)
part_2.set_input(2, tensor_4)
part_2.set_output(tensor_5)
tensor_2.add_consumer(part_2)
tensor_3.add_consumer(part_2)
tensor_4.add_consumer(part_2)
tensor_5.add_producer(part_2)
output_stripe_config = cs.StripeConfig(
shape=[5, 5],
extent=[10, 10],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[0, 0],
)
tensor_config_5 = cs.TensorConfig(
tensor=tensor_5,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[output_stripe_config],
copy_tensor=False,
)
input_stripe_configs = part_2.calculate_input_stripe_configs(output_stripe_config)
tensor_config_4 = cs.TensorConfig(
tensor=tensor_4,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[input_stripe_configs[2]],
copy_tensor=False,
)
tensor_config_3 = cs.TensorConfig(
tensor=tensor_3,
home_region=SRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.RECOMPUTE,
stripe_configs=[input_stripe_configs[1]],
copy_tensor=False,
)
tensor_config_2 = cs.TensorConfig(
tensor=tensor_2,
home_region=SRAM,
state=cs.TensorConfigState.INTERIOR,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_configs[0]],
copy_tensor=False,
)
input_stripe_config = part_1.calculate_input_stripe_configs(input_stripe_configs[0])[0]
tensor_config_1 = cs.TensorConfig(
tensor=tensor_1,
home_region=DRAM,
state=cs.TensorConfigState.BOUNDARY,
buffer_mode=cs.BufferMode.ROLLING,
stripe_configs=[input_stripe_config],
copy_tensor=False,
)
tensor_configs = {tensor_1: tensor_config_1, tensor_2: tensor_config_2}
open_configs = frozenset([tensor_config_2])
part_group = frozenset([part_1])
interior_region = SRAM
memory_usage = 100
cycles = 20
plan_1 = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_2,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
tensor_configs = {
tensor_2: tensor_config_2,
tensor_3: tensor_config_3,
tensor_4: tensor_config_4,
tensor_5: tensor_config_5,
}
open_configs = frozenset([tensor_config_2, tensor_config_3])
part_group = frozenset([part_2])
interior_region = SRAM
memory_usage = 200
cycles = 30
plan_2 = cs.Plan(
tensor_configs=tensor_configs,
open_configs=open_configs,
output_config=tensor_config_5,
part_group=part_group,
interior_region=interior_region,
memory_usage=memory_usage,
cycles=cycles,
)
merged_plan = plan_1.merge(plan_2)
assert merged_plan.tensor_configs == {
tensor_1: tensor_config_1,
tensor_2: tensor_config_2,
tensor_3: tensor_config_3,
tensor_4: tensor_config_4,
tensor_5: tensor_config_5,
}
assert merged_plan.open_configs == frozenset([tensor_config_3])
assert merged_plan.output_config == tensor_config_5
assert merged_plan.part_group == frozenset([part_1, part_2])
assert merged_plan.interior_region == interior_region
assert merged_plan.memory_usage == plan_1.memory_usage + plan_2.memory_usage
assert merged_plan.cycles == plan_1.cycles + plan_2.cycles
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_plan_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm.contrib.ethosu.cascader as cs
from .infra import make_simple_home_map, make_options, ethosu_enabled
from tvm.contrib.ethosu.cascader.plan_generator import (
_generate_output_stripe_configs,
_generate_single_plans,
_generate_graph_plans,
)
@pytest.mark.parametrize("stripe_factors", [3, 4, 8, 16, 10])
def test_generate_output_stripe_configs_disable_striping(stripe_factors):
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([800, 800], "uint8")
tensor_2 = cs.Tensor([400, 400], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
assert (
len(
_generate_output_stripe_configs(
part_1, stripe_factors, enable_striping=False, multi_dimensional=False
)
)
== 1
)
def test_generate_output_stripe_configs_multi_dimensional():
stripe_factors = 3
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([800, 800], "uint8")
tensor_2 = cs.Tensor([400, 400], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
expected_stripe_configs = {
cs.StripeConfig([1, 1], [400, 400], [1, 1], [1, 2], [400, 400], [0, 0]),
cs.StripeConfig([1, 1], [400, 400], [1, 1], [2, 1], [400, 400], [0, 0]),
cs.StripeConfig([200, 1], [400, 400], [200, 1], [1, 2], [2, 400], [0, 0]),
cs.StripeConfig([200, 1], [400, 400], [200, 1], [2, 1], [2, 400], [0, 0]),
cs.StripeConfig([400, 1], [400, 400], [400, 1], [2, 1], [1, 400], [0, 0]),
cs.StripeConfig([1, 200], [400, 400], [1, 200], [1, 2], [400, 2], [0, 0]),
cs.StripeConfig([1, 200], [400, 400], [1, 200], [2, 1], [400, 2], [0, 0]),
cs.StripeConfig([200, 200], [400, 400], [200, 200], [2, 1], [2, 2], [0, 0]),
cs.StripeConfig([200, 200], [400, 400], [200, 200], [1, 2], [2, 2], [0, 0]),
cs.StripeConfig([400, 200], [400, 400], [400, 200], [2, 1], [1, 2], [0, 0]),
cs.StripeConfig([1, 400], [400, 400], [1, 400], [1, 2], [400, 1], [0, 0]),
cs.StripeConfig([200, 400], [400, 400], [200, 400], [1, 2], [2, 1], [0, 0]),
cs.StripeConfig([400, 400], [400, 400], [400, 400], [1, 2], [1, 1], [0, 0]),
}
output_stripe_configs = _generate_output_stripe_configs(
part=part_1, stripe_factors=stripe_factors, enable_striping=True, multi_dimensional=True
)
assert len(output_stripe_configs) == len(expected_stripe_configs)
assert set(output_stripe_configs) == expected_stripe_configs
def test_generate_output_stripe_configs_uncascadable_axis():
stripe_factors = 3
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 0, 200], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([800, 200], "uint8")
tensor_2 = cs.Tensor([400, 400], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
expected_stripe_configs = {
cs.StripeConfig([1, 400], [400, 400], [1, 400], [1, 2], [400, 1], [0, 0]),
cs.StripeConfig([200, 400], [400, 400], [200, 400], [1, 2], [2, 1], [0, 0]),
cs.StripeConfig([400, 400], [400, 400], [400, 400], [1, 2], [1, 1], [0, 0]),
}
output_stripe_configs = _generate_output_stripe_configs(
part=part_1, stripe_factors=stripe_factors, enable_striping=True, multi_dimensional=True
)
assert len(output_stripe_configs) == len(expected_stripe_configs)
assert set(output_stripe_configs) == expected_stripe_configs
def test_generate_output_stripe_configs_single_dimension():
stripe_factors = 3
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([800, 800], "uint8")
tensor_2 = cs.Tensor([400, 400], "uint8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
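    # The single-dimensional generator stripes along one axis at a time, so
    # each expected config keeps the other axis at its full extent of 400.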
expected_stripe_configs = {
cs.StripeConfig([400, 1], [400, 400], [400, 1], [2, 1], [1, 400], [0, 0]),
cs.StripeConfig([400, 200], [400, 400], [400, 200], [2, 1], [1, 2], [0, 0]),
cs.StripeConfig([1, 400], [400, 400], [1, 400], [1, 2], [400, 1], [0, 0]),
cs.StripeConfig([200, 400], [400, 400], [200, 400], [1, 2], [2, 1], [0, 0]),
cs.StripeConfig([400, 400], [400, 400], [400, 400], [1, 2], [1, 1], [0, 0]),
}
output_stripe_configs = _generate_output_stripe_configs(
part=part_1, stripe_factors=stripe_factors, enable_striping=True, multi_dimensional=False
)
assert len(output_stripe_configs) == len(expected_stripe_configs)
assert set(output_stripe_configs) == expected_stripe_configs
def test_generate_single_plans(SRAM, DRAM):
subgraph = cs.TESubgraph([], None)
part_1 = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[2, 0, 0], [0, 2, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([800, 800], "int8")
tensor_2 = cs.Tensor([400, 400], "int8")
part_1.set_input(0, tensor_1)
part_1.set_output(tensor_2)
tensor_1.add_consumer(part_1)
tensor_2.add_producer(part_1)
home_map = {
tensor_1: [SRAM, DRAM],
tensor_2: [SRAM],
}
options = make_options(cascade_region=SRAM, stripe_factors=1)
output_stripe_configs = _generate_output_stripe_configs(
part_1,
options.stripe_factors,
enable_striping=True,
multi_dimensional=True,
)
plans = _generate_single_plans(part_1, output_stripe_configs, home_map, options)
for plan in plans:
assert plan.interior_region == SRAM
assert plan.part_group == frozenset([part_1])
assert set(plan.tensor_configs.keys()) == set([tensor_1, tensor_2])
for open_config in plan.open_configs:
assert open_config.state == cs.TensorConfigState.INTERIOR
def test_generate_graph_plans(SRAM, DRAM):
num_part_groups = 3
stripe_factors = 4
max_plan_size = 10
subgraph = cs.TESubgraph([], None)
part_a = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
cs.Propagator(
[[0, 1, 0], [1, 0, 0], [0, 0, 1]],
[-1, -1],
),
],
)
part_b = cs.InlinePart(
subgraph,
[
cs.Propagator(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[0, 0],
),
],
)
tensor_1 = cs.Tensor([10, 10], "int8")
tensor_2 = cs.Tensor([9, 9], "int8")
tensor_3 = cs.Tensor([10, 10], "int8")
tensor_4 = cs.Tensor([10, 10], "int8")
part_a.set_input(0, tensor_1)
part_a.set_input(1, tensor_2)
part_a.set_output(tensor_3)
tensor_1.add_consumer(part_a)
tensor_2.add_consumer(part_a)
tensor_3.add_producer(part_a)
part_b.set_input(0, tensor_3)
part_b.set_output(tensor_4)
tensor_3.add_consumer(part_b)
tensor_4.add_producer(part_b)
graph = cs.CascaderGraph([tensor_1, tensor_2], [tensor_4])
home_map = {
tensor_1: [SRAM, DRAM],
tensor_2: [SRAM],
tensor_3: [SRAM],
tensor_4: [SRAM, DRAM],
}
options = make_options(
cascade_region=SRAM,
stripe_factors=stripe_factors,
max_plan_size=max_plan_size,
)
closed_plans = _generate_graph_plans(graph, home_map, options)
assert len(closed_plans) == num_part_groups
if ethosu_enabled:
def test_plan_generator_two_conv2d(FLASH, SRAM, TwoConv2DGraph):
num_part_groups = 3
graph = TwoConv2DGraph
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
stripe_factors=4,
max_plan_size=10,
)
closed_plans = _generate_graph_plans(graph, home_map, options)
assert len(closed_plans) == num_part_groups
def test_plan_generator_two_conv2d_with_slice(FLASH, SRAM, TwoConv2DWithSliceGraph):
num_part_groups = 4 # Note this is not 6 because 'slice' has an opaque Propagator
graph = TwoConv2DWithSliceGraph
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
stripe_factors=4,
max_plan_size=10,
)
closed_plans = _generate_graph_plans(graph, home_map, options)
assert len(closed_plans) == num_part_groups
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_propagator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from math import isclose
from tvm.contrib.ethosu.cascader import StripeConfig, Propagator
def test_propagator():
transform = [
[1, 0, 0, 0],
[0, 1 / 2, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1],
]
offset = [-1, 1, 2]
propagator = Propagator(
transform=transform,
offset=offset,
)
assert list(propagator.offset) == offset
for i, row in enumerate(transform):
for j, value in enumerate(row):
assert isclose(propagator.transform[i][j], value)
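# Worked example for the second parametrized case below: the transform
# [[0.5, 0, 0], [0, 0.5, 0], [0, 0, 1]] halves both axes, so an input stripe
# shape of [3, 5] propagates to ceil([1.5, 2.5]) == [2, 3], the strides [3, 5]
# become the fractional strides [1.5, 2.5], and the extent [27, 50] becomes
# ceil([13.5, 25.0]) == [14, 25].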
@pytest.mark.parametrize(
["propagator", "input_stripe_config", "output_stripe_config"],
[
(
Propagator(
transform=[
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
],
offset=[0, 0, 0, 0, 0],
),
StripeConfig(
shape=[1, 12, 14, 36],
extent=[1, 24, 18, 72],
strides=[1, 12, 14, 36],
order=[1, 2, 3, 4],
stripes=[1, 2, 2, 2],
offset=[0, 0, 0, 0],
),
StripeConfig(
shape=[1, 12, 3, 14, 16],
extent=[1, 24, 5, 18, 16],
strides=[1, 12, 2.25, 14, 0],
order=[1, 2, 4, 3, 0],
stripes=[1, 2, 2, 2, 1],
offset=[0, 0, 0, 0, 0],
),
),
(
Propagator(
transform=[
[0.5, 0, 0],
[0, 0.5, 0],
[0, 0, 1],
],
offset=[0, 0],
),
StripeConfig(
shape=[3, 5],
extent=[27, 50],
strides=[3, 5],
order=[1, 2],
stripes=[9, 10],
offset=[0, 0],
),
StripeConfig(
shape=[2, 3],
extent=[14, 25],
strides=[1.5, 2.5],
order=[1, 2],
stripes=[9, 10],
offset=[0, 0],
),
),
(
Propagator(
transform=[
[2, 0, 0, 4],
[0, 1, 0, 2],
[0, 0, 0, 8],
[0, 0, 0, 1],
],
offset=[-2, -1, 0],
),
StripeConfig(
shape=[4, 6, 32],
extent=[48, 60, 64],
strides=[4, 6, 32],
order=[1, 2, 3],
stripes=[12, 10, 2],
offset=[0, 0, 0],
),
StripeConfig(
shape=[12, 8, 8],
extent=[100, 62, 8],
strides=[8, 6, 0],
order=[1, 2, 0],
stripes=[12, 10, 1],
offset=[-2, -1, 0],
),
),
],
)
def test_propagate(propagator, input_stripe_config, output_stripe_config):
result_stripe_config = propagator.propagate(input_stripe_config)
assert result_stripe_config == output_stripe_config
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_proposal_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm.contrib.ethosu.cascader.proposal_generator import generate_proposals
from .infra import make_simple_home_map, make_options, ethosu_enabled
if ethosu_enabled:
def test_generate_proposals(FLASH, SRAM, TwoConv2DGraph):
graph = TwoConv2DGraph
min_sram = 3700
max_sram = 11700
input_configs = 1
parts = 2
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=32,
stripe_factors=4,
max_plan_size=10,
)
proposals = generate_proposals(graph, home_map, options)
for proposal in proposals:
assert 0 < len(proposal.plans) <= parts
assert len(proposal.input_tensor_configs) == input_configs
assert len(proposal.part_group) == parts
assert min_sram < proposal.memory_usage < max_sram
assert proposal.cycles > 0
def test_generate_proposals_binary(FLASH, SRAM, BinaryGraph):
graph = BinaryGraph
input_configs = 2
parts = 3
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=32,
stripe_factors=4,
max_plan_size=10,
)
proposals = generate_proposals(graph, home_map, options)
for proposal in proposals:
assert 0 < len(proposal.plans) <= parts
assert len(proposal.input_tensor_configs) == input_configs
assert len(proposal.part_group) == parts
assert proposal.cycles > 0
def test_generate_proposals_mobilenetv1_start(FLASH, SRAM, MobileNetv1StartGraph):
graph = MobileNetv1StartGraph
min_sram = 200000
max_sram = 1300000
input_configs = 1
parts = 8
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=32,
stripe_factors=5,
max_plan_size=10,
)
proposals = generate_proposals(graph, home_map, options)
for proposal in proposals:
assert 0 < len(proposal.plans) <= parts
assert len(proposal.input_tensor_configs) == input_configs
assert len(proposal.part_group) == parts
assert min_sram < proposal.memory_usage < max_sram
assert proposal.cycles > 0
def test_generate_proposals_mobilenetv1(FLASH, SRAM, MobileNetv1Graph):
graph = MobileNetv1Graph
min_sram = 200000
max_sram = 1300000
input_configs = 1
parts = 27
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=32,
stripe_factors=5,
max_plan_size=10,
)
proposals = generate_proposals(graph, home_map, options)
for proposal in proposals:
assert 0 < len(proposal.plans) <= parts
assert len(proposal.input_tensor_configs) == input_configs
assert len(proposal.part_group) == parts
assert min_sram < proposal.memory_usage < max_sram
assert proposal.cycles > 0
def test_generate_proposals_mobilenetv2diamond(FLASH, SRAM, MobileNetv2DiamondGraph):
graph = MobileNetv2DiamondGraph
min_sram = 370000
max_sram = 990000
input_configs = 1
parts = 5
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=64,
stripe_factors=5,
max_plan_size=10,
)
proposals = generate_proposals(graph, home_map, options)
for proposal in proposals:
assert 0 < len(proposal.plans) <= parts
assert len(proposal.input_tensor_configs) == input_configs
assert len(proposal.part_group) == parts
assert min_sram < proposal.memory_usage < max_sram
assert proposal.cycles > 0
def test_generate_proposals_mobilenetv1_disable_striping(FLASH, SRAM, MobileNetv1Graph):
graph = MobileNetv1Graph
home_map = make_simple_home_map(graph, SRAM, FLASH)
options = make_options(
cascade_region=SRAM,
max_proposals=32,
stripe_factors=5,
max_plan_size=10,
enable_striping=False,
)
proposals = generate_proposals(graph, home_map, options)
assert len(proposals) == 1
proposal = proposals[0]
for plan in proposal.plans:
for stripe_config in plan.output_config.stripe_configs:
                for shape_dim, stride_dim in zip(stripe_config.shape, stripe_config.strides):
# The striding and shape sizes in each dimension should be the same
# if striping is disabled
assert int(shape_dim) == int(stride_dim)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position, invalid-name
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
from . import infra
def test_cascade(
    SRAM, FLASH, TwoConv2DWithSliceTE, TwoConv2DTE, MobileNetv1StartTE, MobileNetv1TE
):
fixtures = [
TwoConv2DTE,
TwoConv2DWithSliceTE,
MobileNetv1StartTE,
MobileNetv1TE,
]
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
for sch, te_graph, const_dict in fixtures:
options = infra.make_options(
cascade_region=SRAM,
max_proposals=64,
stripe_factors=4,
max_plan_size=10,
max_open_plans=8,
max_closed_plans=32,
always_copy_size=1024,
disable_pareto_plans=False,
disable_pareto_proposals=False,
)
cs.cascade(sch, te_graph, const_dict, options, SRAM, FLASH, [SRAM], device_config)
def test_compute_cycles_annotation(SRAM, FLASH, TwoConv2DTE):
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
options = infra.make_options(
cascade_region=SRAM,
max_proposals=64,
stripe_factors=4,
max_plan_size=10,
max_open_plans=8,
max_closed_plans=32,
always_copy_size=1024,
disable_pareto_plans=False,
disable_pareto_proposals=False,
enable_striping=False,
)
sch, te_graph, const_dict = TwoConv2DTE
cs.cascade(sch, te_graph, const_dict, options, SRAM, FLASH, [SRAM], device_config)
# Stages that should have compute cycle annotations
# [copy, copy, conv2d, copy, conv2d]
stages = [6, 8, 9, 18, 19]
# Expected hints for each operation
compute_cycles_hints = [4096, 5120, 1440, 2560, 3072]
for stage, compute_cycles_hint in zip(stages, compute_cycles_hints):
op = sch.stages[stage]
op_iter_vars = op.leaf_iter_vars[0]
op_attrs = op.iter_var_attrs[op_iter_vars]
assert op_attrs.pragma_keys[0] == "compute_cycles_hint"
assert op_attrs.pragma_values[0] == compute_cycles_hint
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_stripe_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm.contrib.ethosu.cascader.stripe_config import StripeConfig, count_stripes
def test_stripe_config():
shape = [1, 2, 3]
extent = [2, 3, 4]
strides = [3, 4, 5]
order = [4, 5, 6]
stripes = [5, 6, 7]
offset = [6, 7, 8]
hash_value = 3107995860559090954
stripe_config = StripeConfig(
shape=shape,
extent=extent,
strides=strides,
order=order,
stripes=stripes,
offset=offset,
)
assert stripe_config.shape == shape
assert stripe_config.extent == extent
assert stripe_config.strides == strides
assert stripe_config.order == order
assert stripe_config.stripes == stripes
assert stripe_config.offset == offset
assert hash(stripe_config) == hash_value
@pytest.mark.parametrize(
"mismatch", [None, "shape", "extent", "strides", "order", "stripes", "offset"]
)
def test_stripe_config_equal(mismatch):
init_dict = {
"shape": [1, 2, 3],
"extent": [2, 3, 4],
"strides": [3, 4, 5],
"order": [4, 5, 6],
"stripes": [5, 6, 7],
"offset": [6, 7, 8],
}
stripe_config_a = StripeConfig(**init_dict)
if mismatch:
init_dict[mismatch] = [1, 1, 1]
stripe_config_b = StripeConfig(**init_dict)
if not mismatch:
assert stripe_config_a == stripe_config_b
else:
assert stripe_config_a != stripe_config_b
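# Worked example for the second case below: stripes of shape [3, 3] with
# strides [2, 2] are laid over a [10, 10] extent. Stripes at the upper edge of
# each axis are clipped by the extent to size 2, giving 16 full [3, 3]
# stripes, 4 stripes clipped in each single axis, and 1 clipped in both.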
@pytest.mark.parametrize(
["stripe_config", "expected_stripe_counts"],
[
(
StripeConfig(
shape=[3, 3, 3],
extent=[9, 9, 9],
strides=[3, 3, 3],
order=[1, 2, 3],
stripes=[3, 3, 3],
offset=[0, 0, 0],
),
{
(3, 3, 3): 27,
},
),
(
StripeConfig(
shape=[3, 3],
extent=[10, 10],
strides=[2, 2],
order=[1, 2],
stripes=[5, 5],
offset=[0, 0],
),
{
(3, 3): 16,
(2, 3): 4,
(3, 2): 4,
(2, 2): 1,
},
),
(
StripeConfig(
shape=[3, 3, 9],
extent=[9, 9, 9],
strides=[3, 3, 0],
order=[1, 2, 3],
stripes=[3, 3, 1],
offset=[0, 0, 0],
),
{
(3, 3, 9): 9,
},
),
(
StripeConfig(
shape=[5, 5],
extent=[8, 8],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[0, 0],
),
{
(5, 5): 1,
(3, 5): 1,
(5, 3): 1,
(3, 3): 1,
},
),
(
StripeConfig(
shape=[5, 5],
extent=[8, 8],
strides=[5, 5],
order=[1, 2],
stripes=[2, 2],
offset=[-1, -2],
),
{
(4, 3): 2,
(4, 5): 2,
},
),
(
StripeConfig(
shape=[13, 7],
extent=[128, 73],
strides=[13, 7],
order=[1, 2],
stripes=[11, 12],
offset=[-10, -5],
),
{
(3, 1): 1,
(3, 2): 1,
(8, 7): 10,
(8, 2): 1,
(13, 7): 90,
(13, 1): 9,
(8, 1): 1,
(3, 7): 10,
(13, 2): 9,
},
),
],
)
def test_count_stripes(stripe_config, expected_stripe_counts):
assert count_stripes(stripe_config) == expected_stripe_counts
@pytest.mark.parametrize(
["stripe_config", "expected_stripe_counts"],
[
(
StripeConfig(
shape=[4, 4],
extent=[16, 16],
strides=[2, 2],
order=[1, 2],
stripes=[7, 7],
offset=[0, 0],
),
{
(4, 4): 1,
(2, 4): 6,
(4, 2): 6,
(2, 2): 36,
},
),
(
StripeConfig(
shape=[4, 4],
extent=[8, 8],
strides=[2, 2],
order=[1, 2],
stripes=[6, 3],
offset=[-5, 0],
),
{
(1, 4): 2,
(2, 4): 3,
(2, 2): 6,
(1, 2): 4,
},
),
],
)
def test_count_stripes_sliding_window(stripe_config, expected_stripe_counts):
assert count_stripes(stripe_config, enable_sliding_window=True) == expected_stripe_counts
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/cascader/test_tensor_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.contrib.ethosu.cascader import (
StripeConfig,
Tensor,
MemoryRegion,
TensorConfig,
TensorConfigState,
BufferMode,
)
import pytest
def test_tensor_config(DRAM, SRAM):
stripe_config = StripeConfig(
shape=[1, 2, 3],
extent=[2, 3, 4],
strides=[3, 4, 5],
order=[4, 5, 6],
stripes=[5, 6, 7],
offset=[6, 7, 8],
)
tensor = Tensor(
shape=[10, 10, 10],
dtype="int8",
)
home_region = DRAM
state = TensorConfigState.BOUNDARY
buffer_mode = BufferMode.ROLLING
copy_tensor = True
copy_region = SRAM
tensor_config = TensorConfig(
tensor=tensor,
home_region=home_region,
state=state,
buffer_mode=buffer_mode,
stripe_configs=[stripe_config],
copy_tensor=copy_tensor,
copy_region=copy_region,
)
assert tensor_config.tensor == tensor
assert tensor_config.home_region == home_region
assert tensor_config.state == state
assert tensor_config.buffer_mode == buffer_mode
assert tensor_config.stripe_configs == [stripe_config]
assert tensor_config.copy_tensor == copy_tensor
assert tensor_config.copy_region == copy_region
assert hash(tensor_config) != 0
def test_get_rolling_buffer(DRAM):
stripe_config = StripeConfig(
shape=[9, 4, 7],
extent=[9, 16, 21],
strides=[3, 5, 7],
order=[1, 3, 2],
stripes=[1, 3, 3],
offset=[0, 0, 0],
)
tensor = Tensor(shape=[9, 16, 21], dtype="int32", compression_ratio=0.5)
tensor_config = TensorConfig(
tensor=tensor,
home_region=DRAM,
state=TensorConfigState.BOUNDARY,
buffer_mode=BufferMode.ROLLING,
stripe_configs=[stripe_config],
)
assert tensor_config.get_buffer_size() == 2016
def test_get_recompute_buffer(DRAM):
stripe_config = StripeConfig(
shape=[4, 5, 7],
extent=[6, 7, 14],
strides=[2, 3, 7],
order=[1, 3, 2],
stripes=[2, 2, 2],
offset=[0, 0, 0],
)
tensor = Tensor(shape=[6, 7, 14], dtype="int32", compression_ratio=0.5)
tensor_config = TensorConfig(
tensor=tensor,
home_region=DRAM,
state=TensorConfigState.BOUNDARY,
buffer_mode=BufferMode.RECOMPUTE,
stripe_configs=[stripe_config],
)
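    # A sanity check on the expected value: recompute buffering holds one full
    # stripe, and 4 * 5 * 7 int32 elements at a compression ratio of 0.5 give
    # 140 * 4 * 0.5 == 280 bytes.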
assert tensor_config.get_buffer_size() == 280
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/infra.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides infrastructure to verify the correctness of
the command stream produced.
Currently it invokes Vela to generate a Vela-optimized TFLite model
in which the command stream is contained as a custom operator.
This module includes methods to parse the custom operator, extract
the command stream, and perform an equivalency check for single-operator
test cases.
"""
from typing import List
import os
import struct
import numpy as np
import tflite.Model
import math
from enum import IntEnum
import tensorflow as tf
from ethosu.vela.register_command_stream_generator import CmdMode
from ethosu.vela.register_command_stream_generator import cmd0
from ethosu.vela.register_command_stream_generator import cmd1
import tvm
from tvm import relay
import tvm.relay.backend.contrib.ethosu.op as ethosu_ops
from tvm.topi.nn.utils import get_pad_tuple
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend.contrib.ethosu import preprocess
import tvm.relay.testing.tf as tf_testing
from tvm import WorkspaceMemoryPools, WorkspacePoolInfo, PoolInfoProperties
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.testing.aot import (
AOTCompiledTestModel,
AOTDataLinkage,
AOTTestModel,
AOTTestRunner,
compile_models,
run_and_check,
)
class AttachType(IntEnum):
kGroupRoot = 1
kInline = 2
kInlinedAlready = 3
kScope = 4
kScanUpdate = 5
def print_payload(payload):
cmds = deserialize_command_stream(payload)
for cmd_val in cmds:
cmd, val = parse_cmd(cmd_val)
s = str(cmd)
s = s.ljust(40)
s += str(val)
print(s)
def parse_cmd(binary_cmd):
code = binary_cmd[0] & 0x0000FFFF # lower 16 bits
param = binary_cmd[0] >> 16 # higher 16 bits
payload_mode = CmdMode(code & CmdMode.Mask)
if payload_mode == CmdMode.Payload32:
command = cmd1(code & CmdMode.CmdOpMask)
value = binary_cmd[1]
else:
command = cmd0(code & CmdMode.CmdOpMask)
value = param
return command, value
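# Each command word packs a 16-bit command code in the lower half and a 16-bit
# immediate parameter in the upper half. cmd0 commands take their value from
# that parameter, while cmd1 commands (CmdMode.Payload32) read a full 32-bit
# value from the following word.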
def deserialize_command_stream(blob):
assert isinstance(blob, bytes)
payload_bytes = struct.unpack("<{0}I".format(len(blob) // 4), blob)
cmms = []
# remove_header
payload_bytes = payload_bytes[8:]
idx = 0
while idx < len(payload_bytes):
cmd = []
code = payload_bytes[idx]
idx += 1
cmd.append(code)
payload_mode = CmdMode(code & CmdMode.Mask)
if payload_mode == CmdMode.Payload32:
value = payload_bytes[idx]
idx += 1
cmd.append(value)
cmms.append(cmd)
return cmms
def _get_workspace_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
workspace pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_WORKSPACE_POOL_SIZE"
return prefix + pool_name.upper() + postfix
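# For example, _get_workspace_size_define_macro("my_memory_pool") returns
# "TVMGEN_DEFAULT_MY_MEMORY_POOL_WORKSPACE_POOL_SIZE".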
def create_test_runner(
accel="ethos-u55-256",
enable_usmp=True,
enable_cascader=False,
enable_striping=False,
workspace_pools=None,
):
file_dir = os.path.dirname(os.path.abspath(__file__))
test_root = os.path.join(file_dir, "reference_system")
_, ethosu_variant, ethosu_macs = accel.split("-")
ethosu_variant = ethosu_variant.upper()
prologue = """
uart_init();
EthosuInit();
struct ethosu_driver* ethos_u = ethosu_reserve_driver();
"""
if workspace_pools:
for pool in workspace_pools.pools:
prologue = (
prologue
+ f"""
#ifdef {_get_workspace_size_define_macro(pool.pool_name)}
__attribute__((section(".bss.noinit.tvm"), aligned(16)))
static uint8_t {pool.pool_name}[{_get_workspace_size_define_macro(pool.pool_name)}];
#endif
"""
)
return AOTTestRunner(
makefile="corstone300",
prologue=prologue,
epilogue="""
ethosu_release_driver(ethos_u);
""",
includes=["uart.h", "ethosu_55.h", "ethosu_mod.h", "hard_fault.h"],
parameters={
"ETHOSU_TEST_ROOT": test_root,
"NPU_MACS": ethosu_macs,
"NPU_VARIANT": ethosu_variant,
},
pass_config={
"relay.ext.ethos-u.options": {
"accelerator_config": accel,
"enable_cascader": enable_cascader,
"enable_striping": enable_striping,
},
"tir.usmp.enable": enable_usmp,
"tir.usmp.algorithm": "hill_climb",
"tir.disable_storage_rewrite": enable_usmp,
},
)
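# A minimal usage sketch (the module and data names below are illustrative
# assumptions, not part of this infrastructure):
# test_runner = create_test_runner("ethos-u55-256", enable_cascader=True)
# compiled_models = build_source(mod, input_data, output_data, test_runner)
# verify_source(compiled_models, test_runner)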
def build_source(
module,
inputs,
outputs,
test_runner,
output_tolerance=0,
workspace_pools=None,
):
return compile_models(
models=AOTTestModel(
module=module,
inputs=inputs,
outputs=outputs,
output_tolerance=output_tolerance,
extra_memory_in_bytes=0,
),
interface_api="c",
use_unpacked_api=True,
workspace_memory_pools=workspace_pools,
workspace_byte_alignment=16,
pass_config=test_runner.pass_config,
)
def verify_source(models: List[AOTCompiledTestModel], test_runner):
"""
    This method verifies the generated source from an NPU module by building it and running it on an FVP.
"""
interface_api = "c"
run_and_check(
models,
test_runner,
interface_api,
workspace_byte_alignment=16,
data_linkage=AOTDataLinkage(section="ethosu_scratch", alignment=16),
)
class InputGenerator:
def __init__(self, random_state):
self._random_state = random_state
def generate(self, size, dtype):
if dtype == np.float32:
print("random float32")
return self._random_state.uniform(-1, 1, size).astype(dtype)
else:
print("random (u)int min=%d max=%d", np.iinfo(dtype).min, np.iinfo(dtype).max)
low = np.iinfo(dtype).min
high = np.iinfo(dtype).max + 1
return self._random_state.randint(low, high, size, dtype)
def generate_ref_data_tflite(model):
"""
    This method generates reference data by running the specified model in the
    TFLite interpreter with random input data.
    The random input data and the generated output data are returned.
"""
expected_output_data = {}
interpreter = tf.lite.Interpreter(
model_content=model,
experimental_op_resolver_type=tf.lite.experimental.OpResolverType.BUILTIN_REF,
)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Initialize random generators with a fixed seed to get deterministic results
seed = 0
random_state = np.random.RandomState(seed)
inputgen = InputGenerator(random_state)
# Generate input data
input_data = {
input_detail["name"]: inputgen.generate(
input_detail["shape"],
input_detail["dtype"],
)
for input_detail in input_details
}
input_index = {input_detail["name"]: input_detail["index"] for input_detail in input_details}
for input_name in input_data.keys():
data = input_data[input_name]
index = input_index[input_name]
interpreter.set_tensor(index, data)
interpreter.invoke()
expected_output_data = {
output_detail["name"]: interpreter.get_tensor(output_detail["index"])
for output_detail in output_details
}
return input_data, expected_output_data
def get_tflite_model(model_url):
"""Get a TFLite model from URL."""
tflite_model_file = tf_testing.get_workload_official(model_url[0], model_url[1])
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
return tflite_model_buf
def get_tflite_graph(tf_func, shapes, ranges=None):
tensor_specs = [tf.TensorSpec(shape, dtype=tf.float32) for shape in shapes]
if not ranges:
ranges = [(0, 1) for _ in shapes]
concrete_func = tf_func.get_concrete_function(*tensor_specs)
# Convert the model
def representative_dataset():
for _ in range(100):
inputs = []
for i, shape in enumerate(shapes):
data = np.random.uniform(
low=ranges[i][0], high=ranges[i][1], size=tuple(shape)
).astype("float32")
inputs.append(data)
yield inputs
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_graph = converter.convert()
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
relay_module, params = relay.frontend.from_tflite(tflite_model)
mod = partition_for_ethosu(relay_module, params)
return mod, tflite_graph
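# Example usage (the tf.function below is a hypothetical model used purely for
# illustration):
# @tf.function
# def model_func(x):
#     return tf.nn.relu(x)
# mod, tflite_graph = get_tflite_graph(model_func, shapes=[(1, 8, 8, 3)])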
def compare_ethosu_with_reference(
mod,
input_data,
output_data,
accel_type: str,
output_tolerance=0,
print_cmm=False,
enable_cascader=None,
):
if enable_cascader is None:
enable_cascader = "u65" not in accel_type
pool_name = "my_memory_pool"
host_target = tvm.target.Target("c")
ethosu_target = tvm.target.Target("ethos-u")
workspace_pools = WorkspaceMemoryPools(
[
WorkspacePoolInfo(
pool_name,
[host_target, ethosu_target],
PoolInfoProperties(
size_hint_bytes=2400000,
read_bandwidth_bytes_per_cycle=16,
write_bandwidth_bytes_per_cycle=16,
target_burst_bytes={ethosu_target: 1},
),
)
]
)
test_runner = create_test_runner(
accel_type,
enable_usmp=True,
enable_cascader=enable_cascader,
enable_striping=False,
workspace_pools=workspace_pools,
)
compiled_models = build_source(
mod,
input_data,
output_data,
test_runner,
workspace_pools=workspace_pools,
output_tolerance=output_tolerance,
)
# Assumes only two runtime.Modules are created -- i.e. single offload module
ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
# Verify generated C source
if print_cmm:
get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
compilation_artifacts = get_artifacts(ethosu_module)
cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
print_payload(cmms)
verify_source(compiled_models, test_runner)
def compare_tvm_with_tflite(
tf_func,
shapes,
accel_type,
ranges=None,
output_tolerance=0,
print_cmm=False,
enable_cascader=None,
):
mod, tflite_graph = get_tflite_graph(tf_func, shapes, ranges)
# Generate reference data
input_data, output_data = generate_ref_data_tflite(tflite_graph)
compare_ethosu_with_reference(
mod,
input_data,
output_data,
accel_type,
output_tolerance=output_tolerance,
print_cmm=print_cmm,
enable_cascader=enable_cascader,
)
class EthosUAnnotator(ExprMutator):
"""Annotate entire graph for Ethos-U offload"""
def __init__(self):
super(EthosUAnnotator, self).__init__()
self.compiler = "ethos-u"
self.last_call = True
def visit_call(self, call):
curr_last = self.last_call
self.last_call = False
params = []
for arg in call.args:
param = super().visit(arg)
if isinstance(param, relay.expr.Var):
param = compiler_begin(param, self.compiler)
params.append(param)
new_call = relay.Call(call.op, params, call.attrs)
if curr_last:
new_call = compiler_end(new_call, self.compiler)
return new_call
def visit_constant(self, constant):
new_constant = compiler_begin(constant, self.compiler)
return new_constant
def create_ethosu_partition(mod):
mod["main"] = EthosUAnnotator().visit(mod["main"])
mod = relay.transform.MergeCompilerRegions()(mod)
mod = relay.transform.InferType()(mod)
mod = relay.transform.PartitionGraph()(mod)
mod = relay.transform.InferType()(mod)
mod = preprocess.preprocess_ext_io()(mod)
return mod
def generate_weights_data(shape, dtype):
size = 1
for dim in shape:
size *= dim
return (np.arange(size) % 255).reshape(shape).astype(dtype)
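# For example, generate_weights_data((2, 2), "int8") returns [[0, 1], [2, 3]],
# a deterministic ramp modulo 255.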
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
"""A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
args = call.args
conv_args = []
remove_indices = [0]
if remove_constants:
remove_indices += [41, 42, 44, 45]
for i, arg in enumerate(args):
if i in remove_indices:
continue
elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
conv_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.BufferLoad) and not include_buffers:
conv_args.append(arg.indices[0])
else:
conv_args.append(arg)
return conv_args
def compute_ofm_shape(ifm_shape, padding, kernel_shape, strides, dilation=[1, 1]):
assert len(strides) == 2
assert len(dilation) == 2
assert len(kernel_shape) == 2
if isinstance(padding, tuple):
h = (
ifm_shape[1] - (kernel_shape[0] - 1) * dilation[0] + padding[0] + padding[2]
) // strides[0]
w = (
ifm_shape[2] - (kernel_shape[1] - 1) * dilation[1] + padding[1] + padding[3]
) // strides[1]
elif padding.lower() == "valid":
h = math.ceil((ifm_shape[1] - (kernel_shape[0] - 1) * dilation[0]) / strides[0])
w = math.ceil((ifm_shape[2] - (kernel_shape[1] - 1) * dilation[1]) / strides[1])
elif padding.lower() == "same":
h = math.ceil(ifm_shape[1] / strides[0])
w = math.ceil(ifm_shape[2] / strides[1])
ofm_shape = [ifm_shape[0], h, w, ifm_shape[3]]
return ofm_shape
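# For example, with ifm_shape=[1, 8, 8, 3], padding="SAME", kernel_shape=[3, 3]
# and strides=[1, 1], the OFM shape is [1, ceil(8 / 1), ceil(8 / 1), 3], i.e.
# [1, 8, 8, 3].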
def compute_padding_shape(ifm_shape, ofm_shape, padding, kernel_shape, strides, dilation=[1, 1]):
assert len(strides) == 2
assert len(dilation) == 2
assert len(kernel_shape) == 2
if padding.lower() == "valid":
return [0, 0, 0, 0]
if padding.lower() == "same":
effective_kernel_shape = [
dilation[0] * (kernel_shape[0] - 1) + 1,
dilation[1] * (kernel_shape[1] - 1) + 1,
]
pad_along_height = max(
(ofm_shape[1] - 1) * strides[0] + effective_kernel_shape[0] - ifm_shape[1], 0
)
pad_along_width = max(
(ofm_shape[2] - 1) * strides[1] + effective_kernel_shape[1] - ifm_shape[2], 0
)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return [pad_top, pad_left, pad_bottom, pad_right]
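# Continuing the example above, "SAME" padding with ifm_shape=[1, 8, 8, 3],
# ofm_shape=[1, 8, 8, 3], kernel_shape=[3, 3] and strides=[1, 1] gives
# pad_along_height = pad_along_width = 2, so the result is
# [pad_top, pad_left, pad_bottom, pad_right] == [1, 1, 1, 1].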
def make_ethosu_conv2d(
ifm,
ifm_channels,
ofm_channels,
kernel_shape,
padding,
strides,
dilation,
lut=relay.const([], dtype="int8"),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int8",
scale_bias_dtype="uint8",
rounding_mode="TFL",
upscale="NONE",
):
# conv params
weight_shape = (ofm_channels, kernel_shape[0], kernel_shape[1], ifm_channels)
padding = get_pad_tuple(padding, kernel_shape)
scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
weight_data = generate_weights_data(weight_shape, weight_dtype)
weight = relay.const(weight_data, dtype=weight_dtype)
conv = ethosu_ops.ethosu_conv2d(
ifm,
weight,
scale_bias,
lut=lut,
ifm_scale=0.5,
ifm_zero_point=10,
weight_zero_point=12,
ofm_scale=0.25,
ofm_zero_point=14,
kernel_shape=kernel_shape,
ofm_channels=ofm_channels,
strides=strides,
padding=padding,
dilation=dilation,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale=upscale,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return conv
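# A minimal usage sketch (the input shape and parameters are illustrative
# assumptions):
# ifm = relay.var("ifm", shape=(1, 16, 16, 3), dtype="int8")
# conv = make_ethosu_conv2d(
#     ifm, ifm_channels=3, ofm_channels=8, kernel_shape=(3, 3),
#     padding="SAME", strides=(1, 1), dilation=(1, 1),
# )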
def make_ethosu_depthwise_conv2d(
ifm,
channels,
kernel_shape,
padding,
strides,
dilation,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int8",
scale_bias_dtype="uint8",
rounding_mode="TFL",
):
# params
weight_shape = (channels, kernel_shape[0], kernel_shape[1], 1)
padding = get_pad_tuple(padding, kernel_shape)
scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
weight_data = generate_weights_data(weight_shape, weight_dtype)
weight = relay.const(weight_data, dtype=weight_dtype)
depthwise = ethosu_ops.ethosu_depthwise_conv2d(
ifm,
weight,
scale_bias,
lut=relay.const([], dtype="int8"),
ifm_scale=0.6,
ifm_zero_point=11,
weight_zero_point=13,
ofm_scale=0.26,
ofm_zero_point=15,
kernel_shape=kernel_shape,
ofm_channels=channels,
strides=strides,
padding=padding,
dilation=dilation,
activation=activation,
clip_min=15 if activation == "CLIP" else 0,
clip_max=105 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale="NONE",
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return depthwise
def get_pooling_args(call, include_buffers=False):
args = call.args
pooling_args = []
for i, arg in enumerate(args):
if isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
pooling_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.BufferLoad) and not include_buffers:
pooling_args.append(arg.indices[0])
else:
pooling_args.append(arg)
return pooling_args
def make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
upscale="NONE",
):
pooling = ethosu_ops.ethosu_pooling(
ifm,
lut=relay.const([], dtype="int8"),
pooling_type=pooling_type,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
pool_shape=pool_shape,
ofm_channels=ofm_channels,
strides=strides,
padding=padding,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
upscale=upscale,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return pooling
def get_binary_elementwise_args(call, include_buffers=False):
args = call.args
binary_elementwise_args = []
for i, arg in enumerate(args):
if isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
binary_elementwise_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.BufferLoad) and not include_buffers:
binary_elementwise_args.append(arg.indices[0])
else:
binary_elementwise_args.append(arg)
return binary_elementwise_args
def make_ethosu_binary_elementwise(
ifm,
ifm2,
ifm_channels,
ifm2_channels,
operator_type,
ofm_dtype,
reversed_operands=False,
activation="NONE",
ifm_layout="NHWC",
ifm2_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
):
ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
ifm=ifm,
ifm2=ifm2,
lut=relay.const([], dtype="int8"),
operator_type=operator_type,
ifm_scale=1,
ifm_zero_point=0,
ifm2_scale=1,
ifm2_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ifm_channels=ifm_channels,
ifm2_channels=ifm2_channels,
reversed_operands=reversed_operands,
activation=activation,
ofm_dtype=ofm_dtype,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
ifm_layout=ifm_layout,
ifm2_layout=ifm2_layout,
ofm_layout=ofm_layout,
)
return ethosu_binary_elementwise
def make_ethosu_identity(
ifm,
lut=relay.const([], dtype="int8"),
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
activation="NONE",
):
identity = ethosu_ops.ethosu_identity(
ifm,
lut=lut,
ifm_scale=ifm_scale,
ifm_zero_point=ifm_zero_point,
ofm_scale=ofm_scale,
ofm_zero_point=ofm_zero_point,
activation=activation,
)
return identity
def make_ethosu_unary_elementwise(
ifm,
ofm_channels,
operator_type,
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
rounding_mode="TFL",
):
ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
ifm=ifm,
lut=relay.const([], dtype="int8"),
operator_type=operator_type,
ifm_scale=1,
ifm_zero_point=0,
ofm_scale=1,
ofm_zero_point=0,
ofm_channels=ofm_channels,
activation=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
rounding_mode=rounding_mode,
ifm_layout=ifm_layout,
ofm_layout=ofm_layout,
)
return ethosu_unary_elementwise
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/reference_system/ethosu_55.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_55_H_
#define TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_55_H_
/* Define Arm(R) Ethos(TM)-U55 specific IRQs & base address */
#define ETHOSU_NPU_FAIL (1 << 4)
#define ETHOSU_IRQ ((IRQn_Type)56)
#define ETHOSU_BASE_ADDRESS ((void*)0x48102000)
#endif // TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_55_H_
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/reference_system/ethosu_mod.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_MOD_H_
#define TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_MOD_H_
#include <ARMCM55.h>
// TODO(@grant-arm): Remove device specific information once RTOS support is available
#include <ethosu_driver.h>
#include <stdio.h>
#include "ethosu_55.h"
struct ethosu_driver ethosu0_driver;
void ethosuIrqHandler0() { ethosu_irq_handler(ðosu0_driver); }
// Initialize Arm(R) Ethos(TM)-U NPU driver
int EthosuInit() {
if (ethosu_init(ðosu0_driver, (void*)ETHOSU_BASE_ADDRESS, NULL, 0, 1, 1)) {
printf("Failed to initialize NPU.\n");
return -1;
}
// Assumes SCB->VTOR points to RW memory
NVIC_SetVector(ETHOSU_IRQ, (uint32_t)ðosuIrqHandler0);
NVIC_EnableIRQ(ETHOSU_IRQ);
return 0;
}
#endif // TVM_RUNTIME_CONTRIB_ETHOS_U_ETHOSU_MOD_H_
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/reference_system/hard_fault.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOS_U_HARD_FAULT_H_
#define TVM_RUNTIME_CONTRIB_ETHOS_U_HARD_FAULT_H_
struct ExcContext {
uint32_t r0;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r12;
uint32_t lr;
uint32_t pc;
uint32_t xPsr;
};
void HardFault_Handler() {
int irq;
struct ExcContext* e;
uint32_t sp;
asm volatile(
"mrs %0, ipsr \n" // Read IPSR (Exception number)
"sub %0, #16 \n" // Get it into IRQn_Type range
"tst lr, #4 \n" // Select the stack which was in use
"ite eq \n"
"mrseq %1, msp \n"
"mrsne %1, psp \n"
"mov %2, sp \n"
: "=r"(irq), "=r"(e), "=r"(sp));
printf("Hard fault. irq=%d, pc=0x%08lu, lr=0x%08lu, xpsr=0x%08lu, sp=0x%08lu\n", irq, e->pc,
e->lr, e->xPsr, sp);
printf("%11s cfsr=0x%08lu bfar=0x%08lu\n", "", SCB->CFSR, SCB->BFAR);
printf("EXITTHESIM\n");
while (1 == 1)
;
}
#endif // TVM_RUNTIME_CONTRIB_ETHOS_U_HARD_FAULT_H_
| https://github.com/zk-ml/tachikoma |
tests/python/contrib/test_ethosu/test_attr_passing.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu import util
def test_compiler_attr():
config = {
"accelerator_config": "ethos-u55-32",
}
with tvm.transform.PassContext(opt_level=3, config={"relay.ext.ethos-u.options": config}):
with tvm.target.Target("c"):
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
accel_config_str = compiler_attrs.accelerator_config
assert accel_config_str == config["accelerator_config"]
def test_compiler_attr_default():
default_config = {
"accelerator_config": "ethos-u55-256",
}
with tvm.transform.PassContext(opt_level=3):
with tvm.target.Target("c"):
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
accel_config_str = compiler_attrs.accelerator_config
assert accel_config_str == default_config["accelerator_config"]
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |