import argparse
import os
import time
import numpy as np
import tvm
from tvm import te, rpc, autotvm, relay, topi
from tvm.autotvm.task import extract_from_program
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor, utils
from tvm.contrib.debugger import debug_executor
from mxnet.gluon.model_zoo import vision
import vta
from vta.top import graph_pack
def parse_arguments():
parser = argparse.ArgumentParser(description="Train a model for image classification.")
parser.add_argument(
"--model",
type=str,
default="resnet18_v1",
choices=["resnet18_v1"],
help="Input model name.",
)
parser.add_argument(
"--start-name",
type=str,
default="nn.max_pool2d",
help="The name of the node where packing starts",
)
parser.add_argument(
"--stop-name",
type=str,
default="nn.global_avg_pool2d",
help="The name of the node where packing stops",
)
parser.add_argument(
"--debug-profile", action="store_true", help="Show layer-wise time cost profiling results"
)
parser.add_argument(
"--device", default="vta", choices=["vta", "arm_cpu"], help="Select device target"
)
parser.add_argument(
"--measurements", type=int, default=1, help="Number of measurements during AutoTVM search"
)
parser.add_argument("--tuner", type=str, default="random", help="AutoTVM search strategy")
parser.add_argument(
"--log-filename", type=str, default="resnet-18.log", help="AutoTVM log file name"
)
return parser.parse_args()
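# Register VTA-specific AutoTVM templates for conv2d and dense so that task
# extraction below yields tunable tasks targeting the accelerator.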
def register_vta_tuning_tasks():
from tvm.autotvm.task.topi_integration import TaskExtractEnv, deserialize_args
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
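# Instantiating TaskExtractEnv makes sure the AutoTVM task-extraction machinery is
# initialized before the templates below are registered.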
TaskExtractEnv()
@autotvm.task.register("topi_nn_conv2d", override=True)
def _topi_nn_conv2d(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
with tvm.target.vta():
res = topi.nn.conv2d(*args, **kwargs)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_conv2d_nchw([res])
else:
s = te.create_schedule([res.op])
return s, [A, W, res]
@autotvm.task.register("topi_nn_dense", override=True)
def _topi_nn_dense(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
A, W = args[:2]
with tvm.target.vta():
res = topi.nn.dense(*args, **kwargs)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_dense([res])
else:
s = te.create_schedule([res.op])
return s, [A, W, res]
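# Front-end compilation: import the MXNet Gluon model into Relay, quantize it to
# int8, and (for the VTA target) pack the graph into the accelerator's blocked layout.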
def compile_network(opt, env, target):
dtype_dict = {"data": "float32"}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
gluon_model = vision.get_model(opt.model, pretrained=True)
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
relay_prog = relay.quantize.quantize(mod["main"], params=params)
if target.device_name == "vta":
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
relay_prog,
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=opt.start_name,
stop_name=opt.stop_name,
)
return relay_prog, params
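# Tuning loop: each extracted task is tuned with the chosen search strategy; results
# accumulate in a temporary log from which the best configurations are picked.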
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=1000,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
try_winograd=True,
):
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
if tuner == "xgb" or tuner == "xgb-rank":
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
n_trial_ = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial_,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial_, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
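# Main driver: connect to the RPC tracker (or a local simulator session), compile the
# network, extract and tune conv2d tasks, then rebuild with the best schedules and
# benchmark inference on the remote device.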
if __name__ == "__main__":
opt = parse_arguments()
assert tvm.runtime.enabled("rpc")
env = vta.get_env()
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
if env.TARGET != "sim":
reconfig_start = time.time()
remote = autotvm.measure.request_remote(
env.TARGET, tracker_host, int(tracker_port), timeout=10000
)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
reconfig_time = time.time() - reconfig_start
print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
else:
remote = rpc.LocalSession()
target = env.target if opt.device == "vta" else env.target_vta_cpu
ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0)
print("Initial compile...")
relay_prog, params = compile_network(opt, env, target)
register_vta_tuning_tasks()
print("Extracting tasks...")
tasks = extract_from_program(
func=relay_prog,
params=params,
ops=(relay.op.get("nn.conv2d"),),
target=tvm.target.Target(target, host=env.target_host),
)
print("Tuning...")
tuning_opt = {
"log_filename": opt.log_filename,
"tuner": opt.tuner,
"n_trial": 1e9,
"early_stopping": None,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(build_func=vta.vta_autotvm_build_func),
runner=autotvm.RPCRunner(
env.TARGET,
tracker_host,
int(tracker_port),
number=4,
min_repeat_ms=150,
repeat=opt.measurements,
timeout=60,
),
),
}
tune_tasks(tasks, **tuning_opt)
with autotvm.tophub.context(target, extra_files=[opt.log_filename]):
print("Compiling network with best tuning parameters...")
if target.device_name != "vta":
with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
graph, lib, params = relay.build(
relay_prog,
target=tvm.target.Target(target, host=env.target_host),
params=params,
)
else:
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
graph, lib, params = relay.build(
relay_prog,
target=tvm.target.Target(target, host=env.target_host),
params=params,
)
temp = utils.tempdir()
lib.save(temp.relpath("graphlib.o"))
remote.upload(temp.relpath("graphlib.o"))
lib = remote.load_module("graphlib.o")
if opt.debug_profile:
m = debug_executor.create(graph, lib, ctx)
else:
m = graph_executor.create(graph, lib, ctx)
image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
m.set_input(**params)
m.set_input("data", image)
timer = m.module.time_evaluator("run", ctx, number=4, repeat=opt.measurements)
tcost = timer()
prof_res = np.array(tcost.results) * 1000
print(
"Mean inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
if opt.debug_profile:
m.run()
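# Example invocation (script name and tracker address are illustrative):
#   export TVM_TRACKER_HOST=0.0.0.0
#   export TVM_TRACKER_PORT=9190
#   python3 tune_resnet.py --device vta --tuner random --measurements 3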
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, os
import tvm
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime
host = os.environ.get("VTA_RPC_HOST", "de10nano")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
def program_rpc_bitstream(path=None):
"""Program the FPGA on the RPC server
Parameters
----------
path : path to bitstream (optional)
"""
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
program_fpga(remote, path)
def reconfig_rpc_runtime():
"""Reconfig the RPC server runtime"""
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
reconfig_runtime(remote)
bitstream = sys.argv[1] if len(sys.argv) == 2 else None
program_rpc_bitstream(bitstream)
reconfig_rpc_runtime()
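# Example usage (script name is illustrative):
#   python3 program_bitstream.py              # program the default bitstream
#   python3 program_bitstream.py design.bit   # program a custom bitstream file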
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib import utils
import vta.testing
from vta.testing import simulator
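# Integration test for a packed GEMM on VTA: inputs are tiled into the accelerator's
# (BATCH, BLOCK) layout, the matmul is tensorized onto the GEMM core, and a NumPy
# reference result is computed alongside the device run.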
def test_gemm():
def run_gemm_packed(env, remote, batch_size, channel, block):
data_shape = (batch_size // env.BATCH, channel // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
weight_shape = (
channel // env.BLOCK_OUT,
channel // env.BLOCK_IN,
env.BLOCK_OUT,
env.BLOCK_IN,
)
res_shape = (batch_size // env.BATCH, channel // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT)
num_ops = 2 * channel * channel * batch_size
ko = te.reduce_axis((0, channel // env.BLOCK_IN), name="ko")
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
weight = te.placeholder(weight_shape, name="weight", dtype=env.wgt_dtype)
data_buf = te.compute(data_shape, lambda *i: data(*i), "data_buf")
weight_buf = te.compute(weight_shape, lambda *i: weight(*i), "weight_buf")
res_gem = te.compute(
res_shape,
lambda bo, co, bi, ci: te.sum(
data_buf[bo, ko, bi, ki].astype(env.acc_dtype)
* weight_buf[co, ko, ci, ki].astype(env.acc_dtype),
axis=[ko, ki],
),
name="res_gem",
)
res_shf = te.compute(res_shape, lambda *i: res_gem(*i) >> 8, name="res_shf")
res_max = te.compute(res_shape, lambda *i: tvm.te.max(res_shf(*i), 0), "res_max")
res_min = te.compute(
res_shape, lambda *i: tvm.te.min(res_max(*i), (1 << (env.INP_WIDTH - 1)) - 1), "res_min"
)
res = te.compute(res_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
def verify(s):
mod = vta.build(
s,
[data, weight, res],
tvm.target.Target("ext_dev", host=env.target_host),
name="gemm",
)
temp = utils.tempdir()
mod.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
dev = remote.ext_dev(0)
data_orig = np.random.randint(-128, 128, size=(batch_size, channel)).astype(data.dtype)
weight_orig = np.random.randint(-128, 128, size=(channel, channel)).astype(weight.dtype)
data_packed = data_orig.reshape(
batch_size // env.BATCH, env.BATCH, channel // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
weight_packed = weight_orig.reshape(
channel // env.BLOCK_OUT, env.BLOCK_OUT, channel // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
res_np = np.zeros(res_shape).astype(res.dtype)
data_arr = tvm.nd.array(data_packed, dev)
weight_arr = tvm.nd.array(weight_packed, dev)
res_arr = tvm.nd.array(res_np, dev)
res_ref = np.zeros(res_shape).astype(env.acc_dtype)
for b in range(batch_size // env.BATCH):
for i in range(channel // env.BLOCK_OUT):
for j in range(channel // env.BLOCK_IN):
res_ref[b, i, :] += np.dot(
data_packed[b, j, :].astype(env.acc_dtype),
weight_packed[i, j].T.astype(env.acc_dtype),
)
res_ref = np.right_shift(res_ref, 8)
res_ref = np.clip(res_ref, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(res.dtype)
time_f = f.time_evaluator("gemm", dev, number=20)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
cost = time_f(data_arr, weight_arr, res_arr)
if env.TARGET in ["sim", "tsim"]:
stats = simulator.stats()
print("Execution statistics:")
for k, v in stats.items():
print("\t{:<16}: {:>16}".format(k, v))
res_unpack = res_arr.numpy().reshape(
batch_size // env.BATCH, channel // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT
)
return cost
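# run_schedule stages data through VTA's on-chip scratchpads, optionally tiles the
# output, tensorizes the inner matmul onto the GEMM intrinsic, and maps the
# shift/clip operations onto the vector ALU.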
def run_schedule(load_inp, load_wgt, gemm, alu, store_out, print_ir):
s = te.create_schedule(res.op)
s[data_buf].set_scope(env.inp_scope)
s[weight_buf].set_scope(env.wgt_scope)
s[res_gem].set_scope(env.acc_scope)
s[res_shf].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
if block:
bblock = block // env.BATCH
iblock = block // env.BLOCK_IN
oblock = block // env.BLOCK_OUT
xbo, xco, xbi, xci = s[res].op.axis
xb1, xco1, xb2, xco2 = s[res].tile(xbo, xco, bblock, oblock)
store_pt = xb2
s[res_gem].compute_at(s[res], xco1)
s[res_shf].compute_at(s[res], xco1)
s[res_min].compute_at(s[res], xco1)
s[res_max].compute_at(s[res], xco1)
xbo, xco, xbi, xci = s[res_gem].op.axis
ko1, ko2 = s[res_gem].split(ko, iblock)
s[res_gem].reorder(ko1, ko2, xbo, xco, xbi, xci, ki)
s[data_buf].compute_at(s[res_gem], ko1)
s[weight_buf].compute_at(s[res_gem], ko1)
s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
s[res_gem].tensorize(xbi, gemm)
s[res_shf].pragma(s[res_shf].op.axis[0], alu)
s[res_min].pragma(s[res_min].op.axis[0], alu)
s[res_max].pragma(s[res_max].op.axis[0], alu)
s[res].pragma(store_pt, store_out)
else:
xbo, xco, xbi, xci = s[res_gem].op.axis
s[res_gem].reorder(ko, xbo, xco, xbi, xci, ki)
s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
s[res_gem].tensorize(xbi, gemm)
s[res_shf].pragma(s[res_shf].op.axis[0], alu)
s[res_min].pragma(s[res_min].op.axis[0], alu)
s[res_max].pragma(s[res_max].op.axis[0], alu)
s[res].pragma(s[res].op.axis[0], store_out)
if print_ir:
print(tvm.lower(s, [data, weight, res], simple_mode=True))
return verify(s)
def gemm_normal(print_ir):
mock = env.mock
print("----- GEMM GOPS End-to-End Test-------")
def run_test(header, print_ir):
cost = run_schedule(
env.dma_copy,
env.dma_copy,
env.gemm,
env.alu,
env.dma_copy,
print_ir,
)
gops = (num_ops / cost.mean) / float(10**9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir)
def gemm_unittest(print_ir):
mock = env.mock
print("----- GEMM Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, mock.dma_copy, env.gemm, mock.alu, mock.dma_copy, print_ir
)
gops = (num_ops / cost.mean) / float(10**9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir)
def alu_unittest(print_ir):
mock = env.mock
print("----- ALU Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, mock.dma_copy, mock.gemm, env.alu, mock.dma_copy, print_ir
)
gops = (num_ops / cost.mean) / float(10**9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir)
print("")
def load_inp_unittest(print_ir):
mock = env.mock
print("----- LoadInp Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
env.dma_copy, mock.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir
)
gops = (num_ops / cost.mean) / float(10**9)
bandwith = (batch_size * channel * env.INP_WIDTH / cost.mean) / float(10**9)
print(header)
print(
"\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
% (cost.mean, gops, bandwith)
)
with vta.build_config():
run_test("NORMAL", print_ir)
print("")
def load_wgt_unittest(print_ir):
mock = env.mock
print("----- LoadWgt Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, env.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir
)
gops = (num_ops / cost.mean) / float(10**9)
bandwith = (channel * channel * env.WGT_WIDTH / cost.mean) / float(10**9)
print(header)
print(
"\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
% (cost.mean, gops, bandwith)
)
with vta.build_config():
run_test("NORMAL", print_ir)
print("")
def store_out_unittest(print_ir):
mock = env.mock
print("----- StoreOut Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, mock.dma_copy, mock.gemm, mock.alu, env.dma_copy, print_ir
)
gops = (num_ops / cost.mean) / float(10**9)
bandwith = (batch_size * channel * env.OUT_WIDTH / cost.mean) / float(10**9)
print(header)
print(
"\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
% (cost.mean, gops, bandwith)
)
with vta.build_config():
run_test("NORMAL", print_ir)
print("")
gemm_normal(False)
gemm_unittest(False)
alu_unittest(False)
def _run(env, remote):
print("========GEMM 128=========")
run_gemm_packed(env, remote, 128, 128, 128)
vta.testing.run(_run)
if __name__ == "__main__":
test_gemm()
"""Testing topi conv2d operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
Workload = namedtuple(
"Conv2DWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
],
)
env = vta.get_env()
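# ResNet-18 convolution workloads (NCHW): batch, spatial size, channel counts,
# kernel size, padding, and strides for each layer benchmarked below.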
resnet_wkls = [
("resnet-18.C2", Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
("resnet-18.C3", Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
("resnet-18.C4", Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
("resnet-18.C5", Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
("resnet-18.C6", Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
("resnet-18.C7", Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
("resnet-18.C8", Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
("resnet-18.C9", Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
("resnet-18.C10", Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
("resnet-18.C11", Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def run_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):
assert wl.hpad == wl.wpad
if "arm_cpu" in target.keys:
data_pack = False
layout = "NCHW"
conv2d_fcompute = topi.arm_cpu.conv2d_nchw_spatial_pack
conv2d_fschedule = topi.arm_cpu.schedule_conv2d_nchw_spatial_pack
elif "vta" in target.keys:
data_pack = True
layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
conv2d_fcompute = vta.top.conv2d_packed
conv2d_fschedule = vta.top.schedule_conv2d_packed
a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
w_shape = (wl.out_filter, wl.in_filter, wl.hkernel, wl.wkernel)
b_shape = (wl.batch, wl.out_filter, 1, 1)
if data_pack:
data_shape = (
wl.batch // env.BATCH,
wl.in_filter // env.BLOCK_IN,
wl.height,
wl.width,
env.BATCH,
env.BLOCK_IN,
)
kernel_shape = (
wl.out_filter // env.BLOCK_OUT,
wl.in_filter // env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
env.BLOCK_OUT,
env.BLOCK_IN,
)
bias_shape = (
wl.batch // env.BATCH,
wl.out_filter // env.BLOCK_OUT,
1,
1,
env.BATCH,
env.BLOCK_OUT,
)
else:
data_shape = a_shape
kernel_shape = w_shape
bias_shape = b_shape
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
with target:
if data_pack:
res = conv2d_fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), layout, env.acc_dtype
)
else:
res = conv2d_fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), env.acc_dtype
)
res = topi.right_shift(res, 8)
res = topi.add(res, bias)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
s = conv2d_fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))
fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
num_ops = (
2
* wl.batch
* fout_height
* fout_width
* wl.hkernel
* wl.wkernel
* wl.out_filter
* wl.in_filter
)
def get_ref_data():
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (
env.INP_WIDTH + env.WGT_WIDTH - 2
)
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
r_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(env.acc_dtype),
w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride),
wl.hpad,
).astype(env.acc_dtype)
return a_np, w_np, b_np, r_np
data_np, kernel_np, bias_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
wl.batch // env.BATCH,
env.BATCH,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.height,
wl.width,
).transpose((0, 2, 4, 5, 1, 3))
kernel_np = kernel_np.reshape(
wl.out_filter // env.BLOCK_OUT,
env.BLOCK_OUT,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
).transpose((0, 2, 4, 5, 1, 3))
bias_np = bias_np.reshape(
wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT
)
if "vta" in target.keys:
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
mod = vta.build(
s,
[data, kernel, bias, res],
target=tvm.target.Target(target, host=env.target_host),
name="conv2d",
)
else:
mod = tvm.build(
s,
[data, kernel, bias, res],
target=tvm.target.Target(target, host=env.target_host),
name="conv2d",
)
temp = utils.tempdir()
mod.save(temp.relpath("conv2d.o"))
remote.upload(temp.relpath("conv2d.o"))
f = remote.load_module("conv2d.o")
dev = remote.device(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, dev)
kernel_arr = tvm.nd.array(kernel_np, dev)
bias_arr = tvm.nd.array(bias_np, dev)
res_arr = tvm.nd.array(res_np, dev)
time_f = f.time_evaluator("conv2d", dev, number=samples)
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
correct = False
if check_correctness:
res_orig = res_arr.numpy()
if data_pack:
res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
wl.batch, wl.out_filter, fout_height, fout_width
)
bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
res_ref = res_ref >> env.WGT_WIDTH
res_ref += bias_np
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10**9)
status = "PASSED" if co |
rrect else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d(device):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim", "intelfocl"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target):
for _, wl in resnet_wkls:
print(wl)
run_conv2d(env, remote, wl, target)
vta.testing.run(_run)
if __name__ == "__main__":
test_conv2d(device="arm_cpu")
test_conv2d(device="vta")
"""Testing topi conv2d_transpose operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
Workload = namedtuple(
"Conv2DTransposeWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
"o_hpad",
"o_wpad",
],
)
env = vta.get_env()
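# DCGAN generator transposed-convolution workloads benchmarked below.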
dcgan_wklds = [
("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def _find_factors(n):
factors = []
for f in range(1, n + 1):
if n % f == 0:
factors.append(f)
return factors
def run_conv2d_transpose(
env, remote, wl, target, check_correctness=True, print_ir=False, samples=4
):
assert wl.hpad == wl.wpad
if "arm_cpu" in target.keys:
data_pack = False
layout = "NCHW"
fcompute = topi.arm_cpu.conv2d_transpose_nchw
fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw
elif "vta" in target.keys:
data_pack = True
layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
fcompute = vta.top.conv2d_transpose_packed
fschedule = vta.top.schedule_conv2d_transpose_packed
a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
if data_pack:
data_shape = (
wl.batch // env.BATCH,
wl.in_filter // env.BLOCK_IN,
wl.height,
wl.width,
env.BATCH,
env.BLOCK_IN,
)
kernel_shape = (
wl.out_filter // env.BLOCK_OUT,
wl.in_filter // env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
env.BLOCK_OUT,
env.BLOCK_IN,
)
else:
data_shape = a_shape
kernel_shape = w_shape
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
with target:
res = fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
s = fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, res], simple_mode=True))
fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad
fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad
num_ops = (
2
* wl.batch
* fout_height
* fout_width
* wl.hkernel
* wl.wkernel
* wl.out_filter
* wl.in_filter
)
def get_ref_data():
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(
w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
).astype(kernel.dtype)
r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np.astype(env.acc_dtype),
w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride),
wl.hpad,
(wl.o_hpad, wl.o_wpad),
).astype(env.acc_dtype)
return a_np, w_np, r_np
data_np, kernel_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
wl.batch // env.BATCH,
env.BATCH,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.height,
wl.width,
).transpose((0, 2, 4, 5, 1, 3))
kernel_np = kernel_np.reshape(
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.out_filter // env.BLOCK_OUT,
env.BLOCK_OUT,
wl.hkernel,
wl.wkernel,
).transpose((2, 0, 4, 5, 3, 1))
kernel_np = np.flip(kernel_np, 2)
kernel_np = np.flip(kernel_np, 3)
if "vta" in target.keys:
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
mod = vta.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
else:
mod = tvm.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
temp = utils.tempdir()
mod.save(temp.relpath("conv2d_transpose.o"))
remote.upload(temp.relpath("conv2d_transpose.o"))
f = remote.load_module("conv2d_transpose.o")
dev = remote.device(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, dev)
kernel_arr = tvm.nd.array(kernel_np, dev)
res_arr = tvm.nd.array(res_np, dev)
time_f = f.time_evaluator("conv2d_transpose", dev, number=samples)
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, res_arr)
correct = False
if check_correctness:
res_orig = res_arr.numpy()
if data_pack:
res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
wl.batch, wl.out_filter, fout_height, fout_width
)
res_ref = res_ref >> env.WGT_WIDTH
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10**9)
status = "PASSED" if correct else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d_transpose(device):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target):
for _, wl in dcgan_wklds:
print(wl)
run_conv2d_transpose(env, remote, wl, target)
vta.testing.run(_run)
if __name__ == "__main__":
test_conv2d_transpose(device="arm_cpu")
test_conv2d_transpose(device="vta")
"""Testing topi gemm operator for VTA"""
import os
import json
from collections import namedtuple
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
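# Benchmark a single dense (matrix-multiply) layer: packed into VTA's blocked layout
# when targeting the accelerator, and validated against a NumPy np.dot reference.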
def run_gemm(
env,
remote,
target,
batch_size,
in_feat,
out_feat,
check_correctness=True,
print_ir=True,
samples=4,
):
if "arm_cpu" in target.keys:
data_pack = False
elif "vta" in target.keys:
data_pack = True
a_shape = (batch_size, in_feat)
w_shape = (out_feat, in_feat)
if data_pack:
data_shape = (batch_size // env.BATCH, in_feat // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
kernel_shape = (
out_feat // env.BLOCK_OUT,
in_feat // env.BLOCK_IN,
env.BLOCK_OUT,
env.BLOCK_IN,
)
fcompute = vta.top.dense_packed
fschedule = vta.top.schedule_dense_packed
else:
data_shape = a_shape
kernel_shape = w_shape
fcompute = topi.x86.dense_nopack
fschedule = topi.x86.schedule_dense_nopack
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
with target:
res = fcompute(data, kernel, None, env.acc_dtype)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
s = fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, res], simple_mode=True))
num_ops = 2 * batch_size * in_feat * out_feat
def get_ref_data():
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
r_np = np.dot(a_np.astype(env.acc_dtype), w_np.T.astype(env.acc_dtype)).astype(
env.acc_dtype
)
return a_np, w_np, r_np
data_np, kernel_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
batch_size // env.BATCH, env.BATCH, in_feat // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
kernel_np = kernel_np.reshape(
out_feat // env.BLOCK_OUT, env.BLOCK_OUT, in_feat // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
if "vta" in target.keys:
mod = vta.build(
s,
[data, kernel, res],
target=tvm.target.Target(target, host=env.target_host),
name="dense",
)
else:
mod = tvm.build(
s,
[data, kernel, res],
target=tvm.target.Target(target, host=env.target_host),
name="dense",
)
temp = utils.tempdir()
mod.save(temp.relpath("dense.o"))
remote.upload(temp.relpath("dense.o"))
f = remote.load_module("dense.o")
dev = remote.device(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, dev)
kernel_arr = tvm.nd.array(kernel_np, dev)
res_arr = tvm.nd.array(res_np, dev)
time_f = f.time_evaluator("dense", dev, number=samples)
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, res_arr)
correct = False
if check_correctness:
res_orig = res_arr.numpy()
if data_pack:
res_orig = res_orig.reshape(batch_size, out_feat)
res_ref = res_ref >> 8
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10**9)
status = "PASSED" if correct else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print("%s DENSE TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
return correct, cost, stats
def test_gemm(device="vta", batch=128, in_feat=128, out_feat=128):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target):
run_gemm(env, remote, target, batch, in_feat, out_feat)
vta.testing.run(_run)
if __name__ == "__main__":
test_gemm("vta", 16, 512, 1008) |
"""Testing topi group conv2d operator for VTA""" |
import json |
import os |
import pytest |
import numpy as np
from collections |
import namedtuple |
import tvm
from tvm |
import te
from tvm |
import relay
from tvm |
import autotvm
from tvm.contrib |
import utils
from tvm |
import topi |
import tvm.topi.testing |
import vta
from vta |
import program_fpga, reconfig_runtime |
import vta.testing
from vta.testing |
import simulator
Workload = namedtuple(
"GroupConv2DWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"groups",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
],
)
env = vta.get_env()
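# Grouped convolution workloads derived from MobileNet layers, benchmarked below.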
mobilenet_wkls = [
("mobilenet.D1", Workload(env.BATCH, 112, 112, 32, 32, 2, 3, 3, 1, 1, 1, 1)),
("mobilenet.D2", Workload(env.BATCH, 112, 112, 64, 64, 4, 3, 3, 1, 1, 2, 2)),
("mobilenet.D3", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 1, 1)),
("mobilenet.D4", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 2, 2)),
("mobilenet.D5", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 1, 1)),
("mobilenet.D6", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 2, 2)),
("mobilenet.D7", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 1, 1)),
("mobilenet.D8", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 2, 2)),
("mobilenet.D9", Workload(env.BATCH, 7, 7, 1024, 1024, 64, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def run_group_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):
assert wl.hpad == wl.wpad
if "arm_cpu" in target.keys:
data_pack = False
layout = "NCHW"
fcompute = topi.nn.group_conv2d_nchw
fschedule = topi.generic.schedule_group_conv2d_nchw
elif "vta" in target.keys:
data_pack = True
layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
fcompute = vta.top.group_conv2d_packed
fschedule = vta.top.schedule_group_conv2d_packed
CI_G = wl.in_filter // wl.groups
a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
w_shape = (wl.out_filter, CI_G, wl.hkernel, wl.wkernel)
b_shape = (wl.batch, wl.out_filter, 1, 1)
if data_pack:
data_shape = (
wl.batch // env.BATCH,
wl.in_filter // env.BLOCK_IN,
wl.height,
wl.width,
env.BATCH,
env.BLOCK_IN,
)
kernel_shape = (
wl.out_filter // env.BLOCK_OUT,
CI_G // env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
env.BLOCK_OUT,
env.BLOCK_IN,
)
bias_shape = (
wl.batch // env.BATCH,
wl.out_filter // env.BLOCK_OUT,
1,
1,
env.BATCH,
env.BLOCK_OUT,
)
else:
data_shape = a_shape
kernel_shape = w_shape
bias_shape = b_shape
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
with target:
res = fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), wl.groups, env.acc_dtype
)
res = topi.right_shift(res, 8)
res = topi.add(res, bias)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
s = fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))
fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
num_ops = (
2
* wl.batch
* fout_height
* fout_width
* wl.hkernel
* wl.wkernel
* wl.out_filter
* wl.in_filter
// wl.groups
)
def get_ref_data():
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (
env.INP_WIDTH + env.WGT_WIDTH - 2
)
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
r_np = tvm.topi.testing.conv2d_nchw_python(
a_np.astype(env.acc_dtype),
w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride),
wl.hpad,
wl.groups,
).astype(env.acc_dtype)
return a_np, w_np, b_np, r_np
data_np, kernel_np, bias_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
wl.batch // env.BATCH,
env.BATCH,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.height,
wl.width,
).transpose((0, 2, 4, 5, 1, 3))
kernel_np = kernel_np.reshape(
wl.out_filter // env.BLOCK_OUT,
env.BLOCK_OUT,
CI_G // env.BLOCK_IN,
env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
).transpose((0, 2, 4, 5, 1, 3))
bias_np = bias_np.reshape(
wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT
)
if "vta" in target.keys:
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
mod = vta.build(
s,
[data, kernel, bias, res],
target=tvm.target.Target(target, host=env.target_host),
name="conv2d",
)
else:
mod = tvm.build(
s,
[data, kernel, bias, res],
target=tvm.target.Target(target, host=env.target_host),
name="conv2d",
)
temp = utils.tempdir()
mod.save(temp.relpath("conv2d.o"))
remote.upload(temp.relpath("conv2d.o"))
f = remote.load_module("conv2d.o")
dev = remote.device(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, dev)
kernel_arr = tvm.nd.array(kernel_np, dev)
bias_arr = tvm.nd.array(bias_np, dev)
res_arr = tvm.nd.array(res_np, dev)
time_f = f.time_evaluator("conv2d", dev, number=samples)
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
correct = False
if check_correctness:
res_orig = res_arr.numpy()
if data_pack:
res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
wl.batch, wl.out_filter, fout_height, fout_width
)
bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
res_ref = res_ref >> env.WGT_WIDTH
res_ref += bias_np
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10**9)
status = "PASSED" if correct else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print(
"%s GROUP CONV2D TEST %s: Time cost = %g sec/op, %g GOPS"
% (device, status, cost.mean, gops)
)
return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d(device):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target):
for _, wl in mobilenet_wkls:
print(wl)
run_group_conv2d(env, remote, wl, target)
vta.testing.run(_run)
if __name__ == "__main__":
test_conv2d(device="arm_cpu")
test_conv2d(device="vta")