# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime
host = os.environ.get("VTA_RPC_HOST", "pynq")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
def program_rpc_bitstream(path=None):
"""Program the FPGA on the RPC server
Parameters
----------
path : path to bitstream (optional)
"""
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
program_fpga(remote, path)
def reconfig_rpc_runtime():
"""Reconfig the RPC server runtime"""
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
reconfig_runtime(remote)
program_rpc_bitstream()
reconfig_rpc_runtime()
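# Hedged usage sketch: instead of letting the RPC server resolve its default
# bitstream (program_rpc_bitstream(None) above), fetch the prebuilt bitstream
# into the local VTA cache and program the FPGA from that explicit path, using
# the helpers already imported above. The VTA_PROGRAM_LOCAL_BITSTREAM
# environment-variable guard is an illustrative assumption, not part of the
# original script.
if os.environ.get("VTA_PROGRAM_LOCAL_BITSTREAM", "0") == "1":
    download_bitstream()
    program_rpc_bitstream(get_bitstream_path())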
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import vta
def test_env():
env = vta.get_env()
mock = env.mock
assert mock.alu == "skip_alu"
def test_env_scope():
env = vta.get_env()
cfg = env.cfg_dict
cfg["TARGET"] = "xyz"
with vta.Environment(cfg):
assert vta.get_env().TARGET == "xyz"
assert vta.get_env().TARGET == env.TARGET
if __name__ == "__main__":
test_env()
test_env_scope()
"""Unit test VTA's instructions """ |
import tvm
from tvm import te
import numpy as np
from tvm import topi
from tvm.contrib import utils
import vta
import vta.testing
from vta.testing import simulator
np.random.seed(0xDEADB)
def test_save_load_out():
"""Test save/store output command"""
def _run(env, remote):
n = 6
x = te.placeholder((n, n, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
x_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x(*i), "x_buf")
y_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x_buf(*i) >> 0, "y_buf")
y = te.compute(
(n, n, env.BATCH, env.BLOCK_OUT), lambda *i: y_buf(*i).astype(env.inp_dtype), "y"
)
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.acc_scope)
s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
s[y_buf].set_scope(env.acc_scope)
s[y_buf].pragma(y_buf.op.axis[0], env.alu)
s[y].pragma(y.op.axis[0], env.dma_copy)
with vta.build_config():
m = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
m.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
dev = remote.ext_dev(0)
x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
y_np = x_np.astype(y.dtype)
x_nd = tvm.nd.array(x_np, dev)
y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Save load execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_padded_load():
"""Test padded load."""
def _run(env, remote):
def check_padded_load(pad_before, pad_after, test_name=None):
n = 3
m = 5
x = te.placeholder((n, m, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
x_buf = topi.nn.pad(x, pad_before, pad_after, name="y")
y_buf = te.compute(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
),
lambda *i: x_buf(*i) >> 0,
"y_buf",
)
y = te.compute(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
),
lambda *i: y_buf(*i).astype(env.inp_dtype),
"y",
)
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.acc_scope)
s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
s[y_buf].set_scope(env.acc_scope)
s[y_buf].pragma(y_buf.op.axis[0], env.alu)
s[y].pragma(y.op.axis[0], env.dma_copy)
with vta.build_config():
mod = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("padded_load.o"))
remote.upload(temp.relpath("padded_load.o"))
f = remote.load_module("padded_load.o")
dev = remote.ext_dev(0)
x_np = np.random.randint(0, 10, size=(n, m, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
y_np = np.zeros(
(
n + pad_before[0] + pad_after[0],
m + pad_before[1] + pad_after[1],
env.BATCH,
env.BLOCK_OUT,
)
).astype(y.dtype)
y_np[pad_before[0] : pad_before[0] + n, pad_before[1] : pad_before[1] + m, :] = x_np
x_nd = tvm.nd.array(x_np, dev)
y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Padded {} load execution statistics:".format(test_name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
check_padded_load([2, 0, 0, 0], [0, 0, 0, 0], test_name="Y0")
check_padded_load([0, 2, 0, 0], [0, 0, 0, 0], test_name="Y1")
check_padded_load([0, 0, 0, 0], [2, 0, 0, 0], test_name="X0")
check_padded_load([0, 0, 0, 0], [0, 2, 0, 0], test_name="X1")
check_padded_load([1, 1, 0, 0], [1, 1, 0, 0], test_name="all")
vta.testing.run(_run)
def test_gemm():
"""Test GEMM."""
def _run(env, remote):
o = 4
n = 1
m = 4
x = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="x", dtype=env.inp_dtype)
w = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="w", dtype=env.wgt_dtype)
x_buf = te.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: x(*i), "x_buf")
w_buf = te.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: w(*i), "w_buf")
ko = te.reduce_axis((0, n), name="ko")
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
y_gem = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda bo, co, bi, ci: te.sum(
x_buf[bo, ko, bi, ki].astype(env.acc_dtype)
* w_buf[co, ko, ci, ki].astype(env.acc_dtype),
axis=[ko, ki],
),
name="y_gem",
)
y_shf = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_gem(*i) >> 8, name="y_shf"
)
y_max = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(y_shf(*i), 0), "y_max"
)
y_min = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm.te.min(y_max(*i), (1 << (env.INP_WIDTH - 1)) - 1),
"y_min",
)
y = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_min(*i).astype(env.inp_dtype), name="y"
)
if not remote:
return
def verify(s, name=None):
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
mod = vta.build(s, [x, w, y], tvm.target.Target("ext_dev", host=env.target_host))
temp = utils.tempdir()
mod.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
dev = remote.ext_dev(0)
x_np = np.random.randint(-128, 128, size=(o, n, env.BATCH, env.BLOCK_IN)).astype(
x.dtype
)
w_np = np.random.randint(-128, 128, size=(m, n, env.BLOCK_OUT, env.BLOCK_IN)).astype(
w.dtype
)
y_np = np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(y.dtype)
x_nd = tvm.nd.array(x_np, dev)
w_nd = tvm.nd.array(w_np, dev)
y_nd = tvm.nd.array(y_np, dev)
y_np = y_np.astype(env.acc_dtype)
for b in range(o):
for i in range(m):
for j in range(n):
y_np[b, i, :] += np.dot(
x_np[b, j, :].astype(env.acc_dtype), w_np[i, j].T.astype(env.acc_dtype)
)
y_np = np.right_shift(y_np, 8)
y_np = np.clip(y_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(y.dtype)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(x_nd, w_nd, y_nd)
np.testing.assert_equal(y_np, y_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("GEMM schedule:{} execution statistics:".format(name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
def test_schedule1():
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.inp_scope)
s[w_buf].set_scope(env.wgt_scope)
s[y_gem].set_scope(env.acc_scope)
s[y_shf].set_scope(env.acc_scope)
s[y_max].set_scope(env.acc_scope)
s[y_min].set_scope(env.acc_scope)
s[x_buf].compute_at(s[y_gem], ko)
s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
s[w_buf].compute_at(s[y_gem], ko)
s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
s[y_max].pragma(s[y_max].op.axis[0], env.alu)
s[y_min].pragma(s[y_min].op.axis[0], env.alu)
s[y].pragma(s[y].op.axis[0], env.dma_copy)
s[y_gem].reorder(
ko,
s[y_gem].op.axis[0],
s[y_gem].op.axis[1],
s[y_gem].op.axis[2],
s[y_gem].op.axis[3],
ki,
)
s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
verify(s, name="default")
def test_smt():
s = te.create_schedule(y.op)
s[x_buf].set_scope(env.inp_scope)
s[w_buf].set_scope(env.wgt_scope)
s[y_gem].set_scope(env.acc_scope)
s[y_shf].set_scope(env.acc_scope)
s[y_max].set_scope(env.acc_scope)
s[y_min].set_scope(env.acc_scope)
abo, aco, abi, aci = s[y].op.axis
abo1, abo2 = s[y].split(abo, nparts=2)
s[y].bind(abo1, te.thread_axis("cthread"))
s[y_gem].compute_at(s[y], abo1)
s[y_shf].compute_at(s[y], abo1)
s[y_max].compute_at(s[y], abo1)
s[y_min].compute_at(s[y], abo1)
s[y_gem].reorder(
ko,
s[y_gem].op.axis[0],
s[y_gem].op.axis[1],
s[y_gem].op.axis[2],
s[y_gem].op.axis[3],
ki,
)
s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
s[y_max].pragma(s[y_max].op.axis[0], env.alu)
s[y_min].pragma(s[y_min].op.axis[0], env.alu)
s[x_buf].compute_at(s[y_gem], ko)
s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
s[w_buf].compute_at(s[y_gem], ko)
s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
s[y].pragma(abo2, env.dma_copy)
verify(s, name="smt")
test_schedule1()
test_smt()
vta.testing.run(_run)
def test_alu():
def _run(env, remote):
def check_alu(tvm_op, np_op=None, use_imm=False, test_name=None):
"""Test ALU"""
m = 8
n = 8
imm = np.random.randint(1, 5)
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
)
if use_imm:
res_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm_op(a_buf(*i), imm), "res_buf"
)
else:
b = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="b", dtype=env.acc_dtype)
b_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: b(*i), "b_buf"
)
res_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm_op(a_buf(*i), b_buf(*i)),
"res_buf",
)
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: res_buf(*i).astype(env.inp_dtype),
"res",
)
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope)
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)
s[res_buf].set_scope(env.acc_scope)
s[res_buf].pragma(res_buf.op.axis[0], env.alu)
s[res].pragma(res.op.axis[0], env.dma_copy)
if not use_imm:
s[b_buf].set_scope(env.acc_scope)
s[b_buf].pragma(b_buf.op.axis[0], env.dma_copy)
if not remote:
return
with vta.build_config():
if use_imm:
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
else:
mod = vta.build(
s, [a, b, res], tvm.target.Target("ext_dev", host=env.target_host)
)
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
dev = remote.ext_dev(0)
a_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
if use_imm:
res_np = np_op(a_np, imm) if np_op else tvm_op(a_np, imm)
else:
b_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(
b.dtype
)
res_np = np_op(a_np, b_np) if np_op else tvm_op(a_np, b_np)
res_np = res_np.astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
if use_imm:
f(a_nd, res_nd)
else:
b_nd = tvm.nd.array(b_np, dev)
f(a_nd, b_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("ALU {} execution statistics:".format(test_name))
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
check_alu(lambda x, y: x << y, np.left_shift, use_imm=True, test_name="SHL")
check_alu(tvm.te.max, np.maximum, use_imm=True, test_name="MAX")
check_alu(tvm.te.max, np.maximum, test_name="MAX")
check_alu(lambda x, y: x + y, use_imm=True, test_name="ADD")
check_alu(lambda x, y: x + y, test_name="ADD")
check_alu(lambda x, y: x >> y, np.right_shift, use_imm=True, test_name="SHR")
vta.testing.run(_run)
def test_relu():
"""Test RELU on ALU"""
def _run(env, remote):
m = 8
n = 10
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
)
max_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(a_buf(*i), 0), "res_buf"
)
min_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: tvm.te.min(max_buf(*i), (1 << (env.INP_WIDTH - 1)) - 1),
"max_buf",
)
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT),
lambda *i: min_buf(*i).astype(env.inp_dtype),
"min_buf",
)
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope)
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)
s[max_buf].set_scope(env.acc_scope)
s[min_buf].set_scope(env.acc_scope)
s[max_buf].pragma(max_buf.op.axis[0], env.alu)
s[min_buf].pragma(min_buf.op.axis[0], env.alu)
s[res].pragma(res.op.axis[0], env.dma_copy)
with vta.build_config():
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
dev = remote.ext_dev(0)
a_np = np.random.randint(-256, 256, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
res_np = np.clip(a_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(a_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Relu execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_shift_and_scale():
"""Test shift and scale on ALU"""
def _run(env, remote):
m = 2
n = 8
imm_shift = np.random.randint(0, 8)
imm_scale = np.random.randint(1, 5)
a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
a_buf = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
)
res_shift = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a_buf(*i) + imm_shift, "res_shift"
)
res_scale = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_shift(*i) >> imm_scale, "res_scale"
)
res = te.compute(
(m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_scale(*i).astype(env.inp_dtype), "res"
)
s = te.create_schedule(res.op)
s[a_buf].set_scope(env.acc_scope)
s[res_shift].set_scope(env.acc_scope)
s[res_scale].set_scope(env.acc_scope)
s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)
s[res_shift].pragma(res_shift.op.axis[0], env.alu)
s[res_scale].pragma(res_scale.op.axis[0], env.alu)
s[res].pragma(res.op.axis[0], env.dma_copy)
mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
if not remote:
return
temp = utils.tempdir()
mod.save(temp.relpath("load_act.o"))
remote.upload(temp.relpath("load_act.o"))
f = remote.load_module("load_act.o")
dev = remote.ext_dev(0)
a_np = np.random.randint(-10, 10, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
res_np = np.right_shift((a_np + imm_shift), imm_scale)
res_np = res_np.astype(res.dtype)
a_nd = tvm.nd.array(a_np, dev)
res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(a_nd, res_nd)
np.testing.assert_equal(res_np, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Shift and scale execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
vta.testing.run(_run)
def test_runtime_array():
def _run(env, remote):
n = 100
dev = remote.ext_dev(0)
x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype("int8")
x_nd = tvm.nd.array(x_np, dev)
np.testing.assert_equal(x_np, x_nd.numpy())
vta.testing.run(_run)
if __name__ == "__main__":
test_runtime_array()
test_save_load_out()
test_padded_load()
test_gemm()
test_alu()
test_relu()
test_shift_and_scale()
"""
Auto-tuning a ALU fused op on VTA
---------------------------------
""" |
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm import record
import vta
from vta.testing import simulator
from vta.top import graph_pack
import copy
def compile_network(env, target, model, start_pack, stop_pack):
dtype_dict = {"data": "float32"}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
gluon_model = vision.get_model(model, pretrained=True)
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
with relay.build_config(opt_level=3):
with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
mod = relay.quantize.quantize(mod, params=params)
if target.device_name == "vta":
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
mod["main"],
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=start_pack,
stop_name=stop_pack,
)
return relay_prog, params
tracker_host = os.environ.get("TVM_TRACKER_HOST", "0.0.0.0")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
env = vta.get_env()
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
network = "resnet50_v2"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"
log_file = "%s.alu.%s.log" % (device, network)
tuning_option = {
"log_filename": log_file,
"tuner": "random",
"n_trial": 1000,
"early_stopping": None,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(n_parallel=1),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=tracker_port,
number=5,
timeout=60,
),
),
}
def log_to_file(file_out, protocol="json"):
"""Log the tuning records into file.
The rows of the log are stored in the format of autotvm.record.encode.
for lhs == rhs, we add an extra rhs = [] record
Parameters
----------
file_out : str
The file to log to.
protocol: str, optional
The log protocol. Can be 'json' or 'pickle'
Returns
-------
callback : callable
Callback function to do the logging.
"""
def _callback(_, inputs, results):
with open(file_out, "a") as f:
for inp, result in zip(inputs, results):
f.write(record.encode(inp, result, protocol) + "\n")
if inp.task.args[0] == inp.task.args[1]:
args = list(inp.task.args)
args[1] = (args[0][0], (), args[0][2])
inp_copy = copy.deepcopy(inp)
inp_copy.task.args = tuple(args)
f.write(record.encode(inp_copy, result, protocol) + "\n")
return _callback
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=10,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
if tuner == "xgb" or tuner == "xgb-rank":
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "xgb_knob":
tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
def register_vta_tuning_tasks():
from tvm.autotvm.task import TaskExtractEnv
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
TaskExtractEnv()
@autotvm.template("add.vta")
def _topi_add(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
A, B = args[:2]
with tvm.target.vta():
res = vta.top.op.add_packed(*args, **kwargs)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = vta.top.op.schedule_add_packed([res])
else:
s = te.create_schedule([res.op])
return s, [A, B, res]
@autotvm.template("multiply.vta")
def _topi_multiply(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
A, B = args[:2]
with tvm.target.vta():
res = vta.top.op.multiply_packed(*args, **kwargs)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = vta.top.op.schedule_multiply_packed([res])
else:
s = te.create_schedule([res.op])
return s, [A, B, res]
def tune_and_evaluate(tuning_opt):
if env.TARGET != "intelfocl":
print("ALU only op only available for intelfocl target")
return
register_vta_tuning_tasks()
print("Extract tasks...")
relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
mod = tvm.IRModule.from_expr(relay_prog)
tasks = autotvm.task.extract_from_program(
mod,
params=params,
ops=(
relay.op.get("add"),
relay.op.get("multiply"),
),
target=tvm.target.Target(target, host=env.target_host),
)
tasks = list(filter(lambda t: len(t.args[0][1]) > 4, tasks))
tasks = list(filter(lambda t: t.args[0][2] != "float32", tasks))
tasks_set = {}
print("Extracted {} alu tasks:".format(len(tasks)))
for tsk in tasks:
print("tsk = ", tsk)
if len(tsk.args[1][1]) == 0:
args = list(tsk.args)
args[1] = args[0]
tsk.args = tuple(args)
if (tsk.name, tsk.args) in tasks_set:
print("task {} already exists".format(tsk))
tasks_set[(tsk.name, tsk.args)] = tsk
tasks = list(tasks_set.values())
print("After merged, final
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
tune_and_evaluate(tuning_option)
"""
Auto-tuning a convolutional network on VTA
==========================================
**Author**: `Lianmin Zheng <https:
Auto-tuning for a specific accelerator design is critical for getting the best
performance for any given operator. This tutorial showcases how to tune a
whole convolutional network on VTA.
The operator implementation for VTA in TVM is written in template form.
The template has many tunable knobs (tile factor, virtual threads, etc).
We will tune all convolution operators in the neural network. After tuning,
we produce a log file which stores the best schedule parameters for all tuned
operators. When the TVM compiler compiles these operators, it will query this
log file to get the best knob parameters.
""" |
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import vta
from vta.testing import simulator
from vta.top import graph_pack
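# For context, a minimal sketch of what an AutoTVM "template with tunable
# knobs" looks like, as mentioned in the docstring above. This is an
# illustrative toy matmul template, not VTA's actual conv2d template; the task
# name and the tile-factor knobs are assumptions made only for this example.
@autotvm.template("tutorial/matmul_knob_sketch")
def matmul_knob_sketch(N, L, M, dtype="float32"):
    A = te.placeholder((N, L), name="A", dtype=dtype)
    B = te.placeholder((L, M), name="B", dtype=dtype)
    k = te.reduce_axis((0, L), name="k")
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    s = te.create_schedule(C.op)
    cfg = autotvm.get_config()
    y, x = s[C].op.axis
    # Tunable knobs: the tuner searches over these tile factors and records the
    # best configuration in the log file that is queried at compile time.
    cfg.define_split("tile_y", y, num_outputs=2)
    cfg.define_split("tile_x", x, num_outputs=2)
    yo, yi = cfg["tile_y"].apply(s, C, y)
    xo, xi = cfg["tile_x"].apply(s, C, x)
    s[C].reorder(yo, xo, yi, xi)
    return s, [A, B, C]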
def compile_network(env, target, model, start_pack, stop_pack):
dtype_dict = {"data": "float32"}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
gluon_model = vision.get_model(model, pretrained=True)
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
mod = relay.quantize.quantize(mod, params=params)
if target.device_name == "vta":
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
mod["main"],
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=start_pack,
stop_name=stop_pack,
)
return relay_prog, params
tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
env = vta.get_env()
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
network = "resnet18_v1"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"
log_file = "%s.%s.log" % (device, network)
tuning_option = {
"log_filename": log_file,
"tuner": "random",
"n_trial": 1000,
"early_stopping": None,
"measure_option": autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=tracker_port,
number=5,
timeout=60,
module_loader=vta.module_loader(),
),
),
}
def tune_tasks(
tasks,
measure_option,
tuner="xgb",
n_trial=1000,
early_stopping=None,
log_filename="tuning.log",
use_transfer_learning=True,
):
tmp_log_file = log_filename + ".tmp"
if os.path.exists(tmp_log_file):
os.remove(tmp_log_file)
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
if tuner == "xgb" or tuner == "xgb-rank":
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "xgb_knob":
tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=50)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
if use_transfer_learning:
if os.path.isfile(tmp_log_file):
tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_filename)
os.remove(tmp_log_file)
def register_vta_tuning_tasks():
from tvm.autotvm.task import TaskExtractEnv
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
TaskExtractEnv()
@autotvm.template("conv2d_packed.vta")
def _topi_nn_conv2d(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
A, W = args[:2]
with tvm.target.vta():
res = vta.top.conv2d_packed(*args, **kwargs)
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = vta.top.schedule_conv2d_packed([res])
else:
s = te.create_schedule([res.op])
return s, [A, W, res]
def tune_and_evaluate(tuning_opt):
register_vta_tuning_tasks()
print("Extract tasks...")
relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
mod = tvm.IRModule.from_expr(relay_prog)
tasks = autotvm.task.extract_from_program(
mod,
params=params,
ops=(relay.op.get("nn.conv2d"),),
target=target,
target_host=env.target_host,
)
tasks = list(filter(lambda t: len(t.args[0][1]) > 4 and "conv" in t.name, tasks))
assert len(tasks) == 10
print("Extracted {} conv2d tasks:".format(len(tasks)))
for tsk in tasks:
inp = tsk.args[0][1]
wgt = tsk.args[1][1]
batch = inp[0] * inp[4]
in_filter = inp[1] * inp[5]
out_filter = wgt[0] * wgt[4]
height, width = inp[2], inp[3]
hkernel, wkernel = wgt[2], wgt[3]
hstride, wstride = tsk.args[2][0], tsk.args[2][1]
hpad, wpad = tsk.args[3][0], tsk.args[3][1]
print(
"({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
batch,
height,
width,
in_filter,
out_filter,
hkernel,
wkernel,
hpad,
wpad,
hstride,
wstride,
)
)
return
print("Tuning...")
tune_tasks(tasks, **tuning_opt)
if env.TARGET != "sim":
remote = autotvm.measure.request_remote(
env.TARGET, tracker_host, tracker_port, timeout=10000
)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
else:
remote = rpc.LocalSession()
with autotvm.tophub.context(target, extra_files=[log_file]):
print("Compile...")
if target.device_name != "vta":
with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
lib = relay.build(
relay_prog, target=target, params=params, target_host=env.target_host
)
else:
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
lib = relay.build(
relay_prog, target=target, params=params, target_host=env.target_host
)
print("Upload...")
temp = utils.tempdir()
lib.export_library(temp.relpath("graphlib.tar"))
remote.upload(temp.relpath("graphlib.tar"))
lib = remote.load_module("graphlib.tar")
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
m = graph_executor.GraphModule(lib["default"](ctx))
image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
m.set_input("data", image)
print("Evaluate inference time cost...")
timer = m.module.time_evaluator("run", ctx, number=1, repeat=10)
tcost = timer()
prof_res = np.array(tcost.results) * 1000
print(
"Mean inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res))
)
tune_and_evaluate(tuning_option)
"""
Deploy Pretrained Vision Model from MxNet on VTA
================================================
**Author**: `Thierry Moreau <https:
This tutorial provides an end-to-end demo of running ImageNet classification
inference on the VTA accelerator design.
It showcases Relay as a front end compiler that can perform quantization (VTA
only supports int8/32 inference) as well as graph packing (in order to enable
tensorization in the core) to massage the compute graph for the hardware target.
"""
from __future__ import absolute_import, print_function
import argparse, json, os, requests, sys, time
from io import BytesIO
from os.path import join, isfile
from PIL import Image
from mxnet.gluon.model_zoo import vision
import numpy as np
from matplotlib import pyplot as plt
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
from tvm.relay import transform
import vta
from vta.testing import simulator
from vta.top import graph_pack
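# For reference, a small illustration (with assumed shapes) of what graph
# packing does to the data layout: an NCHW tensor is reshaped and transposed
# into a packed NCHWnc layout so that the innermost (n, c) pair matches the
# GEMM core's (BATCH, BLOCK) tile. The example below assumes BATCH=1 and
# BLOCK_IN=BLOCK_OUT=16, the default VTA configuration.
_x = np.zeros((1, 64, 56, 56), dtype="int8")  # plain NCHW activation
_packed = _x.reshape(1, 1, 4, 16, 56, 56).transpose(0, 2, 4, 5, 1, 3)
assert _packed.shape == (1, 4, 56, 56, 1, 16)  # NCHW1n16c packed layout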
assert tvm.runtime.enabled("rpc")
env = vta.get_env()
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
pack_dict = {
"resnet18_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet34_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet18_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet34_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet50_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
"resnet101_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
}
model = "resnet18_v1"
assert model in pack_dict
if env.TARGET not in ["sim", "tsim", "intelfocl"]:
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
device_host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
device_port = os.environ.get("VTA_RPC_PORT", "9091")
if not tracker_host or not tracker_port:
remote = rpc.connect(device_host, int(device_port))
else:
remote = autotvm.measure.request_remote(
env.TARGET, tracker_host, int(tracker_port), timeout=10000
)
reconfig_start = time.time()
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
reconfig_time = time.time() - reconfig_start
print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
else:
remote = rpc.LocalSession()
if env.TARGET in ["intelfocl"]:
vta.program_fpga(remote, bitstream="vta.bitstream")
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
with autotvm.tophub.context(target):
dtype_dict = {"data": "float32"}
shape_dict = {"data": (env.BATCH, 3, 224, 224)}
gluon_model = vision.get_model(model, pretrained=True)
build_start = time.time()
mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
if target.device_name == "vta":
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
mod = relay.quantize.quantize(mod, params=params)
assert env.BLOCK_IN == env.BLOCK_OUT
relay_prog = graph_pack(
mod["main"],
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=pack_dict[model][0],
stop_name=pack_dict[model][1],
device_annot=(env.TARGET == "intelfocl"),
)
else:
relay_prog = mod["main"]
if target.device_name != "vta":
with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
graph, lib, params = relay.build(
relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
)
else:
if env.TARGET == "intelfocl":
target = {"cpu": env.target_vta_cpu, "ext_dev": target}
with vta.build_config(
opt_level=3, disabled_pass={"AlterOpLayout", "tir.CommonSubexprElimTIR"}
):
graph, lib, params = relay.build(
relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
)
build_time = time.time() - build_start
print(model + " inference graph built in {0:.2f}s!".format(build_time))
temp = utils.tempdir()
lib.export_library(temp.relpath("graphlib.tar"))
remote.upload(temp.relpath("graphlib.tar"))
lib = remote.load_module("graphlib.tar")
if env.TARGET == "intelfocl":
ctxes = [remote.ext_dev(0), remote.cpu(0)]
m = graph_executor.create(graph, lib, ctxes)
else:
m = graph_executor.create(graph, lib, ctx)
categ_url = "https:
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
synset = eval(open(categ_fn).read())
image_url = "https:
image_fn = "cat.png"
download.download(image_url, image_fn)
image = Image.open(image_fn).resize((224, 224))
plt.imshow(image)
plt.show()
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
image = np.repeat(image, env.BATCH, axis=0)
m.set_input(**params)
m.set_input("data", image)
num = 4
rep = 3
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
timer()
sim_stats = simulator.stats()
print("\nExecution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v
else:
tcost = timer()
std = np.std(tcost.results) * 1000
mean = tcost.mean * 1000
print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
print("Average per sample inference time: %.2fms" % (mean / env.BATCH))
tvm_output = m.get_output(0, tvm.nd.empty((env.BATCH, 1000), "float32", remote.cpu(0)))
for b in range(env.BATCH):
top_categories = np.argsort(tvm_output.numpy()[b])
print("\n{} prediction for sample {}".format(model, b))
print("\t
print("\t
print("\t
print("\t
print("\t
cat_detected = False
for k in top_categories[-5:]:
if "cat" in synset[k]:
cat_detected = True
assert cat_detected
"""
Deploy Pretrained Vision Detection Model from Darknet on VTA
============================================================
**Author**: `Hua Jiang <https:
This tutorial provides an end-to-end demo of running Darknet YOLOv3-tiny
inference on the VTA accelerator design to perform image detection.
It showcases Relay as a front end compiler that can perform quantization (VTA
only supports int8/32 inference) as well as graph packing (in order to enable
tensorization in the core) to massage the compute graph for the hardware target.
"""
from __future__ import absolute_import, print_function
import sys
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import tvm
import vta
from tvm import rpc, autotvm, relay
from tvm.relay.testing import yolo_detection, darknet
from tvm.relay.testing.darknet import __darknetffi__
from tvm.contrib import graph_executor, utils
from tvm.contrib.download import download_testdata
from vta.testing import simulator
from vta.top import graph_pack
assert tvm.runtime.enabled("rpc")
MODEL_NAME = "yolov3-tiny"
REPO_URL = "https:
cfg_path = download_testdata(
"https:
MODEL_NAME + ".cfg",
module="darknet",
)
weights_path = download_testdata(
"https:
MODEL_NAME + ".weights",
module="darknet",
)
if sys.platform in ["linux", "linux2"]:
darknet_lib_path = download_testdata(
REPO_URL + "lib/" + "libdarknet2.0.so" + "?raw=true", "libdarknet2.0.so", module="darknet"
)
elif sys.platform == "darwin":
darknet_lib_path = download_testdata(
REPO_URL + "lib_osx/" + "libdarknet_mac2.0.so" + "?raw=true",
"libdarknet_mac2.0.so",
module="darknet",
)
else:
raise NotImplementedError("Darknet lib is not supported on {} platform".format(sys.platform))
coco_path = download_testdata(
REPO_URL + "data/" + "coco.names" + "?raw=true", "coco.names", module="data"
)
font_path = download_testdata(
REPO_URL + "data/" + "arial.ttf" + "?raw=true", "arial.ttf", module="data"
)
with open(coco_path) as f:
content = f.readlines()
names = [x.strip() for x in content]
env = vta.get_env()
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
pack_dict = {
"yolov3-tiny": ["nn.max_pool2d", "cast", 4, 186],
}
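# The two integers in the yolov3-tiny entry are start_name_idx and stop_name_idx:
# "nn.max_pool2d" and "cast" occur several times in this graph, so graph_pack is
# given node indices (4 and 186) to pinpoint which occurrences delimit the
# packed, VTA-offloaded region (see the graph_pack call below).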
assert MODEL_NAME in pack_dict
if env.TARGET not in ["sim", "tsim"]:
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
device_host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
device_port = os.environ.get("VTA_RPC_PORT", "9091")
if not tracker_host or not tracker_port:
remote = rpc.connect(device_host, int(device_port))
else:
remote = autotvm.measure.request_remote(
env.TARGET, tracker_host, int(tracker_port), timeout=10000
)
reconfig_start = time.time()
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
reconfig_time = time.time() - reconfig_start
print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
else:
remote = rpc.LocalSession()
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
with autotvm.tophub.context(target):
net = __darknetffi__.dlopen(darknet_lib_path).load_network(
cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0
)
dshape = (env.BATCH, net.c, net.h, net.w)
dtype = "float32"
build_start = time.time()
mod, params = relay.frontend.from_darknet(net, dtype=dtype, shape=dshape)
if target.device_name == "vta":
with tvm.transform.PassContext(opt_level=3):
with relay.quantize.qconfig(
global_scale=23.0,
skip_conv_layers=[0],
store_lowbit_output=True,
round_for_shift=True,
):
mod = relay.quantize.quantize(mod, params=params)
mod = graph_pack(
mod["main"],
env.BATCH,
env.BLOCK_OUT,
env.WGT_WIDTH,
start_name=pack_dict[MODEL_NAME][0],
stop_name=pack_dict[MODEL_NAME][1],
start_name_idx=pack_dict[MODEL_NAME][2],
stop_name_idx=pack_dict[MODEL_NAME][3],
)
else:
mod = mod["main"]
with vta.build_config(disabled_pass={"AlterOpLayout", "tir.CommonSubexprElimTIR"}):
lib = relay.build(
mod, target=tvm.target.Target(target, host=env.target_host), params=params
)
build_time = time.time() - build_start
print(MODEL_NAME + " inference graph built in {0:.2f}s!".format(build_time))
temp = utils.tempdir()
lib.export_library(temp.relpath("graphlib.tar"))
remote.upload(temp.relpath("graphlib.tar"))
lib = remote.load_module("graphlib.tar")
m = graph_executor.GraphModule(lib["default"](ctx))
[neth, netw] = dshape[2:]
test_image = "person.jpg" |
img_url = REPO_URL + "data/" + test_image + "?raw=true"
img_path = download_testdata(img_url, test_image, "data")
data = darknet.load_image(img_path, neth, netw).transpose(1, 2, 0)
plt.imshow(data)
plt.show()
data = data.transpose((2, 0, 1))
data = data[np.newaxis, :]
data = np.repeat(data, env.BATCH, axis=0)
m.set_input("data", data)
num = 4
rep = 3
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
timer()
sim_stats = simulator.stats()
print("\nExecution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v
else:
tcost = timer()
std = np.std(tcost.results) * 1000
mean = tcost.mean * 1000
print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
print("Average per sample inference time: %.2fms" % (mean / env.BATCH))
thresh = 0.5
nms_thresh = 0.45
tvm_out = []
for i in range(2):
layer_out = {}
layer_out["type"] = "Yolo"
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_out["mask"].size, layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
thresh = 0.560
img = darknet.load_image_color(img_path)
_, im_h, im_w = img.shape
dets = yolo_detection.fill_network_boxes((netw, neth), (im_w, im_h), thresh, 1, tvm_out)
last_layer = net.layers[net.n - 1]
yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh)
yolo_detection.draw_detections(font_path, img, dets, thresh, names, last_layer.classes)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
"""
.. _basic-mat-mult:
Simple Matrix Multiply
======================
**Author**: `Thierry Moreau <https:
In this tutorial, we will build on top of the :ref:`vta-get-started` tutorial
and introduce additional concepts required to implement matrix multiplication
on VTA with the TVM workflow.
"""
from __future__ import absolute_import, print_function
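# A minimal, self-contained sketch (not this tutorial's full code) of the
# packed matrix-multiply declaration the tutorial builds toward. It follows the
# same pattern as the test_gemm unit test earlier in this dump; the tile counts
# (o, n, m) are illustrative assumptions.
import vta
from tvm import te

env = vta.get_env()
o, n, m = 1, 16, 16  # batch tiles, reduction tiles, output-channel tiles
data = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="data", dtype=env.inp_dtype)
weight = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="weight", dtype=env.wgt_dtype)
ko = te.reduce_axis((0, n), name="ko")
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
# Accumulate in the wider accumulator dtype, as the VTA GEMM core does.
res = te.compute(
    (o, m, env.BATCH, env.BLOCK_OUT),
    lambda bo, co, bi, ci: te.sum(
        data[bo, ko, bi, ki].astype(env.acc_dtype)
        * weight[co, ko, ci, ki].astype(env.acc_dtype),
        axis=[ko, ki],
    ),
    name="res",
)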