"""Group conv2D operator declaration and schedule registration for VTA.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi
from ..environment |
import get_env
@autotvm.register_topi_compute("group_conv2d_packed.vta")
def group_conv2d_packed(cfg, data, kernel, strides, padding, dilation, group, out_dtype):
"""Packed group conv2d nchw function."""
assert dilation == (1, 1)
if padding[0]:
pad_data = topi.nn.pad(data, [0, 0, padding[0], padding[1], 0, 0], name="pad_data")
else:
pad_data = data
assert len(data.shape) == 6
assert len(kernel.shape) == 6
assert data.dtype == "int8", data.dtype
assert kernel.dtype == "int8", kernel.dtype
assert out_dtype == "int32", out_dtype
oheight = topi.utils.get_const_int((pad_data.shape[2] - kernel.shape[2]) // strides[0] + 1)
owidth = topi.utils.get_const_int((pad_data.shape[3] - kernel.shape[3]) // strides[1] + 1)
oshape = (data.shape[0], kernel.shape[0], oheight, owidth, data.shape[4], kernel.shape[4])
ishape = topi.utils.get_const_tuple(data.shape)
kshape = topi.utils.get_const_tuple(kernel.shape)
assert group * kshape[1] == ishape[1]
assert kshape[0] % group == 0
d_i = te.reduce_axis((0, kshape[2]), name="d_i")
d_j = te.reduce_axis((0, kshape[3]), name="d_j")
k_o = te.reduce_axis((0, kshape[1]), name="k_o")
k_i = te.reduce_axis((0, kshape[-1]), name="k_i")
hstride, wstride = strides
out = te.compute(
oshape,
lambda b_o, c_o, i, j, b_i, c_i: te.sum(
pad_data[
b_o,
c_o // (kshape[0] // group) * kshape[1] + k_o,
i * hstride + d_i,
j * wstride + d_j,
b_i,
k_i,
].astype(out_dtype)
* kernel[c_o, k_o, d_i, d_j, c_i, k_i].astype(out_dtype),
axis=[k_o, d_i, d_j, k_i],
),
name="res",
tag="packed_group_conv2d",
)
cfg.add_flop(
2
* np.prod(topi.utils.get_const_tuple(oshape))
* kshape[2]
* kshape[3]
* ishape[1]
* kshape[-1]
)
return out
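# Illustrative usage sketch (not part of the original module): calling the
# packed compute directly, assuming a hypothetical env with BATCH=1 and
# BLOCK_IN=BLOCK_OUT=16. Real callers go through the autotvm dispatcher.
#
#   data = te.placeholder((1, 4, 56, 56, 1, 16), dtype="int8", name="data")
#   kernel = te.placeholder((4, 2, 3, 3, 16, 16), dtype="int8", name="kernel")
#   res = group_conv2d_packed(
#       autotvm.get_config(), data, kernel, (1, 1), (1, 1), (1, 1), 2, "int32"
#   )  # group=2: 2 * 2 kernel channel blocks == 4 data channel blocks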
@autotvm.register_topi_schedule("group_conv2d_packed.vta")
def schedule_group_conv2d_packed(cfg, outs):
"""Schedule |
the packed conv2d."""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
conv2d_res = []
assert output.dtype == "int8"
assert output.op.input_tensors[0].dtype == "int32"
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "packed_group_conv2d"
conv2d_res.append(op)
_traverse(output.op)
assert len(conv2d_res) == 1
conv2d_stage = conv2d_res[0].output(0)
s = te.create_schedule(output.op)
b, c_o, x_i, x_j, _, _ = s[conv2d_stage].op.axis
c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
cfg.define_split("tile_b", b, num_outputs=2)
cfg.define_split("tile_h", x_i, num_outputs=2)
cfg.define_split("tile_w", x_j, num_outputs=2)
cfg.define_split("tile_ci", c_i, num_outputs=2)
cfg.define_split("tile_co", c_o, num_outputs=2)
cfg.define_knob("oc_nthread", [1, 2])
cfg.define_knob("h_nthread", [1, 2])
data, kernel = conv2d_stage.op.input_tensors
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
temp = data.op.input_tensors[0]
pad_data = data
data = temp
else:
pad_data = None
env = get_env()
if pad_data is not None:
cdata = pad_data
s[pad_data].set_scope(env.inp_scope)
else:
cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
s[conv2d_stage].set_scope(env.acc_scope)
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
store_pt = x_j0
s[conv2d_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
if cfg["oc_nthread"].val > 1:
_, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
if cfg["h_nthread"].val > 1:
_, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
s[conv2d_stage].reorder(x_bo, k_o, x_j, d_j, d_i, x_co, x_i, x_bi, x_ci, k_i)
k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
s[cdata].compute_at(s[conv2d_stage], k_o)
s[ckernel].compute_at(s[conv2d_stage], k_o)
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
s[conv2d_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_co1, env.dma_copy)
return s
"""Additional transformation passes for VTA."""
import tvm
from tvm import te
from tvm.topi import utils

from .environment import get_env
def _match_pragma(stmt, key):
"""Internal helper to match stmt to pragma stmt.
Parameters
----------
stmt : Stmt
The AttrStmt
key : str
The pragma key
"""
return (stmt.attr_key == "pragma_" + key) or (
stmt.attr_key == "pragma_scope" and stmt.value.value == key
)
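# Example: an AttrStmt with attr_key "pragma_alu", or one with attr_key
# "pragma_scope" whose value is StringImm("alu"), both match key="alu".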
def FoldUopLoop():
"""Detect and fold uop loop.
VTA support uop programming model
that recognizes loop structure.
This pass detect the loop structure
and extract that into uop loop AST.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _fold_outermost_loop(body):
stmt = body
if not isinstance(stmt, tvm.tir.For):
return None, body, None
loop_var = stmt.loop_var
gemm_offsets = [None, None, None]
fail = [False]
builtin_uop_push = tvm.ir.Op.get("tir.vta.uop_push")
def _post_order(op):
assert isinstance(op, tvm.tir.Call)
base_args = 2
if op.op.same_as(builtin_uop_push):
args = []
args += op.args[:base_args]
for i in range(3):
m = tvm.arith.detect_linear_equation(op.args[i + base_args], [loop_var])
if not m:
fail[0] = True
return op
if gemm_offsets[i] is not None:
if not tvm.ir.structural_equal(m[0], gemm_offsets[i]):
fail[0] = True
return op
args.append(m[1])
else:
gemm_offsets[i] = m[0]
args.append(m[1])
args += op.args[base_args + 3 :]
return tvm.tir.call_intrin("int32", builtin_uop_push, *args)
if op.op.name not in ("tir.vta.command_handle", "tir.tvm_thread_context"):
raise RuntimeError("unexpected op %s" % op)
return op
ret = tvm.tir.stmt_functor.ir_transform(stmt.body, None, _post_order, ["tir.Call"])
if not fail[0] and all(x is not None for x in gemm_offsets):
def _visit(op):
if op.same_as(loop_var):
fail[0] = True
tvm.tir.stmt_functor.post_order_visit(ret, _visit)
if not fail[0]:
begin = tvm.tir.call_extern("int32", "VTAUopLoopBegin", stmt.extent, *gemm_offsets)
end = tvm.tir.call_extern("int32", "VTAUopLoopEnd")
return [begin, ret, end]
raise ValueError("Failed to fold the GEMM instructions..")
def _do_fold(stmt):
env = get_env()
if (
stmt.attr_key == "coproc_uop_scope"
and isinstance(stmt.value, tvm.tir.StringImm)
and stmt.value.value == env.dev.vta_push_uop.value
):
body = stmt.body
begins = []
ends = []
try:
begin, body, end = _fold_outermost_loop(body)
if begin is not None:
begins.append(begin)
if end is not None:
ends.append(end)
begin, body, end = _fold_outermost_loop(body)
if begin is not None:
begins.append(begin)
if end is not None:
ends.append(end)
except ValueError:
pass
if body == stmt.body:
return stmt
ends = list(reversed(ends))
body = tvm.tir.stmt_seq(*(begins + [body] + ends))
return tvm.tir.AttrStmt(stmt.node, stmt.attr_key, stmt.value, body)
return None
def _ftransform(f, mod, ctx):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, _do_fold, None, ["tir.AttrStmt"])
)
return tvm.tir.transform.prim_func_pass(_ftransform, opt_level=0, name="tir.vta.FoldUopLoop")
def CPUAccessRewrite():
"""Detect CPU access to VTA buffer and get add |
ress correctly.
VTA's buffer is an opaque handle that do not
correspond to address in CPU.
This pass detect CPU access and rewrite to use pointer
returned VTABufferCPUPtr for CPU access.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, mod, ctx):
env = get_env()
var_remap = {}
buf_remap = {}
def find_var_remap(old_var):
if old_var in var_remap:
return var_remap[old_var]
new_var = tvm.tir.Var(old_var.name + "_ptr", dtype=old_var.type_annotation)
var_remap[old_var] = new_var
return new_var
def find_buf_remap(old_buf):
if old_buf in buf_remap:
return buf_remap[old_buf]
new_var = find_var_remap(old_buf.data)
new_buf = tvm.tir.decl_buffer(
shape=old_buf.shape,
dtype=old_buf.dtype,
data=new_var,
strides=old_buf.strides,
elem_offset=old_buf.elem_offset,
scope=old_buf.scope,
data_alignment=old_buf.data_alignment,
offset_factor=old_buf.offset_factor,
buffer_type="auto_broadcast" if (old_buf.buffer_type == 2) else "",
axis_separators=old_buf.axis_separators,
)
buf_remap[old_buf] = new_buf
return new_buf
def _post_order(op):
if isinstance(op, tvm.tir.Allocate):
buffer_var = op.buffer_var
if buffer_var not in var_remap:
return None
new_var = var_remap[buffer_var]
let_stmt = tvm.tir.LetStmt(
new_var,
tvm.tir.call_extern(
"handle", "VTABufferCPUPtr", env.dev.command_handle, buffer_var
),
op.body,
)
alloc = tvm.tir.Allocate(buffer_var, op.dtype, op.extents, op.condition, let_stmt)
del var_remap[buffer_var]
bufs_to_delete = [
old_buf for old_buf in buf_remap if old_buf.data.same_as(buffer_var)
]
for buf in bufs_to_delete:
del buf_remap[buf]
return alloc
if isinstance(op, tvm.tir.BufferLoad):
return tvm.tir.BufferLoad(find_buf_remap(op.buffer), op.indices)
if isinstance(op, tvm.tir.BufferStore):
return tvm.tir.BufferStore(find_buf_remap(op.buffer), op.value, op.indices)
raise RuntimeError("not reached")
stmt_in = f.body
stmt = tvm.tir.stmt_functor.ir_transform(
stmt_in, None, _post_order, ["tir.Allocate", "tir.BufferLoad", "tir.BufferStore"]
)
for old_var, new_var in var_remap.items():
stmt = tvm.tir.LetStmt(
new_var,
tvm.tir.call_extern("handle", "VTABufferCPUPtr", env.dev.command_handle, old_var),
stmt,
)
return f.with_body(stmt)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.CPUAccessRewrite"
)
def LiftAllocToScopeBegin():
"""Lift allocate to beginning of the current scope.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, mod, ctx):
lift_stmt = [[]]
def _merge_block(slist, body):
for op in slist:
if op.body == body:
body = op
elif isinstance(op, tvm.tir.Allocate):
body = tvm.tir.Allocate(op.buffer_var, op.dtype, op.extents, op.condition, body)
elif isinstance(op, tvm.tir.AttrStmt):
body = tvm.tir.AttrStmt(op.node, op.attr_key, op.value, body)
elif isinstance(op, tvm.tir.For):
body = tvm.tir.For(
op.loop_var,
op.min,
op.extent,
op.kind,
body,
op.thread_binding,
op.annotations,
)
else:
raise RuntimeError("unexpected op")
del slist[:]
return body
def _pre_order(op):
if isinstance(op, tvm.tir.For):
lift_stmt.append([])
elif isinstance(op, tvm.tir.AttrStmt):
if op.attr_key == "virtual_thread":
lift_stmt.append([])
def _post_order(op):
if isinstance(op, tvm.tir.Allocate):
lift_stmt[-1].append(op)
return op.body
if isinstance(op, tvm.tir.AttrStmt):
if op.attr_key == "storage_scope":
lift_stmt[-1].append(op)
return op.body
if op.attr_key == "virtual_thread":
return _merge_block(lift_stmt.pop() + [op], op.body)
return op
if isinstance(op, tvm.tir.For):
return _merge_block(lift_stmt.pop() + [op], op.body)
raise RuntimeError("not reached")
stmt_in = f.body
stmt = tvm.tir.stmt_functor.ir_transform(
stmt_in, _pre_order, _post_order, ["tir.Allocate", "tir.AttrStmt", "tir.For"]
)
assert len(lift_stmt) == 1
return f.with_body(_merge_block(lift_stmt[0], stmt))
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.LiftAllocToScopeBegin"
)
def InjectSkipCopy():
"""Pass to inject skip copy stmt, used for debug purpose.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _do_fold(stmt):
if _match_pragma(stmt, "skip_dma_copy"):
return tvm.tir.Evaluate(0)
return None
def _ftransform(f, mod, ctx):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, _do_fold, None, ["tir.AttrStmt"])
)
return tvm.tir.transform.prim_func_pass(_ftransform, opt_level=0, name="tir.vta.InjectSkipCopy")
def InjectCoProcSync():
"""Pass inject coproc sync
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(f, *_):
success = [False]
def _do_fold(stmt):
if _match_pragma(stmt, "coproc_sync"):
success[0] = True
sync = tvm.tir.Call("int32", "vta.coproc_sync", [])
return tvm.tir.SeqStmt([stmt.body, tvm.tir.Evaluate(sync)])
if _match_pragma(stmt, "trim_loop"):
op = stmt.body
assert isinstance(op, tvm.tir.For)
return tvm.tir.For(
op.loop_var, op.min, 2, op.kind, op.body, op.thread_binding, op.annotations
)
return None
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, _do_fold, ["tir.AttrStmt"])
)
return tvm.transform.Sequential(
[
tvm.tir.transform.prim_func_pass(_ftransform, 0, "tir.vta.InjectCoProcSync"),
tvm.tir.transform.CoProcSync(),
],
opt_level=0,
name="tir.vta.InjectCoProcSync",
)
def InjectDMAIntrin():
"""Pass to inject DMA copy intrinsics.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
def _check_compact(buf):
ndim = len(buf.shape)
size = tvm.tir.const(1, buf.shape[0].dtype)
for i in reversed(range(ndim)):
if not utils.equal_const_int(size - buf.strides[i], 0):
raise RuntimeError(
"Cannot prove compact: shape=%s, strides=%s" % (buf.shape, buf.strides)
)
size = size * buf.shape[i]
def _fold_buffer_dim(buf, scope, elem_block):
ndim = len(buf.shape)
x_size = 1
base = 0
for i in range(1, ndim + 1):
if not utils.equal_const_int(buf.strides[ndim - i] - x_size, 0):
raise RuntimeError("scope %s needs to have block=%d" % (scope, elem_block))
x_size = x_size * buf.shape[ndim - i]
if utils.equal_const_int(x_size - elem_block, 0):
base = i + 1
break
if base == 0:
raise RuntimeError(
"scope %s need to have block=%d, shape=%s" % (scope, elem_block, buf.shape)
)
shape = [elem_block]
strides = [1]
if base < ndim + 1 and not utils.equal_const_int(buf.strides[ndim - base], elem_block):
shape.append(1)
strides.append(elem_block)
analyzer = tvm.arith.Analyzer()
while base < ndim + 1:
x_size = 1
x_stride = buf.strides[ndim - base]
next_base = base
if not utils.equal_const_int(idxm(x_stride, elem_block), 0):
raise RuntimeError(
"scope %s need to have block=%d, shape=%s, strides=%s"
% (scope, elem_block, buf.shape, buf.strides)
)
for i in range(base, ndim + 1):
k = ndim - i
if not utils.equal_const_int(x_size * x_stride - buf.strides[k], 0):
break
x_size = x_size * buf.shape[k]
next_base = i + 1
shape.append(analyzer.simplify(x_size))
strides.append(x_stride)
assert next_base != base
base = next_base
strides = list(reversed(strides))
shape = list(reversed(shape))
return shape, strides
def _get_2d_pattern(buf, elem_width, elem_bytes, dtype, scope, allow_fold):
elem_block = elem_bytes * 8
shape, strides = buf.shape, buf.strides
if not utils.equal_const_int(idxm(buf.elem_offset, elem_block), 0):
raise RuntimeError("scope %s need to have block=%d" % (scope, elem_block))
if allow_fold:
shape, strides = _fold_buffer_dim(buf, scope, elem_block)
else:
shape = list(x for x in shape)
strides = list(x for x in strides)
def raise_error():
"""Internal function to raise error"""
raise RuntimeError(
(
"Scope[%s]: cannot detect 2d pattern with elem_block=%d:"
+ " shape=%s, strides=%s"
)
% (scope, elem_block, buf.shape, buf.strides)
)
ndim = len(shape)
flat = utils.equal_const_int(shape[-1], elem_block)
if flat:
if not utils.equal_const_int(strides[-1], 1):
raise_error()
if ndim == 1:
x_size = 1
x_stride = 1
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not utils.equal_const_int(strides[-2] - elem_block, 0):
raise_error()
if ndim == 2:
x_size = shape[-2]
x_stride = shape[-2]
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not utils.equal_const_int(idxm(strides[-3], elem_block), 0):
raise_error()
if ndim == 3:
x_size = shape[-2]
x_stride = idxd(strides[-3], elem_block)
y_size = shape[-3]
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
else:
if not utils.equal_const_int(strides[-1], 1):
raise_error()
if not utils.equal_const_int(strides[-2] - shape[-1], 0):
raise_error()
if not utils.equal_const_int(shape[-1] * shape[-2], elem_block):
raise_error()
if ndim == 2:
x_size = 1
x_stride = 1
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not utils.equal_const_int(strides[-3], elem_block):
raise_error()
if ndim == 3:
x_size = shape[-3]
x_stride = shape[-3]
y_size = 1
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
if not utils.equal_const_int(idxm(strides[-4], elem_block), 0):
raise_error()
if ndim == 4:
x_size = shape[-3]
x_stride = idxd(strides[-4], elem_block)
y_size = shape[-4]
return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
raise_error()
def _inject_copy(src, dst, pad_before, pad_after, pad_value):
env = get_env()
_ = pad_value
if dst.scope() == "global":
if pad_before or pad_after:
raise RuntimeError("Do not support copy into DRAM with pad")
if src.scope() == env.acc_scope:
elem_width = env.OUT_WIDTH
elem_bytes = env.OUT_ELEM_BYTES
mem_type = env.dev.MEM_ID_OUT
data_type = "int%d" % env.OUT_WIDTH
task_qid = env.dev.QID_STORE_OUT
else:
raise RuntimeError("Do not support copy %s->dram" % (src.scope()))
_check_compact(src)
x_size, y_size, x_stride, offset = _get_2d_pattern(
dst, elem_width, elem_bytes, data_type, src.scope(), allow_fold=True
)
irb = tvm.tir.ir_builder.create()
irb.scope_attr(env.dev.vta_axis, "coproc_scope", env.dev.get_task_qid(task_qid))
irb.emit(
tvm.tir.call_extern(
"int32",
"VTAStoreBuffer2D",
env.dev.command_handle,
src.access_ptr("r", "int32"),
mem_type,
dst.data,
offset,
x_size,
y_size,
x_stride,
)
)
return irb.get()
elif src.scope() == "global":
if dst.scope() == env.acc_scope:
elem_width = env.ACC_WIDTH
elem_bytes = env.ACC_ELEM_BYTES
mem_type = env.dev.MEM_ID_ACC
data_type = "int%d" % env.ACC_WIDTH
task_qid = env.dev.QID_LOAD_OUT
elif dst.scope() == env.inp_scope:
elem_width = env.INP_WIDTH
elem_bytes = env.INP_ELEM_BYTES
mem_type = env.dev.MEM_ID_INP
data_type = "int%d" % env.INP_WIDTH
task_qid = env.dev.QID_LOAD_INP
elif dst.scope() == env.wgt_scope:
elem_width = env.WGT_WIDTH
elem_bytes = env.WGT_ELEM_BYTES
mem_type = env.dev.MEM_ID_WGT
data_type = "int%d" % env.WGT_WIDTH
task_qid = env.dev.QID_LOAD_WGT
else:
raise RuntimeError("Do not support copy dram->%s" % (dst.scope()))
if pad_before:
assert pad_after
ndim = len(pad_before)
if ndim <= 2 or ndim > 5:
raise ValueError("Limitation of 2D pad load forbid ndim=%d" % ndim)
if ndim == 5:
y_pad_before = pad_before[1]
x_pad_before = pad_before[2]
y_pad_after = pad_after[1]
x_pad_after = pad_after[2]
for dim in range(3, ndim):
if not utils.equal_const_int(pad_before[dim], 0):
raise ValueError("Do not support pad on the innermost block")
if not utils.equal_const_int(pad_after[dim], 0):
raise ValueError("Do not support pad on the innermost block")
else:
y_pad_before = pad_before[0]
x_pad_before = pad_before[1]
y_pad_after = pad_after[0]
x_pad_after = pad_after[1]
for dim in range(2, ndim):
if not utils.equal_const_int(pad_before[dim], 0):
raise ValueError("Do not support pad on the innermost block")
if not utils.equal_const_int(pad_after[dim], 0):
raise ValueError("Do not support pad on the innermost block")
allow_fold = False
else:
x_pad_before = 0
y_pad_before = 0
x_pad_after = 0
y_pad_after = 0
allow_fold = True
_check_compact(dst)
x_size, y_size, x_stride, offset = _get_2d_pattern(
src, elem_width, elem_bytes, data_type, dst.scope(), allow_fold=allow_fold
)
if data_type != src.dtype:
assert data_type == "int%d" % env.ACC_WIDTH and src.dtype == "int%d" % env.INP_WIDTH
mem_type = env.dev.MEM_ID_ACC_8BIT
irb = tvm.tir.ir_builder.create()
irb.scope_attr(env.dev.vta_axis, "coproc_scope", env.dev.get_task_qid(task_qid))
irb.emit(
tvm.tir.call_extern(
"int32",
"VTALoadBuffer2D",
env.dev.command_handle,
src.data,
offset,
x_size,
y_size,
x_stride,
x_pad_before,
y_pad_before,
x_pad_after,
y_pad_after,
dst.access_ptr("r", "int32"),
mem_type,
)
)
return irb.get()
else:
raise RuntimeError("Do not support copy %s->%s" % (src.scope(), dst.scope()))
return tvm.tir.transform.InjectCopyIntrin("dma_copy", _inject_copy)
def _get_gemm_intrin_buffer():
env = get_env()
wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
assert inp_lanes == env.BATCH * env.BLOCK_IN
inp_shape = (env.BATCH, env.BLOCK_IN)
assert inp_shape[0] * inp_shape[1] == inp_lanes
out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
assert out_lanes == env.BATCH * env.BLOCK_OUT
out_shape = (env.BATCH, env.BLOCK_OUT)
assert out_shape[0] * out_shape[1] == out_lanes
wgt = te.placeholder(
(wgt_shape[0], wgt_shape[1]), dtype="int%d" % env.WGT_WIDTH, name=env.wgt_scope
)
inp = te.placeholder(
(inp_shape[0], inp_shape[1]), dtype="int%d" % env.INP_WIDTH, name=env.inp_scope
)
k = te.reduce_axis((0, wgt_shape[1]), name="k")
out_dtype = "int%d" % env.ACC_WIDTH
out = te.compute(
(out_shape[0], out_shape[1]),
lambda i, j: te.sum(inp[i, k].astype(out_dtype) * wgt[j, k].astype(out_dtype), axis=[k]),
name="out",
)
wgt_layout = tvm.tir.decl_buffer(
wgt.shape,
wgt.dtype,
env.wgt_scope,
scope=env.wgt_scope,
offset_factor=wgt_lanes,
data_alignment=wgt_lanes,
)
inp_layout = tvm.tir.decl_buffer(
inp.shape,
inp.dtype,
env.inp_scope,
scope=env.inp_scope,
offset_factor=inp_lanes,
data_alignment=inp_lanes,
)
out_layout = tvm.tir.decl_buffer(
out.shape,
out.dtype,
env.acc_scope,
scope=env.acc_scope,
offset_factor=out_lanes,
data_alignment=out_lanes,
)
return wgt_layout, inp_layout, out_layout
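# Note (illustrative): the buffers above describe one GEMM micro-op,
#   out[i, j] += sum_k inp[i, k] * wgt[j, k]
# with shapes (BATCH, BLOCK_IN) x (BLOCK_OUT, BLOCK_IN) -> (BATCH, BLOCK_OUT),
# i.e. the unit of work that gets tensorized onto VTA's GEMM core.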
def InjectConv2DTransposeSkip():
"""Pass to skip 0-weights in conv2d transpose with stride > 1.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
dwgt, dinp, dout = _get_gemm_intrin_buffer()
calls = []
selects = []
def _find_basics(op):
if isinstance(op, tvm.tir.BufferLoad):
calls.append(op)
elif isinstance(op, tvm.tir.Select):
selects.append(op)
def _do_fold(op):
if _match_pragma(op, "conv2d_transpose_gemm"):
is_init = ".init" in str(op)
tvm.tir.stmt_functor.post_order_visit(op, _find_basics)
if is_init:
irb = tvm.tir.ir_builder.create()
dev = env.dev
irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
irb.emit(
tvm.tir.call_intrin(
"int32",
"tir.vta.uop_push",
0,
1,
dout.access_ptr("rw", "int32"),
0,
0,
0,
0,
0,
)
)
inner = irb.get()
body = op.body.body
while isinstance(body, tvm.tir.IfThenElse):
body = body.then_case
args = body.indices
res_buffer = body.buffer
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_OUT)
inner = tvm.tir.AttrStmt(
[dout, res_buffer],
"buffer_bind_scope",
tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
inner,
)
return inner
else:
conv_call, data_call, kernel_call = calls[-3:]
pad_data_tensor = data_call.buffer
kernel_tensor = kernel_call.buffer
res_tensor = conv_call.buffer
if selects:
condition = selects[0].condition
else:
condition = tvm.tir.const(1, "int")
irb = tvm.tir.ir_builder.create()
with irb.if_scope(condition):
dev = env.dev
irb.scope_attr(
dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE)
)
irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
irb.emit(
tvm.tir.call_intrin(
"int32",
"tir.vta.uop_push",
0,
0,
dout.access_ptr("rw", "int32"),
dinp.access_ptr("r", "int32"),
dwgt.access_ptr("r", "int32"),
0,
0,
0,
)
)
inner = irb.get()
args = conv_call.indices
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_OUT)
inner = tvm.tir.AttrStmt(
[dout, res_tensor],
"buffer_bind_scope",
tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
inner,
)
args = kernel_call.indices
tpl = (
args[0],
1,
args[1],
1,
args[2],
1,
args[3],
1,
0,
env.BLOCK_OUT,
0,
env.BLOCK_IN,
)
inner = tvm.tir.AttrStmt(
[dwgt, kernel_tensor],
"buffer_bind_scope",
tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
inner,
)
args = data_call.indices
tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_IN)
inner = tvm.tir.AttrStmt(
[dinp, pad_data_tensor],
"buffer_bind_scope",
tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
inner,
)
return inner
return None
return func.with_body(
tvm.tir.stmt_functor.ir_transform(func.body, _do_fold, None, ["tir.AttrStmt"])
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.InjectConv2DTrasnposeSkip"
)
def AnnotateALUCoProcScope():
"""Pass to insert ALU instruction.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
def _do_fold(stmt):
if _match_pragma(stmt, "alu"):
irb = tvm.tir.ir_builder.create()
irb.scope_attr(
env.dev.vta_axis, "coproc_scope", env.dev.get_task_qid(env.dev.QID_COMPUTE)
)
irb.scope_attr(
env.dev.vta_axis, "coproc_uop_scope", tvm.tir.StringImm("VTAPushALUOp")
)
irb.emit(stmt)
return irb.get()
if _match_pragma(stmt, "skip_alu"):
return tvm.tir.Evaluate(0)
return stmt
return func.with_body(
tvm.tir.stmt_functor.ir_transform(func.body, None, _do_fold, ["tir.AttrStmt"])
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.AnnotateALUCoProcScope"
)
def InjectALUIntrin():
"""Pass to inject ALU micro-ops.
Returns
-------
fpass : tvm.transform.Pass
The pass
"""
def _ftransform(func, mod, ctx):
env = get_env()
idxm = tvm.tir.indexmod
analyzer = tvm.arith.Analyzer()
def _do_fold(stmt):
def _flatten_loop(src_coeff, dst_coeff, extents):
src_coeff = list(src_coeff)
dst_coeff = list(dst_coeff)
extents = list(extents)
rev_src_coeff = [src_coeff.pop()]
rev_dst_coeff = [dst_coeff.pop()]
rev_extents = []
assert src_coeff
vsrc = src_coeff.pop()
vdst = dst_coeff.pop()
vext = extents.pop()
while src_coeff:
next_src = src_coeff.pop()
next_dst = dst_coeff.pop()
next_ext = extents.pop()
if analyzer.can_prove_equal(next_src, vsrc * vext) and analyzer.can_prove_equal(
next_dst, vdst * vext
):
vext = analyzer.simplify(vext * next_ext)
else:
rev_src_coeff.append(vsrc)
rev_dst_coeff.append(vdst)
rev_extents.append(vext)
vsrc = next_src
vdst = next_dst
vext = next_ext
rev_src_coeff.append(vsrc)
rev_dst_coeff.append(vdst)
rev_extents.append(vext)
rev_src_coeff.reverse()
rev_dst_coeff.reverse()
rev_extents.reverse()
return rev_src_coeff, rev_dst_coeff, rev_extents
if _match_pragma(stmt, "alu"):
loop_body = stmt.body
nest_size = 0
while isinstance(loop_body, tvm.tir.For):
loop_body = loop_body.body
nest_size += 1
dst_var = loop_body.buffer.data
dst_idx = loop_body.indices[0]
tmp_body = stmt.body
indices = []
extents = []
for _ in range(nest_size):
indices.append(tmp_body.loop_var)
extents.append(tmp_body.extent)
tmp_body = tmp_body.body
if isinstance(loop_body.value, tvm.tir.Add):
alu_opcode = env.dev.ALU_OPCODE_ADD
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Sub):
alu_opcode = env.dev.ALU_OPCODE_SUB
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Mul):
alu_opcode = env.dev.ALU_OPCODE_MUL
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Min):
alu_opcode = env.dev.ALU_OPCODE_MIN
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Max):
alu_opcode = env.dev.ALU_OPCODE_MAX
lhs = loop_body.value.a
rhs = loop_body.value.b
elif isinstance(loop_body.value, tvm.tir.Call):
if loop_body.value.op.name == "tir.shift_left":
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value.args[0]
rhs = analyzer.simplify(-loop_body.value.args[1])
elif loop_body.value.op.name == "tir.shift_right":
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value.args[0]
rhs = loop_body.value.args[1]
else:
raise RuntimeError(
"Function call not recognized %s" % (loop_body.value.name)
)
elif isinstance(loop_body.value, tvm.tir.BufferLoad):
alu_opcode = env.dev.ALU_OPCODE_SHR
lhs = loop_body.value
rhs = tvm.tir.const(0, "int32")
else:
raise RuntimeError(
"Expression not recognized %s, %s, %s"
% (type(loop_body.value), str(loop_body.value), str(stmt))
)
dst_coeff = tvm.arith.detect_linear_equation(dst_idx, indices)
use_imm = False
imm_val = None
if isinstance(rhs, tvm.tir.IntImm):
assert lhs.buffer.data.same_as(dst_var)
src_coeff = tvm.arith.detect_linear_equation(lhs.indices[0], indices)
use_imm = True
imm_val = rhs
if isinstance(lhs, tvm.tir.IntImm):
assert rhs.buffer.data.same_as(dst_var)
src_coeff = tvm.arith.detect_linear_equation(rhs.indices[0], indices)
use_imm = True
imm_val = lhs
if imm_val is None:
imm_val = 0
assert lhs.buffer.data.same_as(dst_var) and rhs.buffer.data.same_as(dst_var)
src_lhs_coeff = tvm.arith.detect_linear_equation(lhs.indices[0], indices)
src_rhs_coeff = tvm.arith.detect_linear_equation(rhs.indices[0], indices)
lhs_equal = True
rhs_equal = True
for i, coef in enumerate(dst_coeff):
if not tvm.ir.structural_equal(coef, src_lhs_coeff[i]):
lhs_equal = False
if not tvm.ir.structural_equal(coef, src_rhs_coeff[i]):
rhs_equal = False
assert lhs_equal or rhs_equal
if lhs_equal:
src_coeff = src_rhs_coeff
else:
src_coeff = src_lhs_coeff
src_coeff = list(src_coeff)
dst_coeff = list(dst_coeff)
extents = list(extents)
assert len(src_coeff) > 1
assert len(dst_coeff) > 1
assert len(extents) != 0
assert tvm.ir.structural_equal(
analyzer.simplify(idxm(src_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0
)
assert tvm.ir.structural_equal(
analyzer.simplify(idxm(dst_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0
)
assert tvm.ir.structural_equal(src_coeff[-2], 1)
assert tvm.ir.structural_equal(dst_coeff[-2], 1)
if env.BATCH > 1:
assert len(src_coeff) > 2
assert len(dst_coeff) > 2
assert len(extents) > 1
assert tvm.ir.structural_equal(src_coeff[-3], env.BLOCK_OUT)
assert tvm.ir.structural_equal(dst_coeff[-3], env.BLOCK_OUT)
src_offset = src_coeff[-1]
dst_offset = dst_coeff[-1]
if env.BATCH == 1:
src_coeff = src_coeff[:-2]
dst_coeff = dst_coeff[:-2]
extents = extents[:-1]
else:
src_coeff = src_coeff[:-3]
dst_coeff = dst_coeff[:-3]
extents = extents[:-2]
src_coeff.append(src_offset)
dst_coeff.append(dst_offset)
src_coeff = [analyzer.simplify(c) for c in src_coeff]
dst_coeff = [analyzer.simplify(c) for c in dst_coeff]
if extents:
src_coeff, dst_coeff, extents = _flatten_loop(src_coeff, dst_coeff, extents)
irb = tvm.tir.ir_builder.create()
for idx, extent in enumerate(extents):
irb.emit(
tvm.tir.call_extern(
"int32",
"VTAUopLoopBegin",
extent,
dst_coeff[idx],
src_coeff[idx],
0,
)
)
use_imm = int(use_imm)
irb.emit(
tvm.tir.call_intrin(
"int32",
"tir.vta.uop_push",
1,
0,
dst_coeff[len(dst_coeff) - 1],
src_coeff[len(src_coeff) - 1],
0,
alu_opcode,
use_imm,
imm_val,
)
)
for extent in extents:
irb.emit(tvm.tir.call_extern("int32", "VTAUopLoopEnd"))
return irb.get()
return stmt
return func.with_body(
tvm.tir.stmt_functor.ir_transform(func.body, None, _do_fold, ["tir.AttrStmt"])
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.vta.InjectALUIntrin"
)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime.h
* \brief VTA runtime library.
*/
extern "C" {
/*!
* \brief Allocate data buffer.
* \param size Buffer size.
* \return A pointer to the allocated buffer.
*/
TVM_DLL void* VTABufferAlloc(size_t size);
/*!
* \brief Free data buffer.
* \param buffer The data buffer to be freed.
*/
TVM_DLL void VTABufferFree(void* buffer);
/*!
* \brief Copy data buffer from one location to another.
* \param from The source buffer base address.
* \param from_offset The offset of the source buffer.
* \param to The target buffer base address.
* \param to_offset The offset of the target buffer.
* \param size Size of copy.
* \param kind_mask The memory copy kind.
*/
TVM_DLL void VTABufferCopy(const void* from, size_t from_offset, void* to, size_t to_offset,
size_t size, int kind_mask);
/*! \brief VTA command handle */
typedef void* VTACommandHandle;
/*! \brief Shutdown hook of VTA to cleanup resources */
TVM_DLL void VTARuntimeShutdown();
/*!
* \brief Get thread local command handle.
* \return A thread local command handle.
*/
TVM_DLL VTACommandHandle VTATLSCommandHandle();
/*!
* \brief Get the buffer access pointer on CPU.
* \param cmd The VTA command handle.
* \param buffer The data buffer.
* \return The pointer that can be accessed by the CPU.
*/
TVM_DLL void* VTABufferCPUPtr(VTACommandHandle cmd, void* buffer);
/*!
* \brief Perform a write barrier to make a memory region visible to the CPU.
* \param cmd The VTA command handle.
* \param buffer The head buffer pointer.
* \param elem_bits The size in bits of each element.
* \param start The start of the region (in elements).
* \param extent The end of the region (in elements).
*/
TVM_DLL void VTAWriteBarrier(VTACommandHandle cmd, void* buffer, uint32_t elem_bits, uint32_t start,
uint32_t extent);
/*!
* \brief Perform a read barrier to make a memory region visible to VTA.
* \param cmd The VTA command handle.
* \param buffer The head buffer pointer.
* \param elem_bits The size in bits of each element.
* \param start The start of the region (in elements).
* \param extent The end of the region (in elements).
*/
TVM_DLL void VTAReadBarrier(VTACommandHandle cmd, void* buffer, uint32_t elem_bits, uint32_t start,
uint32_t extent);
/*!
* \brief Set debug mode on the command handle.
* \param cmd The VTA command handle.
* \param debug_flag The debug flag.
*/
TVM_DLL void VTASetDebugMode(VTACommandHandle cmd, int debug_flag);
/*!
* \brief Perform a 2D data load from DRAM.
* Sizes are measured in units of vector elements.
* \param cmd The VTA command handle.
* \param src_dram_addr Source DRAM address.
* \param src_elem_offset The source DRAM offset in number of unit elements.
* \param x_size The lowest dimension (x axis) size in number of unit elements.
* \param y_size The number of rows (y axis).
* \param x_stride The x axis stride.
* \param x_pad_before The start padding on x axis.
* \param y_pad_before The start padding on y axis.
* \param x_pad_after The end padding on x axis.
* \param y_pad_after The end padding of y axis.
* \param dst_sram_index Destination SRAM index.
* \param dst_memory_type Destination memory type.
*/
TVM_DLL void VTALoadBuffer2D(VTACommandHandle cmd, void* src_dram_addr, uint32_t src_elem_offset,
uint32_t x_size, uint32_t y_size, uint32_t x_stride,
uint32_t x_pad_before, uint32_t y_pad_before, uint32_t x_pad_after,
uint32_t y_pad_after, uint32_t dst_sram_index,
uint32_t dst_memory_type);
/*!
* \brief Perform a 2D data store into DRAM.
* Sizes are measured in units of vector elements.
* \param cmd The VTA command handle.
* \param src_sram_index Source SRAM index.
* \param src_memory_type Source memory type.
* \param dst_dram_addr Destination DRAM address.
* \param dst_elem_offset The destination DRAM offset in number of unit elements.
* \param x_size The lowest dimension (x axis) size in number of unit elements.
* \param y_size The number of rows.
* \param x_stride The x axis stride.
*/
TVM_DLL void VTAStoreBuffer2D(VTACommandHandle cmd, uint32_t src_sram_index,
uint32_t src_memory_type, void* dst_dram_addr,
uint32_t dst_elem_offset, uint32_t x_size, uint32_t y_size,
uint32_t x_stride);
/*!
* \brief Push uop into kernel buffer.
* In GEMM mode, do a blocked GEMM with 2d access pattern.
* In ALU mode, do a vectorized ALU operation with 2d access pattern.
*
* \code
*
* DType accum[ACC_BUFF_DEPTH][l][n];
* DType weight[WGT_BUFF_DEPTH][n][m];
* DType input[INP_BUFF_DEPTH][l][m];
* if reset_out == 1
* accum[dst_index] = 0
* elif mode == 0
* accum[dst_index] += GEMM(input[src_index], weight[wgt_index]);
* else
* if (use_imm)
* accum[dst_index] = opcode(accum[dst_index], imm_val);
* else
* accum[dst_index] = opcode(accum[dst_index], accum[src_index]);
*
* \endcode
*
* \param mode 0 selects GEMM mode, 1 selects ALU mode.
* \param reset_out Resets the accum to 0.
* \param dst_index The accum memory index.
* \param src_index The input memory (gemm) / accum memory (alu) index.
* \param wgt_index The weight memory index.
* \param opcode The ALU opcode.
* \param use_imm Use immediate in ALU mode if set to true.
* \param imm_val Immediate value in ALU mode.
*/
TVM_DLL void VTAUopPush(uint32_t mode, uint32_t reset_out, uint32_t dst_index, uint32_t src_index,
uint32_t wgt_index, uint32_t opcode, uint32_t use_imm, int32_t imm_val);
/*!
* \brief Mark start of a micro op loop.
* \param extent The extent of the loop.
* \param dst_factor The accum factor.
* \param src_factor The input factor.
* \param wgt_factor The weight factor.
*/
TVM_DLL void VTAUopLoopBegin(uint32_t extent, uint32_t dst_factor, uint32_t src_factor,
uint32_t wgt_factor);
/*!
* \brief Mark end of a micro op loop.
*/
TVM_DLL void VTAUopLoopEnd();
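/*!
 * \brief Example (illustrative, not part of the original header): one uop
 * loop that adds an immediate to 16 consecutive accumulator rows in ALU mode.
 *
 * \code
 *
 *   VTAUopLoopBegin(16, 1, 1, 0);  // advance dst and src index by 1 per iter
 *   VTAUopPush(1, 0, 0, 0, 0, VTA_ALU_OPCODE_ADD, 1, 42);
 *   VTAUopLoopEnd();
 *
 * \endcode
 */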
/*!
* \brief Push GEMM uop kernel into the command handle.
* \param uop_handle The uop cache handle.
* \param finit The initialization function to initialize the uop.
* \param signature The closure arguments of finit.
* \param nbytes The number of bytes in the closure arguments.
* \return 0 if success.
*/
TVM_DLL int VTAPushGEMMOp(void** uop_handle, int (*finit)(void*), void* signature, int nbytes);
/*!
* \brief Push ALU uop kernel into the command handle.
* \param uop_handle The uop cache handle.
* \param finit The initialization function to initialize the uop.
* \param signature The closure arguments of finit.
* \param nbytes The number of bytes in the closure arguments.
* \return 0 if success.
*/
TVM_DLL int VTAPushALUOp(void** uop_handle, int (*finit)(void*), void* signature, int nbytes);
/*!
* \brief Push dependence token.
* \param cmd The VTA command handle.
* \param from_qid The source queue.
* \param to_qid The destination queue.
* \return 0 if success.
*/
TVM_DLL int VTADepPush(VTACommandHandle cmd, int from_qid, int to_qid);
/*!
* \brief Pop dependence signal.
* \param cmd The VTA command handle.
* \param from_qid The source queue.
* \param to_qid The destination queue.
* \return 0 if success.
*/
TVM_DLL int VTADepPop(VTACommandHandle cmd, int from_qid, int to_qid);
/*!
* \brief Synchronize the command handle.
* Commit all the instructions to VTA and wait until
* the accelerator finishes its job.
* Perform all of the out-of-order DRAM stores.
* \param cmd The VTA command handle.
* \param wait_cycles The limit of poll cycles.
*
*/
TVM_DLL void VTASynchronize(VTACommandHandle cmd, uint32_t wait_cycles);
}
"""Tuning a single conv2d operator"""
from collections |
import namedtuple |
import logging |
import os |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi |
import vta |
import vta.testing
env = vta.get_env()
Workload = namedtuple(
"Conv2DWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
],
)
resnet_wkls = [
("resnet-18.C2", Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
("resnet-18.C3", Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
("resnet-18.C4", Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
("resnet-18.C5", Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
("resnet-18.C6", Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
("resnet-18.C7", Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
("resnet-18.C8", Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
("resnet-18.C9", Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
("resnet-18.C10", Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
("resnet-18.C11", Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
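# Note: keeping min and max in separate stages means each stage is one
# elementwise op, which VTA can execute as a single ALU instruction apiece.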
def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, dilation):
data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
bias_shape = (N // env.BATCH, CO // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT)
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
with tvm.target.vta():
res = topi.nn.conv2d(
input=data,
filter=kernel,
padding=padding,
strides=strides,
dilation=dilation,
layout="NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN),
out_dtype=env.acc_dtype,
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = topi.add(res, bias)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_conv2d_nchw([res])
else:
s = te.create_schedule([res.op])
return s, [data, kernel, bias, res]
if __name__ == "__main__":
logging.basicConfig()
log_file = "%s.conv2d.log" % (env.TARGET)
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(resnet_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(resnet_wkls))
N = wl.batch
CI = wl.in_filter
H = wl.height
W = wl.width
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
dilation = (1, 1)
task = autotvm.task.create(
conv2d,
args=(N, CI, H, W, CO, KH, KW, strides, padding, dilation),
target=tvm.target.vta(),
target_host=env.target_host,
template_key="direct",
)
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
),
)
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
"""Tuning a single conv2d transpose operator"""
from collections import namedtuple
import logging
import os

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
env = vta.get_env()
Workload = namedtuple(
"Conv2DTransposeWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
"o_hpad",
"o_wpad",
],
)
dcgan_wkls = [
("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def conv2d_transpose(N, CI, H, W, CO, KH, KW, strides, padding, opadding):
data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
with tvm.target.vta():
res = topi.nn.conv2d_transpose_nchw(
Input=data,
Filter=kernel,
strides=strides,
padding=padding,
out_dtype=env.acc_dtype,
output_padding=opadding,
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_conv2d_transpose_nchw([res])
else:
s = te.create_schedule([res.op])
return s, [data, kernel, res]
if __name__ == "__main__":
logging.basicConfig()
log_file = "%s.conv2d_transpose.log" % |
(env.TARGET)
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(dcgan_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(dcgan_wkls))
N = wl.batch
H = wl.height
W = wl.width
CI = wl.in_filter
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
opadding = (wl.o_hpad, wl.o_wpad)
task = autotvm.task.create(
conv2d_transpose,
args=(N, CI, H, W, CO, KH, KW, strides, padding, opadding),
target=tvm.target.Target(tvm.target.vta(), host=env.target_host),
template_key="direct",
)
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
),
)
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
"""Tuning a single dense operator"""
from collections import namedtuple
import logging
import os

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
env = vta.get_env()
Workload = namedtuple("DenseWorkload", ["batch", "in_filter", "out_filter"])
dense_wkls = [
("lstm.dense.1", Workload(1, 256, 128)),
("lstm.dense.4", Workload(4, 256, 128)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def dense(N, CI, CO):
data_shape = (N // env.BATCH, CI // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, env.BLOCK_OUT, env.BLOCK_IN)
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
with tvm.target.vta():
res = topi.nn.dense(data, kernel, None, "int32")
res = topi.right_shift(res, 8)
res = my_clip(res, 0, 127)
res = topi.cast(res, "int8")
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_dense([res])
else:
s = te.create_schedule([res.op])
return s, [data, kernel, res]
if __name__ == "__main__":
logging.basicConfig()
log_file = "%s.dense.log" % (env.TARGET)
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(dense_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(dense_wkls))
N = wl.batch
CI = wl.in_filter
CO = wl.out_filter
task = autotvm.task.create(
dense,
args=(N, CI, CO),
target=tvm.target.vta(),
target_host=env.target_host,
template_key="direct",
)
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
),
)
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
"""Tuning a single group conv2d operator"""
from collections import namedtuple
import logging
import os

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
env = vta.get_env()
Workload = namedtuple(
"GroupConv2DWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"groups",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
],
)
mobilenet_wkls = [
("mobilenet.D1", Workload(env.BATCH, 112, 112, 32, 32, 2, 3, 3, 1, 1, 1, 1)),
("mobilenet.D2", Workload(env.BATCH, 112, 112, 64, 64, 4, 3, 3, 1, 1, 2, 2)),
("mobilenet.D3", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 1, 1)),
("mobilenet.D4", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 2, 2)),
("mobilenet.D5", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 1, 1)),
("mobilenet.D6", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 2, 2)),
("mobilenet.D7", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 1, 1)),
("mobilenet.D8", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 2, 2)),
("mobilenet.D9", Workload(env.BATCH, 7, 7, 1024, 1024, 64, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
def group_conv2d(N, CI, H, W, CO, KH, KW, strides, padding, dilation, groups):
CI_G = CI // groups
data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
kernel_shape = (CO // env.BLOCK_OUT, CI_G // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
bias_shape = (N // env.BATCH, CO // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT)
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
with tvm.target.vta():
res = topi.nn.group_conv2d_nchw(
data, kernel, strides, padding, dilation, groups, env.acc_dtype
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = topi.add(res, bias)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
if tvm.target.Target.current().device_name == "vta":
s = topi.generic.schedule_group_conv2d_nchw([res])
else:
s = te.create_schedule([res.op])
return s, [data, kernel, bias, res]
if __name__ == "__main__":
logging.basicConfig()
log_file = "%s.group_conv2d.log" % (env.TARGET)
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(mobilenet_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(mobilenet_wkls))
N = wl.batch
CI = wl.in_filter
H = wl.height
W = wl.width
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
dilation = (1, 1)
groups = wl.groups
task = autotvm.task.create(
group_conv2d,
args=(N, CI, H, W, CO, KH, KW, strides, padding, dilation, groups),
target=tvm.target.vta(),
target_host=env.target_host,
template_key="direct",
)
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
),
)
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
"""Perform ResNet autoTVM tuning on VTA using Relay."""
import argparse, os, time

from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image

from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
import vta
from vta.testing import simulator
from vta.top import graph_pack
from tvm.autotvm.task