file_path (string, 7 to 180 chars) | content (string, 0 to 811k chars) | repo (string, 11 classes)
---|---|---|
python/tvm/target/virtual_device.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Python bindings for creating VirtualDevices."""
import tvm
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object
class VirtualDevice(Object):
"""A compile time representation for where data is to be stored at runtime,
and how to compile code to compute it."""
def __init__(self, device=None, target=None, memory_scope="") -> None:
if device is None:
# The 'unconstrained' device has device type -1 and device id -1.
device = tvm.device(-1, -1)
self.__init_handle_by_constructor__(
_ffi_api.VirtualDevice_ForDeviceTargetAndMemoryScope, device, target, memory_scope
)
@property
def device_type(self) -> int:
return self.device_type_int
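# Illustrative usage sketch (not part of the upstream file): constructing a fully
# constrained VirtualDevice versus the default "unconstrained" one. The "llvm"
# target string and the CPU device are assumptions made for illustration only.
if __name__ == "__main__":
    fully_constrained = VirtualDevice(tvm.cpu(0), tvm.target.Target("llvm"), "global")
    unconstrained = VirtualDevice()
    # The unconstrained device reports the sentinel device type -1.
    print(fully_constrained.device_type, unconstrained.device_type)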
| https://github.com/zk-ml/tachikoma |
python/tvm/te/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin, wildcard-import
"""Namespace for Tensor Expression Language
"""
# expose all operators in tvm tir.op
from tvm.tir import any, all, min_value, max_value, trace
from tvm.tir import exp, erf, tanh, sigmoid, log, tan, cos, sin, sqrt, rsqrt, floor, ceil
from tvm.tir import sinh, cosh, log2, log10
from tvm.tir import asin, asinh, acos, acosh, atan, atanh
from tvm.tir import trunc, abs, round, nearbyint, power, popcount, fmod, if_then_else
from tvm.tir import isnan, isfinite, isinf
from tvm.tir import div, indexdiv, indexmod, truncdiv, truncmod, floordiv, floormod
from tvm.tir import comm_reducer, min, max, sum
from tvm.tir import add, subtract, multiply
from .schedule import (
Schedule,
Stage,
create_schedule,
SpecializedCondition,
AXIS_SEPARATOR,
)
from .tensor import TensorSlice, Tensor
from .tensor_intrin import decl_tensor_intrin
from .tag import tag_scope
from .operation import placeholder, compute, scan, extern, var, size_var, const
from .operation import thread_axis, reduce_axis
from .operation import create_prim_func
from .operation import extern_primfunc
from .tensor import PlaceholderOp, ComputeOp, TensorComputeOp, ScanOp, ExternOp, HybridOp
from .autodiff import gradient
from . import hybrid
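# Illustrative usage sketch (not part of the upstream file): a minimal use of the
# names re-exported above. The symbolic shape and tensor names are made up.
if __name__ == "__main__":
    import tvm

    n = var("n")
    A = placeholder((n,), name="A")
    B = compute((n,), lambda i: A[i] * 2.0, name="B")
    s = create_schedule(B.op)
    # Lower to TIR to inspect the generated loop nest.
    print(tvm.lower(s, [A, B], simple_mode=True))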
| https://github.com/zk-ml/tachikoma |
python/tvm/te/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.te"""
import tvm._ffi
tvm._ffi._init_api("te", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/te/autodiff.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Automatic differentiation of tensor expressions."""
from . import _ffi_api
def gradient(output, inputs, head=None):
"""Perform reverse-mode automatic differentiation.
Parameters
----------
output : Tensor
The tensor to differentiate.
inputs : List[Tensor]
The list of input tensors to be differentiated wrt.
head : Tensor
The adjoint of the output, in other words, some tensor, by which the Jacobians
will be multiplied. Its shape must be of the form `prefix + output.shape`.
If `None` is passed, the identity tensor of shape `output.shape + output.shape`
will be used.
Returns
-------
tensors: List[Tensor]
The result gradient, in the same order as the inputs
Example
-------
.. code-block:: python
x = te.placeholder((32, 3, 28, 28), name='x')
w1 = te.placeholder((10, 3, 3, 3), name='w1')
w2 = te.placeholder((10, 10, 3, 3), name='w2')
z1 = topi.nn.conv2d(x, w1, 1, 1, 1)
z2 = topi.nn.conv2d(z1, w2, 1, 1, 1)
y = topi.sum(z2)
# produce gradients
[dw1, dw2] = te.gradient(y, [w1, w2])
# produce Jacobians
[jw1, jw2] = te.gradient(z2, [w1, w2])
# produce gradients, the head adjoint for z2 is provided manually
[dw1, dw2] = te.gradient(z2, [w1, w2], topi.full_like(z2, 1.0))
"""
if not isinstance(inputs, list):
inputs = [inputs]
return _ffi_api.Gradient(output, inputs, head)
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hybrid Programming APIs of TVM Python Package.
This package maps a subset of Python to HalideIR so that:
1. Users can write preliminary versions of computation patterns
that are not supported yet, and verify them against both real execution and
Python semantic emulation.
2. So far, it is a text format dedicated to HalideIR Phase 0. Refer to tvm.lower
for more details. A larger ambition of this module is to support all levels of
HalideIR.
"""
# TODO(@were): Make this module more complete.
# 1. Support HalideIR dumping to Hybrid Script
# 2. Support multi-level HalideIR
import inspect
import tvm._ffi
import tvm.te.schedule
from tvm._ffi.base import decorate
from .module import HybridModule
from .parser import source_to_op
from .utils import _pruned_source
def script(pyfunc):
"""Decorate a python function function as hybrid script.
The hybrid function support emulation mode and parsing to
the internal language IR.
Returns
-------
hybrid_func : function
A decorated hybrid script function.
"""
# pylint: disable=import-outside-toplevel, missing-docstring
def wrapped_func(func, *args, **kwargs):
from .utils import _is_tvm_arg_types
if _is_tvm_arg_types(args):
src = _pruned_source(func)
closure_vars = inspect.getclosurevars(func).nonlocals
closure_vars.update(inspect.getclosurevars(func).globals)
return source_to_op(src, args, func.__globals__, closure_vars)
from .runtime import _enter_hybrid_runtime, _restore_runtime
intersect = _enter_hybrid_runtime(func)
value = func(*args, **kwargs)
_restore_runtime(func, intersect)
return value
return decorate(pyfunc, wrapped_func)
def build(sch, inputs, outputs, name="hybrid_func"):
"""Dump the current schedule to hybrid module
Parameters
----------
sch: tvm.te.Schedule
The schedule to be dumped
inputs: An array of Tensors or Vars
The inputs of the function body
outputs: An array of Tensors
The outputs of the function body
Returns
-------
module: HybridModule
The built result is wrapped in a HybridModule.
The usage of HybridModule is roughly the same as normal TVM-built modules.
"""
sch = sch.normalize()
bounds = tvm.te.schedule.InferBound(sch)
stmt = tvm.te.schedule.ScheduleOps(sch, bounds)
src = _Dump(stmt, inputs, outputs, name)
return HybridModule(src, name)
tvm._ffi._init_api("tvm.hybrid", __name__)
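# Illustrative usage sketch (not part of the upstream file): the same hybrid
# kernel run in emulation mode (numpy inputs) and parsed to a TE operation
# (placeholder inputs). The kernel name and shapes are made up for illustration.
if __name__ == "__main__":
    import numpy as np

    @script
    def outer_product(a, b):
        c = output_tensor((a.shape[0], b.shape[0]), "float32")
        for i in range(a.shape[0]):
            for j in range(b.shape[0]):
                c[i, j] = a[i] * b[j]
        return c

    a_np = np.random.rand(4).astype("float32")
    b_np = np.random.rand(5).astype("float32")
    print(outer_product(a_np, b_np).shape)  # emulation mode: plain numpy

    a_te = tvm.te.placeholder((4,), name="a")
    b_te = tvm.te.placeholder((5,), name="b")
    print(outer_product(a_te, b_te))  # parsing mode: a te.Tensor backed by a HybridOp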
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/calls.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Intrinsics of TVM-Python Hybrid Script for Python compilation time
semantic support."""
from tvm.runtime import const, convert
import tvm.te
from tvm.ir.container import Array
from tvm.target import Target
from tvm.tir import expr as _expr
from tvm.tir import call_intrin
from tvm.tir.stmt import ForKind
from .utils import _internal_assert
# pylint: disable=redefined-builtin,invalid-name
LOOP_INTRIN = {
"range": ForKind.SERIAL,
"unroll": ForKind.UNROLLED,
"parallel": ForKind.PARALLEL,
"vectorize": ForKind.VECTORIZED,
"const_range": (ForKind.UNROLLED,),
}
def _range(annotation, args):
"""Handling TVM loop types"""
n = args.__len__()
if n == 1:
low, ext = const(0, dtype="int32"), args[0]
else:
_internal_assert(n == 2, "A loop intrinsic should only have 1 or 2 arguments!")
low, ext = args[0], args[1]
if not tvm.tir.analysis.expr_deep_equal(low, const(0, dtype="int32")):
ext = ext - low
kind = LOOP_INTRIN[annotation]
iter_var = None
return iter_var, low, ext, kind
range = unroll = vectorize = parallel = const_range = _range # pylint: disable=invalid-name
def bind(func_id, args):
"""Handling TVM thread binding"""
_internal_assert(func_id == "bind", "This function cannot be directly invoked!")
_internal_assert(args.__len__() == 2, "A loop bind should only have 2 arguments!")
_internal_assert(isinstance(args[0], str), "A loop bind's first argument should be a string!")
low, ext = const(0, "int32"), args[1]
iter_var = tvm.te.thread_axis((low, ext), args[0])
kind = None
return iter_var, low, ext, kind
def _math_intrin(func_id, args):
# pylint: disable=import-outside-toplevel
from tvm.tir import op
return getattr(op, func_id)(*args)
sqrt = (
log
) = exp = tanh = sigmoid = power = popcount = round = _math_intrin # pylint: disable=invalid-name
def _min_max(func_id, args):
_internal_assert(args.__len__() == 2, "Max/Min function should have 2 elements")
return getattr(_expr, func_id.title())(args[0], args[1])
min = max = _min_max # pylint: disable=invalid-name
def _allocate_tensor(func_id, args):
"""Handling TVM tensor allocation.
You may refer to hybrid.intrin.allocate for more details."""
n = args.__len__()
_internal_assert(
isinstance(convert(args[0]), Array), "allocate's first argument should be a tuple of shape!"
)
shape = args[0]
for i in shape:
_internal_assert(isinstance(i, _expr.PrimExpr), "The shape should be an expression")
if n > 1:
_internal_assert(isinstance(args[1], str), "The data type should be a str")
_internal_assert(
args[1].startswith("int") or args[1].startswith("float"),
"The data type should be either int or float!",
)
dtype = args[1]
else:
dtype = "float32"
if n > 2:
_internal_assert(isinstance(args[2], str), "The data scope should be a string")
_internal_assert(func_id != "output_tensor", "Output tensor cannot specify scope")
scope = args[2]
else:
scope = "global" if func_id != "output_tensor" else "output"
return (shape, dtype, scope)
output_tensor = allocate = _allocate_tensor # pylint: disable=invalid-name
def len(func_id, args):
"""Iterpret the len function"""
_internal_assert(args.__len__() == 1, "Only 1 argument is expected!")
_internal_assert(func_id == "len", "This function cannot be directly invoked!")
try:
return convert(args[0].__len__())
except: # pylint: disable=bare-except
_internal_assert(args[0].shape.__len__() == 1, "Only one-dimension array can get len")
return convert(args[0].shape[0])
def _cast(func_id, args):
_internal_assert(
args.__len__() == 1 and isinstance(args[0], _expr.PrimExpr),
"Only one expression can be cast",
)
return _expr.Cast(func_id, args[0])
float16 = float32 = float64 = _cast # pylint: disable=invalid-name
int8 = int16 = int32 = int64 = _cast # pylint: disable=invalid-name
uint8 = uint16 = uint32 = uint64 = _cast # pylint: disable=invalid-name
def ceil_div(func_id, args):
_internal_assert(func_id == "ceil_div", "This function cannot be directly invoked!")
_internal_assert(args.__len__() == 2, "2 arguments expected for division!")
_internal_assert(isinstance(args[0], _expr.PrimExpr), "Only expressions can div")
_internal_assert(isinstance(args[1], _expr.PrimExpr), "Only expressions can div")
a, b = args[0], args[1]
return (a + b - 1) // b
def likely(func_id, args):
_internal_assert(args.__len__() == 1, "Only one expression can be likely")
_internal_assert(func_id == "likely", "This function cannot be directly invoked!")
return call_intrin(args[0].dtype, "tir.likely", *args)
def max_num_threads(func_id, args):
"""Set the maximum number of threads."""
_internal_assert(func_id == "max_num_threads", "This function cannot be directly invoked!")
_internal_assert(args.__len__() <= 1, "At most one argument accepted!")
if args.__len__() == 0:
res = Target.current().max_num_threads
else:
_internal_assert(isinstance(args[0], _expr.IntImm), "In tvm bool should be uint")
res = Target.current(args[0].value).max_num_threads
return convert(res)
def inf(func_id, args):
"""Infinity"""
_internal_assert(func_id == "inf", "This function cannot be directly invoked!")
_internal_assert(args.__len__() == 1, "One argument accepted!")
return tvm.tir.max_value(args[0])
def ninf(func_id, args):
"""Negative infinity"""
_internal_assert(func_id == "ninf", "This function cannot be directly invoked!")
_internal_assert(args.__len__() == 1, "One argument accepted!")
return tvm.tir.min_value(args[0])
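# Illustrative sketch (not part of the upstream file): how a loop intrinsic used
# in hybrid script, e.g. `unroll(8)`, is mapped by the handlers above onto the
# (iter_var, low, extent, kind) tuple consumed by the parser.
if __name__ == "__main__":
    iter_var, low, ext, kind = _range("unroll", [const(8, dtype="int32")])
    print(iter_var, low, ext, kind)  # None, 0, 8, ForKind.UNROLLED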
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Methods and data structures to support dumping HalideIR to Hybrid Script.
This allows users to make quick hacks on the generated HalideIR and cast it back
into TVM modules.
To enable this feature, you need to build with -DUSE_HYBRID_DUMP=ON.
"""
import ast
from tvm.contrib import utils
from .utils import _internal_assert
from .utils import _is_tvm_arg_types
from .parser import source_to_op
class HybridModule(object):
"""The usage of Hybrid Module is very similar to conventional TVM module,
but conventional TVM module requires a function body which is already fully
lowered. This contradicts to the fact that Hybrid Module is originally a text
format for Phase 0 HalideIR. Thus, a totally separated module is defined."""
def __init__(self, src=None, name=None):
"""The constructor of this a hybrid module
Parameters
----------
src : str
The source code of this module
name : str
The name of this module
"""
self.src_ = self.name = self.func_ = self.root_ = None
if src is not None:
temp = utils.tempdir()
dst = temp.relpath("script.py")
with open(dst, "w") as f:
f.write("import tvm\[email protected]\n%s" % src)
if name is not None:
self.name = name
self.load(dst)
def __call__(self, *args):
if _is_tvm_arg_types(args):
return source_to_op(self.root_, args, globals(), {})
return self.func_(*args)
def get_source(self):
return self.src_
def save(self, path):
if not path.endswith(".py"):
path = path + ".py"
with open(path, "w") as f:
f.write(self.src_)
def load(self, path):
"""Load the module from a python file
Parameters
----------
path : str
Path to the given python file
"""
with open(path, "r") as f:
self.src_ = f.read()
src = self.src_
class FindFunc(ast.NodeVisitor):
"""Find the function in module to be loaded module."""
# pylint: disable=invalid-name
def __init__(self):
self.name = None
self.root = None
def visit_FunctionDef(self, node):
_internal_assert(self.name is None, "For now, only one function supported!")
self.name = node.name
_internal_assert(self.root is None, "For now, only one function supported!")
self.root = node
root = ast.parse(src)
finder = FindFunc()
finder.visit(root)
_internal_assert(finder.name is not None and finder.root is not None, "No function found!")
if self.name is None:
self.name = finder.name
self.root_ = finder.root
_, local_ = {}, {}
exec(self.src_, _, local_) # pylint: disable=exec-used
local_.pop("tvm")
assert len(local_) == 1
self.func_ = list(local_.values())[0]
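# Illustrative sketch (not part of the upstream file): constructing a HybridModule
# directly from hybrid-script source text. The kernel below is made up for
# illustration; it is only loaded here, not executed.
if __name__ == "__main__":
    SRC = (
        "def add_one(a):\n"
        "    b = output_tensor((10,), 'float32')\n"
        "    for i in range(10):\n"
        "        b[i] = a[i] + 1.0\n"
        "    return b\n"
    )
    module = HybridModule(SRC, name="add_one")
    print(module.name)
    print(module.get_source())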
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hybrid Script Parser"""
import ast
import operator
import logging
import sys
import types
import numbers
from enum import Enum
from tvm.ir import Array, Range
import tvm.runtime
import tvm.tir
import tvm.te
import tvm.te._ffi_api
import tvm.arith
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from tvm.te.tensor import Tensor, Operation
from tvm.tir import all as _all
from tvm.tir import any as _any
from .utils import _internal_assert
from . import calls
from . import utils
from .preprocessor import determine_variable_usage
def concat_list_to_block(lst):
"""Concatenate a list of Python IR nodes to HalideIR Block"""
if not lst:
return utils.make_nop()
n = len(lst)
if n == 1:
return lst[0]
return _stmt.SeqStmt(lst)
def visit_list_to_block(visit, lst):
"""Visit and concatenate a list of Python IR nodes to HalideIR Block"""
lst = [visit(stmt) for stmt in lst if not utils.is_docstring(stmt)]
lst = [stmt for stmt in lst if not tvm.ir.structural_equal(stmt, utils.make_nop())]
if not lst:
return utils.make_nop()
return concat_list_to_block(lst)
class Symbol(Enum):
"""Enumerates types in the symbol table"""
Callable = 0
Input = 1
OutputBuffer = 2
GlobalBuffer = 3
LocalBuffer = 4
SharedBuffer = 5
ConstVar = 6
BufferVar = 7
LoopVar = 8
ConstLoopVar = 9
ThreadBind = 10
def _floordiv(x, y):
if isinstance(x, _expr.ExprOp) or isinstance(y, _expr.ExprOp):
return tvm.tir.floordiv(x, y)
return operator.floordiv(x, y)
def _floormod(x, y):
if isinstance(x, _expr.ExprOp) or isinstance(y, _expr.ExprOp):
return tvm.tir.floormod(x, y)
return operator.mod(x, y)
class HybridParser(ast.NodeVisitor):
"""Python AST visitor pass which finally lowers it to HalideIR"""
_binop_maker = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.div if sys.version_info[0] == 2 else operator.truediv,
ast.FloorDiv: _floordiv,
ast.Mod: _floormod,
ast.BitOr: operator.or_,
ast.BitAnd: operator.and_,
ast.BitXor: operator.xor,
ast.Gt: operator.gt,
ast.GtE: operator.ge,
ast.Lt: operator.lt,
ast.LtE: operator.le,
ast.Eq: operator.eq,
ast.NotEq: operator.ne,
ast.And: _all,
ast.Or: _any,
}
_unaryop_maker = {ast.USub: operator.neg, ast.Invert: operator.invert, ast.Not: operator.not_}
def __init__(self, args, usage, symbols, closure_vars, func_name=None):
"""
Parameters
----------
args: A list of tvm.te.placeholder or te.var
Provided by the user, the argument list of the function to be lowered.
usage: A dict of the variables used in this function
Provided by the previous pass, which collects this information
symbols : list of str
The symbol list of the global context of the function.
closure_vars: dict
A dict of external name reference captured by this function.
func_name: str, optional
The name of the function to be lowered; if not provided,
the compiler will use the name in the AST.
"""
self.args = list(args)
self.usage = usage.copy()
self.symbols = {} # Symbol table
for k, v in symbols.items():
if isinstance(v, types.FunctionType):
self.add_symbol(k, Symbol.Callable, v)
self.closure_vars = closure_vars
self.binds = {} # Thread binds
self.device = 0 # Is it generating device
self.func_name = func_name # The name of the function to be lowered
self.outputs = [] # Output tensors' name
self.side_effect = set() # Tensors with side effects
self.parsed_body = None # The parsed HalideIR body
self.analyzer = tvm.arith.Analyzer()
self.returned = False # If this function has a valid return
def add_symbol(self, key, ty, val): # pylint: disable=invalid-name
"""Add value to the symbol table context"""
if key in self.symbols.keys():
old = str(self.symbols[key])
new = str((ty, val))
_internal_assert(
False, "Name conflict in symbol table! [%s] %s -> %s" % (key, old, new)
)
self.symbols[key] = ty, val
if ty == Symbol.ThreadBind:
if val.var.name not in self.binds.keys():
self.binds[val.var.name] = val
return
val_ = self.binds[val.var.name]
_internal_assert(
tvm.tir.analysis.expr_deep_equal(val_.dom.extent, val.dom.extent),
"Thread extents should be uniform!",
)
self.symbols[key] = ty, val_
def wrap_up_realize(self, node, body):
"""Wrap up all the variables which will no longer be used"""
to_pop = []
for key, val in self.usage.items():
_, level, _ = val
if key not in self.symbols:
# don't realize the symbols that are never visited
continue
if level != node:
continue
_internal_assert(key in self.symbols.keys(), "Unknown symbol %s!" % key)
ty, entry = self.symbols[key] # pylint: disable=invalid-name
if ty in [Symbol.Input, Symbol.OutputBuffer]:
continue
if "Buffer" in ty.name:
_buf = entry
_scope = "global" if ty is Symbol.BufferVar else ty.name[:-6].lower()
to_pop.append(key)
else:
continue
if _scope == "global":
body = self.wrap_up_binds(body)
_domain = [Range.from_min_extent(0, i) for i in _buf.shape]
_dtype = _buf.dtype
_true = tvm.runtime.convert(True)
body = tvm.tir.ProducerRealize(_buf, _domain, _true, body, tvm.runtime.convert(_scope))
for elem in to_pop:
self.symbols.pop(elem)
return body
def wrap_up_binds(self, body):
for _, iter_var in self.binds.items():
ext = iter_var.dom.extent
body = tvm.tir.AttrStmt(iter_var, "thread_extent", ext, body)
self.binds = {}
return body
# pylint: disable=invalid-name, missing-docstring
def visit_Module(self, node):
_internal_assert(
len(node.body) == 1, "Only one-function source code will be fed to this parser!"
)
return self.visit(node.body[0])
def visit_FunctionDef(self, node):
_internal_assert(
len(node.args.args) == len(self.args),
"The number of arguments passed to the \
function should be the same as it is defined!",
)
if self.func_name is None:
self.func_name = node.name
for idx, arg in enumerate(node.args.args):
_attr = "id" if sys.version_info[0] < 3 else "arg" # To make py2 and 3 compatible
self.add_symbol(getattr(arg, _attr), Symbol.Input, self.args[idx])
res = visit_list_to_block(self.visit, node.body)
res = self.wrap_up_realize(node, res)
return self.wrap_up_binds(res)
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Name(self, node):
name = node.id
if sys.version_info[0] == 2 and name in ["True", "False"]:
return tvm.runtime.convert(ast.literal_eval(name))
if name in self.closure_vars:
return tvm.runtime.convert(self.closure_vars[name])
_internal_assert(name in self.symbols, "Unknown symbol %s!" % name)
ty, entry = self.symbols[name]
if ty in [Symbol.LoopVar, Symbol.Input, Symbol.ConstLoopVar]:
return entry
if ty is Symbol.ThreadBind:
return entry.var
if ty is Symbol.ConstVar:
return entry if isinstance(node.ctx, ast.Load) else None
if ty is Symbol.BufferVar:
if isinstance(node.ctx, ast.Load):
return tvm.tir.ProducerLoad(entry, [tvm.runtime.const(0, "int32")])
return entry, [tvm.runtime.const(0, "int32")]
# Do I need any assertion here?
return entry
def visit_Num(self, node):
if isinstance(node.n, numbers.Integral):
dtype = "int32"
elif isinstance(node.n, float):
dtype = "float32"
else:
_internal_assert(
isinstance(node.n, bool), "The data type should be one of (int, float, bool)"
)
dtype = "bool"
return tvm.runtime.const(node.n, dtype)
def visit_NameConstant(self, node):
return tvm.runtime.convert(node.value)
def visit_AugAssign(self, node):
buf = self.visit(node.target)
rhs = self.visit(node.value)
if isinstance(buf, tuple):
_internal_assert(len(buf) == 2, "LHS is supposed to be (buf, args)!")
buf, args = buf
else:
args = [tvm.runtime.const(0, "int32")]
_internal_assert(isinstance(buf, Tensor), "LHS is supposed to be Tensor!")
read = tvm.tir.ProducerLoad(buf, args)
value = HybridParser._binop_maker[type(node.op)](read, rhs)
return tvm.tir.ProducerStore(buf, value, args)
def visit_Assign(self, node):
rhs = self.visit(node.value)
if isinstance(rhs, Operation):
rmap = {}
_internal_assert(
len(node.targets) == rhs.num_outputs, "Unable to detuple the outs to targets"
)
for i in range(rhs.num_outputs):
_internal_assert(
isinstance(node.targets[i], ast.Name),
"You should bind a pure name to the tensors",
)
self.add_symbol(node.targets[i].id, Symbol.GlobalBuffer, rhs.output(i))
rmap[rhs.outputs[i].op] = rhs.output(i)
return utils.replace_io(rhs.body, rmap)
_internal_assert(len(node.targets) == 1, "So far only one-valued assignment is supported!")
lhs = node.targets[0]
if isinstance(rhs, _expr.PrimExpr):
rhs = self.analyzer.simplify(rhs)
if isinstance(lhs, ast.Name):
# TODO: support defined intermediate buffer later
lhs_ = lhs
lhs = lhs.id
if lhs in self.symbols.keys():
ty, _ = self.symbols[lhs]
_internal_assert(ty != Symbol.LoopVar, "Loop variable cannot be overwritten!")
decl, _, rw = self.usage[lhs]
if decl == lhs_:
_internal_assert(
lhs not in self.symbols.keys(),
"This value should not be defined before this point!",
)
if isinstance(rhs, tuple):
shape, dtype, scope = rhs
ph = tvm.te.placeholder(shape, dtype=dtype, name=lhs)
self.add_symbol(lhs, getattr(Symbol, scope.title() + "Buffer"), ph)
if scope == "output":
self.outputs.append(lhs)
return utils.make_nop()
if isinstance(rhs, utils.halide_imm_types) and ast.Store not in rw:
self.add_symbol(lhs, Symbol.ConstVar, rhs)
else:
_internal_assert(
self.device == 0,
"Single variable not supported in devices' side!\n"
+ "If you are using GPU, please allocate a 'local' spad "
+ "outside the bind body",
)
ph = tvm.te.placeholder((1,), dtype=rhs.dtype, name=lhs)
self.add_symbol(lhs, Symbol.BufferVar, ph)
lhs = self.visit(lhs_)
if lhs is not None:
buf, args = lhs
return tvm.tir.ProducerStore(buf, rhs, args)
return utils.make_nop()
lhs, args = self.visit(lhs)
_internal_assert(
isinstance(lhs, Tensor), "An array access's LHS is expected to be a Tensor!"
)
res = tvm.tir.ProducerStore(lhs, rhs, args)
return res
def visit_Index(self, node):
if isinstance(node.value, ast.Tuple):
return self.visit(node.value)
return [self.visit(node.value)]
def visit_Attribute(self, node):
buf = self.visit(node.value)
return getattr(buf, node.attr)
def visit_Subscript(self, node):
args = self.visit(node.slice)
if sys.version_info >= (3, 9):
if not isinstance(node.slice, ast.Tuple):
args = [args]
arr = self.visit(node.value)
if isinstance(arr, Array):
for i in args:
if isinstance(i, numbers.Integral):
arr = arr[i]
else:
_internal_assert(
isinstance(i, (_expr.IntImm,)), "All indices are supposed to be constants"
)
arr = arr[i.value]
return arr
if isinstance(node.ctx, ast.Load):
return tvm.tir.ProducerLoad(arr, args)
return arr, args
def visit_With(self, node):
if sys.version_info[0] < 3:
context = node.context_expr
option = node.optional_vars
else:
_internal_assert(len(node.items) == 1, "Only one with element is supported so far!")
context = node.items[0].context_expr
option = node.items[0].optional_vars
_internal_assert(isinstance(context, ast.Call), "The object must be a Python func call!")
_internal_assert(isinstance(option, ast.Name), "The object after 'as' must be an id!")
self.annotation[option.id] = context.func.id
return visit_list_to_block(self.visit, node.body)
def visit_If(self, node):
cond = self.analyzer.simplify(self.visit(node.test))
# Return no IfThenElse if proven
if isinstance(cond, _expr.IntImm):
if cond.value:
return visit_list_to_block(self.visit, node.body)
if node.orelse:
return visit_list_to_block(self.visit, node.orelse)
return utils.make_nop()
if_body = visit_list_to_block(self.visit, node.body)
if node.orelse:
else_body = visit_list_to_block(self.visit, node.orelse)
else:
else_body = None
return tvm.tir.IfThenElse(cond, if_body, else_body)
def visit_IfExp(self, node):
cond = self.visit(node.test)
if_body = self.visit(node.body)
else_body = self.visit(node.orelse)
return tvm.tir.Select(cond, if_body, else_body)
def visit_Compare(self, node):
_internal_assert(len(node.ops) == len(node.comparators), "#compare ops != #comparators")
ops = [self.visit(node.left)]
ops += [self.visit(i) for i in node.comparators]
res = []
for i in range(len(node.ops)):
lhs = ops[i]
rhs = ops[i + 1]
res.append(HybridParser._binop_maker[type(node.ops[i])](lhs, rhs))
return _all(*res)
def visit_BoolOp(self, node):
n = len(node.values)
if n == 1:
_internal_assert(isinstance(node.op, ast.Not), "Unary is supposed to be not!")
return operator.not_(self.visit(node.values[0]))
_internal_assert(isinstance(node.op, (ast.And, ast.Or)), "Binary is supposed to be and/or!")
values = [self.visit(i) for i in node.values]
return HybridParser._binop_maker[type(node.op)](*values)
def visit_UnaryOp(self, node):
operand = self.visit(node.operand)
return HybridParser._unaryop_maker[type(node.op)](operand)
def visit_BinOp(self, node):
lhs = self.visit(node.left)
rhs = self.visit(node.right)
return HybridParser._binop_maker[type(node.op)](lhs, rhs)
def visit_Call(self, node):
# Yet, no function pointer supported
_internal_assert(
isinstance(node.func, ast.Name), "Only calls to named functions are supported so far!"
)
func_id = node.func.id
args = [self.visit(i) for i in node.args]
# Intrinsics'
if hasattr(calls, func_id):
return getattr(calls, func_id)(func_id, args)
# Contexts'
_internal_assert(
func_id in self.symbols.keys(),
"The function called (%s) is not in the context either!" % func_id,
)
ty, entry = self.symbols[func_id]
_internal_assert(ty is Symbol.Callable, "Are you sure what you call is a function?!")
outs = entry(*args)
op = outs.op if isinstance(outs, Tensor) else outs[0].op
return op
def visit_For(self, node):
iter_var, low, ext, kind = self.visit(node.iter)
_internal_assert(
isinstance(node.target, ast.Name), "The loop iterator should be a variable!"
)
_name = node.target.id
if isinstance(kind, tuple):
low = self.analyzer.simplify(low)
ext = self.analyzer.simplify(ext)
_internal_assert(
isinstance(low, _expr.ConstExpr) and isinstance(ext, _expr.ConstExpr),
"Const range should start from a const " + "and iterate const times",
)
low, ext = low.value, ext.value
if ext > 114514:
logging.log(
logging.CRITICAL, "[Warning] Are you sure you want to unroll a large loop in Python?"
)
bodies = []
for i in range(low, low + ext):
self.add_symbol(_name, Symbol.ConstLoopVar, i)
body = visit_list_to_block(self.visit, node.body)
body = self.wrap_up_realize(node, body)
bodies.append(body)
self.symbols.pop(_name)
return concat_list_to_block(bodies)
if iter_var is None:
_internal_assert(kind is not None, "The loop iterating function parse error!")
if isinstance(ext, _expr.PrimExpr):
dtype = ext.dtype
elif isinstance(ext, int):
dtype = "int32"
else:
raise NotImplementedError(f"Unsupported type of ext: {type(ext)}")
offset = iter_var = tvm.te.var(_name, dtype=dtype)
if not tvm.tir.analysis.expr_deep_equal(low, tvm.runtime.const(0, "int32")):
offset = iter_var + low
self.add_symbol(_name, Symbol.LoopVar, offset)
_body = visit_list_to_block(self.visit, node.body)
else:
_internal_assert(kind is None, "The loop bind function parse error!")
self.add_symbol(_name, Symbol.ThreadBind, iter_var)
self.device += 1
_body = visit_list_to_block(self.visit, node.body)
self.device -= 1
_body = self.wrap_up_realize(node, _body)
if kind is None:
res = _body
else:
_internal_assert(
not isinstance(kind, tuple), "Micro expansion should be handled before!"
)
res = tvm.tir.For(iter_var, tvm.runtime.const(0, "int32"), ext, kind, _body)
self.symbols.pop(_name)
return res
def visit_Return(self, node):
_internal_assert(
all(ty != Symbol.LoopVar for ty, _ in self.symbols.values()),
"Return should not be in a loop body!",
)
ids = []
if isinstance(node.value, ast.Name):
ids = [node.value.id]
else:
_internal_assert(
isinstance(node.value, ast.Tuple),
"You should return either a single tensor or a tuple",
)
_internal_assert(
all(isinstance(i, ast.Name) for i in node.value.elts), "What do you return?"
)
ids = [i.id for i in node.value.elts]
_internal_assert(len(set(ids)) == len(ids), "Duplicated tensors in the return tuples")
if len(ids) < len(self.outputs):
logging.log(logging.CRITICAL, "[Warning] Not all the output buffers returned!")
self.outputs = [self.symbols[i][1] for i in ids]
self.returned = True
return utils.make_nop()
def visit_Tuple(self, node):
return tuple(self.visit(i) for i in node.elts)
def visit_Str(self, node):
return node.s
def visit_Assert(self, node):
test = self.visit(node.test)
mesg = tvm.runtime.convert(self.visit(node.msg))
return tvm.tir.AssertStmt(test, mesg, utils.make_nop())
def parse_python(src, args, symbols, closure_vars):
"""The helper function of calling the AST visitor
Parameters
----------
src : ast.node or str
If an ast.node, then directly lower it.
If a str, then parse it to ast and lower it.
args : list of Tensors or Vars
The argument lists to the function.
It is NOT encouraged to write a function without arguments.
It is NOT encouraged to write a function with side effect.
symbols : list of str
The symbol list of the global context of the function.
closure_vars: dict
A dict of external name reference captured by this function.
Returns
-------
parser : HybridParser
The parser instance; parser.parsed_body holds the resulting HalideIR.
"""
root = ast.parse(src) if isinstance(src, str) else src
_internal_assert(isinstance(root, ast.AST), "Expect an AST node or a source code string!")
var_usage = determine_variable_usage(root, args, symbols, closure_vars)
parser = HybridParser(args, var_usage, symbols, closure_vars)
parser.parsed_body = parser.visit(root)
_internal_assert(parser.returned, "No valid return found in the function body!")
return parser
def source_to_op(src, args, symbols, closure_vars):
"""Another level of wrapper
Parameters
----------
src : ast.node or str
If an ast.node, then directly lower it.
If a str, then parse it to ast and lower it.
args : list of Tensors or Vars
The argument lists to the function.
It is NOT encouraged to write a function without arguments.
It is NOT encouraged to write a function with side effect.
symbols : list of str
The symbol list of the global context of the function.
closure_vars: dict
A dict of external name reference captured by this function.
Returns
-------
res : list of output tensors
The result of output tensors of the formed OpNode.
"""
parser = parse_python(src, args, symbols, closure_vars)
input_tensors = []
def get_input_tensors(arg):
if isinstance(arg, Tensor):
input_tensors.append(arg)
elif isinstance(arg, Array):
for i in arg:
get_input_tensors(i)
for i in args:
get_input_tensors(i)
op = tvm.te._ffi_api.HybridOp(
parser.func_name, "HybridOp", None, input_tensors, parser.outputs, parser.parsed_body
)
res = [op.output(i) for i in range(len(parser.outputs))]
return res[0] if len(res) == 1 else res
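# Illustrative sketch (not part of the upstream file): lowering a hybrid-script
# source string straight through source_to_op. The kernel text, its name, and
# the shapes are assumptions made purely for illustration.
if __name__ == "__main__":
    example_src = (
        "def scale(a):\n"
        "    b = output_tensor((10,), 'float32')\n"
        "    for i in range(10):\n"
        "        b[i] = a[i] * 2.0\n"
        "    return b\n"
    )
    a = tvm.te.placeholder((10,), name="a")
    out = source_to_op(example_src, [a], globals(), {})
    print(out)  # a te.Tensor backed by a HybridOp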
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/preprocessor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Determines the declaration, r/w status, and last use of each variable"""
import ast
import sys
from .runtime import HYBRID_GLOBALS
from .utils import _internal_assert
class PyVariableUsage(ast.NodeVisitor):
"""The vistor class to determine the declaration, r/w status, and last use of each variable"""
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
def __init__(self, args, symbols, closure_vars):
self.status = {}
self.scope_level = []
self._args = {}
self.args = args
self.aug_assign_ = False
self.symbols = symbols
self.closure_vars = closure_vars
def visit_FunctionDef(self, node):
self.scope_level.append(node)
_internal_assert(
len(node.args.args) == len(self.args),
"#arguments passed should be the same as #arguments defined",
)
for idx, arg in enumerate(node.args.args):
_attr = "id" if sys.version_info[0] < 3 else "arg" # To make py2 and 3 compatible
self._args[getattr(arg, _attr)] = self.args[idx]
for i in node.body:
self.visit(i)
def visit_For(self, node):
_internal_assert(isinstance(node.target, ast.Name), "For's iterator should be an id")
self.visit(node.iter)
self.scope_level.append(node)
for i in node.body:
self.visit(i)
self.scope_level.pop()
def visit_Call(self, node):
# No function pointer supported so far
_internal_assert(isinstance(node.func, ast.Name), "Function call should be an id")
func_id = node.func.id
_internal_assert(
func_id
in list(HYBRID_GLOBALS.keys())
+ ["range", "max", "min", "len"]
+ list(self.symbols.keys()),
"Function call id " + func_id + " not in intrinsics' list",
)
for elem in node.args:
self.visit(elem)
def visit_AugAssign(self, node):
self.aug_assign_ = True
self.generic_visit(node)
self.aug_assign_ = False
def visit_Name(self, node):
# If it is True or False, we do not worry about it!
if sys.version_info[0] == 2 and node.id in ["True", "False"]:
return
# If it is from the argument list or loop variable, we do not worry about it!
if node.id in self._args.keys():
return
fors = [loop.target.id for loop in self.scope_level if isinstance(loop, ast.For)]
if node.id in fors:
return
# The loop variable cannot be overwritten during iteration
_internal_assert(
not isinstance(node.ctx, ast.Store) or node.id not in fors,
"Iter var cannot be overwritten",
)
if node.id not in self.status.keys():
# It is a captured value in closure
if node.id in self.closure_vars:
try:
ast.literal_eval(str(self.closure_vars[node.id]))
except ValueError:
raise ValueError("Only support capturing constant values in closure")
return
_internal_assert(isinstance(node.ctx, ast.Store), "Undeclared variable %s" % node.id)
if self.aug_assign_:
raise ValueError('"First store" cannot be an AugAssign')
self.status[node.id] = (node, self.scope_level[-1], set())
else:
decl, loop, usage = self.status[node.id]
usage.add(type(node.ctx))
_internal_assert(
loop in self.scope_level, "%s is used out of the scope it is defined!" % node.id
)
self.status[node.id] = (decl, loop, usage)
def determine_variable_usage(root, args, symbols, closure_vars):
"""The helper function for calling the dedicated visitor."""
visitor = PyVariableUsage(args, symbols, closure_vars)
visitor.visit(root)
return visitor.status
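# Illustrative sketch (not part of the upstream file): inspecting the usage table
# produced by the visitor above for a tiny hybrid-script style kernel. The source
# text and the shapes are made up for illustration.
if __name__ == "__main__":
    import tvm.te

    example_src = (
        "def add_bias(a):\n"
        "    s = 0.5\n"
        "    out = output_tensor((4,), 'float32')\n"
        "    for i in range(4):\n"
        "        out[i] = a[i] + s\n"
        "    return out\n"
    )
    root = ast.parse(example_src)
    a = tvm.te.placeholder((4,), name="a")
    status = determine_variable_usage(root, [a], {}, {})
    # Maps each locally declared variable to (declaring node, scope, usage set).
    print(sorted(status.keys()))  # ['out', 's']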
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Intrinsics of TVM-Python Hybrid Script for Python emulation runtime"""
import numpy
from tvm.target import Target
class bind(object): # pylint: disable=invalid-name
"""GPU bind software emulataion runtime."""
def __init__(self, _, ext):
self.ext = ext
def __iter__(self):
i = 0
while i < self.ext:
yield i
i += 1
def allocate(shape, dtype="float32", scope="global"): # pylint: disable=unused-argument
"""Allocate a buffer with given shape
Parameters
----------
shape: Tuple
The shape of the tensor to be allocated
dtype: string
The data type of the tensor
scope: string
The storage scope of the tensor
Returns
-------
tensor: numpy.array
The tensor allocated
"""
return numpy.zeros(shape).astype(dtype)
def rsqrt(x):
"""
Computes reciprocal of square root of x element-wise
Parameters
----------
x: Tensor
Returns
-------
res: Tensor
The result of reciprocal of square root of x
"""
return numpy.ones_like(x) / numpy.sqrt(x)
def popcount(x):
"""
Count ones in the binary representation of number x
Parameters
----------
x: Integer
The number to be counted
Returns
-------
cnt: Integer
The number of ones in the binary representation of number x
"""
cnt = 0
while x:
x -= x & -x
cnt += 1
return cnt
def sigmoid(x):
"""
Sigmoid function of x, aka 1/(1+exp(-x)).
Parameters
----------
x: a real number
Returns
-------
res: a real number
The result of sigmoid function
"""
return 1 / (1 + numpy.exp(-x))
def max_num_threads(allow_none=True):
"""Get max number of threads for GPU targets."""
return Target.current(allow_none).max_num_threads
def inf(dtype):
return numpy.iinfo(dtype).max
def ninf(dtype):
return numpy.iinfo(dtype).min
HYBRID_GLOBALS = {
"unroll": range,
"vectorize": range,
"parallel": range,
"const_range": range,
"bind": bind,
"allocate": allocate,
"output_tensor": allocate,
"sqrt": numpy.sqrt,
"rsqrt": rsqrt,
"log": numpy.log,
"tanh": numpy.tanh,
"power": numpy.power,
"exp": numpy.exp,
"sigmoid": sigmoid,
"popcount": popcount,
"round": round,
"likely": lambda cond: cond,
"uint8": numpy.uint8,
"uint16": numpy.uint16,
"uint32": numpy.uint32,
"uint64": numpy.uint64,
"int8": numpy.int8,
"int16": numpy.int16,
"int32": numpy.int32,
"int64": numpy.int64,
"float16": numpy.float16,
"float32": numpy.float32,
"float64": numpy.float64,
"ceil_div": lambda a, b: (a + b - 1) // b,
"max_num_threads": max_num_threads,
"inf": inf,
"ninf": inf,
}
def _enter_hybrid_runtime(func):
"""Put hybrid runtime variables into the global scope"""
_globals = func.__globals__
intersect = []
for elem in list(HYBRID_GLOBALS.keys()):
if elem in _globals.keys():
intersect.append((elem, _globals[elem]))
_globals[elem] = HYBRID_GLOBALS[elem]
return intersect
def _restore_runtime(func, intersect):
"""Rollback the modification caused by hybrid runtime"""
_globals = func.__globals__
for elem in list(HYBRID_GLOBALS.keys()):
_globals.pop(elem)
for k, v in intersect:
_globals[k] = v
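# Illustrative sketch (not part of the upstream file): the emulation helpers above
# behave like plain numpy/Python when a hybrid script runs outside TVM.
if __name__ == "__main__":
    print(popcount(13))                     # 0b1101 -> 3
    print(sigmoid(0.0))                     # 0.5
    print(allocate((2, 3), "int32").dtype)  # int32
    print(rsqrt(numpy.array([4.0])))        # [0.5]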
| https://github.com/zk-ml/tachikoma |
python/tvm/te/hybrid/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal utilities for parsing Python subset to TIR"""
import ast
import inspect
import logging
import sys
import numpy
import tvm.runtime
from tvm._ffi.base import numeric_types
from tvm.ir.container import Array
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from tvm.te.tensor import Tensor
# pylint: disable=invalid-name
np_arg_types = tuple(list(numeric_types) + [numpy.ndarray])
tvm_arg_types = (Tensor, Array, _expr.Var, _expr.ConstExpr)
halide_imm_types = (_expr.IntImm, _expr.FloatImm)
def _internal_assert(cond, err):
"""Simplify the code segment like if not XXX then raise an error"""
if not cond:
raise ValueError(err)
# Useful constants. To avoid runtime dependencies, we use function calls to return them.
def make_nop():
"""Returns a 'no operation' node in HalideIR."""
return _stmt.Evaluate(tvm.runtime.const(0, dtype="int32"))
def is_docstring(node):
"""Checks if a Python AST node is a docstring"""
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
def _pruned_source(func):
"""Prune source code's extra leading spaces"""
try:
lines = inspect.getsource(func).split("\n")
leading_space = len(lines[0]) - len(lines[0].lstrip(" "))
lines = [line[leading_space:] for line in lines]
return "\n".join(lines)
except IOError as err:
if sys.version_info[0] == 2 and str(err) == "could not get source code":
logging.log(
logging.CRITICAL,
"This module is not fully operated under Python2... " "Please move to Python3!",
)
raise err
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
# pylint: disable=import-outside-toplevel
from tvm.tir import stmt_functor
def replace(op):
if isinstance(op, _stmt.ProducerStore) and op.producer.op in rmap.keys():
buf = rmap[op.producer.op]
return _stmt.ProducerStore(buf, op.value, op.indices)
if isinstance(op, _expr.ProducerLoad) and op.producer.op in rmap.keys():
buf = rmap[op.producer.op]
return _expr.ProducerLoad(buf, op.indices)
return None
return stmt_functor.ir_transform(body, None, replace, ["tir.ProducerStore", "tir.ProducerLoad"])
def _is_tvm_arg_types(args):
"""Determine a list of element is either a list of tvm arguments of a list of numpy arguments.
If neither is true, raise a value error."""
if isinstance(args[0], tvm_arg_types):
for elem in args[1:]:
_internal_assert(
isinstance(elem, tvm_arg_types),
"Expecting a Var, Tensor or ConstExpr instance but %s get!" % str(type(elem)),
)
return True
_internal_assert(
isinstance(args[0], np_arg_types), "Expect a numpy type but got %s!" % str(type(args[0]))
)
for elem in args[1:]:
_internal_assert(
isinstance(elem, np_arg_types), "Expect a numpy type but got %s!" % str(type(elem))
)
return False
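# Illustrative sketch (not part of the upstream file): the argument-type dispatch
# used by the hybrid script decorator to choose between parsing (TVM arguments)
# and emulation (numpy arguments).
if __name__ == "__main__":
    from tvm import te

    print(_is_tvm_arg_types((te.placeholder((4,), name="x"),)))   # True
    print(_is_tvm_arg_types((numpy.zeros(4, dtype="float32"),)))  # False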
| https://github.com/zk-ml/tachikoma |
python/tvm/te/operation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Operation class for computation declaration."""
import inspect
# pylint: disable=invalid-name
from numbers import Integral as _Integral
from typing import List
import tvm._ffi
import tvm.arith._ffi_api
import tvm.tir
import tvm.tir._ffi_api
from tvm._ffi.base import string_types
from tvm.ir import Array
from tvm.runtime import convert
from . import _ffi_api
from . import tag as _tag
from . import tensor as _tensor
def placeholder(shape, dtype=None, name="placeholder"):
"""Construct an empty tensor object.
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
dtype: str, optional
The data type of the tensor
name: str, optional
The name hint of the tensor
Returns
-------
tensor: Tensor
The created tensor
"""
shape = (shape,) if isinstance(shape, tvm.tir.PrimExpr) else shape
dtype = "float32" if dtype is None else dtype
return _ffi_api.Placeholder(shape, dtype, name)
def compute(shape, fcompute, name="compute", tag="", attrs=None, varargs_names=None):
"""Construct a new tensor by computing over the shape domain.
The compute rule is result[axis] = fcompute(axis)
Parameters
----------
shape: Tuple of Expr
The shape of the tensor
fcompute: lambda function of indices-> value
Specifies the input source expression
name: str, optional
The name hint of the tensor
tag: str, optional
Additional tag information about the compute.
attrs: dict, optional
The additional auxiliary attributes about the compute.
varargs_names: list, optional
The names to use for each of the varargs. If not supplied, the varargs
will be called i1, i2, ...
Returns
-------
tensor: Tensor
The created tensor
"""
if _tag.TagScope.get_current() is not None:
if tag != "":
raise ValueError("nested tag is not allowed for now")
tag = _tag.TagScope.get_current().tag
shape = (shape,) if isinstance(shape, tvm.tir.PrimExpr) else shape
# for python3
shape = tuple([int(s) if isinstance(s, float) else s for s in shape])
out_ndim = len(shape)
argspec = inspect.getfullargspec(fcompute)
if len(argspec.args) == 0 and argspec.varargs is None:
arg_names = ["i%d" % i for i in range(out_ndim)]
elif argspec.varargs is not None:
# if there is a varargs, it takes the remaining dimensions of out_ndim
num_remaining_args = out_ndim - len(argspec.args)
if varargs_names is not None:
if len(varargs_names) != num_remaining_args:
raise RuntimeError(
f"Number of varargs ({num_remaining_args}) does not match number"
f"of varargs_names ({len(varargs_names)})"
)
arg_names = argspec.args + varargs_names
else:
arg_names = argspec.args + [f"i{i}" for i in range(out_ndim - len(argspec.args))]
else:
arg_names = argspec.args
# if there are fewer args than out dimensions, the remaining dimensions
# are implicitly broadcast
out_ndim = len(arg_names)
assert argspec.varkw is None, "Variable keyword arguments not supported in fcompute"
assert argspec.defaults is None, "Default arguments not supported in fcompute"
assert len(argspec.kwonlyargs) == 0, "Keyword arguments are not supported in fcompute"
if out_ndim != len(arg_names):
raise ValueError(
"Number of args to fcompute does not match dimension, "
"args=%d, dimension=%d" % (len(arg_names), out_ndim)
)
dim_var = [tvm.tir.IterVar((0, s), x, 0) for x, s in zip(arg_names, shape[:out_ndim])]
body = fcompute(*[v.var for v in dim_var])
if isinstance(body, _tensor.TensorIntrinCall):
for i, s in enumerate(shape[out_ndim:]):
var_name = "ax" + str(i)
dim_var.append(tvm.tir.IterVar((0, s), var_name, 4))
op_node = _ffi_api.TensorComputeOp(
name,
tag,
dim_var,
body.reduce_axis,
out_ndim,
body.intrin,
body.tensors,
body.regions,
body.scalar_inputs,
)
else:
if not isinstance(body, (list, tuple)):
body = [body]
body = convert(body)
op_node = _ffi_api.ComputeOp(name, tag, attrs, dim_var, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
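# Illustrative sketch (not part of the upstream file): the compute rule
# result[axis] = fcompute(axis) for a simple elementwise tensor. The symbolic
# shape and the names are made up for illustration.
if __name__ == "__main__":
    n = tvm.tir.Var("n", "int32")
    A = placeholder((n,), name="A")
    B = compute((n,), lambda i: A[i] + 1.0, name="B")
    print(B.op.body)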
def scan(init, update, state_placeholder, inputs=None, name="scan", tag="", attrs=None):
"""Construct new tensors by scanning over axis.
Parameters
----------
init: Tensor or list of Tensor
The initial condition of first init.shape[0] timestamps
update: Tensor or list of Tensor
The update rule of the scan given by symbolic tensor.
state_placeholder: Tensor or list of Tensor
The placeholder variables used by update.
inputs: Tensor or list of Tensor, optional
The list of inputs to the scan. This is not required, but can
be useful for the compiler to detect scan body faster.
name: str, optional
The name hint of the tensor
tag: str, optional
Additional tag information about the compute.
attrs: dict, optional
The additional auxiliary attributes about the compute.
Returns
-------
tensor: Tensor or list of Tensors
The created tensor or tuple of tensors contains multiple outputs.
Example
-------
.. code-block:: python
# The following code is equivalent to numpy.cumsum
m = te.var("m")
n = te.var("n")
X = te.placeholder((m, n), name="X")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: X[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t-1, i] + X[t, i])
res = tvm.te.scan(s_init, s_update, s_state, X)
"""
if _tag.TagScope.get_current() is not None:
if tag != "":
raise ValueError("nested tag is not allowed for now")
tag = _tag.TagScope.get_current().tag
if isinstance(init, _tensor.Tensor):
init = [init]
if isinstance(update, _tensor.Tensor):
update = [update]
if isinstance(state_placeholder, _tensor.Tensor):
state_placeholder = [state_placeholder]
if isinstance(inputs, _tensor.Tensor):
inputs = [inputs]
if inputs is None:
inputs = []
if len(init) != len(update) or len(init) != len(state_placeholder):
raise ValueError("init, update, state_placeholder must have same length")
axis = tvm.tir.IterVar((init[0].shape[0], update[0].shape[0]), "%s.idx" % name, 3)
op = _ffi_api.ScanOp(name, tag, attrs, axis, init, update, state_placeholder, inputs)
res = [op.output(i) for i in range(len(update))]
return res[0] if len(res) == 1 else res
def extern(
shape,
inputs,
fcompute,
name="extern",
dtype=None,
in_buffers=None,
out_buffers=None,
tag="",
attrs=None,
):
"""Compute several tensors via an extern function.
Parameters
----------
shape: tuple or list of tuples.
The shape of the outputs.
inputs: list of Tensor
The inputs
fcompute: lambda function of inputs, outputs-> stmt
Specifies the IR statement to do the computation.
See the following note for function signature of fcompute
.. note::
**Parameters**
- **ins** (list of :any:`tvm.tir.Buffer`) - Placeholder for each input
- **outs** (list of :any:`tvm.tir.Buffer`) - Placeholder for each output
**Returns**
- **stmt** (:any:`tvm.tir.Stmt`) - The statement that carries out array computation.
name: str, optional
The name hint of the tensor
dtype: str or list of str, optional
The data types of the outputs;
by default the dtype will be the same as the inputs'.
in_buffers: tvm.tir.Buffer or list of tvm.tir.Buffer, optional
Input buffers.
out_buffers: tvm.tir.Buffer or list of tvm.tir.Buffer, optional
Output buffers.
tag: str, optional
Additional tag information about the compute.
attrs: dict, optional
The additional auxiliary attributes about the compute.
Returns
-------
tensor: Tensor or list of Tensors
The created tensor, or a tuple of tensors if the operation contains multiple outputs.
Example
-------
In the code below, C is generated by calling external PackedFunc
`tvm.contrib.cblas.matmul`
.. code-block:: python
A = te.placeholder((n, l), name="A")
B = te.placeholder((l, m), name="B")
C = te.extern((n, m), [A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul",
ins[0], ins[1], outs[0], 0, 0), name="C")
"""
if _tag.TagScope.get_current() is not None:
if tag != "":
raise ValueError("nested tag is not allowed for now")
tag = _tag.TagScope.get_current().tag
shape = (shape,) if isinstance(shape, (tvm.tir.PrimExpr, _Integral)) else shape
if shape == () or isinstance(shape[0], (tvm.tir.PrimExpr, _Integral)):
shape = [shape]
if in_buffers is not None:
in_buffers = [in_buffers] if not isinstance(in_buffers, list) else in_buffers
if len(inputs) != len(in_buffers):
raise RuntimeError(
"Number of inputs and in_buffers mismatch: %d vs %d."
% (len(inputs), len(in_buffers))
)
if out_buffers is not None:
out_buffers = [out_buffers] if not isinstance(out_buffers, list) else out_buffers
if len(shape) != len(out_buffers):
raise RuntimeError(
"Number of outputs and out_buffers mismatch: %d vs %d."
% (len(shape), len(out_buffers))
)
input_placeholders = in_buffers or []
output_placeholders = out_buffers or []
types = set()
for t in inputs:
if not isinstance(t, _tensor.Tensor):
raise ValueError("expect inputs to be tensor")
if in_buffers is None:
input_placeholders.append(
tvm.tir.decl_buffer(
t.shape, t.dtype, t.op.name, elem_offset=tvm.tir.Var("elem_offset", "int32")
)
)
types.add(t.dtype)
if dtype is None:
if len(types) != 1:
raise ValueError("Cannot infer output type, please provide dtype argument")
inferred_type = types.pop()
dtype = [inferred_type for _ in shape]
if isinstance(dtype, str):
dtype = [dtype]
if out_buffers is None:
for shp, dt in zip(shape, dtype):
output_placeholders.append(
tvm.tir.decl_buffer(shp, dt, name, elem_offset=tvm.tir.Var("elem_offset", "int32"))
)
body = fcompute(input_placeholders, output_placeholders)
if isinstance(body, tvm.tir.PrimExpr):
body = tvm.tir.Evaluate(body)
if not isinstance(body, tvm.tir.Stmt):
raise ValueError(
"Function '{}' should return PrimExpr or Stmt, but it returned '{}'".format(
fcompute.__name__, type(body)
)
)
op = _ffi_api.ExternOp(name, tag, attrs, inputs, input_placeholders, output_placeholders, body)
res = [op.output(i) for i in range(len(output_placeholders))]
return res[0] if len(res) == 1 else res
def extern_primfunc(input_tensors: List[_tensor.Tensor], primfunc: tvm.tir.PrimFunc, **kwargs):
"""Compute tensors via a schedulable TIR PrimFunc
Parameters
----------
input_tensors: list of Tensor
Input tensors that map to the corresponding primfunc input params.
primfunc: PrimFunc
The TIR PrimFunc
Returns
-------
tensor: Tensor or list of Tensors
The created tensor or tuple of tensors if it contains multiple outputs.
Example
-------
In the code below, a TVMScript-defined TIR PrimFunc is inlined into
a TE ExternOp so that it can be composed with other TE tensors.
.. code-block:: python
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
@T.prim_func
def before_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
C = te.extern_primfunc([A, B], before_split)
"""
access_map = {
k: tuple(v) for k, v in tvm.arith._ffi_api.DomainTouchedAccessMap(primfunc).items()
}
in_buffers = [buf for buf, access in access_map.items() if len(access[0])]
out_buffers = [buf for buf, access in access_map.items() if len(access[1])]
assert in_buffers, "PrimFunc has no input buffers"
assert out_buffers, "PrimFunc has no output buffers"
outputs = []
inplace = []
input_buffers = in_buffers
for obuf in out_buffers:
if obuf in in_buffers:
inplace.append(obuf)
else:
outputs.append(obuf)
if not outputs:
iobuf = inplace.pop()
input_buffers.remove(iobuf)
outputs = [iobuf]
assert len(input_buffers) == len(input_tensors), (
"The number of provided input_tensors does not match the number of "
"input buffers in the primfunc"
)
for tensor, buffer in zip(input_tensors, input_buffers):
# TODO(csullivan): Can a stronger comparison between Tensor<>Buffer be made?
assert len(tensor.shape) == len(buffer.shape)
for d1, d2 in zip(tensor.shape, buffer.shape):
assert d1 == d2, (
"The input_tensors provided do not match the input buffers in the "
"primfunc. Please check that the order of the input te.Tensors and the "
"order of the primfunc variables in the params list agree."
)
output = extern(
[buf.shape for buf in outputs],
input_tensors,
lambda ins, outs: primfunc.body,
in_buffers=input_buffers,
out_buffers=outputs,
**kwargs,
)
return output
def var(name="tindex", dtype="int32", span=None):
"""Create a new variable with specified name and dtype
Parameters
----------
name : str
The name
dtype : str
The data type
span : Optional[Span]
The location of this variable in the source.
Returns
-------
var : Var
The result symbolic variable.
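Example
-------
A minimal usage sketch; the names "n" and "A" are illustrative only.
.. code-block:: python
n = te.var("n")
A = te.placeholder((n, n), name="A")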
"""
return tvm.tir.Var(name, dtype, span)
def const(value, dtype="int32", span=None):
"""Create a new constant with specified value and dtype
Parameters
----------
value : Union[bool, int, float]
The constant value.
dtype : str
The data type
span : Optional[Span]
The location of this constant in the source.
Returns
-------
const : PrimExpr
The resulting constant expression.
"""
return tvm.tir.const(value, dtype, span)
def size_var(name="size", dtype="int32", span=None):
"""Create a new variable represents a tensor shape size, which is non-negative.
Parameters
----------
name : str
The name
dtype : str
The data type
span : Optional[Span]
The location of this variable in the source.
Returns
-------
var : SizeVar
The result symbolic shape variable.
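Example
-------
A minimal usage sketch; the shape variable is illustrative only.
.. code-block:: python
n = te.size_var("n")
A = te.placeholder((n,), name="A")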
"""
return tvm.tir.SizeVar(name, dtype, span)
def thread_axis(dom=None, tag="", name="", span=None):
"""Create a new IterVar to represent thread index.
Parameters
----------
dom : Range or str
The domain of iteration
When str is passed, dom is set to None and str is used as tag
tag : str, optional
The thread tag
name : str, optional
The name of the var.
span : Optional[Span]
The location of this variable in the source.
Returns
-------
axis : IterVar
The thread itervar.
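Example
-------
A minimal usage sketch, assuming ``C`` is a tensor produced by te.compute,
``s`` is its schedule, and the split factor of 64 is illustrative.
.. code-block:: python
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))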
"""
if isinstance(dom, string_types):
tag, dom = dom, None
if not tag:
raise ValueError("tag must be given as Positional or keyword argument")
name = name if name else tag
return tvm.tir.IterVar(dom, name, 1, tag, span)
def reduce_axis(dom, name="rv", thread_tag="", span=None):
"""Create a new IterVar for reduction.
Parameters
----------
dom : Range
The domain of iteration.
name : str
The name of the variable.
thread_tag : Optional[str]
The name of the thread_tag.
span : Optional[Span]
The location of this variable in the source.
Returns
-------
axis : IterVar
An iteration variable representing the value.
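Example
-------
A minimal sketch of a sum reduction; the shapes are illustrative only.
.. code-block:: python
n = te.var("n")
A = te.placeholder((n,), name="A")
k = te.reduce_axis((0, n), name="k")
B = te.compute((1,), lambda i: te.sum(A[k], axis=k), name="B")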
"""
return tvm.tir.IterVar(dom, name, 2, thread_tag, span)
def create_prim_func(ops: List[_tensor.Tensor]) -> tvm.tir.PrimFunc:
"""Create a TensorIR PrimFunc from tensor expression
Parameters
----------
ops : List[Tensor]
The source expression.
Example
-------
We define a matmul kernel using following code:
.. code-block:: python
import tvm
from tvm import te
from tvm.te import create_prim_func
import tvm.script
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
k = te.reduce_axis((0, 128), "k")
C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
func = create_prim_func([A, B, C])
print(func.script())
If we want to use TensorIR schedule to do transformations on such kernel,
we need to use `create_prim_func([A, B, C])` to create a schedulable PrimFunc.
The generated function looks like:
.. code-block:: python
@T.prim_func
def tir_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block():
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] += A[vi, vk] * B[vj, vk]
Returns
-------
func : tir.PrimFunc
The created function.
"""
if not isinstance(ops, (list, tuple, Array)):
ops = [ops]
return _ffi_api.CreatePrimFunc(ops)
| https://github.com/zk-ml/tachikoma |
python/tvm/te/schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""The computation schedule api of TVM."""
import collections
import inspect
from typing import Callable, List
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.runtime import Object, convert
from tvm.ir import container as _container
from tvm.tir import IterVar, Buffer, Var, IndexMap
from . import tensor as _tensor
from . import _ffi_api
@tvm._ffi.register_object
class Split(Object):
"""Split operation on axis."""
@tvm._ffi.register_object
class Fuse(Object):
"""Fuse operation on axis."""
@tvm._ffi.register_object
class Singleton(Object):
"""Singleton axis."""
def create_schedule(ops):
"""Create a schedule for list of ops
Parameters
----------
ops : list of Operations
The source expression.
Returns
-------
sch : schedule.Schedule
The created schedule.
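Example
-------
A minimal usage sketch; the compute definition is illustrative only.
.. code-block:: python
A = te.placeholder((128,), name="A")
B = te.compute((128,), lambda i: A[i] * 2.0, name="B")
s = te.create_schedule(B.op)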
"""
if not isinstance(ops, (list, _container.Array)):
ops = [ops]
return _ffi_api.CreateSchedule(ops)
@tvm._ffi.register_object
class Schedule(Object):
"""Schedule for all the stages."""
def __getitem__(self, k):
if isinstance(k, _tensor.Tensor):
k = k.op
if not isinstance(k, _tensor.Operation):
raise ValueError("Expect schedule key to be Tensor or Operation")
if k not in self.stage_map:
raise ValueError("Cannot find the operation %s in schedule" % (str(k)))
return self.stage_map[k]
def normalize(self):
"""Build a normalized schedule from the current schedule.
Insert the necessary rebases so that certain iter vars start from 0.
This is needed before bound inference and follow-up steps.
Returns
-------
sch : Schedule
The normalized schedule.
"""
return _ffi_api.ScheduleNormalize(self)
def create_group(self, outputs, inputs, include_inputs=False):
"""Create stage group by giving output and input boundary.
The operators between outputs and inputs are placed as members of the group.
The outputs are included in the group, while the inputs are not.
Parameters
----------
outputs : list of Tensors
The outputs of the group.
inputs : list of Tensors
The inputs of the group.
include_inputs : boolean, optional
Whether to include input operations in the group if they are used by the outputs.
Returns
-------
group : Stage
A virtual stage that represents the group; the user can use compute_at to move
the attachment point of the group.
"""
if isinstance(outputs, _tensor.Tensor):
outputs = [outputs]
if isinstance(inputs, _tensor.Tensor):
inputs = [inputs]
return _ffi_api.ScheduleCreateGroup(self, outputs, inputs, include_inputs)
def cache_read(self, tensor, scope, readers):
"""Create a cache read of original tensor for readers.
This will mutate the body of the readers.
A new cache stage will be created for the tensor.
Call this before doing any split/fuse schedule.
Parameters
----------
tensor : Tensor
The tensor to be cached.
scope : str
The scope of the cache.
readers : list of Tensor or Operation
The readers to read the cache.
Returns
-------
cache : Tensor
The created cache tensor.
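Example
-------
A minimal usage sketch, assuming ``A`` is read by the compute of ``B`` in
schedule ``s``; the "shared" scope is illustrative.
.. code-block:: python
AA = s.cache_read(A, "shared", [B])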
"""
if isinstance(readers, (_tensor.Tensor, _tensor.Operation)):
readers = [readers]
readers = [t.op if isinstance(t, _tensor.Tensor) else t for t in readers]
return _ffi_api.ScheduleCacheRead(self, tensor, scope, readers)
def cache_write(self, tensor, scope):
"""Create a cache write of original tensor, before storing into tensor.
This will mutate the body of the tensor.
A new cache stage will be created before feeding into the tensor.
This function can be used to support data layout transformation.
If there is a split/fuse/reorder on the data parallel axis of the tensor
before cache_write is called, the intermediate cache stores
the data in the layout given by the iteration order of the leaf axes.
The data will be transformed back to the original layout in the original tensor.
The user can further call compute_inline to inline the original layout and keep
the data stored in the transformed layout.
Parameters
----------
tensor : Tensor, list or tuple
The tensors to be fed into. All the tensors must be produced by one ComputeOp
scope : str
The scope of the cache.
Returns
-------
cache : Tensor
The created cache tensor.
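Example
-------
A minimal usage sketch, assuming ``B`` is an intermediate tensor in
schedule ``s``; the "local" scope is illustrative.
.. code-block:: python
BL = s.cache_write(B, "local")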
"""
return _ffi_api.ScheduleCacheWrite(self, tensor, scope)
def rfactor(self, tensor, axis, factor_axis=0):
"""Factor a reduction axis in tensor's schedule to be an explicit axis.
This will create a new stage that generates the new tensor with axis
as the first dimension. The tensor's body will be rewritten as a reduction
over the factored tensor.
Parameters
----------
tensor : Tensor
The tensor to be factored.
axis : IterVar
The reduction axis in the schedule to be factored.
factor_axis : int
The position where the new axis is placed.
Returns
-------
tfactor : Tensor or Array of Tensor
The created factored tensor.
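Example
-------
A minimal sketch of factoring a split reduction axis; the shapes and the
split factor are illustrative only.
.. code-block:: python
n = te.var("n")
A = te.placeholder((n,), name="A")
k = te.reduce_axis((0, n), name="k")
B = te.compute((1,), lambda i: te.sum(A[k], axis=k), name="B")
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)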
"""
factored = _ffi_api.ScheduleRFactor(self, tensor, axis, factor_axis)
return factored[0] if len(factored) == 1 else factored
@tvm._ffi.register_object
class Stage(Object):
"""A Stage represents schedule for one operation."""
def split(self, parent, factor=None, nparts=None):
"""Split the stage either by factor providing outer scope, or both
Parameters
----------
parent : IterVar
The parent iter var.
factor : Expr, optional
The splitting factor
nparts : Expr, optional
The number of outer parts.
Returns
-------
outer : IterVar
The outer variable of iteration.
inner : IterVar
The inner variable of iteration.
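Example
-------
A minimal usage sketch, assuming ``B`` is a tensor scheduled by ``s``;
the factor of 32 is illustrative.
.. code-block:: python
xo, xi = s[B].split(B.op.axis[0], factor=32)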
"""
if nparts is not None:
if factor is not None:
raise ValueError("Do not need to provide both outer and nparts")
outer, inner = _ffi_api.StageSplitByNParts(self, parent, nparts)
else:
if factor is None:
raise ValueError("Either nparts or factor need to be provided")
outer, inner = _ffi_api.StageSplitByFactor(self, parent, factor)
return outer, inner
def fuse(self, *args):
"""Fuse multiple consecutive iteration variables into a single iteration variable.
fused = fuse(...fuse(fuse(args[0], args[1]), args[2]),..., args[-1])
The order is from outer to inner.
Parameters
----------
args : list of IterVars
IterVars that are consecutive with each other
Returns
-------
fused : IterVar
The fused variable of iteration.
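Example
-------
A minimal usage sketch, assuming ``B`` is a 2-D compute scheduled by ``s``.
.. code-block:: python
fused = s[B].fuse(B.op.axis[0], B.op.axis[1])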
"""
fused = _ffi_api.StageFuse(self, args)
return fused
def set_scope(self, scope):
"""Set the thread scope of this stage
Parameters
----------
scope : str
The thread scope of this stage
"""
return _ffi_api.StageSetScope(self, scope)
def bind(self, ivar, thread_ivar):
"""Bind ivar to thread index thread_ivar
Parameters
----------
ivar : IterVar
The iteration to be bound to the thread.
thread_ivar : IterVar
The thread axis to be bound to.
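Example
-------
A minimal usage sketch, assuming a GPU target and that ``xo`` and ``xi``
come from a previous split of a data parallel axis of ``B``.
.. code-block:: python
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))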
"""
_ffi_api.StageBind(self, ivar, thread_ivar)
def env_threads(self, threads):
"""Mark threads to be launched at the outer scope of composed op.
Parameters
----------
threads : list of threads
The threads to be launched.
"""
if isinstance(threads, IterVar):
threads = [threads]
_ffi_api.StageEnvThreads(self, threads)
def set_store_predicate(self, predicate):
"""Set predicate under which store to the array can be performed.
Use this when there are duplicated threads doing the same store and we only
need one of them to do the store.
Parameters
----------
predicate : Expr
The guard condition for the store.
"""
_ffi_api.StageSetStorePredicate(self, predicate)
def compute_at(self, parent, scope):
"""Attach the stage at parent's scope
Parameters
----------
parent : Stage
The parent stage
scope : IterVar
The loop scope to be attached to.
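Example
-------
A minimal usage sketch, assuming ``B`` is consumed by ``C`` and ``xo`` is an
axis of ``C`` in schedule ``s``.
.. code-block:: python
s[B].compute_at(s[C], xo)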
"""
_ffi_api.StageComputeAt(self, parent, scope)
def compute_inline(self):
"""Mark stage as inline
Parameters
----------
parent : Stage
The parent stage
"""
_ffi_api.StageComputeInline(self)
def compute_root(self):
"""Attach the stage at parent, and mark it as root
Parameters
----------
parent : Stage
The parent stage
"""
_ffi_api.StageComputeRoot(self)
def reorder(self, *args):
"""reorder the arguments in the specified order.
Parameters
----------
args : list of IterVar
The axes in the desired order
"""
_ffi_api.StageReorder(self, args)
def tile(self, x_parent, y_parent, x_factor, y_factor):
"""Perform tiling on two dimensions
The final loop order from outermost to innermost is
[x_outer, y_outer, x_inner, y_inner]
Parameters
----------
x_parent : IterVar
The original x dimension
y_parent : IterVar
The original y dimension
x_factor : Expr
The stride factor on x axis
y_factor : Expr
The stride factor on y axis
Returns
-------
x_outer : IterVar
Outer axis of x dimension
y_outer : IterVar
Outer axis of y dimension
x_inner : IterVar
Inner axis of x dimension
y_inner : IterVar
Inner axis of y dimension
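Example
-------
A minimal usage sketch, assuming ``C`` is a 2-D compute scheduled by ``s``;
the tile factors are illustrative.
.. code-block:: python
xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], x_factor=8, y_factor=8)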
"""
x_outer, y_outer, x_inner, y_inner = _ffi_api.StageTile(
self, x_parent, y_parent, x_factor, y_factor
)
return x_outer, y_outer, x_inner, y_inner
def vectorize(self, var):
"""Vectorize the iteration.
Parameters
----------
var : IterVar
The iteration to be vectorized
"""
_ffi_api.StageVectorize(self, var)
def tensorize(self, var, tensor_intrin):
"""Tensorize the computation enclosed by var with tensor_intrin
Parameters
----------
var : IterVar
The iteration boundary of tensorization.
tensor_intrin : TensorIntrin
The tensor intrinsic used for computation.
"""
_ffi_api.StageTensorize(self, var, tensor_intrin)
def unroll(self, var):
"""Unroll the iteration.
Parameters
----------
var : IterVar
The iteration to be unrolled.
"""
_ffi_api.StageUnroll(self, var)
def parallel(self, var):
"""Parallelize the iteration.
Parameters
----------
var : IterVar
The iteration to be parallelized.
"""
_ffi_api.StageParallel(self, var)
def pragma(self, var, pragma_type, pragma_value=None):
"""Annotate the iteration with pragma
This will translate to a pragma_scope surrounding
the corresponding loop generated.
Useful to support experimental features and extensions.
Parameters
----------
var : IterVar
The iteration to be annotated
pragma_type : str
The pragma string to be annotated
pragma_value : Expr, optional
The pragma value to pass along the pragma
Note
----
Most pragmas are advanced/experimental features
and may be subject to change. List of supported pragmas:
- **debug_skip_region**
Force skip the region marked by the axis and turn it into no-op.
This is useful for debug purposes.
- **parallel_launch_point**
Specify to launch parallel threads outside the
specified iteration loop. By default the threads
launch at the point of parallel construct.
This pragma moves the launching point to even outer scope.
The threads are launched once and reused across multiple
parallel constructs as BSP style program.
- **parallel_barrier_when_finish**
Insert a synchronization barrier between working threads
after the specified loop iteration finishes.
- **parallel_stride_pattern**
Hint parallel loop to execute in strided pattern.
:code:`for (int i = task_id; i < end; i += num_task)`
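Example
-------
A minimal usage sketch, assuming ``xo`` is a parallelized axis of ``C`` in
schedule ``s``; the pragma chosen here is illustrative.
.. code-block:: python
s[C].parallel(xo)
s[C].pragma(xo, "parallel_launch_point")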
"""
if isinstance(pragma_value, string_types):
pragma_value = convert(pragma_value)
_ffi_api.StagePragma(self, var, pragma_type, pragma_value)
def prefetch(self, tensor, var, offset):
"""Prefetch the specified variable
Parameters
----------
tensor : Tensor
The tensor to be prefetched
var : IterVar
The loop point at which the prefetching is applied
offset : Expr
The number of iterations to be prefetched before actual execution
"""
_ffi_api.StagePrefetch(self, tensor, var, offset)
def storage_align(self, axis, factor, offset):
"""Set alignment requirement for specific axis
This ensures that stride[axis] == k * factor + offset for some k.
This is useful for setting a memory layout with a more friendly memory
access pattern. For example, we can set the alignment to
factor=2, offset=1 to avoid bank conflicts for thread access on the
higher dimension in GPU shared memory.
Parameters
----------
axis : IterVar
The axis dimension to be aligned.
factor : int
The factor in alignment specification.
offset : int
The offset in the alignment specification.
"""
_ffi_api.StageStorageAlign(self, axis, factor, offset)
def double_buffer(self):
"""Compute the current stage via double buffering.
This can only be applied to an intermediate stage.
This will double the storage cost of the current stage.
Can be useful to hide load latency.
"""
_ffi_api.StageDoubleBuffer(self)
def rolling_buffer(self):
"""Compute the current stage via rolling buffering.
This can only be applied to an intermediate stage.
This will change the storage cost of the current stage.
"""
_ffi_api.StageRollingBuffer(self)
def transform_layout(self, mapping_function: Callable[..., List[tvm.tir.PrimExpr]]):
"""Defines the layout transformation for the current stage's tensor.
The map from initial_indices to final_indices must be an
invertible affine transformation. This method may be called
more than once for a given tensor, in which case each
transformation is applied sequentially.
If the stage is a ComputeOp, then the iteration order of the
compute stage is rewritten to be a row-major traversal of the
tensor, and the new loop iteration variables are returned.
For all other stages, the loop iteration order is unmodified,
and the return value is None.
Parameters
----------
mapping_function : Callable[..., List[tvm.tir.PrimExpr]]
A callable that accepts N arguments of type tvm.tir.Var,
and outputs a list of PrimExpr. The input arguments
represent the location of a value in the current stage's
tensor, using the pre-transformation layout. The return
value of the function gives the location of that value in
the current stage's tensor, using the post-transformation
layout.
Returns
-------
new_iter_vars : Optional[List[tvm.tir.IterVar]]
If the stage is a ComputeOp, then the return will be the
updated loop iteration variables over the data array, in
the same order as the output values from the
`mapping_function`.
Otherwise, the return value is None.
Examples
--------
.. code-block:: python
# ``A`` is a tensor whose compute definition is in NHWC
# format, and should be transformed into NCHWc format.
s[A].transform_layout(
lambda n,h,w,c: [n, c//4, h, w, c%4]
)
.. code-block:: python
# ``A`` is a tensor whose compute definition is in an
# arbitrary format, and should be transformed such that
# the last index is split, with the slower-changing index
# of the split placed at the slowest changing dimension.
s[A].transform_layout(
lambda *indices, i: [i//4, *indices, i%4]
)
.. code-block:: python
# ``B`` is a tensor defined by te.compute to be a copy of
# ``A``, and should be transformed such that ``B``'s layout
# is a transpose of ``A``'s layout. The loop iteration
# that computes ``B`` will correspond to ``B``'s memory
# layout.
A = te.placeholder([n,m])
B = te.compute(A.shape, lambda i,j: A[i,j])
s = te.create_schedule(B.op)
s[B].transform_layout(lambda i,j: [j,i])
"""
ndim = len(self.op.output(0).shape)
index_map, axis_separators = IndexMap.from_func_with_separators(mapping_function, ndim=ndim)
new_iter_vars = _ffi_api.StageTransformLayout(
self, index_map.initial_indices, index_map.final_indices
)
_ffi_api.StageSetAxisSeparators(self, axis_separators)
return new_iter_vars or None
@tvm._ffi.register_object
class SpecializedCondition(Object):
"""Specialized condition to enable op specialization."""
def __init__(self, conditions):
"""Create a specialized condition.
.. note::
Conditions are represented in conjunctive normal form (CNF).
Each condition should be a simple expression, e.g., n > 16,
m % 8 == 0, etc., where n, m are tvm.Var that represents a
dimension in the tensor shape.
Parameters
----------
conditions : List of tvm.Expr
List of conditions in conjunctive normal form (CNF).
"""
if not isinstance(conditions, (list, _container.Array)):
conditions = [conditions]
self.__init_handle_by_constructor__(_ffi_api.CreateSpecializedCondition, conditions)
@staticmethod
def current():
"""Returns the current specialized condition"""
return _ffi_api.GetCurrentSpecialization()
def __enter__(self):
_ffi_api.EnterSpecializationScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.ExitSpecializationScope(self)
# Sentinel value used to indicate which groups of pre-flattening axes
# should be mapped to which post-flattening axes. Moved from
# te.AXIS_SEPARATOR to tir.IndexMap.AXIS_SEPARATOR for general use,
# maintained here for backwards compatibility.
AXIS_SEPARATOR = IndexMap.AXIS_SEPARATOR
tvm._ffi._init_api("schedule", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/te/tag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tag class for TVM operators."""
import warnings
from tvm._ffi.base import decorate
class TagScope(object):
"""Tag scope object to set tag for operators, working as context
manager and decorator both. See also tag_scope.
"""
_current = None
@classmethod
def get_current(cls):
if cls._current:
cls._current.accessed = True
return cls._current
def __init__(self, tag):
self._old_scope = None
self.tag = tag
self.accessed = False
def __enter__(self):
if TagScope._current is not None:
raise ValueError("nested op_tag is not allowed for now")
self._old_scope = TagScope._current
TagScope._current = self
return self
def __exit__(self, ptype, value, trace):
assert self._old_scope is None
if not self.accessed:
warnings.warn("Tag '%s' declared via TagScope was not used." % (self.tag,))
TagScope._current = self._old_scope
def __call__(self, fdecl):
def tagged_fdecl(func, *args, **kwargs):
with self:
return func(*args, **kwargs)
return decorate(fdecl, tagged_fdecl)
def tag_scope(tag):
"""The operator tag scope.
Parameters
----------
tag: str
The tag name.
Returns
-------
tag_scope: TagScope
The tag scope object, which can be used as a decorator or
context manager.
Example
-------
.. code-block:: python
n = te.var('n')
m = te.var('m')
l = te.var('l')
A = te.placeholder((n, l), name='A')
B = te.placeholder((m, l), name='B')
k = te.reduce_axis((0, l), name='k')
with tvm.te.tag_scope(tag='matmul'):
C = te.compute((n, m), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k))
# or use tag_scope as decorator
@tvm.te.tag_scope(tag="conv")
def compute_relu(data):
return te.compute(data.shape, lambda *i: tvm.tir.Select(data(*i) < 0, 0.0, data(*i)))
"""
return TagScope(tag)
| https://github.com/zk-ml/tachikoma |
python/tvm/te/tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor class for computation declaration."""
# pylint: disable=invalid-name
import tvm._ffi
from tvm.runtime import Object, ObjectGeneric, convert_to_object
from tvm.tir import expr as _expr, DataProducer
from . import _ffi_api
class TensorSlice(ObjectGeneric, _expr.ExprOp):
"""Auxiliary data structure for enable slicing syntax from tensor."""
def __init__(self, tensor, indices):
if not isinstance(indices, tuple):
indices = (indices,)
self.tensor = tensor
self.indices = indices
def __getitem__(self, indices):
if not isinstance(indices, tuple):
indices = (indices,)
return TensorSlice(self.tensor, self.indices + indices)
def asobject(self):
"""Convert slice to object."""
return self.tensor.__call__(*self.indices)
@property
def dtype(self):
"""Data content of the tensor."""
return self.tensor.dtype
@tvm._ffi.register_object
class TensorIntrinCall(Object):
"""Intermediate structure for calling a tensor intrinsic."""
@tvm._ffi.register_object
class Tensor(DataProducer, _expr.ExprOp):
"""Tensor object, to construct, see function.Tensor"""
def __call__(self, *indices):
ndim = self.ndim
if len(indices) != ndim:
raise ValueError(
"Need to provide %d index in tensor but %d was provided" % (ndim, len(indices))
)
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def __getitem__(self, indices):
return TensorSlice(self, indices)
def __hash__(self):
return _ffi_api.TensorHash(self)
def __eq__(self, other):
if not isinstance(other, Tensor):
if isinstance(other, _expr.ExprOp):
return _expr.EqualOp(self, other)
return False
if self.ndim == 0 and other.ndim == 0:
raise ValueError(
"Equal == comparison among rank-0 tensor is ambiguous, "
"use Tensor.equal for content expression equvalence, "
"use Tensor.same_as for exact reference comparison"
)
return _ffi_api.TensorEqual(self, other)
@property
def ndim(self):
"""Dimension of the tensor."""
return len(self.shape)
@property
def axis(self):
"""Axis of the tensor."""
return self.__getattr__("axis")
@property
def op(self):
"""The corressponding :py:class:`Operation`."""
return self.__getattr__("op")
@property
def value_index(self):
"""The output value index the tensor corresponds to."""
return self.__getattr__("value_index")
@property
def shape(self):
"""The output shape of the tensor."""
return self.__getattr__("shape")
@property
def name(self):
op = self.op
if op.num_outputs == 1:
return op.name
return "%s.v%d" % (op.name, self.value_index)
class Operation(Object):
"""Represent an operation that generates a tensor"""
def output(self, index):
"""Get the index-th output of the operation
Parameters
----------
index : int
The output index.
Returns
-------
out : Tensor
The i-th output.
"""
return _ffi_api.OpGetOutput(self, index)
@property
def num_outputs(self):
"""Number of outputs from this op."""
return _ffi_api.OpNumOutputs(self)
@property
def input_tensors(self):
"""List of input tensors to this op."""
return _ffi_api.OpInputTensors(self)
@tvm._ffi.register_object
class PlaceholderOp(Operation):
"""Placeholder operation."""
@tvm._ffi.register_object
class BaseComputeOp(Operation):
"""Compute operation."""
@property
def axis(self):
"""Represent the IterVar axis, defined when it is a ComputeOp"""
return self.__getattr__("axis")
@property
def reduce_axis(self):
"""Represent axis of reductions, only defined when it is a ComputeOp"""
return self.__getattr__("reduce_axis")
@tvm._ffi.register_object
class ComputeOp(BaseComputeOp):
"""Scalar operation."""
@tvm._ffi.register_object
class TensorComputeOp(BaseComputeOp):
"""Tensor operation."""
@tvm._ffi.register_object
class ScanOp(Operation):
"""Scan operation."""
@property
def scan_axis(self):
"""Represent the scan axis, only defined when it is a ScanOp"""
return self.__getattr__("scan_axis")
@tvm._ffi.register_object
class ExternOp(Operation):
"""External operation."""
@tvm._ffi.register_object
class HybridOp(Operation):
"""Hybrid operation."""
@property
def axis(self):
"""Represent the IterVar axis, also defined when it is a HybridOp"""
return self.__getattr__("axis")
| https://github.com/zk-ml/tachikoma |
python/tvm/te/tensor_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor intrinsics"""
import tvm._ffi
import tvm.tir
from tvm.runtime import Object, convert
from tvm.ir import Range
from .tensor import PlaceholderOp
from . import tensor as _tensor
from . import _ffi_api
def _get_region(tslice):
region = []
for idx in tslice.indices:
if isinstance(idx, slice):
assert idx.step is None
region.append(Range(idx.start, idx.stop))
else:
if isinstance(idx, tvm.tir.IterVar):
begin = idx.var
else:
begin = idx
region.append(Range.from_min_extent(begin, 1))
return region
@tvm._ffi.register_object
class TensorIntrin(Object):
"""Tensor intrinsic functions for certain computation.
See Also
--------
decl_tensor_intrin: Construct a TensorIntrin
"""
def __call__(self, *args, **kwargs):
tensors = [x.tensor for x in args if isinstance(x, _tensor.TensorSlice)]
scalar_inputs = [x for x in args if not isinstance(x, _tensor.TensorSlice)]
regions = [_get_region(x) for x in args if isinstance(x, _tensor.TensorSlice)]
reduce_axis = []
if "reduce_axis" in kwargs:
reduce_axis = kwargs["reduce_axis"]
if not isinstance(reduce_axis, (list, tuple)):
reduce_axis = [reduce_axis]
reduce_axis = convert(reduce_axis)
if scalar_inputs:
scalar_inputs = convert(scalar_inputs)
return _ffi_api.TensorIntrinCall(self, tensors, regions, reduce_axis, scalar_inputs)
def decl_tensor_intrin(
op, fcompute, name="tensor_intrin", binds=None, scalar_params=None, default_buffer_params=None
):
"""Declare a tensor intrinsic function.
Parameters
----------
op: Operation
The symbolic description of the intrinsic operation
fcompute: lambda function of inputs, outputs-> stmt
Specifies the IR statement to do the computation.
See the following note for function signature of fcompute
.. note::
**Parameters**
- **ins** (list of :any:`tvm.tir.Buffer`) - Placeholder for each input
- **outs** (list of :any:`tvm.tir.Buffer`) - Placeholder for each output
**Returns**
- **stmt** (:any:`tvm.tir.Stmt`, or tuple of three stmts)
- If a single stmt is returned, it represents the body
- If a tuple of three stmts is returned, they correspond to body,
reduce_init, and reduce_update
name: str, optional
The name of the intrinsic.
binds: dict of :any:`Tensor` to :any:`tvm.tir.Buffer`, optional
Dictionary that maps the Tensor to a Buffer which specifies the data layout
requirement of the function. By default, a new compact buffer is created
for each tensor in the argument.
scalar_params: a list of variables used by op, whose values will be passed
as scalar_inputs when the tensor intrinsic is called.
default_buffer_params: Optional[dict]
Dictionary of buffer arguments to be passed when constructing a buffer.
Returns
-------
intrin: TensorIntrin
A TensorIntrin that can be used in tensorize schedule.
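Example
-------
A minimal sketch that offloads a vector add to a hypothetical packed
function named "vadd"; the shapes and names are illustrative only.
.. code-block:: python
n = 16
x = te.placeholder((n,), name="x")
y = te.placeholder((n,), name="y")
z = te.compute((n,), lambda i: x[i] + y[i], name="z")
def intrin_func(ins, outs):
    xx, yy = ins
    zz = outs[0]
    return tvm.tir.call_packed(
        "vadd", xx.access_ptr("r"), yy.access_ptr("r"), zz.access_ptr("w")
    )
vadd = te.decl_tensor_intrin(z.op, intrin_func, name="vadd")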
"""
if not isinstance(op, _tensor.Operation):
raise TypeError("expect Operation")
inputs = op.input_tensors
binds = binds if binds else {}
tensors = list(inputs)
for i in range(op.num_outputs):
tensors.append(op.output(i))
binds_list = []
for t in inputs:
if not isinstance(t.op, PlaceholderOp):
raise ValueError("Do not yet support composition op")
default_buffer_params = {} if default_buffer_params is None else default_buffer_params
for t in tensors:
buf = (
binds[t]
if t in binds
else tvm.tir.decl_buffer(t.shape, t.dtype, t.op.name, **default_buffer_params)
)
binds_list.append(buf)
if scalar_params:
body = fcompute(binds_list[: len(inputs)], binds_list[len(inputs) :], scalar_params)
else:
body = fcompute(binds_list[: len(inputs)], binds_list[len(inputs) :])
scalar_params = []
if isinstance(body, (tvm.tir.PrimExpr, tvm.tir.Stmt)):
body = [body]
body = [tvm.tir.Evaluate(x) if isinstance(x, tvm.tir.PrimExpr) else x for x in body]
if len(body) < 3:
body += [None] * (3 - len(body))
return _ffi_api.TensorIntrin(name, op, inputs, binds_list, scalar_params, *body)
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""Utility Python functions for TVM testing"""
from .utils import *
from ._ffi_api import nop, echo, device_test, run_check_signal, object_use_count
from ._ffi_api import test_wrap_callback, test_raise_error_callback, test_check_eq_callback
from ._ffi_api import ErrorTest, FrontendTestModule, identity_cpp
from .popen_pool import initializer, after_initializer, register_ffi, call_cpp_ffi
from .popen_pool import call_py_ffi, call_cpp_py_ffi, fast_summation, slow_summation
from .popen_pool import timeout_job
from . import auto_scheduler
from . import autotvm
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.testing"""
import tvm._ffi
tvm._ffi._init_api("testing", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common functions for AOT test cases"""
import sys
import datetime
import os
import pathlib
import re
import shutil
import subprocess
import tarfile
import logging
from typing import Any, NamedTuple, Union, Optional, List, Dict
import numpy as np
import tvm
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils, graph_executor
from tvm.relay.backend import Executor, Runtime
from tvm.relay.backend.utils import mangle_module_name
from tvm.micro import export_model_library_format
from tvm.micro.testing.utils import mlf_extract_workspace_size_bytes
_LOG = logging.getLogger(__name__)
NP_TYPE_TO_C = {
"int8": "int8_t",
"uint8": "uint8_t",
"int16": "int16_t",
"uint16": "uint16_t",
"int32": "int32_t",
"uint32": "uint32_t",
"float32": "float",
}
AOT_SUCCESS_TOKEN = "AOT_TEST_SUCCESS"
AOT_FAILURE_TOKEN = "AOT_TEST_FAILURE"
class AOTTestModel(NamedTuple):
"""Class to describe a model under test
Parameters
----------
module: tvm.IRModule
IRModule to generate AOT executor for
inputs: Dict[str, np.array]
Dict of input names to value arrays
outputs: Dict[str, np.array]
Dict of output names to value arrays
output_tolerance: Optional[Union[int, float]]
Allowed tolerance of the output
name: str
Name to use for this model
params: Optional[Dict[str, np.array]]
Dict of parameter names to value arrays
extra_memory_in_bytes: int
Extra memory to allocate after planned memory
"""
module: tvm.IRModule
inputs: Dict[str, np.array]
outputs: Dict[str, np.array]
output_tolerance: Optional[Union[int, float]] = None
name: str = "default"
params: Optional[Dict[str, np.array]] = None
extra_memory_in_bytes: int = 0
class AOTCompiledTestModel(NamedTuple):
"""A compiled AOTTestModel with associated module
Parameters
----------
model: AOTTestModel
Input model to be compiled
module: tvm.runtime.Module
The compiled Module for the associated AOTTestModel
"""
model: AOTTestModel
executor_factory: tvm.relay.backend.executor_factory.AOTExecutorFactoryModule
class AOTDataLinkage(NamedTuple):
"""A compiled AOTTestModel with associated module
Parameters
----------
section: str
Named section to place data into
alignment: int
Section alignment
"""
section: str
alignment: int
class AOTTestRunner(NamedTuple):
"""Class to describe a test runner for AOT code
Parameters
----------
makefile: str
Premade Makefile to use from the AOT test folder
prologue: str
Code to prepend to the main function
epilogue: str
Code to append to the main function
includes: List[str]
Additional includes required to run the AOT test runner
parameters: Dict[str, str]
Additional parameters to pass to the make command
pass_config: Dict[str, Any]
Additional pass configuration when building the model
"""
makefile: str = "default"
prologue: str = ""
epilogue: str = ""
includes: List[str] = []
parameters: Dict[str, str] = {}
pass_config: Dict[str, Any] = {}
def _subprocess_check_log_output(cmd, cwd, logfile):
"""
This method runs a process and logs the output to both a log file and stdout
"""
_LOG.info("Execute (%s): %s", cwd, cmd)
cmd_base = cmd[0] if isinstance(cmd, (list, tuple)) else cmd.split(" ", 1)[0]
proc = subprocess.Popen(
cmd,
cwd=cwd,
shell=True,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
stdout = ""
with open(logfile, "a") as f:
msg = (
"\n"
+ "-" * 80
+ f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}: Execute ({cwd}): {cmd}\n"
+ "-" * 80
)
f.write(msg)
stdout += msg + "\n"
while True:
data = proc.stdout.readline()
stdout += data
_LOG.debug("%s: %s", cmd_base, data.rstrip("\n"))
f.write(data)
# process is done if there is no data and the result is valid
if not data: # EOF
break
proc.wait()
if proc.returncode != 0:
raise RuntimeError(f"Subprocess failed: {cmd}\nstdout:\n{stdout}")
def _mangle_name(mod_name, name):
mod_name = mangle_module_name(mod_name)
return mod_name + "_" + name
# TODO: Move to linker script with list of symbols rather than coding into source
def _emit_data_linkage(output_file, data_linkage):
if data_linkage is not None:
output_file.write(
f'__attribute__((section("{data_linkage.section}"), '
f"aligned({data_linkage.alignment}))) "
)
def _emit_main_prologue(
main_file,
custom_prologue,
workspace_bytes,
data_linkage,
compiled_models,
interface_api,
use_stack_allocator=True,
):
if use_stack_allocator:
workspace_define = f"#define WORKSPACE_SIZE ({workspace_bytes}"
if interface_api == "c":
for compiled_model in compiled_models:
model = compiled_model.model
workspace_define += f" + TVMGEN_{model.name.upper()}_WORKSPACE_SIZE"
# Add TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES because of memory alignment.
workspace_define += " + TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)\n"
main_file.write(workspace_define)
_emit_data_linkage(main_file, data_linkage)
main_file.write("static uint8_t g_aot_memory[WORKSPACE_SIZE];\n")
main_file.write("tvm_workspace_t app_workspace;\n")
main_file.write(
"""\n
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return StackMemoryManager_Allocate(&app_workspace, num_bytes, out_ptr);
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return StackMemoryManager_Free(&app_workspace,ptr);
}
"""
)
else:
# An implementation is not needed for these if the stack allocator is not used
main_file.write(
"""\n
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
return kTvmErrorFunctionCallNotImplemented;
}
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
return kTvmErrorFunctionCallNotImplemented;
}
"""
)
main_file.write(
"""\n
void TVMPlatformAbort(tvm_crt_error_t code) { exit(-1); }
void TVMLogf(const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stdout, msg, args);
va_end(args);
}\n
TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) {}
int main(){\n
"""
)
main_file.write(custom_prologue)
def _emit_main_data(main_file, input_map, output_map, mod_name):
for key in input_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(
f'#include "{_mangle_name(mod_name,"input_data")}_{sanitized_tensor_name}.h"\n'
)
for key in output_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(
f'#include "{_mangle_name(mod_name,"expected_output_data")}_'
f'{sanitized_tensor_name}.h"\n'
f'#include "{_mangle_name(mod_name,"output_data")}_'
f'{sanitized_tensor_name}.h"\n'
)
def _emit_main_device_structs(main_file, devices, mod_name):
if devices:
main_file.write(
f"struct {_mangle_name(mod_name, 'devices')} {_mangle_name(mod_name, 'devices')} = {{"
)
for device in devices:
main_file.write(f"\t.{device} = {device},\n")
main_file.write("};\n")
def _emit_main_workspace_pool_structs(main_file, workspace_pool_names, mod_name):
if workspace_pool_names and len(workspace_pool_names) > 0:
main_file.write(
f"struct {_mangle_name(mod_name, 'workspace_pools')} "
f"{_mangle_name(mod_name, 'workspace_pools')} = {{"
)
for workspace_pool_name in workspace_pool_names.keys():
main_file.write(
f"\t.{workspace_pool_name} = {workspace_pool_names[workspace_pool_name]}"
f"{workspace_pool_name},\n"
)
main_file.write("};\n")
def _emit_main_data_structs(main_file, input_map, output_map, mod_name):
main_file.write(
f"struct {_mangle_name(mod_name, 'inputs')} {_mangle_name(mod_name, 'inputs')} = {{"
)
for key in input_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(
f"\t.{sanitized_tensor_name} = "
f"{_mangle_name(mod_name, 'input_data')}_{sanitized_tensor_name},\n"
)
main_file.write("};\n")
main_file.write(
f"struct {_mangle_name(mod_name, 'outputs')} {_mangle_name(mod_name, 'outputs')} = {{"
)
for key in output_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(
f"\t.{sanitized_tensor_name} = {_mangle_name(mod_name, 'output_data')}_"
f"{sanitized_tensor_name},\n"
)
main_file.write("};\n")
def _emit_main_data_setup(main_file, input_map, output_map, mod_name):
num_outputs = len(output_map)
num_inputs = len(input_map)
main_file.write(f'void* {_mangle_name(mod_name,"inputs")}[{num_inputs}] = {{ ')
for key in input_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(f'{_mangle_name(mod_name,"input_data")}_{sanitized_tensor_name}, ')
main_file.write("};\n")
main_file.write(f'void* {_mangle_name(mod_name,"outputs")}[{num_outputs}] = {{ ')
for key in output_map:
sanitized_tensor_name = re.sub(r"\W", "_", key)
main_file.write(f'{_mangle_name(mod_name, "output_data")}_{sanitized_tensor_name}, ')
main_file.write("};\n")
def _emit_main_c_interface_call(
main_file, devices, workspace_pool_names, mod_name, use_workspace_io
):
sub_strings = list()
sub_strings.append(f'{_mangle_name(mod_name,"run")}(')
if not use_workspace_io:
sub_strings.append(f'&{_mangle_name(mod_name,"inputs")}, ')
sub_strings.append(f'&{_mangle_name(mod_name,"outputs")}, ')
if workspace_pool_names:
sub_strings.append(f'&{_mangle_name(mod_name,"workspace_pools")}, ')
if devices:
sub_strings.append(f'&{_mangle_name(mod_name,"devices")}, ')
# Remove the trailing comma and space from the last sub-string
sub_strings[-1] = sub_strings[-1][:-2]
# Add the closing bracket and newline instead
sub_strings[-1] = sub_strings[-1] + ");\n"
main_file_string = "".join(sub_strings)
main_file.write(main_file_string)
def _emit_main_fake_packed_values(main_file):
main_file.write(
"""
static DLDevice fake_device = {kDLCPU, 0};
static int64_t fake_dims = 0;
static int64_t fake_shape = {0};
"""
)
def _emit_main_packed_call(main_file, input_map, output_list, mod_name):
tensors_name = _mangle_name(mod_name, "tensors")
values_name = _mangle_name(mod_name, "values")
typeids_name = _mangle_name(mod_name, "typeids")
def fake_tensor(source, source_index, packed_index):
main_file.write(
f"""
{tensors_name}[{packed_index}].device = fake_device;
{tensors_name}[{packed_index}].data = {source}[{source_index}];
{tensors_name}[{packed_index}].shape = &fake_shape;
{tensors_name}[{packed_index}].ndim = fake_dims;
{tensors_name}[{packed_index}].byte_offset = 0;
{tensors_name}[{packed_index}].strides = NULL;
{values_name}[{packed_index}].v_handle = &{tensors_name}[{packed_index}];
"""
)
num_outputs = len(output_list)
num_inputs = len(input_map)
num_tensors = num_inputs + num_outputs
main_file.write(
f"""
DLTensor {tensors_name}[{num_tensors}];
TVMValue {values_name}[{num_tensors}];
int32_t {typeids_name}[{num_tensors}];
"""
)
for i in range(0, num_inputs):
fake_tensor(_mangle_name(mod_name, "inputs"), i, i)
for i in range(0, num_outputs):
fake_tensor(_mangle_name(mod_name, "outputs"), i, i + num_inputs)
main_file.write(
f'{_mangle_name(mod_name, "run")}({values_name}, {typeids_name}, 0, NULL, 0, NULL);\n'
)
main_file.write("\n")
def _emit_main_compare(main_file, outputs, output_tolerance, mod_name, use_interface_c=False):
for key in outputs:
sanitized_tensor_name = re.sub(r"\W", "_", key)
expected_data_name = _mangle_name(mod_name, f"expected_output_data_{sanitized_tensor_name}")
is_float_dtype = outputs[key].dtype == "float32"
comparison_function = "abs"
tolerance = output_tolerance or 0
if is_float_dtype:
comparison_function = "fabs"
tolerance = output_tolerance or 0.001
data_length_var_name = (
_mangle_name(mod_name, f"output_data_{sanitized_tensor_name}") + "_len"
)
if use_interface_c:
c_type = NP_TYPE_TO_C[str(outputs[key].dtype)]
actual_data_name = f"(({c_type}*)" + _mangle_name(
mod_name, f"outputs.{sanitized_tensor_name})"
)
else:
actual_data_name = _mangle_name(mod_name, f"output_data_{sanitized_tensor_name}")
main_file.write(
f"for (int i = 0; i<{data_length_var_name}; i++) {{\n"
f"\tif ({comparison_function}({actual_data_name}[i]-"
f"{expected_data_name}[i]) > {tolerance}) {{\n"
f'\t\tprintf("{AOT_FAILURE_TOKEN}\\n");\n'
f"\t\treturn -1;\n"
f"\t}}\n"
f"}}"
)
def _emit_main_init_memory_manager(main_file):
main_file.write("StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE);")
main_file.write("\n")
def _emit_main_epilogue(main_file, custom_epilogue):
main_file.write(custom_epilogue)
main_file.write(f'printf("{AOT_SUCCESS_TOKEN}\\n");')
main_file.write("return 0;")
main_file.write("}\n")
def _emit_main_common_includes(main_file, custom_includes):
main_file.write("#include <stdio.h>\n")
main_file.write("#include <stdarg.h>\n")
main_file.write("#include <stdlib.h>\n")
main_file.write("#include <math.h>\n")
main_file.write('#include "tvm/runtime/c_runtime_api.h"\n')
main_file.write('#include "tvm/runtime/crt/stack_allocator.h"\n')
for include in custom_includes:
main_file.write(f'#include "{include}"\n')
def _emit_main_micro_include(main_file, mod_name):
main_file.write(f"#include <{mangle_module_name(mod_name)}.h>\n")
def _create_main(
test_name,
compiled_models,
output_path,
custom_includes,
custom_prologue,
custom_epilogue,
data_linkage,
interface_api,
workspace_bytes,
use_stack_allocator=True,
use_workspace_io=False,
):
file_path = pathlib.Path(f"{output_path}/" + test_name).resolve()
# create header file
raw_path = file_path.with_suffix(".c").resolve()
with open(raw_path, "w") as main_file:
_emit_main_common_includes(main_file, custom_includes)
if interface_api == "c":
for compiled_model in compiled_models:
model = compiled_model.model
_emit_main_micro_include(main_file, model.name)
for compiled_model in compiled_models:
model = compiled_model.model
_emit_main_data(main_file, model.inputs, model.outputs, model.name)
_emit_main_prologue(
main_file,
custom_prologue,
workspace_bytes,
data_linkage,
compiled_models,
interface_api,
use_stack_allocator,
)
if use_stack_allocator:
_emit_main_init_memory_manager(main_file)
if interface_api == "c":
for compiled_model in compiled_models:
model = compiled_model.model
executor_codegen_metadata = (
compiled_model.executor_factory.executor_codegen_metadata
)
devices = compiled_model.executor_factory.get_devices()
workspace_pool_names = {}
if executor_codegen_metadata.pool_inputs:
workspace_pool_names = {
allocated_pool.pool_info.pool_name: "&"
if isinstance(
allocated_pool.pool_info, tvm.ir.memory_pools.ConstantPoolInfo
)
else ""
for allocated_pool in dict(executor_codegen_metadata.pool_inputs).values()
if not allocated_pool.pool_info.is_internal
}
_emit_main_device_structs(main_file, devices, model.name)
if not use_workspace_io:
_emit_main_workspace_pool_structs(main_file, workspace_pool_names, model.name)
_emit_main_data_structs(main_file, model.inputs, model.outputs, model.name)
_emit_main_c_interface_call(
main_file,
devices,
list(workspace_pool_names.keys()),
model.name,
use_workspace_io,
)
else:
_emit_main_fake_packed_values(main_file)
for compiled_model in compiled_models:
model = compiled_model.model
_emit_main_data_setup(main_file, model.inputs, model.outputs, model.name)
_emit_main_packed_call(main_file, model.inputs, model.outputs, model.name)
for compiled_model in compiled_models:
model = compiled_model.model
_emit_main_compare(
main_file, model.outputs, model.output_tolerance, model.name, interface_api == "c"
)
_emit_main_epilogue(main_file, custom_epilogue)
def _create_header_file(tensor_name, npy_data, output_path, data_linkage):
"""
This method generates a header file containing the data contained in the numpy array provided.
It is used to capture the tensor data (for both inputs and expected outputs)
to be bundled into the standalone application.
"""
file_path = pathlib.Path(f"{output_path}/" + tensor_name).resolve()
# create header file
raw_path = file_path.with_suffix(".h").resolve()
with open(raw_path, "w") as header_file:
header_file.write("#include <stddef.h>\n")
header_file.write("#include <stdint.h>\n")
header_file.write("#include <dlpack/dlpack.h>\n")
header_file.write(f"const size_t {tensor_name}_len = {npy_data.size};\n")
_emit_data_linkage(header_file, data_linkage)
header_file.write(f"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =")
header_file.write("{")
for i in np.ndindex(npy_data.shape):
header_file.write(f"{npy_data[i]}, ")
header_file.write("};\n\n")
def convert_to_relay(tflite_model_buf, bind_params_by_name=True):
"""Convert a tflite model buffer in a Relay module"""
# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
try:
import tflite.Model # pylint: disable=import-outside-toplevel
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite # pylint: disable=import-outside-toplevel
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except ImportError:
raise ImportError("The tflite package must be installed")
mod, params = relay.frontend.from_tflite(tflite_model)
if bind_params_by_name:
mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params)
return mod, params
def compile_models(
models: Union[List[AOTTestModel], AOTTestModel],
interface_api: str,
use_unpacked_api: bool,
workspace_byte_alignment: int = 8,
constant_byte_alignment: int = 8,
enable_op_fusion: bool = True,
pass_config: Dict[str, Any] = None,
use_runtime_executor: bool = True,
target: tvm.target.Target = tvm.target.Target("c"),
workspace_memory_pools=None,
constant_memory_pools=None,
schedule_name: str = None,
) -> List[AOTCompiledTestModel]:
"""
This method generates runtime.Modules for the tests
"""
if not isinstance(models, list):
models = [models]
runtime = Runtime("crt")
executor = Executor(
"aot",
{
"workspace-byte-alignment": workspace_byte_alignment,
"constant-byte-alignment": constant_byte_alignment,
"interface-api": interface_api,
"unpacked-api": use_unpacked_api,
},
)
config = {"tir.disable_vectorize": True}
if pass_config:
config = {**config, **pass_config}
if not enable_op_fusion:
config["relay.FuseOps.max_depth"] = 1
compiled_mods = list()
for model in models:
if schedule_name:
# Testing with deterministic schedule
task_list = autotvm.task.extract_from_program(
model.module, target=target, params=model.params
)
with tvm.autotvm.apply_fixed_config(task_list, schedule_name):
with tvm.transform.PassContext(opt_level=3, config=config):
if use_runtime_executor:
executor_factory = tvm.relay.build(
model.module,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
constant_memory_pools=constant_memory_pools,
params=model.params,
mod_name=model.name,
)
compiled_mods.append(
AOTCompiledTestModel(model=model, executor_factory=executor_factory)
)
else:
executor_factory = tvm.relay.build(
model.module,
tvm.target.Target(target, host=target),
params=model.params,
mod_name=model.name,
)
compiled_mods.append(
AOTCompiledTestModel(model=model, executor_factory=executor_factory)
)
else:
with tvm.transform.PassContext(opt_level=3, config=config):
# TODO(Mousius) - Remove once executor/runtime are fully removed from Target
if use_runtime_executor:
executor_factory = tvm.relay.build(
model.module,
target,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
constant_memory_pools=constant_memory_pools,
params=model.params,
mod_name=model.name,
)
compiled_mods.append(
AOTCompiledTestModel(model=model, executor_factory=executor_factory)
)
else:
executor_factory = tvm.relay.build(
model.module,
tvm.target.Target(target, host=target),
params=model.params,
mod_name=model.name,
)
compiled_mods.append(
AOTCompiledTestModel(model=model, executor_factory=executor_factory)
)
return compiled_mods
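# A minimal usage sketch of compile_models (not part of the harness itself). It assumes
# AOTTestModel accepts the module/inputs/outputs keyword fields referenced throughout
# this file, and uses hypothetical tensor names and shapes.
def _example_compile_models():
    x = relay.var("x", shape=(1, 8), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
    inputs = {"x": np.zeros((1, 8), dtype="float32")}
    outputs = generate_ref_data(mod, inputs)
    return compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=outputs),
        interface_api="c",
        use_unpacked_api=True,
    )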
def run_and_check(
models: List[AOTCompiledTestModel],
runner: AOTTestRunner,
interface_api: str,
debug_calculated_workspaces=False,
workspace_byte_alignment=8,
constant_byte_alignment=8,
data_linkage: AOTDataLinkage = None,
test_dir: str = None,
verbose: bool = False,
use_workspace_io: bool = False,
):
"""
    This method runs the compiled runtime.Modules in the test runner
    against the original test data and verifies the results.
"""
def run_and_check_body(base_path):
cflags = (
f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} "
f" -DTVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES={constant_byte_alignment} "
)
# The calculated workspaces will not account for stack allocator tags used for debugging
if debug_calculated_workspaces:
cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK "
base_path = os.path.abspath(base_path)
build_path = os.path.join(base_path, "build")
os.makedirs(build_path, exist_ok=True)
include_path = os.path.join(base_path, "include")
os.mkdir(include_path)
crt_root = tvm.micro.get_standalone_crt_dir()
shutil.copy2(
os.path.join(crt_root, "template", "crt_config-template.h"),
os.path.join(include_path, "crt_config.h"),
)
workspace_bytes = 0
for compiled_model in models:
model = compiled_model.model
tar_file = os.path.join(base_path, f"{model.name}.tar")
export_model_library_format(compiled_model.executor_factory, tar_file)
t = tarfile.open(tar_file)
t.extractall(base_path)
            # The C interface API does not need the compiler-generated
            # workspace size to generate the test application, because
            # the workspace size is codegen'd as a macro in
            # tvmgen_<model_name>.h.
if interface_api != "c":
workspace_bytes += mlf_extract_workspace_size_bytes(tar_file)
workspace_bytes += model.extra_memory_in_bytes
for key in model.inputs:
sanitized_tensor_name = re.sub(r"\W", "_", key)
_create_header_file(
f'{_mangle_name(model.name, "input_data")}_{sanitized_tensor_name}',
model.inputs[key],
include_path,
data_linkage,
)
for key in model.outputs:
sanitized_tensor_name = re.sub(r"\W", "_", key)
_create_header_file(
f'{_mangle_name(model.name, "output_data")}_{sanitized_tensor_name}',
np.zeros(model.outputs[key].shape, model.outputs[key].dtype),
include_path,
data_linkage,
)
_create_header_file(
f'{_mangle_name(model.name, "expected_output_data")}_{sanitized_tensor_name}',
model.outputs[key],
include_path,
data_linkage,
)
use_usmp = runner.pass_config.get("tir.usmp.enable", False)
# We only need the stack allocator if USMP is not used
use_stack_allocator = not use_usmp
_create_main(
"test.c",
models,
build_path,
runner.includes,
runner.prologue,
runner.epilogue,
data_linkage,
interface_api,
workspace_bytes,
use_stack_allocator,
use_workspace_io,
)
# Verify that compiles fine
file_dir = os.path.dirname(os.path.abspath(__file__))
makefile_dir = os.path.join(file_dir, "../../../tests/python/relay/aot")
codegen_path = os.path.join(base_path, "codegen")
makefile = os.path.join(makefile_dir, f"{runner.makefile}.mk")
fvp_dir = "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/"
# TODO(@grant-arm): Remove once ci_cpu docker image has been updated to FVP_Corstone_SSE
if not os.path.isdir(fvp_dir):
fvp_dir = "/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/"
custom_params = " ".join(
[f" {param}='{value}'" for param, value in runner.parameters.items()]
)
make_command = (
f"make -f {makefile} build_dir={build_path}"
+ f" CFLAGS='{cflags}'"
+ f" TVM_ROOT={file_dir}/../../.."
+ f" AOT_TEST_ROOT={makefile_dir}"
+ f" CODEGEN_ROOT={codegen_path}"
+ f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}"
+ f" FVP_DIR={fvp_dir}"
+ custom_params
)
compile_log_path = os.path.join(build_path, "test_compile.log")
compile_command = f"{make_command} aot_test_runner"
if verbose:
print("Compile command:\n", compile_command)
_subprocess_check_log_output(compile_command, ".", compile_log_path)
# Verify that runs fine
run_log_path = os.path.join(build_path, "test_run.log")
run_command = f"{make_command} run"
if verbose:
print("Run command:\n", run_command)
        # TODO(lhutton1) This is a quick and dirty workaround to help temporarily reduce
        # the flakiness of the tests. Will remove once #10300 and #10314 are resolved.
try:
_subprocess_check_log_output(run_command, build_path, run_log_path)
except RuntimeError as err:
print("Failed to run the module, having a second attempt...", file=sys.stderr)
print(err, file=sys.stderr)
_subprocess_check_log_output(run_command, build_path, run_log_path)
with open(run_log_path) as run_log:
assert AOT_SUCCESS_TOKEN in run_log.read()
if test_dir is None:
tmpdir = utils.tempdir()
run_and_check_body(os.path.join(tmpdir.path, "test"))
else:
run_and_check_body(test_dir)
def compile_and_run(
models: Union[List[AOTTestModel], AOTTestModel],
runner: AOTTestRunner,
interface_api: str,
use_unpacked_api: bool,
debug_calculated_workspaces: bool = False,
workspace_byte_alignment: int = 8,
constant_byte_alignment: int = 8,
enable_op_fusion: bool = True,
data_linkage: AOTDataLinkage = None,
use_runtime_executor: bool = True,
target: Union[str, tvm.target.Target, List[tvm.target.Target]] = "c",
target_opts: Dict = None,
test_dir: str = None,
verbose: bool = False,
schedule_name: str = None,
):
"""This is a wrapper API to compile and run models as test for AoT
Parameters
----------
test_dir : str
This path will contain build, codegen, include directories
verbose: bool
Prints commands to build and run AOT test runner
"""
if target_opts:
for key, val in target_opts.items():
target += f" {key}={val}"
if isinstance(target, str):
target = tvm.target.Target(target)
compiled_test_mods = compile_models(
models=models,
interface_api=interface_api,
use_unpacked_api=use_unpacked_api,
workspace_byte_alignment=workspace_byte_alignment,
constant_byte_alignment=constant_byte_alignment,
enable_op_fusion=enable_op_fusion,
pass_config=runner.pass_config,
use_runtime_executor=use_runtime_executor,
target=target,
schedule_name=schedule_name,
)
run_and_check(
models=compiled_test_mods,
runner=runner,
interface_api=interface_api,
debug_calculated_workspaces=debug_calculated_workspaces,
workspace_byte_alignment=workspace_byte_alignment,
constant_byte_alignment=constant_byte_alignment,
data_linkage=data_linkage,
test_dir=test_dir,
verbose=verbose,
)
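# An end-to-end sketch of compile_and_run, assuming the AOTTestModel and AOTTestRunner
# referenced above accept these keyword fields and that AOTTestRunner's default
# constructor is usable; tensor names and shapes are illustrative only.
def _example_compile_and_run():
    x = relay.var("x", shape=(1, 4), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([x], x + relay.const(1.0)))
    inputs = {"x": np.ones((1, 4), dtype="float32")}
    outputs = generate_ref_data(mod, inputs)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=outputs),
        runner=AOTTestRunner(),
        interface_api="c",
        use_unpacked_api=True,
    )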
def generate_ref_data(mod, input_data, params=None, target="llvm"):
"""Generate reference data through executing the relay module"""
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
lib = relay.build(mod, target=target, params=params)
lib_name = "mod.so"
temp = utils.tempdir()
lib_path = temp.relpath(lib_name)
lib.export_library(lib_path)
lib = tvm.runtime.load_module(lib_path)
grt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu()))
grt_mod.set_input(**input_data)
grt_mod.run()
output_count = grt_mod.get_num_outputs()
out = [grt_mod.get_output(i).numpy() for i in range(output_count)]
if isinstance(mod, tvm.relay.Function):
main = mod
else:
main = mod["main"]
if main.attrs is None or main.attrs["output_tensor_names"] is None:
output_tensor_names = (
["output"] if output_count == 1 else [f"output{i}" for i in range(output_count)]
)
else:
output_tensor_names = main.attrs["output_tensor_names"]
return dict(zip(output_tensor_names, out))
def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True):
"""A helper function to create a Relay IRModule with inputs
and params from a tflite file"""
with open(tflite_model_file, "rb") as f:
tflite_model_buf = f.read()
mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name)
inputs = dict()
for param in mod["main"].params:
name = str(param.name_hint)
data_shape = [int(i) for i in param.type_annotation.shape]
dtype = str(param.type_annotation.dtype)
if np.issubdtype(dtype, np.floating):
            # Since np.random.uniform only supports ranges that fit in float32,
            # the float16 range is used first and the data is scaled afterwards, if necessary.
in_min, in_max = (np.finfo("float16").min, np.finfo("float16").max)
data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype)
scale = np.finfo(dtype).min / np.finfo("float16").min
data *= scale
elif np.issubdtype(dtype, np.integer):
in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max)
data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype)
else:
raise TypeError(f"Type {dtype} not supported")
inputs[name] = data
return mod, inputs, params
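# A short usage sketch, assuming a .tflite file is available on disk
# ("model.tflite" below is a placeholder path, not an asset shipped with TVM).
def _example_tflite_reference_data(tflite_model_file="model.tflite"):
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    # Reference outputs can then be produced with generate_ref_data defined above.
    return generate_ref_data(mod, inputs, params)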
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/auto_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-function-docstring
"""Common functions for auto_scheduler test cases"""
import tvm
from tvm import auto_scheduler, te, topi
from tvm.topi.nn.winograd_util import winograd_transform_matrices
from tvm.topi.utils import get_const_tuple
@auto_scheduler.register_workload
def matmul_auto_scheduler_test(N, M, K):
A = te.placeholder((N, K), name="A")
B = te.placeholder((K, M), name="B")
k = te.reduce_axis((0, K), name="k")
C = te.compute(
(N, M),
lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]),
name="C",
attrs={"layout_free_placeholders": [B]},
)
return [A, B, C]
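# A minimal sketch of turning the registered workload above into an auto_scheduler
# search task; the target string and matrix sizes are illustrative only.
def example_matmul_search_task():
    return auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(128, 128, 128),
        target=tvm.target.Target("llvm"),
    )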
@auto_scheduler.register_workload
def double_matmul_auto_scheduler_test(N):
A = te.placeholder((N, N), name="A", dtype="float32")
B = te.placeholder((N, N), name="B", dtype="float32")
C = te.placeholder((N, N), name="C", dtype="float32")
k = te.reduce_axis((0, N), name="k")
D = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="D")
k = te.reduce_axis((0, N), name="k")
E = te.compute((N, N), lambda i, j: te.sum(D[i][k] * C[k][j], axis=[k]), name="E")
return [A, B, C, E]
@auto_scheduler.register_workload
def parallel_matmul_auto_scheduler_test(N):
"""Two parallel matmuls with shared A."""
A = te.placeholder((N, N), name="A", dtype="float32")
B = te.placeholder((N, N), name="B", dtype="float32")
C = te.placeholder((N, N), name="C", dtype="float32")
k = te.reduce_axis((0, N), name="k")
D = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="D")
k = te.reduce_axis((0, N), name="k")
E = te.compute((N, N), lambda i, j: te.sum(A[i][k] * C[k][j], axis=[k]), name="E")
return [A, B, C, D, E]
# Test for register_workload with different name
@auto_scheduler.register_workload("matmul_auto_scheduler_test_rename_1")
def matmul_auto_scheduler_test_rename_0(N, M, K):
A = te.placeholder((N, K), name="A")
B = te.placeholder((K, M), name="B")
k = te.reduce_axis((0, K), name="k")
C = te.compute((N, M), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
return [A, B, C]
@auto_scheduler.register_workload
def conv2d_nchw_bn_relu_auto_scheduler_test(
N, H, W, CI, CO, kernel_size, strides, padding, dilation=1
):
data = te.placeholder((N, CI, H, W), name="Data")
kernel = te.placeholder((CO, CI, kernel_size, kernel_size), name="Kernel")
bias = te.placeholder((CO, 1, 1), name="Bias")
bn_scale = te.placeholder((CO, 1, 1), name="Bn_scale")
bn_offset = te.placeholder((CO, 1, 1), name="Bn_offset")
OH = (H + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
OW = (W + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
conv = topi.nn.conv2d_nchw(data, kernel, strides, padding, dilation)
conv = te.compute(
(N, CO, OH, OW), lambda i, j, k, l: conv[i, j, k, l] + bias[j, 0, 0], name="Bias_add"
)
conv = te.compute(
(N, CO, OH, OW), lambda i, j, k, l: conv[i, j, k, l] * bn_scale[j, 0, 0], name="Bn_mul"
)
conv = te.compute(
(N, CO, OH, OW), lambda i, j, k, l: conv[i, j, k, l] + bn_offset[j, 0, 0], name="Bn_add"
)
out = topi.nn.relu(conv)
return [data, kernel, bias, bn_offset, bn_scale, out]
@auto_scheduler.register_workload
def max_pool2d_auto_scheduler_test(N, H, W, CI, padding):
data = te.placeholder((N, CI, H, W), name="Data")
out = topi.nn.pool2d(data, [2, 2], [1, 1], [1, 1], [padding, padding, padding, padding], "max")
return [data, out]
@auto_scheduler.register_workload
def min_nm_auto_scheduler_test(N, M):
A = te.placeholder((N, M), name="A")
B = topi.min(A, axis=-1)
return [A, B]
@auto_scheduler.register_workload
def softmax_nm_auto_scheduler_test(N, M):
A = te.placeholder((N, M), name="A")
B = topi.nn.softmax(A, axis=1)
return [A, B]
@auto_scheduler.register_workload
def softmax_abcd_auto_scheduler_test(a, b, c, d):
A = te.placeholder((a, b, c, d), name="A")
B = topi.nn.softmax(A, axis=-1)
return [A, B]
@auto_scheduler.register_workload
def invalid_compute_definition():
A = te.placeholder((10, 10), name="A")
# The names of the following two iterators are the same.
# This is invalid.
r1 = te.reduce_axis((0, 2), name="r1")
r2 = te.reduce_axis((0, 2), name="r1")
B = te.compute((10,), lambda i: te.sum(A[i][r1 + r2], axis=[r1, r2]), name="B")
return [A, B]
@auto_scheduler.register_workload
def zero_rank_reduce_auto_scheduler_test(N):
A = tvm.te.placeholder((N,), name="A")
k = tvm.te.reduce_axis((0, N), name="k")
B = tvm.te.compute((), lambda: tvm.te.sum(A[k], k), name="B")
return [A, B]
@auto_scheduler.register_workload
def zero_rank_compute_auto_scheduler_test(N):
A = tvm.te.placeholder((N,), name="A")
B = tvm.te.compute((), lambda: A[0], name="B")
return [A, B]
@auto_scheduler.register_workload
def conv2d_winograd_nhwc_auto_scheduler_test(
N, H, W, CI, CO, kernel_size=3, stride=1, padding=0, dilation=1
):
tile_size = 4
inputs = te.placeholder((N, H, W, CI), name="inputs")
N, H, W, CI = get_const_tuple(inputs.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
KH = KW = kernel_size
HPAD, WPAD, _, _ = topi.nn.get_pad_tuple(padding, (KH, KW))
HSTR, WSTR = (stride, stride) if isinstance(stride, int) else stride
assert HSTR == 1 and WSTR == 1 and KH == KW
data_pad = topi.nn.pad(inputs, (0, HPAD, WPAD, 0), (0, HPAD, WPAD, 0), name="data_pad")
r = KW
m = tile_size
alpha = m + r - 1
A, B, _ = winograd_transform_matrices(m, r, "float32")
H = (H + 2 * HPAD - KH) // HSTR + 1
W = (W + 2 * WPAD - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW
kshape = (alpha, alpha, CI, CO)
kernel_pack = te.placeholder(kshape, inputs.dtype, name="weight")
idxdiv = te.indexdiv
idxmod = te.indexmod
# pack input tile
input_tile = te.compute(
(alpha, alpha, P, CI),
lambda eps, nu, p, ci: data_pad[idxdiv(p, (nH * nW))][idxmod(idxdiv(p, nW), nH) * m + eps][
idxmod(p, nW) * m + nu
][ci],
name="input_tile",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
data_pack = te.compute(
(alpha, alpha, P, CI),
lambda eps, nu, p, ci: te.sum(
input_tile[r_a][r_b][p][ci] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
attrs={"auto_scheduler_simplify_const_tensor_indices": ["eps", "nu", "r_a", "r_b"]},
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
bgemm = te.compute(
(alpha, alpha, P, CO),
lambda eps, nu, p, co: te.sum(
data_pack[eps][nu][p][ci] * kernel_pack[eps][nu][ci][co], axis=[ci]
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
inverse = te.compute(
(m, m, P, CO),
lambda vh, vw, p, co: te.sum(
bgemm[r_a][r_b][p][co] * A[r_a][vh] * A[r_b][vw], axis=[r_a, r_b]
),
name="inverse",
attrs={"auto_scheduler_simplify_const_tensor_indices": ["vh", "vw", "r_a", "r_b"]},
)
# output
output = te.compute(
(N, H, W, CO),
lambda n, h, w, co: inverse[
idxmod(h, m), idxmod(w, m), n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m), co
],
name="conv2d_winograd",
)
return [inputs, kernel_pack, output]
def get_tiled_matmul():
"""Get a compute dag and a state for tiled matmul"""
A, B, C = matmul_auto_scheduler_test(512, 512, 512)
dag = auto_scheduler.ComputeDAG([A, B, C])
s0 = dag.get_init_state()
its0 = s0.split(C, s0[C].iters[0], [4, 8, 8])
its1 = s0.split(C, s0[C].iters[4], [8, 4, 4])
s0.reorder(
C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], its1[3], s0[C].iters[8]]
)
return dag, s0
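# A small sketch of consuming the DAG/state pair above: replay the recorded transform
# steps onto the DAG to obtain a schedule and tensor arguments (no measurement involved).
def example_lower_tiled_matmul():
    dag, s0 = get_tiled_matmul()
    sch, args = dag.apply_steps_from_state(s0)
    return tvm.lower(sch, args, simple_mode=True)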
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/autotvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-function-docstring, missing-class-docstring
"""Common utilities for testing autotvm"""
import time
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm import MeasureInput, MeasureResult
from tvm.autotvm.measure.measure import Runner
class DummyRunner(Runner):
def __init__(self):
super(DummyRunner, self).__init__(1, 1)
def run(self, measure_inputs, build_results):
return [
MeasureResult((np.random.random(),), 0, 0.2, time.time())
for _ in range(len(measure_inputs))
]
def get_build_kwargs(self):
return {}
@autotvm.template("testing/matmul")
def matmul(N, L, M, dtype):
A = te.placeholder((N, L), name="A", dtype=dtype)
B = te.placeholder((L, M), name="B", dtype=dtype)
k = te.reduce_axis((0, L), name="k")
C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
s = te.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
##### define space begin #####
cfg = autotvm.get_config()
cfg.define_split("tile_y", y, num_outputs=2)
cfg.define_split("tile_x", x, num_outputs=2)
##### define space end #####
# schedule according to config
yo, yi = cfg["tile_y"].apply(s, C, y)
# Make sure configurations have a varied number of itervars. Splitting adds
    # new itervars, so conditionally splitting will cause the number of
# itervars to depend on the tile size.
if cfg["tile_x"].size[-1] > 1:
xo, xi = cfg["tile_x"].apply(s, C, x)
s[C].reorder(yo, xo, k, yi, xi)
else:
s[C].reorder(yo, k, yi, x)
return s, [A, B, C]
@autotvm.template("testing/bad_matmul")
def bad_matmul(N, L, M, dtype):
if "bad_device" in tvm.target.Target.current().keys:
A = te.placeholder((N, L), name="A", dtype=dtype)
B = te.placeholder((L, M), name="B", dtype=dtype)
k = te.reduce_axis((0, L - 1), name="k")
C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
s = te.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
cfg = autotvm.get_config()
cfg.define_split("tile_y", y, num_outputs=2)
cfg.define_split("tile_x", x, num_outputs=2)
return s, [A, B, C]
return matmul(N, L, M, dtype)
def get_sample_task(n=128):
"""return a sample task for testing"""
target = tvm.target.Target("llvm")
task = autotvm.task.create("testing/matmul", args=(n, n, n, "float32"), target=target)
return task, target
def get_sample_records(n):
"""get sample records for testing"""
tsk, target = get_sample_task()
inps, ress = [], []
for i in range(n):
inps.append(MeasureInput(target, tsk, tsk.config_space.get(i % len(tsk.config_space))))
ress.append(MeasureResult((i + 1,), 0, i, time.time()))
return list(zip(inps, ress))
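# A minimal sketch wiring DummyRunner into autotvm's measurement options; the builder,
# tuner and trial count here are illustrative rather than a recommended setup.
def example_tune_with_dummy_runner():
    task, _ = get_sample_task()
    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=2, measure_option=measure_option)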
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/plugin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pytest plugin for using tvm testing extensions.
TVM provides utilities for testing across all supported targets, and
for parametrizing more easily across many inputs. For more information
on usage of these features, see documentation in the tvm.testing
module.
These are enabled by default in all pytests provided by tvm, but may
be useful externally for one-off testing. To enable, add the
following line to the test script, or to the conftest.py in the same
directory as the test scripts.
pytest_plugins = ['tvm.testing.plugin']
"""
import pytest
import _pytest
import tvm
from tvm.testing import utils
try:
from xdist.scheduler.loadscope import LoadScopeScheduling
HAVE_XDIST = True
except ImportError:
HAVE_XDIST = False
MARKERS = {
"gpu": "mark a test as requiring a gpu",
"tensorcore": "mark a test as requiring a tensorcore",
"cuda": "mark a test as requiring cuda",
"opencl": "mark a test as requiring opencl",
"rocm": "mark a test as requiring rocm",
"vulkan": "mark a test as requiring vulkan",
"metal": "mark a test as requiring metal",
"llvm": "mark a test as requiring llvm",
"ethosn": "mark a test as requiring ethosn",
"hexagon": "mark a test as requiring hexagon",
"corstone300": "mark a test as requiring Corstone300 FVP",
}
def pytest_configure(config):
"""Runs at pytest configure time, defines marks to be used later."""
for feature in utils.Feature._all_features.values():
feature._register_marker(config)
print("enabled targets:", "; ".join(map(lambda x: x[0], utils.enabled_targets())))
print("pytest marker:", config.option.markexpr)
def pytest_addoption(parser):
"""Add pytest options."""
parser.addoption("--gtest_args", action="store", default="")
def pytest_generate_tests(metafunc):
"""Called once per unit test, modifies/parametrizes it as needed."""
_parametrize_correlated_parameters(metafunc)
_auto_parametrize_target(metafunc)
_add_target_specific_marks(metafunc)
# Process gtest arguments
option_value = metafunc.config.option.gtest_args
if "gtest_args" in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("gtest_args", [option_value])
def pytest_collection_modifyitems(config, items):
"""Called after all tests are chosen, currently used for bookkeeping."""
# pylint: disable=unused-argument
_count_num_fixture_uses(items)
_remove_global_fixture_definitions(items)
_sort_tests(items)
@pytest.fixture
def dev(target):
"""Give access to the device to tests that need it."""
return tvm.device(target)
def pytest_sessionfinish(session, exitstatus):
# Don't exit with an error if we select a subset of tests that doesn't
# include anything
if session.config.option.markexpr != "":
if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
session.exitstatus = pytest.ExitCode.OK
def _auto_parametrize_target(metafunc):
"""Automatically applies parametrize_targets
Used if a test function uses the "target" fixture, but isn't
already marked with @tvm.testing.parametrize_targets. Intended
for use in the pytest_generate_tests() handler of a conftest.py
file.
"""
if "target" in metafunc.fixturenames:
# Check if any explicit parametrizations exist, and apply one
# if they do not. If the function is marked with either
# excluded or known failing targets, use these to determine
# the targets to be used.
parametrized_args = [
arg.strip()
for mark in metafunc.definition.iter_markers("parametrize")
for arg in mark.args[0].split(",")
]
if "target" not in parametrized_args:
excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", [])
# Add a parametrize marker instead of calling
# metafunc.parametrize so that the parametrize rewriting
# can still occur.
mark = pytest.mark.parametrize(
"target",
[
t["target"]
for t in utils._get_targets()
if t["target_kind"] not in excluded_targets
],
scope="session",
)
metafunc.definition.add_marker(mark)
def _add_target_specific_marks(metafunc):
"""Add any target-specific marks to parametrizations over target"""
def update_parametrize_target_arg(
mark,
argnames,
argvalues,
*args,
**kwargs,
):
args = [arg.strip() for arg in argnames.split(",") if arg.strip()]
if "target" in args:
target_i = args.index("target")
new_argvalues = []
for argvalue in argvalues:
if isinstance(argvalue, _pytest.mark.structures.ParameterSet):
# The parametrized value is already a
# pytest.param, so track any marks already
# defined.
param_set = argvalue.values
target = param_set[target_i]
additional_marks = argvalue.marks
elif len(args) == 1:
                    # Single-parameter parametrization: argvalues is a flat list,
                    # so argvalue is the target value itself.
target = argvalue
param_set = (target,)
additional_marks = []
else:
                    # Multiple correlated parameters: argvalues is a list of tuples,
                    # so argvalue is a tuple of values.
param_set = argvalue
target = param_set[target_i]
additional_marks = []
if mark in metafunc.definition.own_markers:
xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", [])
target_kind = target.split()[0] if isinstance(target, str) else target.kind.name
if target_kind in xfail_targets:
additional_marks.append(
pytest.mark.xfail(
reason=f'Known failing test for target "{target_kind}"'
)
)
new_argvalues.append(
pytest.param(
*param_set, marks=_target_to_requirement(target) + additional_marks
)
)
try:
argvalues[:] = new_argvalues
except TypeError as err:
pyfunc = metafunc.definition.function
filename = pyfunc.__code__.co_filename
line_number = pyfunc.__code__.co_firstlineno
msg = (
f"Unit test {metafunc.function.__name__} ({filename}:{line_number}) "
"is parametrized using a tuple of parameters instead of a list "
"of parameters."
)
raise TypeError(msg) from err
if "target" in metafunc.fixturenames:
        # Update any explicit use of @pytest.mark.parametrize to
# parametrize over targets. This adds the appropriate
# @tvm.testing.requires_* markers for each target.
for mark in metafunc.definition.iter_markers("parametrize"):
update_parametrize_target_arg(mark, *mark.args, **mark.kwargs)
def _count_num_fixture_uses(items):
# Helper function, counts the number of tests that use each cached
# fixture. Should be called from pytest_collection_modifyitems().
for item in items:
is_skipped = item.get_closest_marker("skip") or any(
mark.args[0] for mark in item.iter_markers("skipif")
)
if is_skipped:
continue
for fixturedefs in item._fixtureinfo.name2fixturedefs.values():
            # Only increment the active fixturedef, in case a name has been overridden.
fixturedef = fixturedefs[-1]
if hasattr(fixturedef.func, "num_tests_use_this_fixture"):
fixturedef.func.num_tests_use_this_fixture[0] += 1
def _remove_global_fixture_definitions(items):
# Helper function, removes fixture definitions from the global
# variables of the modules they were defined in. This is intended
# to improve readability of error messages by giving a NameError
# if a test function accesses a pytest fixture but doesn't include
# it as an argument. Should be called from
# pytest_collection_modifyitems().
modules = set(item.module for item in items)
for module in modules:
for name in dir(module):
obj = getattr(module, name)
if hasattr(obj, "_pytestfixturefunction") and isinstance(
obj._pytestfixturefunction, _pytest.fixtures.FixtureFunctionMarker
):
delattr(module, name)
def _sort_tests(items):
"""Sort tests by file/function.
By default, pytest will sort tests to maximize the re-use of
fixtures. However, this assumes that all fixtures have an equal
cost to generate, and no caches outside of those managed by
pytest. A tvm.testing.parameter is effectively free, while
reference data for testing may be quite large. Since most of the
TVM fixtures are specific to a python function, sort the test
ordering by python function, so that
tvm.testing.utils._fixture_cache can be cleared sooner rather than
later.
Should be called from pytest_collection_modifyitems.
"""
def sort_key(item):
filename, lineno, test_name = item.location
test_name = test_name.split("[")[0]
return filename, lineno, test_name
items.sort(key=sort_key)
def _target_to_requirement(target):
if isinstance(target, str):
target = tvm.target.Target(target)
# mapping from target to decorator
if target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", []):
return utils.requires_cudnn.marks()
if target.kind.name == "cuda" and "cublas" in target.attrs.get("libs", []):
return utils.requires_cublas.marks()
if target.kind.name == "cuda":
return utils.requires_cuda.marks()
if target.kind.name == "rocm":
return utils.requires_rocm.marks()
if target.kind.name == "vulkan":
return utils.requires_vulkan.marks()
if target.kind.name == "nvptx":
return utils.requires_nvptx.marks()
if target.kind.name == "metal":
return utils.requires_metal.marks()
if target.kind.name == "opencl":
return utils.requires_opencl.marks()
if target.kind.name == "llvm":
return utils.requires_llvm.marks()
if target.kind.name == "hexagon":
return utils.requires_hexagon.marks()
return []
def _parametrize_correlated_parameters(metafunc):
parametrize_needed = {}
for name, fixturedefs in metafunc.definition._fixtureinfo.name2fixturedefs.items():
fixturedef = fixturedefs[-1]
if hasattr(fixturedef.func, "parametrize_group") and hasattr(
fixturedef.func, "parametrize_values"
):
group = fixturedef.func.parametrize_group
values = fixturedef.func.parametrize_values
ids = fixturedef.func.parametrize_ids
if group in parametrize_needed:
assert ids == parametrize_needed[group]["ids"]
else:
parametrize_needed[group] = {"ids": ids, "params": []}
parametrize_needed[group]["params"].append((name, values))
for parametrize_group in parametrize_needed.values():
params = parametrize_group["params"]
ids = parametrize_group["ids"]
if len(params) == 1:
name, values = params[0]
metafunc.parametrize(name, values, indirect=True, ids=ids)
else:
names = ",".join(name for name, values in params)
value_sets = zip(*[values for name, values in params])
metafunc.parametrize(names, value_sets, indirect=True, ids=ids)
# pytest-xdist isn't required but is used in CI, so guard on its presence
if HAVE_XDIST:
def pytest_xdist_make_scheduler(config, log):
"""
Serialize certain tests for pytest-xdist that have inter-test
dependencies
"""
class TvmTestScheduler(LoadScopeScheduling):
"""
            Scheduler to serialize tests
"""
def _split_scope(self, nodeid):
"""
Returns a specific string for classes of nodeids
"""
# NOTE: these tests contain inter-test dependencies and must be
# serialized
items = {
"test_tvm_testing_features": "functional-tests",
"tests/python/unittest/test_crt": "crt-tests",
"tests/python/driver/tvmc": "tvmc-tests",
}
for nodeid_pattern, suite_name in items.items():
if nodeid_pattern in nodeid:
return suite_name
return nodeid
return TvmTestScheduler(config, log)
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/popen_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-function-docstring
"""Common functions for popen_pool test cases"""
import tvm
from . import _ffi_api
TEST_GLOBAL_STATE_1 = 0
TEST_GLOBAL_STATE_2 = 0
TEST_GLOBAL_STATE_3 = 0
def initializer(test_global_state_1, test_global_state_2, test_global_state_3):
global TEST_GLOBAL_STATE_1, TEST_GLOBAL_STATE_2, TEST_GLOBAL_STATE_3
TEST_GLOBAL_STATE_1 = test_global_state_1
TEST_GLOBAL_STATE_2 = test_global_state_2
TEST_GLOBAL_STATE_3 = test_global_state_3
def after_initializer():
global TEST_GLOBAL_STATE_1, TEST_GLOBAL_STATE_2, TEST_GLOBAL_STATE_3
return TEST_GLOBAL_STATE_1, TEST_GLOBAL_STATE_2, TEST_GLOBAL_STATE_3
@tvm._ffi.register_func("testing.identity_py")
def identity_py(arg):
return arg
def register_ffi():
@tvm._ffi.register_func("testing.nested_identity_py")
def _identity_py(arg): # pylint: disable=unused-variable
return arg
def call_py_ffi(arg):
_identity_py = tvm._ffi.get_global_func("testing.nested_identity_py")
return _identity_py(arg)
def call_cpp_ffi(arg):
return tvm.testing.echo(arg)
def call_cpp_py_ffi(arg):
return tvm.testing.identity_cpp(arg)
def fast_summation(n):
return n * (n + 1) // 2
def slow_summation(n):
r = 0
for i in range(0, n + 1):
r += i
return r
def timeout_job(n):
_ffi_api.sleep_in_ffi(n * 1.5)
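# A brief usage sketch, assuming the PopenPoolExecutor API in tvm.contrib.popen_pool
# (max_workers/timeout/initializer/initargs in the constructor and submit() returning
# a future-like object with result()); the values below are illustrative.
def example_popen_pool_usage():
    from tvm.contrib.popen_pool import PopenPoolExecutor
    pool = PopenPoolExecutor(
        max_workers=1, timeout=10, initializer=initializer, initargs=(1, 2, 3)
    )
    assert pool.submit(after_initializer).result() == (1, 2, 3)
    assert pool.submit(fast_summation, 100).result() == pool.submit(slow_summation, 100).result()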
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel, unused-variable
"""Common utility functions in TVM tir"""
def mma_schedule(
workload,
k_inner,
in_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
shared_scope="shared",
):
"""Create a tensorized schedule for GEMM with MMA intrinsics."""
import tvm # pylint: disable=import-outside-toplevel
ir_module = tvm.IRModule({"main": workload})
sch = tvm.tir.Schedule(ir_module)
block = sch.get_block("C")
i, j, k = sch.get_loops(block)
i, i_tc = sch.split(i, factors=[None, 16])
j, j_tc = sch.split(j, factors=[None, 16])
k, k_tc = sch.split(k, factors=[None, k_inner])
sch.reorder(i, j, k, i_tc, j_tc, k_tc)
block_inner = sch.blockize(i_tc)
block_outer, block_inner = block_inner, block
num_ty = i_factors[2] * j_factors[2]
i0, i1, i2, i3, i4 = sch.split(i, factors=i_factors)
j0, j1, j2, j3, j4 = sch.split(j, factors=j_factors)
k0, k1, k2 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3, k2, i4, j4)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0)
vector_size = 16 if in_dtype == "int8" else 8
warp_size = 32
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vector_size])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
offset = 8 if in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=32, offset=offset)
return block_read
fetch_to_shared(block_outer, 0, 2)
fetch_to_shared(block_outer, 1, 2)
A_warp = sch.cache_read(block_outer, 0, "warp")
B_warp = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_warp, k1)
sch.compute_at(B_warp, k1)
C_warp = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(C_warp, thread_idy)
ii, jj = sch.get_loops(C_warp)[-2:]
io, ii = sch.split(ii, factors=[None, 16])
jo, ji = sch.split(jj, factors=[None, 16])
sch.reorder(io, jo, ii, ji)
sch.decompose_reduction(block_outer, sch.get_loops(block_outer)[3])
block_init_c = sch.get_block("C_init")
def tile_wmma_fragment(block_read, height, width):
i, j = sch.get_loops(block_read)[-2:]
i0, i1 = sch.split(i, factors=[None, height])
j0, j1 = sch.split(j, factors=[None, width])
sch.reorder(i0, j0, i1, j1)
return i1
loop_a = tile_wmma_fragment(A_warp, 16, k_inner)
if b_transposed:
loop_b = tile_wmma_fragment(B_warp, 16, k_inner)
else:
loop_b = tile_wmma_fragment(B_warp, k_inner, 16)
sch.transform_layout(A_warp, ("write", 0), index_map_A)
sch.transform_layout(B_warp, ("write", 0), index_map_B)
sch.transform_layout(C_warp, ("read", 0), index_map_C)
sch.tensorize(loop_a, ldmatrix_a_intrin)
sch.tensorize(loop_b, ldmatrix_b_intrin)
sch.tensorize(sch.get_loops(block_inner)[-3], mma_intrin)
sch.tensorize(sch.get_loops(block_init_c)[-2], mma_fill_intrin)
sch.tensorize(sch.get_loops(C_warp)[-2], mma_store_intrin)
return sch
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/usmp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" This file contains USMP tests harnesses."""
import tvm
def is_tvm_backendallocworkspace_calls(mod: tvm.runtime.module) -> bool:
"""TVMBackendAllocWorkspace call check.
    This check verifies whether any of the generated C sources contain TVMBackendAllocWorkspace calls.
    If USMP is invoked, none of them should contain TVMBAW calls.
"""
dso_modules = mod._collect_dso_modules()
for dso_mod in dso_modules:
if dso_mod.type_key not in ["c", "llvm"]:
assert (
False
), 'Current AoT codegen flow should only produce type "c" or "llvm" runtime modules'
source = dso_mod.get_source()
if source.count("TVMBackendAllocWorkspace") != 0:
return True
return False
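# A short usage sketch, assuming an AOT-compiled test model whose executor factory
# exposes the generated runtime module via a `lib` attribute (as used by the AOT test
# harness); with USMP enabled this assertion is expected to hold.
def example_assert_no_tvmbaw(compiled_test_model):
    assert not is_tvm_backendallocworkspace_calls(compiled_test_model.executor_factory.lib)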
| https://github.com/zk-ml/tachikoma |
python/tvm/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-comprehension
"""TVM testing utilities
Organization
************
This file contains functions expected to be called directly by a user
while writing unit tests. Integrations with the pytest framework
are in plugin.py.
Testing Markers
***************
We use pytest markers to specify the requirements of test functions. Currently
there is a single distinction that matters for our testing environment: does
the test require a gpu. For tests that require just a gpu or just a cpu, we
have the decorator :py:func:`requires_gpu` that enables the test when a gpu is
available. To avoid running tests that don't require a gpu on gpu nodes, this
decorator also sets the pytest marker `gpu` so we can select the gpu subset
of tests (using `pytest -m gpu`).
Unfortunately, many tests are written like this:
.. python::
def test_something():
for target in all_targets():
do_something()
The test uses both gpu and cpu targets, so the test needs to be run on both cpu
and gpu nodes. But we still want to only run the cpu targets on the cpu testing
node. The solution is to mark these tests with the gpu marker so they will be
run on the gpu nodes. But we also modify all_targets (renamed to
enabled_targets) so that it only returns gpu targets on gpu nodes and cpu
targets on cpu nodes (using an environment variable).
Instead of using the all_targets function, future tests that would like to
test against a variety of targets should use the
:py:func:`tvm.testing.parametrize_targets` functionality. This allows us
greater control over which targets are run on which testing nodes.
If in the future we want to add a new type of testing node (for example
fpgas), we need to add a new marker in `tests/python/pytest.ini` and a new
function in this module. Then targets using this node should be added to the
`TVM_TEST_TARGETS` environment variable in the CI.
"""
import inspect
import copy
import copyreg
import ctypes
import functools
import hashlib
import itertools
import logging
import os
import pickle
import platform
import sys
import textwrap
import time
import shutil
import subprocess
from pathlib import Path
from typing import Optional, Callable, Union, List, Tuple
import pytest
import numpy as np
import tvm
import tvm.arith
import tvm.tir
import tvm.te
import tvm._ffi
from tvm.contrib import nvcc, cudnn
import tvm.contrib.hexagon._ci_env_check as hexagon
from tvm.driver.tvmc.frontends import load_model
from tvm.error import TVMError
SKIP_SLOW_TESTS = os.getenv("SKIP_SLOW_TESTS", "").lower() in {"true", "1", "yes"}
IS_IN_CI = os.getenv("CI", "") == "true"
skip_if_wheel_test = pytest.mark.skipif(
os.getenv("WHEEL_TEST", "").lower() in {"true", "1", "yes"},
reason="Test not supported in wheel.",
)
def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
"""Version of np.testing.assert_allclose with `atol` and `rtol` fields set
in reasonable defaults.
Arguments `actual` and `desired` are not interchangeable, since the function
compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. Since we
often allow `desired` to be close to zero, we generally want non-zero `atol`.
"""
actual = np.asanyarray(actual)
desired = np.asanyarray(desired)
np.testing.assert_allclose(actual.shape, desired.shape)
np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)
def check_numerical_grads(
function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1
):
"""A helper function that checks that numerical gradients of a function are
equal to gradients computed in some different way (analytical gradients).
Numerical gradients are computed using finite difference approximation. To
reduce the number of function evaluations, the number of points used is
gradually increased if the error value is too high (up to 5 points).
Parameters
----------
function
A function that takes inputs either as positional or as keyword
arguments (either `function(*input_values)` or `function(**input_values)`
should be correct) and returns a scalar result. Should accept numpy
ndarrays.
input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
A list of values or a dict assigning values to variables. Represents the
point at which gradients should be computed.
grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]
Gradients computed using a different method.
function_value : float, optional
Should be equal to `function(**input_values)`.
delta : float, optional
A small number used for numerical computation of partial derivatives.
The default 1e-3 is a good choice for float32.
atol : float, optional
Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a
gradient.
rtol : float, optional
Relative tolerance.
"""
# If input_values is a list then function accepts positional arguments
# In this case transform it to a function taking kwargs of the form {"0": ..., "1": ...}
if not isinstance(input_values, dict):
input_len = len(input_values)
input_values = {str(idx): val for idx, val in enumerate(input_values)}
def _function(_input_len=input_len, _orig_function=function, **kwargs):
return _orig_function(*(kwargs[str(i)] for i in range(input_len)))
function = _function
grad_values = {str(idx): val for idx, val in enumerate(grad_values)}
if function_value is None:
function_value = function(**input_values)
# a helper to modify j-th element of val by a_delta
def modify(val, j, a_delta):
val = val.copy()
val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta
return val
# numerically compute a partial derivative with respect to j-th element of the var `name`
def derivative(x_name, j, a_delta):
modified_values = {
n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()
}
return (function(**modified_values) - function_value) / a_delta
def compare_derivative(j, n_der, grad):
der = grad.reshape(-1)[j]
return np.abs(n_der - der) < atol + rtol * np.abs(n_der)
for x_name, grad in grad_values.items():
if grad.shape != input_values[x_name].shape:
raise AssertionError(
"Gradient wrt '{}' has unexpected shape {}, expected {} ".format(
x_name, grad.shape, input_values[x_name].shape
)
)
ngrad = np.zeros_like(grad)
wrong_positions = []
# compute partial derivatives for each position in this variable
for j in range(np.prod(grad.shape)):
# forward difference approximation
nder = derivative(x_name, j, delta)
# if the derivative is not equal to the analytical one, try to use more
# precise and expensive methods
if not compare_derivative(j, nder, grad):
# central difference approximation
nder = (derivative(x_name, j, -delta) + nder) / 2
if not compare_derivative(j, nder, grad):
# central difference approximation using h = delta/2
cnder2 = (
derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)
) / 2
# five-point derivative
nder = (4 * cnder2 - nder) / 3
# if the derivatives still don't match, add this position to the
# list of wrong positions
if not compare_derivative(j, nder, grad):
wrong_positions.append(np.unravel_index(j, grad.shape))
ngrad.reshape(-1)[j] = nder
wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))
dist = np.sqrt(np.sum((ngrad - grad) ** 2))
grad_norm = np.sqrt(np.sum(ngrad**2))
if not (np.isfinite(dist) and np.isfinite(grad_norm)):
raise ValueError(
"NaN or infinity detected during numerical gradient checking wrt '{}'\n"
"analytical grad = {}\n numerical grad = {}\n".format(x_name, grad, ngrad)
)
# we multiply atol by this number to make it more universal for different sizes
sqrt_n = np.sqrt(float(np.prod(grad.shape)))
if dist > atol * sqrt_n + rtol * grad_norm:
raise AssertionError(
"Analytical and numerical grads wrt '{}' differ too much\n"
"analytical grad = {}\n numerical grad = {}\n"
"{}% of elements differ, first 10 of wrong positions: {}\n"
"distance > atol*sqrt(n) + rtol*grad_norm\n"
"distance {} > {}*{} + {}*{}".format(
x_name,
grad,
ngrad,
wrong_percentage,
wrong_positions[:10],
dist,
atol,
sqrt_n,
rtol,
grad_norm,
)
)
max_diff = np.max(np.abs(ngrad - grad))
avg_diff = np.mean(np.abs(ngrad - grad))
logging.info(
"Numerical grad test wrt '%s' of shape %s passes, "
"dist = %f, max_diff = %f, avg_diff = %f",
x_name,
grad.shape,
dist,
max_diff,
avg_diff,
)
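# A small self-check sketch for check_numerical_grads: the analytical gradient of
# sum(x**2) is 2*x, so the comparison below should pass (the shape is illustrative).
def _example_check_numerical_grads():
    x = np.random.uniform(size=(3, 4)).astype("float64")
    check_numerical_grads(lambda x: np.sum(x * x), {"x": x}, {"x": 2 * x})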
def assert_prim_expr_equal(lhs, rhs):
"""Assert lhs and rhs equals to each iother.
Parameters
----------
lhs : tvm.tir.PrimExpr
The left operand.
rhs : tvm.tir.PrimExpr
        The right operand.
"""
ana = tvm.arith.Analyzer()
if not ana.can_prove_equal(lhs, rhs):
raise ValueError("{} and {} are not equal".format(lhs, rhs))
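# A tiny usage sketch: the arithmetic analyzer should prove these expressions equal.
def _example_assert_prim_expr_equal():
    n = tvm.tir.Var("n", "int32")
    assert_prim_expr_equal(n * 4, n * 2 * 2)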
def check_bool_expr_is_true(bool_expr, vranges, cond=None):
"""Check that bool_expr holds given the condition cond
for every value of free variables from vranges.
for example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10)
here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y
    We create iterations to check,
for x in range(10):
for y in range(10):
assert !(2x > 4y) || (x > 2y)
Parameters
----------
bool_expr : tvm.ir.PrimExpr
Boolean expression to check
vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]
Free variables and their ranges
cond: tvm.ir.PrimExpr
extra conditions needs to be satisfied.
"""
if cond is not None:
bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)
def _run_expr(expr, vranges):
"""Evaluate expr for every value of free variables
given by vranges and return the tensor of results.
"""
def _compute_body(*us):
vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
return tvm.tir.stmt_functor.substitute(expr, vmap)
A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
args = [tvm.nd.empty(A.shape, A.dtype)]
sch = tvm.te.create_schedule(A.op)
mod = tvm.build(sch, [A])
mod(*args)
return args[0].numpy()
res = _run_expr(bool_expr, vranges)
if not np.all(res):
indices = list(np.argwhere(res == 0)[0])
counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]
counterex = sorted(counterex, key=lambda x: x[0])
counterex = ", ".join([v + " = " + str(i) for v, i in counterex])
ana = tvm.arith.Analyzer()
raise AssertionError(
"Expression {}\nis not true on {}\n"
"Counterexample: {}".format(ana.simplify(bool_expr), vranges, counterex)
)
def check_int_constraints_trans_consistency(constraints_trans, vranges=None):
"""Check IntConstraintsTransform is a bijective transformation.
Parameters
----------
constraints_trans : arith.IntConstraintsTransform
Integer constraints transformation
vranges: Dict[tvm.tir.Var, tvm.ir.Range]
Free variables and their ranges
"""
if vranges is None:
vranges = {}
def _check_forward(constraints1, constraints2, varmap, backvarmap):
ana = tvm.arith.Analyzer()
all_vranges = vranges.copy()
all_vranges.update({v: r for v, r in constraints1.ranges.items()})
# Check that the transformation is injective
cond_on_vars = tvm.tir.const(1, "bool")
for v in constraints1.variables:
if v in varmap:
# variable mapping is consistent
v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))
cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)
# Also we have to check that the new relations are true when old relations are true
cond_subst = tvm.tir.stmt_functor.substitute(
tvm.te.all(tvm.tir.const(1, "bool"), *constraints2.relations), backvarmap
)
# We have to include relations from vranges too
for v in constraints2.variables:
if v in constraints2.ranges:
r = constraints2.ranges[v]
range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)
range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)
cond_subst = tvm.te.all(cond_subst, range_cond)
cond_subst = ana.simplify(cond_subst)
check_bool_expr_is_true(
tvm.te.all(cond_subst, cond_on_vars),
all_vranges,
cond=tvm.te.all(tvm.tir.const(1, "bool"), *constraints1.relations),
)
_check_forward(
constraints_trans.src,
constraints_trans.dst,
constraints_trans.src_to_dst,
constraints_trans.dst_to_src,
)
_check_forward(
constraints_trans.dst,
constraints_trans.src,
constraints_trans.dst_to_src,
constraints_trans.src_to_dst,
)
def _get_targets(target_names=None):
if target_names is None:
target_names = _tvm_test_targets()
if not target_names:
target_names = DEFAULT_TEST_TARGETS
targets = []
for target in target_names:
target_kind = target.split()[0]
if target_kind == "cuda" and "cudnn" in tvm.target.Target(target).attrs.get("libs", []):
is_enabled = tvm.support.libinfo()["USE_CUDNN"].lower() in ["on", "true", "1"]
is_runnable = is_enabled and cudnn.exists()
elif target_kind == "hexagon":
is_enabled = tvm.support.libinfo()["USE_HEXAGON"].lower() in ["on", "true", "1"]
# If Hexagon has compile-time support, we can always fall back
is_runnable = is_enabled and "ANDROID_SERIAL_NUMBER" in os.environ
else:
is_enabled = tvm.runtime.enabled(target_kind)
is_runnable = is_enabled and tvm.device(target_kind).exist
targets.append(
{
"target": target,
"target_kind": target_kind,
"is_enabled": is_enabled,
"is_runnable": is_runnable,
}
)
if all(not t["is_runnable"] for t in targets):
if tvm.runtime.enabled("llvm"):
logging.warning(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
target_names,
)
return _get_targets(["llvm"])
raise TVMError(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target."
" Cannot default to llvm, as it is not enabled." % target_names
)
return targets
DEFAULT_TEST_TARGETS = [
"llvm",
"cuda",
"nvptx",
"vulkan -from_device=0",
"opencl",
"opencl -device=mali,aocl_sw_emu",
"opencl -device=intel_graphics",
"metal",
"rocm",
"hexagon",
]
def device_enabled(target):
"""Check if a target should be used when testing.
It is recommended that you use :py:func:`tvm.testing.parametrize_targets`
instead of manually checking if a target is enabled.
This allows the user to control which devices they are testing against. In
tests, this should be used to check if a device should be used when said
device is an optional part of the test.
Parameters
----------
target : str
Target string to check against
Returns
-------
bool
Whether or not the device associated with this target is enabled.
Example
-------
>>> @tvm.testing.uses_gpu
>>> def test_mytest():
>>> for target in ["cuda", "llvm"]:
>>> if device_enabled(target):
>>> test_body...
    Here, `test_body` will only be reached with `target="cuda"` on gpu test
    nodes and with `target="llvm"` on cpu test nodes.
"""
assert isinstance(target, str), "device_enabled requires a target as a string"
# only check if device name is found, sometime there are extra flags
target_kind = target.split(" ")[0]
return any(target_kind == t["target_kind"] for t in _get_targets() if t["is_runnable"])
def enabled_targets():
"""Get all enabled targets with associated devices.
In most cases, you should use :py:func:`tvm.testing.parametrize_targets` instead of
this function.
In this context, enabled means that TVM was built with support for
this target, the target name appears in the TVM_TEST_TARGETS
environment variable, and a suitable device for running this
target exists. If TVM_TEST_TARGETS is not set, it defaults to
variable DEFAULT_TEST_TARGETS in this module.
If you use this function in a test, you **must** decorate the test with
:py:func:`tvm.testing.uses_gpu` (otherwise it will never be run on the gpu).
Returns
-------
targets: list
        A list of (target, device) pairs for all enabled targets.
"""
return [(t["target"], tvm.device(t["target"])) for t in _get_targets() if t["is_runnable"]]
class Feature:
"""A feature that may be required to run a test.
Parameters
----------
name: str
The short name of the feature. Should match the name in the
requires_* decorator. This is applied as a mark to all tests
        using this feature, and can be used in pytest's ``-m``
argument.
long_name: Optional[str]
The long name of the feature, to be used in error messages.
If None, defaults to the short name.
cmake_flag: Optional[str]
The flag that must be enabled in the config.cmake in order to
use this feature.
If None, no flag is required to use this feature.
target_kind_enabled: Optional[str]
The target kind that must be enabled to run tests using this
feature. If present, the target_kind must appear in the
TVM_TEST_TARGETS environment variable, or in
tvm.testing.DEFAULT_TEST_TARGETS if TVM_TEST_TARGETS is
undefined.
If None, this feature does not require a specific target to be
enabled.
compile_time_check: Optional[Callable[[], Union[bool,str]]]
A check that returns True if the feature can be used at
compile-time. (e.g. Validating the version number of the nvcc
compiler.) If the feature does not have support to perform
        compile-time tests, the check should return False to display
a generic error message, or a string to display a more
specific error message.
If None, no additional check is performed.
target_kind_hardware: Optional[str]
The target kind that must have available hardware in order to
run tests using this feature. This is checked using
tvm.device(target_kind_hardware).exist. If a feature requires
a different check, this should be implemented using
run_time_check.
If None, this feature does not require a specific
tvm.device to exist.
run_time_check: Optional[Callable[[], Union[bool,str]]]
A check that returns True if the feature can be used at
run-time. (e.g. Validating the compute version supported by a
GPU.) If the feature does not have support to perform
        run-time tests, the check should return False to display a
generic error message, or a string to display a more specific
error message.
If None, no additional check is performed.
parent_features: Optional[Union[str,List[str]]]
The short name of a feature or features that are required in
order to use this feature. (e.g. Using cuDNN requires using
CUDA) This feature should inherit all checks of the parent
feature, with the exception of the `target_kind_enabled`
checks.
If None, this feature does not require any other parent
features.
"""
_all_features = {}
def __init__(
self,
name: str,
long_name: Optional[str] = None,
cmake_flag: Optional[str] = None,
target_kind_enabled: Optional[str] = None,
compile_time_check: Optional[Callable[[], Union[bool, str]]] = None,
target_kind_hardware: Optional[str] = None,
run_time_check: Optional[Callable[[], Union[bool, str]]] = None,
parent_features: Optional[Union[str, List[str]]] = None,
):
self.name = name
self.long_name = long_name or name
self.cmake_flag = cmake_flag
self.target_kind_enabled = target_kind_enabled
self.compile_time_check = compile_time_check
self.target_kind_hardware = target_kind_hardware
self.run_time_check = run_time_check
if parent_features is None:
self.parent_features = []
elif isinstance(parent_features, str):
self.parent_features = [parent_features]
else:
self.parent_features = parent_features
self._all_features[self.name] = self
def _register_marker(self, config):
config.addinivalue_line("markers", f"{self.name}: Mark a test as using {self.long_name}")
def _uses_marks(self):
for parent in self.parent_features:
yield from self._all_features[parent]._uses_marks()
yield getattr(pytest.mark, self.name)
def _compile_only_marks(self):
for parent in self.parent_features:
yield from self._all_features[parent]._compile_only_marks()
if self.compile_time_check is not None:
res = self.compile_time_check()
if isinstance(res, str):
yield pytest.mark.skipif(True, reason=res)
else:
yield pytest.mark.skipif(
not res, reason=f"Compile-time support for {self.long_name} not present"
)
if self.target_kind_enabled is not None:
target_kind = self.target_kind_enabled.split()[0]
yield pytest.mark.skipif(
all(enabled.split()[0] != target_kind for enabled in _tvm_test_targets()),
reason=(
f"{self.target_kind_enabled} tests disabled "
f"by TVM_TEST_TARGETS environment variable"
),
)
if self.cmake_flag is not None:
yield pytest.mark.skipif(
not _cmake_flag_enabled(self.cmake_flag),
reason=(
f"{self.long_name} support not enabled. "
f"Set {self.cmake_flag} in config.cmake to enable."
),
)
def _run_only_marks(self):
for parent in self.parent_features:
yield from self._all_features[parent]._run_only_marks()
if self.run_time_check is not None:
res = self.run_time_check()
if isinstance(res, str):
yield pytest.mark.skipif(True, reason=res)
else:
yield pytest.mark.skipif(
not res, reason=f"Run-time support for {self.long_name} not present"
)
if self.target_kind_hardware is not None:
yield pytest.mark.skipif(
not tvm.device(self.target_kind_hardware).exist,
reason=f"No device exists for target {self.target_kind_hardware}",
)
def marks(self, support_required="compile-and-run"):
"""Return a list of marks to be used
Parameters
----------
support_required: str
Allowed values: "compile-and-run" (default),
"compile-only", or "optional".
See Feature.__call__ for details.
"""
if support_required not in ["compile-and-run", "compile-only", "optional"]:
raise ValueError(f"Unknown feature support type: {support_required}")
if support_required == "compile-and-run":
marks = itertools.chain(
self._run_only_marks(), self._compile_only_marks(), self._uses_marks()
)
elif support_required == "compile-only":
marks = itertools.chain(self._compile_only_marks(), self._uses_marks())
elif support_required == "optional":
marks = self._uses_marks()
else:
raise ValueError(f"Unknown feature support type: {support_required}")
return list(marks)
def __call__(self, func=None, *, support_required="compile-and-run"):
"""Mark a pytest function as requiring this feature
Can be used either as a bare decorator, or as a decorator with
arguments.
Parameters
----------
func: Callable
The pytest test function to be marked
support_required: str
Allowed values: "compile-and-run" (default),
"compile-only", or "optional".
If "compile-and-run", the test case is marked as using the
feature, and is skipped if the environment lacks either
compile-time or run-time support for the feature.
If "compile-only", the test case is marked as using the
feature, and is skipped if the environment lacks
compile-time support.
If "optional", the test case is marked as using the
feature, but isn't skipped. This is kept for backwards
compatibility for tests that use `enabled_targets()`, and
should be avoided in new test code. Instead, prefer
parametrizing over the target using the `target` fixture.
Examples
--------
.. code-block:: python
@feature
def test_compile_and_run():
...
            @feature(support_required="compile-only")
def test_compile_only():
...
"""
if support_required not in ["compile-and-run", "compile-only", "optional"]:
raise ValueError(f"Unknown feature support type: {support_required}")
def wrapper(func):
for mark in self.marks(support_required=support_required):
func = mark(func)
return func
if func is None:
return wrapper
return wrapper(func)
@classmethod
def require(cls, name, support_required="compile-and-run"):
"""Returns a decorator that marks a test as requiring a feature
Parameters
----------
name: str
The name of the feature that is used by the test
support_required: str
Allowed values: "compile-and-run" (default),
"compile-only", or "optional".
See Feature.__call__ for details.
Examples
--------
.. code-block:: python
@Feature.require("cuda")
def test_compile_and_run():
...
@Feature.require("cuda", compile_only=True)
def test_compile_only():
...
"""
return cls._all_features[name](support_required=support_required)
def _any_gpu_exists():
return (
tvm.cuda().exist
or tvm.rocm().exist
or tvm.opencl().exist
or tvm.metal().exist
or tvm.vulkan().exist
)
# Mark a test as requiring llvm to run
requires_llvm = Feature(
"llvm", "LLVM", cmake_flag="USE_LLVM", target_kind_enabled="llvm", target_kind_hardware="llvm"
)
# Mark a test as requiring a GPU to run.
requires_gpu = Feature("gpu", run_time_check=_any_gpu_exists)
# Mark to differentiate tests that use the GPU in some capacity.
#
# These tests will be run on CPU-only test nodes and on test nodes with GPUs.
# To mark a test that must have a GPU present to run, use
# :py:func:`tvm.testing.requires_gpu`.
uses_gpu = requires_gpu(support_required="optional")
# Mark a test as requiring the x86 Architecture to run.
requires_x86 = Feature(
"x86", "x86 Architecture", run_time_check=lambda: platform.machine() == "x86_64"
)
# Mark a test as requiring the CUDA runtime.
requires_cuda = Feature(
"cuda",
"CUDA",
cmake_flag="USE_CUDA",
target_kind_enabled="cuda",
target_kind_hardware="cuda",
parent_features="gpu",
)
# Mark a test as requiring a tensorcore to run
requires_tensorcore = Feature(
"tensorcore",
"NVIDIA Tensor Core",
run_time_check=lambda: tvm.cuda().exist and nvcc.have_tensorcore(tvm.cuda().compute_version),
parent_features="cuda",
)
# Mark a test as requiring the cuDNN library.
requires_cudnn = Feature("cudnn", "cuDNN", cmake_flag="USE_CUDNN", parent_features="cuda")
# Mark a test as requiring the cuBLAS library.
requires_cublas = Feature("cublas", "cuBLAS", cmake_flag="USE_CUBLAS", parent_features="cuda")
# Mark a test as requiring the NVPTX compilation on the CUDA runtime
requires_nvptx = Feature(
"nvptx",
"NVPTX",
target_kind_enabled="nvptx",
target_kind_hardware="nvptx",
parent_features=["llvm", "cuda"],
)
# Mark a test as requiring the CUDA Graph Feature
requires_cudagraph = Feature(
"cudagraph",
"CUDA Graph",
target_kind_enabled="cuda",
compile_time_check=nvcc.have_cudagraph,
parent_features="cuda",
)
# Mark a test as requiring the OpenCL runtime
requires_opencl = Feature(
"opencl",
"OpenCL",
cmake_flag="USE_OPENCL",
target_kind_enabled="opencl",
target_kind_hardware="opencl" if "RPC_TARGET" not in os.environ else None,
parent_features="gpu" if "RPC_TARGET" not in os.environ else None,
)
# Mark a test as requiring the rocm runtime
requires_rocm = Feature(
"rocm",
"ROCm",
cmake_flag="USE_ROCM",
target_kind_enabled="rocm",
target_kind_hardware="rocm",
parent_features="gpu",
)
# Mark a test as requiring the metal runtime
requires_metal = Feature(
"metal",
"Metal",
cmake_flag="USE_METAL",
target_kind_enabled="metal",
target_kind_hardware="metal",
parent_features="gpu",
)
# Mark a test as requiring the vulkan runtime
requires_vulkan = Feature(
"vulkan",
"Vulkan",
cmake_flag="USE_VULKAN",
target_kind_enabled="vulkan",
target_kind_hardware="vulkan",
parent_features="gpu",
)
# Mark a test as requiring OpenCLML support in build.
requires_openclml = Feature(
"OpenCLML",
"CLML",
cmake_flag="USE_CLML",
target_kind_enabled="opencl",
)
# Mark a test as requiring microTVM to run
requires_micro = Feature("micro", "MicroTVM", cmake_flag="USE_MICRO")
# Mark a test as requiring CUTLASS to run
requires_cutlass = Feature("cutlass", "CUTLASS", cmake_flag="USE_CUTLASS")
# Mark a test as requiring rpc to run
requires_rpc = Feature("rpc", "RPC", cmake_flag="USE_RPC")
# Mark a test as requiring Arm(R) Ethos(TM)-N to run
requires_ethosn = Feature("ethosn", "Arm(R) Ethos(TM)-N", cmake_flag="USE_ETHOSN")
# Mark a test as requiring libtorch to run
requires_libtorch = Feature("libtorch", "LibTorch", cmake_flag="USE_LIBTORCH")
# Mark a test as requiring Hexagon to run
requires_hexagon = Feature(
"hexagon",
"Hexagon",
cmake_flag="USE_HEXAGON",
target_kind_enabled="hexagon",
compile_time_check=hexagon._compile_time_check,
run_time_check=hexagon._run_time_check,
parent_features="llvm",
)
# Mark a test as requiring the CMSIS NN library
requires_cmsisnn = Feature("cmsisnn", "CMSIS NN", cmake_flag="USE_CMSISNN")
def _corstone300_compile_time_check():
if shutil.which("arm-none-eabi-gcc") is None:
return "ARM embedded toolchain unavailable"
return True
# Mark a test as requiring the corstone300 FVP
requires_corstone300 = Feature(
"corstone300",
"Corstone-300",
compile_time_check=_corstone300_compile_time_check,
parent_features="cmsisnn",
)
# Mark a test as requiring Vitis AI to run
requires_vitis_ai = Feature("vitis_ai", "Vitis AI", cmake_flag="USE_VITIS_AI")
def _arm_dot_supported():
arch = platform.machine()
if arch not in ["arm64", "aarch64"]:
return False
if sys.platform.startswith("darwin"):
cpu_info = subprocess.check_output("sysctl -a", shell=True).strip().decode()
for line in cpu_info.split("\n"):
if line.startswith("hw.optional.arm.FEAT_DotProd"):
return bool(int(line.split(":", 1)[1]))
elif sys.platform.startswith("linux"):
return True
return False
def _is_intel():
# Only linux is supported for now.
if sys.platform.startswith("linux"):
with open("/proc/cpuinfo", "r") as content:
return "Intel" in content.read()
return False
def _has_vnni():
arch = platform.machine()
# Only linux is supported for now.
if arch == "x86_64" and sys.platform.startswith("linux"):
with open("/proc/cpuinfo", "r") as content:
return "avx512_vnni" in content.read()
return False
requires_arm_dot = Feature("arm_dot", "ARM dot product", run_time_check=_arm_dot_supported)
requires_cascadelake = Feature(
"cascadelake", "x86 CascadeLake", run_time_check=lambda: _has_vnni() and _is_intel()
)
def _cmake_flag_enabled(flag):
flag = tvm.support.libinfo()[flag]
# Because many of the flags can be library flags, we check if the
# flag is not disabled, rather than checking if it is enabled.
return flag.lower() not in ["off", "false", "0"]
def _tvm_test_targets():
target_str = os.environ.get("TVM_TEST_TARGETS", "").strip()
if target_str:
# Use dict instead of set for de-duplication so that the
# targets stay in the order specified.
return list({t.strip(): None for t in target_str.split(";") if t.strip()})
return DEFAULT_TEST_TARGETS
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
f = args[0]
for d in reversed(decs):
f = d(f)
return f
return decs
slow = pytest.mark.skipif(
SKIP_SLOW_TESTS,
reason="Skipping slow test since the SKIP_SLOW_TESTS environment variable is 'true'",
)
def requires_nvcc_version(major_version, minor_version=0, release_version=0):
"""Mark a test as requiring at least a specific version of nvcc.
Unit test marked with this decorator will run only if the
installed version of NVCC is at least `(major_version,
minor_version, release_version)`.
    This also marks the test as requiring CUDA support.
Parameters
----------
major_version: int
The major version of the (major,minor,release) version tuple.
minor_version: int
The minor version of the (major,minor,release) version tuple.
release_version: int
The release version of the (major,minor,release) version tuple.
"""
try:
nvcc_version = nvcc.get_cuda_version()
except RuntimeError:
nvcc_version = (0, 0, 0)
min_version = (major_version, minor_version, release_version)
version_str = ".".join(str(v) for v in min_version)
requires = [
pytest.mark.skipif(nvcc_version < min_version, reason=f"Requires NVCC >= {version_str}"),
*requires_cuda.marks(),
]
def inner(func):
return _compose([func], requires)
return inner
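# Hedged usage sketch (illustrative only): skip the test unless NVCC >= 11.0 is
# installed; the decorator also applies the requires_cuda marks.
#   @tvm.testing.requires_nvcc_version(11)
#   def test_needs_nvcc_11():
#       ...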
def requires_cuda_compute_version(major_version, minor_version=0):
"""Mark a test as requiring at least a compute architecture
Unit test marked with this decorator will run only if the CUDA
compute architecture of the GPU is at least `(major_version,
minor_version)`.
    This also marks the test as requiring CUDA support.
Parameters
----------
major_version: int
The major version of the (major,minor) version tuple.
minor_version: int
The minor version of the (major,minor) version tuple.
"""
min_version = (major_version, minor_version)
try:
arch = tvm.contrib.nvcc.get_target_compute_version()
compute_version = tvm.contrib.nvcc.parse_compute_version(arch)
except ValueError:
# No GPU present. This test will be skipped from the
# requires_cuda() marks as well.
compute_version = (0, 0)
min_version_str = ".".join(str(v) for v in min_version)
compute_version_str = ".".join(str(v) for v in compute_version)
requires = [
pytest.mark.skipif(
compute_version < min_version,
reason=f"Requires CUDA compute >= {min_version_str}, but have {compute_version_str}",
),
*requires_cuda.marks(),
]
def inner(func):
return _compose([func], requires)
return inner
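# Hedged usage sketch (illustrative only): run the test only on GPUs with CUDA
# compute capability 8.0 or newer.
#   @tvm.testing.requires_cuda_compute_version(8)
#   def test_needs_sm80():
#       ...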
def skip_if_32bit(reason):
def decorator(*args):
if "32bit" in platform.architecture()[0]:
return _compose(args, [pytest.mark.skip(reason=reason)])
return _compose(args, [])
return decorator
def requires_package(*packages):
"""Mark a test as requiring python packages to run.
If the packages listed are not available, tests marked with
`requires_package` will appear in the pytest results as being skipped.
This is equivalent to using ``foo = pytest.importorskip('foo')`` inside
the test body.
Parameters
----------
packages : List[str]
The python packages that should be available for the test to
run.
Returns
-------
mark: pytest mark
        The pytest mark to be applied to unit tests that require this package.
"""
def has_package(package):
try:
__import__(package)
return True
except ImportError:
return False
marks = [
pytest.mark.skipif(not has_package(package), reason=f"Cannot import '{package}'")
for package in packages
]
def wrapper(func):
for mark in marks:
func = mark(func)
return func
return wrapper
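# Hedged usage sketch (illustrative only): skip the test when torch cannot be
# imported in the current environment.
#   @tvm.testing.requires_package("torch")
#   def test_torch_frontend():
#       import torch
#       ...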
def parametrize_targets(*args):
"""Parametrize a test over a specific set of targets.
Use this decorator when you want your test to be run over a
specific set of targets and devices. It is intended for use where
a test is applicable only to a specific target, and is
inapplicable to any others (e.g. verifying target-specific
assembly code matches known assembly code). In most
circumstances, :py:func:`tvm.testing.exclude_targets` or
:py:func:`tvm.testing.known_failing_targets` should be used
instead.
If used as a decorator without arguments, the test will be
parametrized over all targets in
:py:func:`tvm.testing.enabled_targets`. This behavior is
automatically enabled for any target that accepts arguments of
``target`` or ``dev``, so the explicit use of the bare decorator
is no longer needed, and is maintained for backwards
compatibility.
Parameters
----------
f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev):`,
where `xxxxxxxxx` is any name.
targets : list[str], optional
Set of targets to run against. If not supplied,
:py:func:`tvm.testing.enabled_targets` will be used.
Example
-------
>>> @tvm.testing.parametrize_targets("llvm", "cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
"""
# Backwards compatibility, when used as a decorator with no
# arguments implicitly parametrizes over "target". The
# parametrization is now handled by _auto_parametrize_target, so
# this use case can just return the decorated function.
if len(args) == 1 and callable(args[0]):
return args[0]
return pytest.mark.parametrize("target", list(args), scope="session")
def exclude_targets(*args):
"""Exclude a test from running on a particular target.
Use this decorator when you want your test to be run over a
variety of targets and devices (including cpu and gpu devices),
but want to exclude some particular target or targets. For
example, a test may wish to be run against all targets in
tvm.testing.enabled_targets(), except for a particular target that
does not support the capabilities.
Applies pytest.mark.skipif to the targets given.
Parameters
----------
f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev):`,
where `xxxxxxxxx` is any name.
targets : list[str]
Set of targets to exclude.
Example
-------
>>> @tvm.testing.exclude_targets("cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
Or
>>> @tvm.testing.exclude_targets("llvm", "cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
"""
def wraps(func):
func.tvm_excluded_targets = args
return func
return wraps
def known_failing_targets(*args):
"""Skip a test that is known to fail on a particular target.
Use this decorator when you want your test to be run over a
variety of targets and devices (including cpu and gpu devices),
but know that it fails for some targets. For example, a newly
implemented runtime may not support all features being tested, and
should be excluded.
Applies pytest.mark.xfail to the targets given.
Parameters
----------
f : function
        Function to parametrize. Must be of the form `def test_xxxxxxxxx(target, dev):`,
where `xxxxxxxxx` is any name.
targets : list[str]
Set of targets to skip.
Example
-------
>>> @tvm.testing.known_failing_targets("cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
Or
>>> @tvm.testing.known_failing_targets("llvm", "cuda")
>>> def test_mytest(target, dev):
>>> ... # do something
"""
def wraps(func):
func.tvm_known_failing_targets = args
return func
return wraps
def parameter(*values, ids=None, by_dict=None):
"""Convenience function to define pytest parametrized fixtures.
Declaring a variable using ``tvm.testing.parameter`` will define a
parametrized pytest fixture that can be used by test
functions. This is intended for cases that have no setup cost,
such as strings, integers, tuples, etc. For cases that have a
significant setup cost, please use :py:func:`tvm.testing.fixture`
instead.
If a test function accepts multiple parameters defined using
``tvm.testing.parameter``, then the test will be run using every
combination of those parameters.
The parameter definition applies to all tests in a module. If a
specific test should have different values for the parameter, that
test should be marked with ``@pytest.mark.parametrize``.
Parameters
----------
values : Any
A list of parameter values. A unit test that accepts this
parameter as an argument will be run once for each parameter
given.
ids : List[str], optional
A list of names for the parameters. If None, pytest will
generate a name from the value. These generated names may not
be readable/useful for composite types such as tuples.
by_dict : Dict[str, Any]
A mapping from parameter name to parameter value, to set both the
values and ids.
Returns
-------
function
A function output from pytest.fixture.
Example
-------
>>> size = tvm.testing.parameter(1, 10, 100)
>>> def test_using_size(size):
>>> ... # Test code here
Or
>>> shape = tvm.testing.parameter((5,10), (512,1024), ids=['small','large'])
>>> def test_using_size(shape):
>>> ... # Test code here
Or
>>> shape = tvm.testing.parameter(by_dict={'small': (5,10), 'large': (512,1024)})
>>> def test_using_size(shape):
>>> ... # Test code here
"""
if by_dict is not None:
if values or ids:
raise RuntimeError(
"Use of the by_dict parameter cannot be used alongside positional arguments"
)
ids, values = zip(*by_dict.items())
# Optional cls parameter in case a parameter is defined inside a
# class scope.
@pytest.fixture(params=values, ids=ids)
def as_fixture(*_cls, request):
return request.param
return as_fixture
_parametrize_group = 0
def parameters(*value_sets, ids=None):
"""Convenience function to define pytest parametrized fixtures.
Declaring a variable using tvm.testing.parameters will define a
parametrized pytest fixture that can be used by test
functions. Like :py:func:`tvm.testing.parameter`, this is intended
for cases that have no setup cost, such as strings, integers,
tuples, etc. For cases that have a significant setup cost, please
use :py:func:`tvm.testing.fixture` instead.
Unlike :py:func:`tvm.testing.parameter`, if a test function
accepts multiple parameters defined using a single call to
``tvm.testing.parameters``, then the test will only be run once
for each set of parameters, not for all combinations of
parameters.
These parameter definitions apply to all tests in a module. If a
specific test should have different values for some parameters,
that test should be marked with ``@pytest.mark.parametrize``.
Parameters
----------
    value_sets : List[tuple]
        A list of parameter value sets. Each set of values represents
        a single combination of values to be tested. A unit test that
        accepts these parameters will be run once for every set of
parameters in the list.
ids : List[str], optional
A list of names for the parameter sets. If None, pytest will
generate a name from each parameter set. These generated names may
not be readable/useful for composite types such as tuples.
Returns
-------
List[function]
Function outputs from pytest.fixture. These should be unpacked
into individual named parameters.
Example
-------
>>> size, dtype = tvm.testing.parameters( (16,'float32'), (512,'float16') )
>>> def test_feature_x(size, dtype):
>>> # Test code here
>>> assert( (size,dtype) in [(16,'float32'), (512,'float16')])
"""
global _parametrize_group
parametrize_group = _parametrize_group
_parametrize_group += 1
outputs = []
for param_values in zip(*value_sets):
# Optional cls parameter in case a parameter is defined inside a
# class scope.
def fixture_func(*_cls, request):
return request.param
fixture_func.parametrize_group = parametrize_group
fixture_func.parametrize_values = param_values
fixture_func.parametrize_ids = ids
outputs.append(pytest.fixture(fixture_func))
return outputs
def fixture(func=None, *, cache_return_value=False):
"""Convenience function to define pytest fixtures.
This should be used as a decorator to mark functions that set up
state before a function. The return value of that fixture
    function is then accessible by test functions that accept it as
a parameter.
Fixture functions can accept parameters defined with
:py:func:`tvm.testing.parameter`.
By default, the setup will be performed once for each unit test
that uses a fixture, to ensure that unit tests are independent.
If the setup is expensive to perform, then the
cache_return_value=True argument can be passed to cache the setup.
The fixture function will be run only once (or once per parameter,
if used with tvm.testing.parameter), and the same return value
will be passed to all tests that use it. If the environment
variable TVM_TEST_DISABLE_CACHE is set to a non-zero value, it
will disable this feature and no caching will be performed.
Example
-------
>>> @tvm.testing.fixture
>>> def cheap_setup():
>>> return 5 # Setup code here.
>>>
>>> def test_feature_x(target, dev, cheap_setup)
>>> assert(cheap_setup == 5) # Run test here
Or
>>> size = tvm.testing.parameter(1, 10, 100)
>>>
>>> @tvm.testing.fixture
>>> def cheap_setup(size):
>>> return 5*size # Setup code here, based on size.
>>>
>>> def test_feature_x(cheap_setup):
>>> assert(cheap_setup in [5, 50, 500])
Or
>>> @tvm.testing.fixture(cache_return_value=True)
>>> def expensive_setup():
>>> time.sleep(10) # Setup code here
>>> return 5
>>>
>>> def test_feature_x(target, dev, expensive_setup):
>>> assert(expensive_setup == 5)
"""
force_disable_cache = bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0")))
cache_return_value = cache_return_value and not force_disable_cache
# Deliberately at function scope, so that caching can track how
# many times the fixture has been used. If used, the cache gets
# cleared after the fixture is no longer needed.
scope = "function"
def wraps(func):
if cache_return_value:
func = _fixture_cache(func)
func = pytest.fixture(func, scope=scope)
return func
if func is None:
return wraps
return wraps(func)
class _DeepCopyAllowedClasses(dict):
def __init__(self, allowed_class_list):
self.allowed_class_list = allowed_class_list
super().__init__()
def get(self, key, *args, **kwargs):
"""Overrides behavior of copy.deepcopy to avoid implicit copy.
By default, copy.deepcopy uses a dict of id->object to track
all objects that it has seen, which is passed as the second
argument to all recursive calls. This class is intended to be
passed in instead, and inspects the type of all objects being
copied.
Where copy.deepcopy does a best-effort attempt at copying an
object, for unit tests we would rather have all objects either
        be copied correctly or throw an error. Classes that
define an explicit method to perform a copy are allowed, as
are any explicitly listed classes. Classes that would fall
back to using object.__reduce__, and are not explicitly listed
as safe, will throw an exception.
"""
obj = ctypes.cast(key, ctypes.py_object).value
cls = type(obj)
if (
cls in copy._deepcopy_dispatch
or issubclass(cls, type)
or getattr(obj, "__deepcopy__", None)
or copyreg.dispatch_table.get(cls)
or cls.__reduce__ is not object.__reduce__
or cls.__reduce_ex__ is not object.__reduce_ex__
or cls in self.allowed_class_list
):
return super().get(key, *args, **kwargs)
rfc_url = (
"https://github.com/apache/tvm-rfcs/blob/main/rfcs/0007-parametrized-unit-tests.md"
)
raise TypeError(
(
f"Cannot copy fixture of type {cls.__name__}. TVM fixture caching "
"is limited to objects that explicitly provide the ability "
"to be copied (e.g. through __deepcopy__, __getstate__, or __setstate__),"
"and forbids the use of the default `object.__reduce__` and "
"`object.__reduce_ex__`. For third-party classes that are "
"safe to use with copy.deepcopy, please add the class to "
"the arguments of _DeepCopyAllowedClasses in tvm.testing._fixture_cache.\n"
"\n"
f"For discussion on this restriction, please see {rfc_url}."
)
)
def _fixture_cache(func):
cache = {}
# Can't use += on a bound method's property. Therefore, this is a
# list rather than a variable so that it can be accessed from the
# pytest_collection_modifyitems().
num_tests_use_this_fixture = [0]
num_times_fixture_used = 0
# Using functools.lru_cache would require the function arguments
# to be hashable, which wouldn't allow caching fixtures that
# depend on numpy arrays. For example, a fixture that takes a
    # numpy array as input, then uses a slow method to
# compute a known correct output for that input. Therefore,
# including a fallback for serializable types.
def get_cache_key(*args, **kwargs):
try:
hash((args, kwargs))
return (args, kwargs)
        except TypeError:
pass
try:
return pickle.dumps((args, kwargs))
except TypeError as e:
raise TypeError(
"TVM caching of fixtures requires arguments to the fixture "
"to be either hashable or serializable"
) from e
@functools.wraps(func)
def wrapper(*args, **kwargs):
if num_tests_use_this_fixture[0] == 0:
raise RuntimeError(
"Fixture use count is 0. "
"This can occur if tvm.testing.plugin isn't registered. "
"If using outside of the TVM test directory, "
"please add `pytest_plugins = ['tvm.testing.plugin']` to your conftest.py"
)
try:
cache_key = get_cache_key(*args, **kwargs)
try:
cached_value = cache[cache_key]
except KeyError:
cached_value = cache[cache_key] = func(*args, **kwargs)
yield copy.deepcopy(
cached_value,
# allowed_class_list should be a list of classes that
# are safe to copy using copy.deepcopy, but do not
# implement __deepcopy__, __reduce__, or
# __reduce_ex__.
_DeepCopyAllowedClasses(allowed_class_list=[]),
)
finally:
# Clear the cache once all tests that use a particular fixture
# have completed.
nonlocal num_times_fixture_used
num_times_fixture_used += 1
if num_times_fixture_used >= num_tests_use_this_fixture[0]:
cache.clear()
# Set in the pytest_collection_modifyitems(), by _count_num_fixture_uses
wrapper.num_tests_use_this_fixture = num_tests_use_this_fixture
return wrapper
def identity_after(x, sleep):
"""Testing function to return identity after sleep
Parameters
----------
x : int
The input value.
sleep : float
The amount of time to sleep
Returns
-------
x : object
The original value
"""
if sleep:
time.sleep(sleep)
return x
def terminate_self():
"""Testing function to terminate the process."""
sys.exit(-1)
def is_ampere_or_newer():
"""Check if the target environment has an NVIDIA Ampere GPU or newer."""
arch = tvm.contrib.nvcc.get_target_compute_version()
major, _ = tvm.contrib.nvcc.parse_compute_version(arch)
return major >= 8
def install_request_hook(depth: int) -> None:
"""Add a wrapper around urllib.request for CI tests"""
if not IS_IN_CI:
return
# https://sphinx-gallery.github.io/stable/faq.html#why-is-file-not-defined-what-can-i-use
base = None
msg = ""
try:
base = __file__
msg += f"found file {__file__}\n"
except NameError:
msg += f"no file\n"
if base is None:
hook_script_dir = Path.cwd().resolve()
msg += "used path.cwd()\n"
else:
hook_script_dir = Path(base).resolve().parent
msg += "used base()\n"
msg += f"using depth {depth}\n"
if depth <= 0:
raise ValueError(f"depth less than 1 not supported, found: {depth}")
# Go up the parent directories
while depth > 0:
msg += f"[depth={depth}] dir={hook_script_dir}\n"
hook_script_dir = hook_script_dir.parent
depth -= 1
# Ensure the specified dir is valid
hook_script_dir = hook_script_dir / "tests" / "scripts" / "request_hook"
if not hook_script_dir.exists():
raise RuntimeError(f"Directory {hook_script_dir} does not exist:\n{msg}")
# Import the hook and start it up (it's not included here directly to avoid
    # keeping a database of URLs inside the tvm Python package)
sys.path.append(str(hook_script_dir))
# This import is intentionally delayed since it should only happen in CI
import request_hook # pylint: disable=import-outside-toplevel
request_hook.init()
def fetch_model_from_url(
url: str,
model_format: str,
sha256: str,
) -> Tuple[tvm.ir.module.IRModule, dict]:
"""Testing function to fetch a model from a URL and return it as a Relay
model. Downloaded files are cached for future re-use.
Parameters
----------
url : str
The URL or list of URLs to try downloading the model from.
model_format: str
The file extension of the model format used.
sha256 : str
The sha256 hex hash to compare the downloaded model against.
Returns
-------
(mod, params) : object
The Relay representation of the downloaded model.
"""
rel_path = f"model_{sha256}.{model_format}"
file = tvm.contrib.download.download_testdata(url, rel_path, overwrite=False)
# Check SHA-256 hash
file_hash = hashlib.sha256()
with open(file, "rb") as f:
for block in iter(lambda: f.read(2**24), b""):
file_hash.update(block)
if file_hash.hexdigest() != sha256:
raise FileNotFoundError("SHA-256 hash for model does not match")
tvmc_model = load_model(file, model_format)
return tvmc_model.mod, tvmc_model.params
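# Hedged usage sketch; the URL and hash below are placeholders, not real values.
#   mod, params = fetch_model_from_url(
#       url="https://example.com/model.onnx",
#       model_format="onnx",
#       sha256="<expected sha256 hex digest>",
#   )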
def _mark_parameterizations(*params, marker_fn, reason):
"""
    Mark tests whose nodeid parameters exactly match one of the given params.
    Useful for quickly marking tests as xfail when they have a large
    number of parameter combinations.
"""
params = set(params)
def decorator(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
if "[" in request.node.name and "]" in request.node.name:
# Strip out the test name and the [ and ] brackets
params_from_name = request.node.name[len(request.node.originalname) + 1 : -1]
if params_from_name in params:
marker_fn(
reason=f"{marker_fn.__name__} on nodeid {request.node.nodeid}: " + reason
)
return func(request, *args, **kwargs)
return wrapper
return decorator
def xfail_parameterizations(*xfail_params, reason):
return _mark_parameterizations(*xfail_params, marker_fn=pytest.xfail, reason=reason)
def skip_parameterizations(*skip_params, reason):
return _mark_parameterizations(*skip_params, marker_fn=pytest.skip, reason=reason)
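# Hedged usage sketch (illustrative only): the decorated test must accept the
# pytest `request` fixture as its first argument, and the listed strings must
# match the bracketed parameter id of the test node exactly.
#   @tvm.testing.xfail_parameterizations("cuda-100", reason="known regression")
#   def test_mytest(request, target, size):
#       ...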
def main():
test_file = inspect.getsourcefile(sys._getframe(1))
sys.exit(pytest.main([test_file] + sys.argv[1:]))
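# Typical usage at the bottom of a test file (sketch):
#   if __name__ == "__main__":
#       tvm.testing.main()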
class CompareBeforeAfter:
"""Utility for comparing before/after of TIR transforms
A standard framework for writing tests that take a TIR PrimFunc as
input, apply a transformation, then either compare against an
expected output or assert that the transformation raised an error.
A test should subclass CompareBeforeAfter, defining class members
`before`, `transform`, and `expected`. CompareBeforeAfter will
then use these members to define a test method and test fixture.
`transform` may be one of the following.
- An instance of `tvm.ir.transform.Pass`
- A method that takes no arguments and returns a `tvm.ir.transform.Pass`
- A pytest fixture that returns a `tvm.ir.transform.Pass`
`before` may be any one of the following.
- An instance of `tvm.tir.PrimFunc`. This is allowed, but is not
the preferred method, as any errors in constructing the
`PrimFunc` occur while collecting the test, preventing any other
tests in the same file from being run.
    - A TVMScript function, without the ``@T.prim_func`` decoration.
The ``@T.prim_func`` decoration will be applied when running the
test, rather than at module import.
- A method that takes no arguments and returns a `tvm.tir.PrimFunc`
- A pytest fixture that returns a `tvm.tir.PrimFunc`
`expected` may be any one of the following. The type of
`expected` defines the test being performed. If `expected`
provides a `tvm.tir.PrimFunc`, the result of the transformation
must match `expected`. If `expected` is an exception, then the
transformation must raise that exception type.
- Any option supported for `before`.
- The `Exception` class object, or a class object that inherits
from `Exception`.
- A method that takes no arguments and returns `Exception` or a
class object that inherits from `Exception`.
    - A pytest fixture that returns `Exception` or a class object
that inherits from `Exception`.
Examples
--------
.. python::
class TestRemoveIf(tvm.testing.CompareBeforeAfter):
transform = tvm.tir.transform.Simplify()
def before(A: T.Buffer[1, "int32"]):
if True:
A[0] = 42
else:
A[0] = 5
def expected(A: T.Buffer[1, "int32"]):
A[0] = 42
"""
def __init_subclass__(cls):
if hasattr(cls, "before"):
cls.before = cls._normalize_before(cls.before)
if hasattr(cls, "expected"):
cls.expected = cls._normalize_expected(cls.expected)
if hasattr(cls, "transform"):
cls.transform = cls._normalize_transform(cls.transform)
@classmethod
def _normalize_ir_module(cls, func):
if isinstance(func, tvm.tir.PrimFunc):
def inner(self):
# pylint: disable=unused-argument
return func
elif cls._is_method(func):
def inner(self):
# pylint: disable=unused-argument
return func(self)
elif inspect.isclass(func):
def inner(self):
# pylint: disable=unused-argument
func_dict = {}
for name, method in func.__dict__.items():
if name.startswith("_"):
pass
elif isinstance(method, tvm.ir.function.BaseFunc):
func_dict[name] = method
else:
source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(method))
prim_func = tvm.script.from_source(source_code)
func_dict[name] = prim_func
return tvm.IRModule(func_dict)
else:
def inner(self):
# pylint: disable=unused-argument
source_code = "@T.prim_func\n" + textwrap.dedent(inspect.getsource(func))
return tvm.script.from_source(source_code)
return pytest.fixture(inner)
@classmethod
def _normalize_before(cls, func):
if hasattr(func, "_pytestfixturefunction"):
return func
else:
return cls._normalize_ir_module(func)
@classmethod
def _normalize_expected(cls, func):
if hasattr(func, "_pytestfixturefunction"):
return func
elif inspect.isclass(func) and issubclass(func, Exception):
def inner(self):
# pylint: disable=unused-argument
return func
return pytest.fixture(inner)
else:
return cls._normalize_ir_module(func)
@classmethod
def _normalize_transform(cls, transform):
def apply(module_transform):
def inner(obj):
if isinstance(obj, tvm.IRModule):
return module_transform(obj)
elif isinstance(obj, tvm.tir.PrimFunc):
mod = tvm.IRModule({"main": obj})
mod = module_transform(mod)
return mod["main"]
else:
raise TypeError(f"Expected IRModule or PrimFunc, but received {type(obj)}")
return inner
if hasattr(transform, "_pytestfixturefunction"):
if not hasattr(cls, "_transform_orig"):
cls._transform_orig = transform
def inner(self, _transform_orig):
# pylint: disable=unused-argument
return apply(_transform_orig)
elif isinstance(transform, tvm.ir.transform.Pass):
def inner(self):
# pylint: disable=unused-argument
return apply(transform)
elif cls._is_method(transform):
def inner(self):
# pylint: disable=unused-argument
return apply(transform(self))
else:
raise TypeError(
"Expected transform to be a tvm.ir.transform.Pass, or a method returning a Pass"
)
return pytest.fixture(inner)
@staticmethod
def _is_method(func):
sig = inspect.signature(func)
return "self" in sig.parameters
def test_compare(self, before, expected, transform):
"""Unit test to compare the expected TIR PrimFunc to actual"""
def pprint(name, obj):
script = obj.script()
if isinstance(obj, tvm.IRModule):
return script.replace("class Module", f"class {name}")
else:
return script.replace("def func", f"def {name}")
if inspect.isclass(expected) and issubclass(expected, Exception):
with pytest.raises(expected):
after = transform(before)
# This portion through pytest.fail isn't strictly
# necessary, but gives a better error message that
# includes the before/after.
before_str = pprint("before", before)
after_str = pprint("after", after)
pytest.fail(
msg=(
f"Expected {expected.__name__} to be raised from transformation, "
f"instead received TIR\n:{before_str}\n{after_str}"
)
)
elif isinstance(expected, (tvm.tir.PrimFunc, tvm.ir.IRModule)):
after = transform(before)
try:
tvm.ir.assert_structural_equal(after, expected)
except ValueError as err:
before_str = pprint("before", before)
after_str = pprint("after", after)
expected_str = pprint("expected", expected)
raise ValueError(
f"TIR after transformation did not match expected:\n"
f"{before_str}\n{after_str}\n{expected_str}"
) from err
else:
raise TypeError(
f"tvm.testing.CompareBeforeAfter requires the `expected` fixture "
f"to return either `Exception`, an `Exception` subclass, "
f"or an instance of `tvm.tir.PrimFunc`. "
f"Instead, received {type(expected)}."
)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Tensor-level IR"""
from tvm.ir import PrimExpr
from tvm.runtime import const
from .buffer import Buffer, decl_buffer, DataProducer
from .data_layout import Layout, BijectiveLayout, bijective_layout, layout
from .expr import Var, SizeVar, Reduce, FloatImm, IntImm, StringImm, Cast
from .expr import Add, Sub, Mul, Div, Mod, FloorDiv, FloorMod
from .expr import Min, Max, EQ, NE, LT, LE, GT, GE, And, Or, Not
from .expr import Select, BufferLoad, ProducerLoad, Load, Ramp, Broadcast, Shuffle
from .expr import Call, CallEffectKind, Let, IterVar, CommReducer, Any
from .stmt import Stmt, LetStmt, AssertStmt, ForKind, For, While
from .stmt import (
BufferStore,
BufferRealize,
Store,
ProducerStore,
Allocate,
AllocateConst,
AttrStmt,
DeclBuffer,
)
from .stmt import ProducerRealize, SeqStmt
from .stmt import IfThenElse, Evaluate, Prefetch, stmt_seq, stmt_list
from .stmt import BufferRegion, MatchBufferRegion, Block, BlockRealize
from .function import PrimFunc, TensorIntrin, IndexMap
from .op import call_packed_lowered, call_cpacked_lowered
from .op import call_packed, call_cpacked, call_intrin, call_pure_extern, call_extern
from .op import call_llvm_intrin, call_llvm_pure_intrin, ret, all, any, min_value, max_value, trace
from .op import tvm_check_return
from .op import tvm_stack_alloca, tvm_stack_make_shape, tvm_stack_make_array
from .op import tvm_tuple, tvm_struct_get, tvm_struct_set
from .op import address_of, lookup_param, assume, undef
from .op import tvm_thread_allreduce, type_annotation, tvm_access_ptr, tvm_throw_last_error
from .op import (
tvm_load_matrix_sync,
tvm_store_matrix_sync,
tvm_mma_sync,
tvm_bmma_sync,
tvm_fill_fragment,
)
from .op import ptx_mma, ptx_mma_sp, mma_store, mma_fill
from .op import ptx_ldmatrix, ptx_cp_async, ptx_commit_group, ptx_wait_group
from .op import vectorlow, vectorhigh, vectorcombine
from .op import infinity, reinterpret
from .op import exp, exp2, exp10, log, log2, log10, log1p, ldexp, clz
from .op import sin, sinh, asin, asinh
from .op import cos, cosh, acos, acosh
from .op import tan, tanh, atan, atan2, atanh
from .op import erf, sigmoid, sqrt, rsqrt, floor, ceil, hypot
from .op import trunc, abs, round, nextafter, nearbyint, power, popcount, fmod, if_then_else
from .op import likely, isnan, isnullptr, isfinite, isinf, copysign
from .op import div, indexdiv, indexmod, truncdiv, truncmod, floordiv, floormod, ceildiv
from .op import comm_reducer, min, max, sum
from .op import q_multiply_shift, q_multiply_shift_per_axis, shift_left, shift_right
from .op import TVMBackendAllocWorkspace, TVMBackendFreeWorkspace
from .op import start_profile_intrinsic, end_profile_intrinsic
from .generic import add, subtract, multiply
from .schedule import StmtSRef, BlockScope, ScheduleState, Schedule, ScheduleError
from . import schedule
from . import ir_builder
from . import transform
from . import analysis
from . import stmt_functor
from . import usmp
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir"""
import tvm._ffi
tvm._ffi._init_api("tir", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/analysis/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all TIR analysis utils."""
# pylint: disable=wildcard-import, invalid-name
from .analysis import *
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/analysis/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.analysis"""
import tvm._ffi
tvm._ffi._init_api("tir.analysis", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/analysis/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping existing analysis utils."""
# pylint: disable=invalid-name
from typing import Dict, List, Union
from tvm import Object
from tvm.ir import IRModule
from tvm.tir.expr import Var
from tvm.tir.stmt import Block, BufferRegion, PrimExpr
from .. import Buffer, Stmt
from ..function import PrimFunc
from . import _ffi_api
def expr_deep_equal(lhs: PrimExpr, rhs: PrimExpr) -> bool:
"""Deeply compare two nested expressions.
Parameters
----------
lhs : PrimExpr
The left operand.
rhs : PrimExpr
The right operand.
Returns
-------
result : bool
The comparison result
Note
----
    This function does not remap variable bindings; it will not
    return true for (let x = 1 in x + 1) vs (let y = 1 in y + 1), unless x.same_as(y).
    Use :py:func:`tvm.ir.structural_equal` to handle structural variable remapping.
Due to the restriction of not remapping variables, this function can run
faster than StructuralEqual and can be used as a utility function during arithmetic
simplifications.
    Always consider :py:func:`tvm.ir.structural_equal` first, which handles
the structural remapping.
See Also
--------
tvm.ir.structural_equal
"""
return _ffi_api.expr_deep_equal(lhs, rhs) # type: ignore
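# Hedged example (illustrative only): structurally identical expressions over the
# same Var compare equal, while expressions over distinct Vars do not.
#   x = Var("x", "int32")
#   y = Var("y", "int32")
#   assert expr_deep_equal(x + 1, x + 1)
#   assert not expr_deep_equal(x + 1, y + 1)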
def verify_ssa(func: PrimFunc) -> bool:
"""Verify if the func is in SSA form.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_ssa(func) # type: ignore
def verify_memory(func: PrimFunc) -> bool:
"""Verify if func contains illegal host side direct memory access.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_memory(func) # type: ignore
def verify_gpu_code(func: PrimFunc, constraints: Dict[str, int]) -> None:
"""Verify if module contains illegal host side direct memory access.
Parameters
----------
func: tvm.tir.PrimFunc
The module to be verified.
constraints : Dict[str, int]
The attribute constraints.
Returns
-------
result : bool
The result of verification.
"""
return _ffi_api.verify_gpu_code(func, constraints) # type: ignore
def get_block_access_region(
block: Block, buffer_var_map: Dict[Var, Buffer]
) -> List[List[BufferRegion]]:
"""Detect which regions of tensors in this block are read or written to.
Regions are sorted by order of appearance in the AST.
Parameters
----------
block: tvm.tir.Block
The block in which we are detecting read/write regions.
buffer_var_map : Dict[Var, Buffer]
The outside buffers which may access the block. Mapping from buffer var to the buffer
Returns
-------
result : List[List[BufferRegion]]
Array of access regions. There are three arrays of BufferRegion:
- first: read regions
- second: write regions
- third: opaque regions
"""
return _ffi_api.GetBlockAccessRegion(block, buffer_var_map) # type: ignore
def get_block_read_write_region(
block: Block, buffer_var_map: Dict[Var, Buffer]
) -> List[List[BufferRegion]]:
"""Auto detect the block read/write region according to its body stmt.
An opaque access will be counted as both a read and a write access
Parameters
----------
block: tvm.tir.Block
The block in which we are detecting read/write regions.
buffer_var_map : Dict[Var, Buffer]
The outside buffers which may access the block. Mapping from buffer var to the buffer
Returns
-------
result : List[List[BufferRegion]]
An array only consisting of the read regions and write regions of the input block
"""
return _ffi_api.GetBlockReadWriteRegion(block, buffer_var_map) # type: ignore
def calculate_workspace_bytes(func: PrimFunc, workspace_byte_alignment: int) -> int:
"""Calculate the workspace size in bytes needed by the TIR allocates inside the TIR
PrimFunc.
Parameters
----------
func: tvm.tir.PrimFunc
The function to be detected.
workspace_byte_alignment : int
The byte alignment required for each tensor
Returns
-------
result : int
Workspace size in bytes.
"""
return _ffi_api.calculate_workspace_bytes(func, workspace_byte_alignment) # type: ignore
def calculate_constant_bytes(func: PrimFunc, constant_byte_alignment: int) -> int:
"""Calculate the constant size in bytes needed by the TIR allocates inside the TIR
PrimFunc.
Parameters
----------
func: tvm.tir.PrimFunc
The function to be detected.
constant_byte_alignment : int
The byte alignment required for each tensor
Returns
-------
result : int
        Constant size in bytes.
"""
return _ffi_api.calculate_constant_bytes(func, constant_byte_alignment) # type: ignore
def detect_buffer_access_lca(func: PrimFunc) -> Dict[Buffer, Stmt]:
"""Detect the lowest common ancestor(LCA) of buffer access, including both high-level
access(BufferLoad, BufferStore) and low-level access(Load, Store and opaque access).
The LCA may be a For loop or a Block.
Parameters
----------
func: tvm.tir.PrimFunc
The function to be detected.
Returns
-------
result : Dict[Buffer, Stmt]
Map from buffer to the LCA of all access to it.
"""
return _ffi_api.detect_buffer_access_lca(func) # type: ignore # pylint: disable=no-member
def estimate_tir_flops(stmt_or_mod: Union[Stmt, IRModule]) -> float:
"""Estimate the FLOPs of a TIR fragment.
Parameters
----------
stmt_or_mod: Union[Stmt, IRModule]
The TIR fragment or IRModule to be estimated.
Returns
-------
flops: float
The estimated FLOPs.
"""
return _ffi_api.EstimateTIRFlops(stmt_or_mod) # type: ignore # pylint: disable=no-member
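# Hedged example (illustrative only): estimate the FLOPs of a small matmul
# PrimFunc built through the TE frontend.
#   from tvm import te
#   A = te.placeholder((64, 64), name="A")
#   B = te.placeholder((64, 64), name="B")
#   k = te.reduce_axis((0, 64), "k")
#   C = te.compute((64, 64), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
#   flops = estimate_tir_flops(IRModule({"main": te.create_prim_func([A, B, C])}))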
# NOTE: relay_func_type in the following two functions should be relay.FuncType; however, that would
# introduce a circular dependency. We make do with Object.
def get_prim_func_arg_and_result_memory_constraints(
func: PrimFunc, relay_func_type: Object
) -> List[str]:
"""Returns the memory (aka storage) scope constraints for all the arguments and result
of func. However the result will be w.r.t. the func's representation as a Relay Function
of relay_func_type before lowering and conversion to DPS.
Visible for testing.
Parameters
----------
func: tvm.tir.PrimFunc
The function to retrieve constraints from.
relay_func_type: tvm.relay.FuncType
The type of the Relay Function from which the func was derived.
Returns
-------
result: List[AnyStr]
        Memory scope constraints for the func's args and result in Relay form. The empty string
denotes 'no constraint'.
"""
return _ffi_api.GetPrimFuncArgAndResultMemoryConstraints( # type: ignore # pylint: disable=no-member
func, relay_func_type
)
def apply_prim_func_arg_and_result_memory_constraints(
func: PrimFunc, relay_func_type: Object, arg_and_result_memory_scopes: List[str]
) -> PrimFunc:
"""Returns func written to capture the memory (aka storage) scope constraints
for each of the func's parameters given by arg_and_result_memory_scopes. However,
arg_and_result_memory_scopes should be w.r.t. the func's representation as a Relay
Function of relay_func_type before lowering and conversion to DPS.
Visible for testing.
CAUTION: This is experimental. The resulting PrimFunc may not have fully accounted
for all new memory scopes.
Parameters
----------
func: tvm.tir.PrimFunc
The function to retrieve constraints from.
relay_func_type: tvm.relay.FuncType
The type of the Relay Function from which the func was derived.
arg_and_result_memory_scopes: Array[AnyStr]
        Memory constraints for the func's args and result in Relay form. The empty string denotes
'no constraint'.
Returns
-------
result: tvm.tir.PrimFunc
The rewritten func.
"""
return _ffi_api.ApplyPrimFuncArgAndResultMemoryConstraints( # type: ignore # pylint: disable=no-member
func, relay_func_type, arg_and_result_memory_scopes
)
def verify_well_formed(func: PrimFunc, assert_mode: bool = True) -> bool:
"""Verify if the given TIR is well-formed. The verification includes:
    - Check that expressions do not contain vars that are defined outside the block.
Parameters
----------
func: tvm.tir.PrimFunc
The function to be verified.
assert_mode: bool
        Whether to raise an error when the function is not well-formed.
Returns
-------
result: bool
Whether it is a well-formed TIR function.
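    Examples
    --------
    A minimal usage sketch; ``func`` is assumed to be a ``tvm.tir.PrimFunc``:
    .. code-block:: python
        tvm.tir.analysis.verify_well_formed(func)  # raises if the function is malformed
        ok = tvm.tir.analysis.verify_well_formed(func, assert_mode=False)  # returns a bool instead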
"""
return _ffi_api.VerifyWellFormed(func, assert_mode) # type: ignore # pylint: disable=no-member
def OOBChecker():
"""Detect out of bounds memory access in arrays.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.OOBChecker() # type: ignore
def find_anchor_block(mod: IRModule) -> Block:
"""Find the "anchor block" of the given module.
We define the anchor block to be the block with (1) an init statement and (2) having
the biggest flops count. The latter condition is only used when there are multiple blocks
with an init statement.
For example, if the input module is conv2d + fused spatial blocks, conv2d is the anchor block.
The input module may not contain more than one such block. For example, a module having
two conv2d is not allowed as an input.
However, a module created from winograd convolution has multiple blocks with an init statement
(input transform, batched GEMM, and output transform). We use the second condition, the flops
count, to determine that the batched GEMM block is the anchor block.
Parameters
----------
mod: tvm.ir.IRModule
The input TIR module.
Returns
-------
anchor_block: Block
The anchor block if found, None otherwise.
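    Examples
    --------
    A hedged sketch; ``mod`` is assumed to hold a fused TIR kernel such as conv2d
    followed by elementwise blocks (the printed block name is illustrative):
    .. code-block:: python
        anchor = tvm.tir.analysis.find_anchor_block(mod)
        if anchor is not None:
            print(anchor.name_hint)  # e.g. "conv2d_nchw"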
"""
return _ffi_api.find_anchor_block(mod) # type: ignore # pylint: disable=no-member
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstraction for array data structures."""
from numbers import Integral
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.ir import PointerType, PrimExpr, PrimType, Range
from tvm.runtime import Object, convert
from . import _ffi_api
@tvm._ffi.register_object("tir.Buffer")
class Buffer(Object):
"""Symbolic data buffer in TVM.
    Buffer provides a way to represent the data layout
    specialization of a data structure in TVM.
Do not construct directly, use :py:func:`~decl_buffer` instead.
See the documentation of :py:func:`decl_buffer` for more details.
See Also
--------
decl_buffer : Declare a buffer
"""
READ = 1
WRITE = 2
def access_ptr(self, access_mask, ptr_type="handle", content_lanes=1, offset=0, extent=None):
"""Get an access pointer to the head of buffer.
        This is the recommended method to get the buffer data
        address when interacting with external functions.
Parameters
----------
access_mask : int
The access pattern MASK. Indicate whether the
access will read or write to the data content.
ptr_type : str, optional
The data type of the result pointer. Do not specify
unless we want to cast pointer to specific type.
content_lanes: int, optional
The number of lanes for the data type. This value
is greater than one for vector types.
offset: Expr, optional
The offset of pointer. We can use it to offset by
the number of elements from the address of ptr.
extent: Expr, optional
The extent of pointer.
Examples
--------
.. code-block:: python
# Get access ptr for read
buffer.access_ptr("r")
# Get access ptr for read/write with bitmask
buffer.access_ptr(Buffer.READ | Buffer.WRITE)
# Get access ptr for read/write with str flag
buffer.access_ptr("rw")
# Get access ptr for read with offset
buffer.access_ptr("r", offset = 100)
# Get access ptr for read with extent
buffer.access_ptr("r", extent = 100)
"""
if isinstance(access_mask, string_types):
mask = 0
for value in access_mask:
if value == "r":
mask = mask | Buffer.READ
elif value == "w":
mask = mask | Buffer.WRITE
else:
raise ValueError("Unknown access_mask %s" % access_mask)
access_mask = mask
offset = convert(offset)
extent = convert(extent)
return _ffi_api.BufferAccessPtr(
self, access_mask, ptr_type, content_lanes, offset, extent # type: ignore
)
def vload(self, begin, dtype=None):
"""Generate an Expr that loads dtype from begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
dtype : str
            The data type to be loaded; it can be a vector type
            whose number of lanes is a multiple of the lanes of Buffer.dtype.
Returns
-------
load : Expr
The corresponding load expression.
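        Examples
        --------
        A minimal sketch of scalar and vector loads from a freshly declared
        buffer (the buffer name is illustrative):
        .. code-block:: python
            A = tvm.tir.decl_buffer((16,), "float32", name="A")
            x = A.vload([0])                # scalar float32 load at index 0
            v = A.vload([4], "float32x4")   # 4-lane vector load starting at index 4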
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
dtype = dtype if dtype else self.dtype
return _ffi_api.BufferVLoad(self, begin, dtype) # type: ignore
def vstore(self, begin, value):
"""Generate a Stmt that store value into begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
value : Expr
The value to be stored.
Returns
-------
store : Stmt
The corresponding store stmt.
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
return _ffi_api.BufferVStore(self, begin, value) # type: ignore
def scope(self):
"""Return the storage scope associated with this buffer.
Returns
-------
scope : str
The storage scope associated with this buffer.
"""
return _ffi_api.BufferStorageScope(self) # type: ignore
def get_flattened_buffer(self):
"""Generate a Buffer that is a flattened version of this buffer.
Returns
-------
flattened : Buffer
The corresponding flat buffer.
"""
return _ffi_api.BufferGetFlattenedBuffer(self) # type: ignore
def offset_of(self, indices):
"""Determine the offset of the provided indices in the flattened buffer.
Parameters
----------
indices : Union[PrimExpr, List[PrimExpr]]
The indices of the element in the original buffer.
Returns
-------
flattened_indices: List[PrimExpr]
The offset indices of the element in the flattened buffer.
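        Examples
        --------
        A minimal sketch for a compact row-major buffer (the exact simplified
        form of the returned expression may differ):
        .. code-block:: python
            A = tvm.tir.decl_buffer((4, 8), "float32", name="A")
            A.offset_of([2, 3])  # roughly [2 * 8 + 3], i.e. [19]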
"""
return _ffi_api.BufferOffsetOf(self, indices) # type: ignore
def __getitem__(self, indices):
from ..arith import Analyzer # pylint: disable=import-outside-toplevel
from .expr import BufferLoad, Ramp # pylint: disable=import-outside-toplevel
from .stmt import BufferRegion # pylint: disable=import-outside-toplevel
if not isinstance(indices, (tuple, list)):
indices = [indices]
has_slice = any(isinstance(i, slice) for i in indices)
has_step = any(isinstance(i, slice) and i.step is not None for i in indices)
analyzer = Analyzer()
if has_slice and not has_step:
region = []
for i, index in enumerate(indices):
if isinstance(index, slice):
start = 0 if index.start is None else index.start
stop = self.shape[i] if index.stop is None else index.stop
region.append(Range.from_min_extent(start, analyzer.simplify(stop - start)))
else:
region.append(Range.from_min_extent(index, 1))
return BufferRegion(self, region)
else:
expr_indices = []
            # Enumerate so that ``i`` can index into self.shape when a slice is open-ended.
            for i, index in enumerate(indices):
if isinstance(index, slice):
start = 0 if index.start is None else index.start
stop = self.shape[i] if index.stop is None else index.stop
step = 1 if index.step is None else index.step
lanes = analyzer.simplify((stop - start + step - 1) // step)
if lanes == 1:
expr_indices.append(start)
else:
expr_indices.append(Ramp(start, step, int(lanes)))
else:
expr_indices.append(index)
return BufferLoad(self, expr_indices)
def decl_buffer(
shape,
dtype=None,
name="buffer",
data=None,
strides=None,
elem_offset=None,
scope="",
data_alignment=-1,
offset_factor=0,
buffer_type="",
axis_separators=None,
span=None,
):
"""Declare a new symbolic buffer.
    Normally, buffers are created automatically during lower and build.
    This is only needed if users want to specify their own buffer layout.
See the note below for detailed discussion on usage of buffer.
Parameters
----------
shape : tuple of Expr
The shape of the buffer.
dtype : str, optional
The data type of the buffer.
name : str, optional
The name of the buffer.
data : Var, optional
The data pointer in the buffer.
strides: array of Expr
The stride of the buffer.
elem_offset: Expr, optional
The beginning offset of the array to data.
In terms of number of elements of dtype.
scope: str, optional
The storage scope of the buffer, if not global.
If scope equals empty string, it means it is global memory.
data_alignment: int, optional
The alignment of data pointer in bytes.
If -1 is passed, the alignment will be set to TVM's internal default.
offset_factor: int, optional
The factor of elem_offset field, when set,
elem_offset is required to be multiple of offset_factor.
        If 0 is passed, the alignment will be set to 1.
        If non-zero is passed, we will create a Var for elem_offset if elem_offset is None.
buffer_type: str, optional, {"", "auto_broadcast"}
auto_broadcast buffer allows one to implement broadcast computation
without considering whether dimension size equals to one.
TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension j's shape equals 1.
axis_separators : list of int, optional
If passed, a list of separators between groups of axes,
each of which is flattened to an output axis. For flat
memory spaces, should either be None, or an empty list.
span: Optional[Span]
The location of the decl_buffer creation in the source.
Returns
-------
buffer : tvm.tir.Buffer
The created buffer
Example
-------
Here's an example of how broadcast buffer can be used to define a symbolic broadcast operation,
.. code-block:: python
m0, m1, m2 = te.var("m0"), te.var("m1"), te.var("m2")
n0, n1, n2 = te.var("n0"), te.var("n1"), te.var("n2")
o0, o1, o2 = te.var("o0"), te.var("o1"), te.var("o2")
A = te.placeholder((m0, m1, m2), name='A')
B = te.placeholder((n0, n1, n2), name='B')
C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
Note
----
Buffer data structure reflects the DLTensor structure in dlpack.
While DLTensor data structure is very general, it is usually helpful
to create function that only handles specific case of data structure
and make compiled function benefit from it.
    If the user passes strides and elem_offset as None
    when constructing the function, then the function will be specialized
    for DLTensors that are compact and aligned.
    If the user passes a fully generic symbolic array for the strides,
    then the resulting function becomes fully generic.
"""
# pylint: disable=import-outside-toplevel
from .expr import Var
shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
dtype = "float32" if dtype is None else dtype
strides = () if strides is None else strides
if axis_separators is None:
axis_separators = []
if offset_factor != 0 and elem_offset is None:
shape_dtype = shape[0].dtype if shape and hasattr(shape[0], "dtype") else "int32"
elem_offset = Var("%s_elem_offset" % name, shape_dtype)
if data is None:
# Bool is represented as uint1 in the IR, but stored as int8
storage_type = PrimType(dtype)
storage_type = PrimType("int8") if storage_type.dtype == "bool" else storage_type
data = Var(name, PointerType(storage_type, scope), span)
return _ffi_api.Buffer( # type: ignore
data,
dtype,
shape,
strides,
elem_offset,
name,
data_alignment,
offset_factor,
buffer_type,
axis_separators,
span,
)
@tvm._ffi.register_object("tir.DataProducer")
class DataProducer(Object):
pass
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/data_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Data layout."""
from typing import Union
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("tir.Layout")
class Layout(Object):
"""Layout is composed of upper cases, lower cases and numbers,
where upper case indicates a primal axis and
the corresponding lower case with factor size indicates the subordinate axis.
For example, NCHW16c can describe a 5-D tensor of
[batch_size, channel, height, width, channel_block].
Here subordinate axis channel_block=16 is the factor size of the primal axis C (channel).
See Also
--------
layout : Declare a layout
"""
def __len__(self):
return _ffi_api.LayoutNdim(self) # type: ignore
def __contains__(self, axis):
return len(axis) == 1 and axis[0].isalpha() and axis[0] in self.name
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Layout index out of range")
return _ffi_api.LayoutGetItem(self, index) # type: ignore
def index_of(self, axis):
"""Get the index of an axis
Parameters
----------
axis : str
            The axis name; it needs to be in [a-z, A-Z].
Returns
-------
index : int
The index of the axis, -1 if not found.
"""
return _ffi_api.LayoutIndexOf(self, axis) # type: ignore
def factor_of(self, axis):
"""Get the factor size of the subordinate axis.
Parameters
----------
axis : str
            The axis name; it needs to be in [a-z, A-Z].
Returns
-------
factor : int
the size of the subordinate-axis of axis (if axis is a primal-axis),
or the size of axis itself (if axis is a subordinate-axis).
Return -1 if axis is not in the layout.
"""
return _ffi_api.LayoutFactorOf(self, axis) # type: ignore
@tvm._ffi.register_object("tir.BijectiveLayout")
class BijectiveLayout(Object):
"""Bijective mapping for two layouts (src-layout and dst-layout).
It provides shape and index conversion between each other.
Do not construct directly, use :any:`bijective_layout` instead.
See the documentation of :any:`bijective_layout` for more details.
Parameters
----------
src_layout : str or Layout
source layout.
dst_layout : str or Layout
destination layout.
See Also
--------
bijective_layout : Declare a layout
"""
def forward_index(self, index):
"""Given the indices of the src-layout, infer the dst index.
Parameters
----------
index: Array of Expr
The indices in src-layout.
Returns
-------
dst_index: Array of Expr
The inferred indices in dst-layout.
"""
return _ffi_api.BijectiveLayoutForwardIndex(self, index) # type: ignore
def backward_index(self, index):
"""Given the indices of the dst-layout, infer the src index.
Parameters
----------
index: Array of Expr
The indices in dst-layout.
Returns
-------
src_index: Array of Expr
The inferred indices in src-layout.
"""
return _ffi_api.BijectiveLayoutBackwardIndex(self, index) # type: ignore
def forward_shape(self, shape):
"""Given the shape of the src-layout, infer the dst shape.
Parameters
----------
shape: Array of Expr
The shape in src-layout.
Returns
-------
dst_shape: Array of Expr
The inferred shape in dst-layout.
"""
return _ffi_api.BijectiveLayoutForwardShape(self, shape) # type: ignore
def backward_shape(self, shape):
"""Given the shape of the dst-layout, infer the src shape.
Parameters
----------
shape: Array of Expr
The shape in dst-layout.
Returns
-------
src_shape: Array of Expr
The inferred shape in src-layout.
"""
return _ffi_api.BijectiveLayoutBackwardShape(self, shape) # type: ignore
def layout(layout_str: str) -> Layout:
"""Create a layout node from a string.
Parameters
----------
layout_str : str
A layout representation is composed of upper cases, lower cases and numbers,
where upper case indicates a primal axis and
the corresponding lower case with factor size indicates the subordinate axis.
For example, NCHW16c can describe a 5-D tensor of
[batch_size, channel, height, width, channel_block].
Here subordinate axis channel_block=16 is the factor size of
the primal axis C (channel).
Returns
-------
layout : Layout
The created layout
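    Examples
    --------
    A minimal usage sketch:
    .. code-block:: python
        l = tvm.tir.layout("NCHW16c")
        len(l)            # 5 axes: N, C, H, W and the 16-wide sub-axis c
        l.index_of("C")   # 1
        l.factor_of("c")  # 16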
"""
return _ffi_api.Layout(layout_str) # type: ignore
def bijective_layout(
src_layout: Union[str, Layout], dst_layout: Union[str, Layout]
) -> BijectiveLayout:
"""Create a bijective layout mapping.
Parameters
----------
src_layout : str or Layout
source layout.
dst_layout : str or Layout
destination layout.
Returns
-------
bijective_layout : BijectiveLayout
The created bijective layout
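    Examples
    --------
    A minimal usage sketch mapping NCHW shapes and indices to NCHW16c:
    .. code-block:: python
        bl = tvm.tir.bijective_layout("NCHW", "NCHW16c")
        bl.forward_shape([1, 32, 7, 7])   # -> [1, 2, 7, 7, 16]
        bl.forward_index([0, 18, 3, 4])   # -> [0, 1, 3, 4, 2]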
"""
if isinstance(src_layout, str):
src_layout = layout(src_layout)
if isinstance(dst_layout, str):
dst_layout = layout(dst_layout)
return _ffi_api.BijectiveLayout(src_layout, dst_layout) # type: ignore
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""TIR expression nodes.
Each expression node has subfields that can be visited from the Python side.
For example, you can use addexp.a to get the left operand of an Add node.
.. code-block:: python
x = tvm.tir.Var("n", "int32")
y = x + 2
assert(isinstance(y, tvm.tir.Add))
assert(y.a == x)
"""
from typing import Optional, Union
from tvm import ir
import tvm._ffi
from tvm.ir.base import Span
from tvm.runtime import Object, ObjectGeneric, DataType, DataTypeCode, const
from tvm.ir import PrimExpr, Op
import tvm.ir._ffi_api
from . import generic as _generic
from . import _ffi_api
def div_ambiguity_error():
return RuntimeError(
"TVM supports multiple types of integer divisions, "
+ "please call div, indexdiv/indexmod, floordiv/floormod "
+ " or truncdiv/truncmod directly to avoid ambiguity in the code."
)
def _dtype_is_int(value):
if isinstance(value, int):
return True
return (
isinstance(value, ExprOp) and DataType(value.dtype).type_code == DataTypeCode.INT
) # type: ignore
def _dtype_is_float(value):
if isinstance(value, float):
return True
return (
isinstance(value, ExprOp) and DataType(value.dtype).type_code == DataTypeCode.FLOAT
) # type: ignore
class ExprOp(object):
"""Operator overloading for Expr like expressions."""
# TODO(tkonolige): use inspect to add source information to these objects
def __add__(self, other):
return _generic.add(self, other)
def __radd__(self, other):
return _generic.add(other, self)
def __sub__(self, other):
return _generic.subtract(self, other)
def __rsub__(self, other):
return _generic.subtract(other, self)
def __mul__(self, other):
return _generic.multiply(self, other)
def __rmul__(self, other):
return _generic.multiply(other, self)
def __div__(self, other):
if _dtype_is_int(self) and _dtype_is_int(other):
raise div_ambiguity_error()
return _generic.divide(self, other)
def __rdiv__(self, other):
if _dtype_is_int(self) and _dtype_is_int(other):
raise div_ambiguity_error()
return _generic.divide(other, self)
def __truediv__(self, other):
if _dtype_is_int(self) and _dtype_is_int(other):
raise div_ambiguity_error()
return _generic.divide(self, other)
def __rtruediv__(self, other):
if _dtype_is_int(self) and _dtype_is_int(other):
raise div_ambiguity_error()
return _generic.divide(other, self)
def __floordiv__(self, other):
return _generic.floordiv(self, other)
def __rfloordiv__(self, other):
return _generic.floordiv(other, self, None)
def __mod__(self, other):
return _ffi_api._OpFloorMod(self, other, None) # type: ignore
def __rmod__(self, other):
return _ffi_api._OpFloorMod(other, self, None) # type: ignore
def __neg__(self):
neg_one = const(-1, self.dtype) # type: ignore
return self.__mul__(neg_one)
def __lshift__(self, other):
return _ffi_api.left_shift(self, other, None) # type: ignore
def __rlshift__(self, other):
return _ffi_api.left_shift(other, self, None) # type: ignore
def __rshift__(self, other):
return _ffi_api.right_shift(self, other, None) # type: ignore
def __rrshift__(self, other):
return _ffi_api.right_shift(other, self, None) # type: ignore
def __and__(self, other):
return _ffi_api.bitwise_and(self, other, None) # type: ignore
def __rand__(self, other):
return _ffi_api.bitwise_and(other, self, None) # type: ignore
def __or__(self, other):
return _ffi_api.bitwise_or(self, other, None) # type: ignore
def __ror__(self, other):
return _ffi_api.bitwise_or(other, self, None) # type: ignore
def __xor__(self, other):
return _ffi_api.bitwise_xor(self, other, None) # type: ignore
def __rxor__(self, other):
return _ffi_api.bitwise_xor(other, self, None) # type: ignore
def __invert__(self):
if _dtype_is_float(self):
raise RuntimeError("Cannot use ~ operator on float type Expr.")
return _ffi_api.bitwise_not(self, None) # type: ignore
def __lt__(self, other):
return _ffi_api._OpLT(self, other, None) # type: ignore
def __le__(self, other):
return _ffi_api._OpLE(self, other, None) # type: ignore
def __eq__(self, other):
return EqualOp(self, other)
def __ne__(self, other):
return NotEqualOp(self, other)
def __gt__(self, other):
return _ffi_api._OpGT(self, other, None) # type: ignore
def __ge__(self, other):
return _ffi_api._OpGE(self, other, None) # type: ignore
def __nonzero__(self):
raise ValueError(
"Cannot use and / or / not operator to Expr, hint: "
+ "use tvm.tir.all / tvm.tir.any instead"
)
def __bool__(self):
return self.__nonzero__()
def equal(self, other, span=None):
"""Build an equal check expression with other expr.
Parameters
----------
other : PrimExpr
The other expression
span : Optional[Span]
The location of the cast in the source.
Returns
-------
ret : PrimExpr
The equality expression.
"""
return _ffi_api._OpEQ(self, other, span) # type: ignore
def astype(self, dtype: str, span: Optional[Span] = None):
"""Cast the expression to other type.
Parameters
----------
dtype : str
The type of new expression
span : Optional[Span]
The location of the cast in the source.
Returns
-------
expr : PrimExpr
Expression with new type
"""
return _generic.cast(self, dtype, span)
class EqualOp(ObjectGeneric, ExprOp):
"""Deferred equal operator.
This is used to support sugar that a == b can either
mean Object.same_as or Object.equal.
Parameters
----------
a : PrimExpr
Left operand.
b : PrimExpr
Right operand.
span : Optional[Span]
The location of the cast in the source.
"""
    # This class is not manipulated by C++, so using Python's identity check function is sufficient.
same_as = object.__eq__
def __init__(self, a, b, span=None):
self.a = a
self.b = b
self.span = span
def __nonzero__(self):
return self.a.same_as(self.b)
def __bool__(self):
return self.__nonzero__()
def asobject(self):
"""Convert object."""
return _ffi_api._OpEQ(self.a, self.b, self.span) # type: ignore
class NotEqualOp(ObjectGeneric, ExprOp):
"""Deferred NE operator.
This is used to support sugar that a != b can either
mean not Object.same_as or make.NE.
Parameters
----------
a : PrimExpr
Left operand.
b : PrimExpr
Right operand.
span : Optional[Span]
The location of the cast in the source.
"""
    # This class is not manipulated by C++, so using Python's identity check function is sufficient.
same_as = object.__eq__
def __init__(self, a, b, span=None):
self.a = a
self.b = b
self.span = span
def __nonzero__(self):
return not self.a.same_as(self.b)
def __bool__(self):
return self.__nonzero__()
def asobject(self):
"""Convert object."""
return _ffi_api._OpNE(self.a, self.b, self.span) # type: ignore
class IntImmEnum(ObjectGeneric):
"""Lazily evaluate an IntImm in case
the constructor is not available in runtime.
Parameters
----------
value : int
The enum value
span : Optional[Span]
The location of the cast in the source.
"""
def __init__(self, value, span=None):
self.value = value
self.span = span
def asobject(self):
"""Convert object."""
return IntImm("int32", self.value, self.span) # type: ignore
class PrimExprWithOp(ExprOp, PrimExpr):
"""Helper base class to inherit from PrimExpr."""
    # In Python 3, we have to explicitly tell the interpreter to retain __hash__ if we override __eq__
# https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
__hash__ = PrimExpr.__hash__
class ConstExpr(PrimExprWithOp):
pass
class BinaryOpExpr(PrimExprWithOp):
pass
class CmpExpr(PrimExprWithOp):
pass
class LogicalExpr(PrimExprWithOp):
pass
@tvm._ffi.register_object("tir.Var")
class Var(PrimExprWithOp):
"""Symbolic variable.
Parameters
----------
name : str
The name
    dtype : Union[str, tvm.ir.Type]
The data type
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, name: str, dtype: Union[str, ir.Type], span: Optional[Span] = None):
self.__init_handle_by_constructor__(_ffi_api.Var, name, dtype, span) # type: ignore
@tvm._ffi.register_object("tir.SizeVar")
class SizeVar(Var):
"""Symbolic variable to represent a tensor index size
which is greater or equal to zero.
Parameters
----------
name : str
The name
    dtype : Union[str, tvm.ir.Type]
The data type
span : Optional[Span]
The location of this itervar in the source code.
"""
# pylint: disable=super-init-not-called
def __init__(self, name, dtype, span=None):
self.__init_handle_by_constructor__(_ffi_api.SizeVar, name, dtype, span) # type: ignore
@tvm._ffi.register_object("tir.IterVar")
class IterVar(Object, ExprOp):
"""Represent iteration variable.
IterVar represents axis iterations in the computation.
Parameters
----------
dom : Range
The domain of the iteration.
var : Union[Var, str]
The internal variable that is used for iteration.
iter_type : int
The iteration type.
thread_tag : str
The thread type tag.
span : Optional[Span]
The location of this itervar in the source code.
See Also
--------
te.thread_axis: Create thread axis IterVar.
te.reduce_axis: Create reduce axis IterVar.
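    Examples
    --------
    A hedged sketch constructing a data-parallel axis directly; in practice
    te.reduce_axis / te.thread_axis are the usual entry points:
    .. code-block:: python
        i = tvm.tir.IterVar((0, 16), "i", 0)  # iter_type 0 == IterVar.DataPar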
"""
DataPar = 0
ThreadIndex = 1
CommReduce = 2
Ordered = 3
DimInfo = 4
Unrolled = 5
Vectorized = 6
Parallelized = 7
Tensorized = 8
def __init__(self, dom, var, iter_type, thread_tag="", span=None):
if dom is not None:
if isinstance(dom, (list, tuple)):
if len(dom) != 2:
raise TypeError("need to be list of ranges")
dom = tvm.ir.Range(dom[0], dom[1])
if not isinstance(dom, tvm.ir.Range):
raise TypeError("dom need to be Range")
name = var if var is not None else "iter"
dtype = "int32" if dom is None else dom.extent.dtype
var = Var(name, dtype=dtype, span=span) if not isinstance(var, Var) else var
if dom is not None:
assert (
var.dtype == dom.extent.dtype
), "IterVar's Var dtype must match its domain's extent's dtype"
self.__init_handle_by_constructor__(
_ffi_api.IterVar, dom, var, iter_type, thread_tag, span # type: ignore
)
@tvm._ffi.register_object("tir.CommReducer")
class CommReducer(Object):
"""Commutative reduce operator
Parameters
----------
lhs : List[Var]
The left arguments of the reducer.
rhs : List[Var]
The right arguments of the reducer.
result : List[PrimExpr]
The reduction results.
identity_element : List[PrimExpr]
The identity elements.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, lhs, rhs, result, identity_element, span=None):
self.__init_handle_by_constructor__(
_ffi_api.CommReducer, lhs, rhs, result, identity_element, span # type: ignore
)
@tvm._ffi.register_object("tir.Reduce")
class Reduce(PrimExprWithOp):
"""Reduce node.
Parameters
----------
combiner : CommReducer
The combiner.
src : list of Expr
The source expression.
rdom : list of IterVar
The iteration domain
condition : PrimExpr
The reduce condition.
value_index : int
The value index.
init : list of Expr
The initial value for output. This can be an int, float or ProducerLoad
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, combiner, src, rdom, condition, value_index, init=None, span=None):
self.__init_handle_by_constructor__(
_ffi_api.Reduce, combiner, src, rdom, condition, value_index, init, span # type: ignore
)
@tvm._ffi.register_object
class FloatImm(ConstExpr):
"""Float constant.
Parameters
----------
dtype : str
The data type
value : float
The constant value.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, dtype, value, span=None):
self.__init_handle_by_constructor__(
tvm.ir._ffi_api.FloatImm, dtype, value, span # type: ignore
)
def __float__(self):
return self.value
@tvm._ffi.register_object
class IntImm(ConstExpr):
"""Int constant.
Parameters
----------
dtype : str
The data type
value : int
The constant value.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, dtype, value, span=None):
self.__init_handle_by_constructor__(
tvm.ir._ffi_api.IntImm, dtype, value, span # type: ignore
)
def __hash__(self):
return self.value
def __int__(self):
return self.value
def __nonzero__(self):
return self.value != 0
def __eq__(self, other):
return _ffi_api._OpEQ(self, other, None) # type: ignore
def __ne__(self, other):
return _ffi_api._OpNE(self, other, None) # type: ignore
def __bool__(self):
return self.__nonzero__()
@tvm._ffi.register_object("tir.StringImm") # type: ignore
class StringImm(ConstExpr):
"""String constant.
Parameters
----------
value : str
The value of the function.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, value, span=None):
self.__init_handle_by_constructor__(_ffi_api.StringImm, value, span) # type: ignore
def __eq__(self, other):
if isinstance(other, ConstExpr):
return self.value == other.value
return self.value == other
def __ne__(self, other):
if isinstance(other, ConstExpr):
return self.value != other.value
return self.value != other
def __hash__(self):
return PrimExpr.__hash__(self)
@tvm._ffi.register_object("tir.Cast")
class Cast(PrimExprWithOp):
"""Cast expression.
Parameters
----------
dtype : str
The data type
value : PrimExpr
The value of the function.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, dtype, value, span=None):
self.__init_handle_by_constructor__(_ffi_api.Cast, dtype, value, span) # type: ignore
@tvm._ffi.register_object("tir.Add")
class Add(BinaryOpExpr):
"""Add node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Add, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Sub")
class Sub(BinaryOpExpr):
"""Sub node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Sub, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Mul")
class Mul(BinaryOpExpr):
"""Mul node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Mul, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Div")
class Div(BinaryOpExpr):
"""Div node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Div, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Mod")
class Mod(BinaryOpExpr):
"""Mod node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Mod, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.FloorDiv")
class FloorDiv(BinaryOpExpr):
"""FloorDiv node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.FloorDiv, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.FloorMod")
class FloorMod(BinaryOpExpr):
"""FloorMod node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.FloorMod, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Min")
class Min(BinaryOpExpr):
"""Min node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Min, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Max")
class Max(BinaryOpExpr):
"""Max node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Max, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.EQ")
class EQ(CmpExpr):
"""EQ node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.EQ, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.NE")
class NE(CmpExpr):
"""NE node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.NE, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.LT")
class LT(CmpExpr):
"""LT node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.LT, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.LE")
class LE(CmpExpr):
"""LE node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.LE, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.GT")
class GT(CmpExpr):
"""GT node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.GT, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.GE")
class GE(CmpExpr):
"""GE node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.GE, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.And")
class And(LogicalExpr):
"""And node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.And, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Or")
class Or(LogicalExpr):
"""Or node.
Parameters
----------
a : PrimExpr
The left hand operand.
b : PrimExpr
The right hand operand.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, b, span=None):
self.__init_handle_by_constructor__(_ffi_api.Or, a, b, span) # type: ignore
@tvm._ffi.register_object("tir.Not")
class Not(LogicalExpr):
"""Not node.
Parameters
----------
a : PrimExpr
The input value
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, a, span=None):
self.__init_handle_by_constructor__(_ffi_api.Not, a, span) # type: ignore
@tvm._ffi.register_object("tir.Select")
class Select(PrimExprWithOp):
"""Select node.
Note
----
Select may compute both true_value and false_value.
Use :py:class:`tvm.tir.if_then_else` instead if you want to
get a conditional expression that only evaluates
the correct branch.
Parameters
----------
condition : PrimExpr
The condition expression.
true_value : PrimExpr
The value to take when condition is true.
false_value : PrimExpr
The value to take when condition is false.
span : Optional[Span]
The location of this itervar in the source code.
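    Examples
    --------
    A minimal sketch computing an absolute value; note that both branches may be
    evaluated, unlike tvm.tir.if_then_else:
    .. code-block:: python
        x = tvm.tir.Var("x", "int32")
        abs_x = tvm.tir.Select(x >= 0, x, 0 - x)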
"""
def __init__(self, condition, true_value, false_value, span=None):
if isinstance(condition, bool):
condition = IntImm("bool", condition)
self.__init_handle_by_constructor__(
_ffi_api.Select, condition, true_value, false_value, span # type: ignore
)
@tvm._ffi.register_object("tir.Load")
class Load(PrimExprWithOp):
"""Load node.
Parameters
----------
dtype : str
The data type.
buffer_var : Var
The buffer variable in the load expression.
index : PrimExpr
The index in the load.
predicate : PrimExpr
The load predicate.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, dtype, buffer_var, index, predicate=None, span=None):
if predicate is None:
predicate = _ffi_api.const_true(dtype, span) # type: ignore
self.__init_handle_by_constructor__(
_ffi_api.Load, dtype, buffer_var, index, predicate, span # type: ignore
)
@tvm._ffi.register_object("tir.BufferLoad")
class BufferLoad(PrimExprWithOp):
"""Buffer load node.
Parameters
----------
buffer : Buffer
The buffer to be loaded.
indices : List[PrimExpr]
The buffer indices.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, buffer, indices, span=None):
self.__init_handle_by_constructor__(
_ffi_api.BufferLoad, buffer, indices, span # type: ignore
)
@tvm._ffi.register_object("tir.ProducerLoad")
class ProducerLoad(PrimExprWithOp):
"""Producer load node.
Parameters
----------
producer : DataProducer
The buffer to be loaded.
indices : List[PrimExpr]
The buffer indices.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, producer, indices, span=None):
self.__init_handle_by_constructor__(
_ffi_api.ProducerLoad, producer, indices, span # type: ignore
)
@tvm._ffi.register_object("tir.Ramp")
class Ramp(PrimExprWithOp):
"""Ramp node.
Parameters
----------
base : PrimExpr
The base expression.
    stride : PrimExpr
The stride of the ramp.
lanes : int
The lanes of the expression.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, base, stride, lanes, span=None):
self.__init_handle_by_constructor__(
_ffi_api.Ramp, base, stride, lanes, span # type: ignore
)
@tvm._ffi.register_object("tir.Broadcast")
class Broadcast(PrimExprWithOp):
"""Broadcast node.
Parameters
----------
value : PrimExpr
The value of the expression.
lanes : int
The lanes of the expression.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, value, lanes, span=None):
self.__init_handle_by_constructor__(_ffi_api.Broadcast, value, lanes, span) # type: ignore
@tvm._ffi.register_object("tir.Shuffle")
class Shuffle(PrimExprWithOp):
"""Shuffle node.
Parameters
----------
vectors : Array of Expr
The vectors
indices : Array of indices
The indices
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, vectors, indices, span=None):
self.__init_handle_by_constructor__(
_ffi_api.Shuffle, vectors, indices, span # type: ignore
)
class CallEffectKind:
"""Possible kinds of Call effects."""
# only expose up to opaque
ExprAnnotation = IntImmEnum(0)
Pure = IntImmEnum(1)
ReadState = IntImmEnum(2)
UpdateState = IntImmEnum(3)
Opaque = UpdateState
@tvm._ffi.register_object("tir.Call")
class Call(PrimExprWithOp):
"""Call node.
Parameters
----------
dtype : str
The return data type
op : Union[RelayExpr, str]
The function to be called, or the name
        of the global tvm.Op
args : list of Expr
The input arguments to the call
span : Optional[Span]
The location of this itervar in the source code.
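    Examples
    --------
    A minimal sketch calling a TIR intrinsic either by Op or by its
    "tir."-prefixed name:
    .. code-block:: python
        x = tvm.tir.Var("x", "float32")
        c1 = tvm.tir.Call("float32", tvm.ir.Op.get("tir.exp"), [x])
        c2 = tvm.tir.Call("float32", "tir.exp", [x])  # string form, resolved via Op.get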
"""
def __init__(self, dtype, op, args, span=None):
if isinstance(op, str):
if not op.startswith("tir."):
raise ValueError(
(
"Cannot handle str op argument %s. This function only handles str "
+ "argument with the tir namespace. If you are "
+ "certain about the intrinsic name, pass in Op.get(name) instead"
)
% op
)
op = Op.get(op)
self.__init_handle_by_constructor__(_ffi_api.Call, dtype, op, args, span) # type: ignore
@tvm._ffi.register_object("tir.Let")
class Let(PrimExprWithOp):
"""Let node.
Parameters
----------
var : Var
The variable in the binding.
value : PrimExpr
        The value to be bound.
body : PrimExpr
The body expression.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, var, value, body, span=None):
self.__init_handle_by_constructor__(_ffi_api.Let, var, value, body, span) # type: ignore
@tvm._ffi.register_object("tir.Any")
class Any(PrimExprWithOp):
"""Any node.
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(self, span=None):
self.__init_handle_by_constructor__(_ffi_api.Any, span) # type: ignore
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Function data types."""
import collections
import inspect
from typing import Callable, List, Mapping, Optional, Union, Tuple
import tvm
import tvm._ffi
import tvm.runtime
from tvm.runtime import Object
from tvm.ir import BaseFunc, Range
from .buffer import Buffer
from .expr import Var, PrimExpr
from . import _ffi_api
from ..runtime.ndarray import NDArray
@tvm._ffi.register_object("tir.PrimFunc")
class PrimFunc(BaseFunc):
"""A function declaration expression.
Parameters
----------
params: List[Union[tvm.tir.Var, tvm.tir.Buffer]]
List of input parameters to the function.
body: tvm.tir.Stmt
The body of the function.
ret_type: tvm.ir.Type
The return type annotation of the function.
buffer_map : Map[tvm.tir.Var, tvm.tir.Buffer]
The buffer binding map.
preflattened_buffer_map : Optional[Map[tvm.tir.Var, tvm.tir.Buffer]]
The buffer binding map, prior to any flattening.
attrs: Optional[tvm.Attrs]
Attributes of the function, can be None
span : Optional[Span]
The location of this itervar in the source code.
"""
def __init__(
self,
params,
body,
ret_type=None,
buffer_map=None,
preflattened_buffer_map=None,
attrs=None,
span=None,
):
param_list = []
buffer_map = {} if buffer_map is None else buffer_map
preflattened_buffer_map = {} if preflattened_buffer_map is None else preflattened_buffer_map
for x in params:
x = tvm.runtime.convert(x) if not isinstance(x, Object) else x
if isinstance(x, Buffer):
var = Var(x.name, dtype="handle")
param_list.append(var)
buffer_map[var] = x
elif isinstance(x, Var):
param_list.append(x)
else:
raise TypeError("params can only contain Var or Buffer")
self.__init_handle_by_constructor__(
_ffi_api.PrimFunc,
param_list,
body,
ret_type,
buffer_map,
preflattened_buffer_map,
attrs,
span,
) # type: ignore
def with_body(self, new_body, span=None):
"""Create a new PrimFunc with the same set signatures but a new body.
Parameters
----------
new_body : Stmt
The new body.
span : Optional[Span]
The location of this itervar in the source code.
Returns
-------
new_func : PrimFunc
The created new function.
"""
return PrimFunc(
self.params,
new_body,
self.ret_type,
self.buffer_map,
self.preflattened_buffer_map,
self.attrs,
span,
)
def specialize(self, param_map: Mapping[Var, Union[PrimExpr, Buffer]]):
"""Specialize parameters of PrimFunc
Parameters
----------
param_map : Mapping[Var, Union[PrimExpr, Buffer]]
The mapping from function params to the instance
Examples
--------
We can define a Meta TIR function with symbolic shape:
.. code-block:: python
@T.prim_func
def mem_copy(a: T.handle, b: T.handle, m: T.int32, n: T.int32) -> None:
A = T.match_buffer(a, (m, n), "float32")
B = T.match_buffer(b, (m, n), "float32")
for i, j in T.grid(m, n):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
Then we can make it specialized with given shapes or buffers.
.. code-block:: python
a, _, m, n = mem_copy.params
func = mem_copy.specialize({a: tir.decl_buffer((16, 16))})
# or
func = mem_copy.specialize({n: 16, m: 16})
The specialized function:
.. code-block:: python
@T.prim_func
def mem_copy_16_16(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
Returns
-------
func : PrimFunc
The new function with parameter specialized
"""
return _ffi_api.Specialize(self, param_map) # type: ignore
def script(self, tir_prefix: str = "T", show_meta: bool = False) -> str:
"""Print IRModule into TVMScript
Parameters
----------
tir_prefix : str
The tir namespace prefix
show_meta : bool
Whether to show meta information
Returns
-------
script : str
The TVM Script of the PrimFunc
"""
return tvm._ffi.get_global_func("script.AsTVMScript")(
self, tir_prefix, show_meta
) # type: ignore
def show(self, style: Optional[str] = None) -> None:
"""
        A sugar for printing highlighted TVM script.
Parameters
----------
style : str, optional
Pygments styles extended by "light" (default) and "dark", by default "light"
"""
from tvm.script.highlight import cprint # pylint: disable=import-outside-toplevel
# Use deferred import to avoid circular import while keeping cprint under tvm/script
cprint(self, style=style)
@tvm._ffi.register_object("tir.TensorIntrin")
class TensorIntrin(Object):
"""A tensor intrinsic.
Parameters
----------
desc : PrimFunc
The function to describe the computation.
impl : PrimFunc
The function of the implementation for the execution.
"""
def __init__(self, desc, impl):
self.__init_handle_by_constructor__(_ffi_api.TensorIntrin, desc, impl)
@staticmethod
def register(name: str, desc: PrimFunc, impl: PrimFunc, override: bool = False):
"""Register a tensor intrinsic with its name.
Parameters
----------
name : str
The name of the TensorIntrin to register.
desc : PrimFunc
The function to describe the computation.
impl : PrimFunc
The function of the implementation for the execution.
override: bool
            Whether to override an existing intrinsic with the same name.
"""
return _ffi_api.TensorIntrinRegister(
name, TensorIntrin(desc, impl), override
) # type: ignore
@staticmethod
def get(name: str, allow_missing: bool = False) -> Optional["TensorIntrin"]:
"""Look up a tensor intrinsic by its name.
Parameters
----------
name : str
The name of the TensorIntrin to look up.
allow_missing : bool
Whether to allow missing tensor intrin. If False, raise an error if the tensor intrin
doesn't exist.
Returns
-------
result : Optional[TensorIntrin]
The TensorIntrin with the specified name, or None if not found.
"""
        return _ffi_api.TensorIntrinGet(name, allow_missing)  # type: ignore
@tvm._ffi.register_object("tir.IndexMap")
class IndexMap(Object):
"""A mapping from multi-dimensional indices to another set of multi-dimensional indices
Parameters
----------
initial_indices : List[Var]
Variables representing the indices prior to remapping.
final_indices : List[PrimExpr]
Expressions defining the indices after remapping.
inverse_index_map : Union[Callable, Optional[IndexMap]]
The optional pre-defined inverse index map.
When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
Otherwise, the inverse index map will be computed on the fly.
It is the user's responsibility to ensure the correctness of the pre-defined inverse
index map.
"""
initial_indices: List[Var]
final_indices: List[PrimExpr]
# Sentinel value used to indicate which groups of pre-flattening axes
    # should be mapped to which post-flattening axes. See
# Stage.transform_layout for more details.
AXIS_SEPARATOR = "axis_separator"
def __init__(self, initial_indices, final_indices, inverse_index_map):
if isinstance(inverse_index_map, Callable):
inverse_index_map = IndexMap.from_func(inverse_index_map)
self.__init_handle_by_constructor__(
_ffi_api.IndexMap, initial_indices, final_indices, inverse_index_map
)
@staticmethod
def from_func(
mapping_function: Callable,
ndim: Optional[int] = None,
inverse_index_map: Union[Callable, Optional["IndexMap"]] = None,
):
"""Create an index map from a function
Parameters
----------
mapping_function : Callable
The function to map from source indices to target indices.
The function should accept `tir.Var` parameters and return
            either a `tir.PrimExpr` or a list of `tir.PrimExpr`.
Returning a `tir.PrimExpr` is equivalent to returning a
list of length 1 containing that `tir.PrimExpr`.
ndim: Optional[int]
The dimensionality of the buffer to which this
transformation should be applied. If mapping_function uses
variadic argument `*args`, `ndim` must be specified. If
mapping_function does not use variadic arguments, ndim is
optional.
inverse_index_map : Union[Callable, Optional[IndexMap]]
The optional pre-defined inverse index map.
When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
Otherwise, the inverse index map will be computed on the fly.
It is the user's responsibility to ensure the correctness of the pre-defined inverse
index map.
Returns
-------
index_map: IndexMap
Returns an IndexMap representing the `mapping_function`.
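        Examples
        --------
        A minimal sketch splitting the first axis by a factor of 4 (the mapped
        values shown in the comments assume this particular mapping):
        .. code-block:: python
            index_map = IndexMap.from_func(lambda i, j: [i // 4, j, i % 4])
            index_map.map_shape([16, 32])   # -> [4, 32, 4]
            index_map.map_indices([5, 7])   # -> [1, 7, 1]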
"""
index_map, axis_separators = IndexMap.from_func_with_separators(
mapping_function, ndim, inverse_index_map
)
assert not axis_separators, (
"The mapping_function provided to IndexMap.from_func "
"may not return IndexMap.AXIS_SEPARATOR. "
"If required, please use IndexMap.from_func_with_separators instead."
)
return index_map
@staticmethod
def from_func_with_separators(
mapping_function: Callable,
ndim: Optional[int] = None,
inverse_index_map: Union[Callable, Optional["IndexMap"]] = None,
):
"""Create an index map from a function
Parameters
----------
mapping_function : Callable
The function to map from source indices to target indices.
The function should accept tir.Var parameters and return
either a `tir.PrimExpr` or a list. Each element of the
returned list should be either a `tir.PrimExpr` or the
object `IndexMap.AXIS_SEPARATOR`. Returning a
`tir.PrimExpr` is equivalent to returning a list of length
1 containing that `tir.PrimExpr`.
ndim: Optional[int]
The dimensionality of the buffer to which this
transformation should be applied. If mapping_function uses
variadic argument `*args`, ndim must be specified. If
mapping_function does not use variadic arguments, ndim is
optional.
inverse_index_map : Union[Callable, Optional[IndexMap]]
The optional pre-defined inverse index map.
When this is defined, IndexMap::Inverse will return the pre-defined inverse index map.
Otherwise, the inverse index map will be computed on the fly.
It is the user's responsibility to ensure the correctness of the pre-defined inverse
index map.
Returns
-------
ret: Tuple[IndexMap, List[int]]
Returns a tuple whose first element is an IndexMap
representing the `mapping_function`, and whose second index
is a list of indices at which `IndexMap.AXIS_SEPARATOR`
occurred.
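        Examples
        --------
        A minimal sketch where the flattened buffer keeps two physical axes,
        separated after the first remapped index:
        .. code-block:: python
            index_map, axis_separators = IndexMap.from_func_with_separators(
                lambda i, j: [i // 4, IndexMap.AXIS_SEPARATOR, j, i % 4]
            )
            # axis_separators == [1]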
"""
params = inspect.signature(mapping_function).parameters
args = []
var_arg_name = None
kwargs = collections.OrderedDict()
default_index_dtype = "int32"
for name, param in params.items():
if param.kind in [
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
]:
args.append(tvm.tir.Var(name, default_index_dtype))
elif param.kind == inspect.Parameter.VAR_POSITIONAL:
var_arg_name = name
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs[name] = tvm.tir.Var(name, default_index_dtype)
else:
raise ValueError("transform_layout mapping may not have *args")
# Now that all the named arguments have been collected,
# everything that remains should go to the *args, if
# specified.
if var_arg_name is not None:
assert ndim is not None, "ndim must be specified when *args is used"
num_var_args = ndim - len(args) - len(kwargs)
for i in range(num_var_args):
args.append(tvm.tir.Var(f"{var_arg_name}_{i}", default_index_dtype))
mapping = mapping_function(*args, **kwargs)
initial_indices = args + list(kwargs.values())
final_indices = []
axis_separators = []
try:
iter(mapping)
is_iterable = True
except TypeError:
is_iterable = False
if is_iterable:
for val in mapping:
if isinstance(val, tvm.ir.PrimExpr):
final_indices.append(val)
elif val is IndexMap.AXIS_SEPARATOR:
axis_separators.append(len(final_indices))
else:
raise TypeError(
"Expected mapping function to return list of "
"either tvm.ir.PrimExpr or IndexMap.AXIS_SEPARATOR. "
f"Instead received {val} of type {type(val)}."
)
else:
final_indices.append(mapping)
return IndexMap(initial_indices, final_indices, inverse_index_map), axis_separators
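    # A minimal usage sketch in comments (not part of the original module); the
    # lambda and the resulting separator position are assumptions for illustration:
    #
    #   index_map, axis_separators = IndexMap.from_func_with_separators(
    #       lambda i, j: [i // 4, IndexMap.AXIS_SEPARATOR, j, i % 4]
    #   )
    #   # axis_separators == [1]: one output axis precedes the separator, so the
    #   # physical layout flattens [i // 4] and [j, i % 4] into separate axes.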
def is_equivalent_to(self, other_map: "IndexMap") -> bool:
"""Return if the index maps are equivalent.
Parameters
----------
other_map: IndexMap
The IndexMap to which the comparison should be made.
Returns
-------
is_equivalent: bool
True if the two mappings represent the same
transformation, otherwise False
"""
if len(self.initial_indices) != len(other_map.initial_indices):
return False
if len(self.final_indices) != len(other_map.final_indices):
return False
analyzer = tvm.arith.Analyzer()
mapped_other_final_indices = other_map.map_indices(self.initial_indices)
for self_index, other_index in zip(self.final_indices, mapped_other_final_indices):
if not analyzer.can_prove_equal(self_index, other_index):
return False
return True
def map_indices(self, indices: List[PrimExpr]) -> List[PrimExpr]:
"""Apply the index map to a set of indices
Parameters
----------
indices : List[PrimExpr]
The indices to be mapped
Returns
-------
result : List[PrimExpr]
The mapped indices
"""
return _ffi_api.IndexMapMapIndices(self, indices)
def map_shape(self, shape: List[PrimExpr]) -> List[PrimExpr]:
"""Apply the index map to a buffer shape
Parameters
----------
shape : List[PrimExpr]
The buffer shape to be mapped
Returns
-------
result : List[PrimExpr]
The mapped shape
"""
return _ffi_api.IndexMapMapShape(self, shape)
def map_ndarray(self, arr_src: NDArray) -> NDArray:
"""Apply thie index map to transform the layout of the input NDArray
Parameters
----------
arr_src : runtime.NDArray
The NDArray to be transformed
Returns
-------
arr_dst : runtime.NDArray
The transformed NDArray
"""
return _ffi_api.IndexMapMapNDArray(self, arr_src)
def inverse(self, shape: List[Union[Range, PrimExpr]]) -> "IndexMap":
"""Return the inverse of the map
Throws an error if the function is not bijective.
Parameters
----------
shape: List[Union[Range,PrimExpr]]
The region over which the inverse should be determined.
Used for validating that the mapping is bijective over
this range.
Returns
-------
inverse : IndexMap
The inverse
"""
shape = [dim if isinstance(dim, Range) else Range(0, dim) for dim in shape]
return _ffi_api.IndexMapInverse(self, shape)
def non_surjective_inverse(
self, shape: List[Union[Range, PrimExpr]]
) -> Tuple["IndexMap", PrimExpr]:
"""Return the inverse of the map
Can be applied to transformations that introduce padding.
Parameters
----------
shape: List[Union[Range,PrimExpr]]
The region over which the inverse should be determined.
Used for determining the predicate.
Returns
-------
result : Tuple[IndexMap, PrimExpr]
The inverse, and a predicate for which the inverse maps to
a valid index in the input range.
Examples
--------
.. code-block:: python
index_map = IndexMap.from_func(lambda i: [i//4, i%4])
inverse_map, predicate = index_map.non_surjective_inverse([14])
            assert inverse_map.is_equivalent_to(IndexMap.from_func(lambda j, k: [4*j + k]))
print(predicate) # Prints "(axis0==3) && (axis2 >= 2)"
"""
shape = [dim if isinstance(dim, Range) else Range(0, dim) for dim in shape]
return _ffi_api.IndexMapNonSurjectiveInverse(self, shape)
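# Illustrative sketch (not part of the original module): round-trips a transpose
# through the IndexMap utilities above; the lambda and the 4x8 shape are assumptions.
def _example_index_map_inverse():  # pragma: no cover
    transpose = IndexMap.from_func(lambda i, j: [j, i])
    mapped_shape = transpose.map_shape([4, 8])  # becomes [8, 4]
    inverse = transpose.inverse([4, 8])         # bijective over this region
    assert inverse.is_equivalent_to(IndexMap.from_func(lambda j, i: [i, j]))
    return mapped_shape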
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic opertors in TVM.
We follow the numpy naming convention for this interface
(e.g., tvm.tir.generic.multitply ~ numpy.multiply).
The default implementation is used by tvm.ExprOp.
"""
# pylint: disable=unused-argument
from . import _ffi_api
# Operator precedence used when overloading.
__op_priority__ = 0
def add(lhs, rhs, span=None):
"""Generic add operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of add operation.
"""
return _ffi_api._OpAdd(lhs, rhs, span) # type: ignore
def subtract(lhs, rhs, span=None):
"""Generic subtract operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of subtract operation.
"""
return _ffi_api._OpSub(lhs, rhs, span) # type: ignore
def multiply(lhs, rhs, span=None):
"""Generic multiply operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of multiply operation.
"""
return _ffi_api._OpMul(lhs, rhs, span) # type: ignore
def divide(lhs, rhs, span=None):
"""Generic divide operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of divide operation.
"""
return _ffi_api._OpDiv(lhs, rhs, span) # type: ignore
def floordiv(lhs, rhs, span=None):
"""Generic floordiv operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of floordiv operation.
"""
return _ffi_api._OpFloorDiv(lhs, rhs, span) # type: ignore
def cast(src, dtype, span=None):
"""Generic cast operator.
Parameters
----------
    src : object
        The source operand.
    dtype : str
        The target data type.
    span : Optional[Span]
        The location of this operator in the source.
Returns
-------
op : tvm.Expr
        The result Expr of cast operation.
"""
return _ffi_api._cast(dtype, src, span) # type: ignore
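# Illustrative sketch (not part of the original module): these generic operators are
# the fallbacks used by expression overloading; the variable names are assumptions.
def _example_generic_ops():  # pragma: no cover
    import tvm  # local import keeps the example self-contained
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    expr = add(multiply(x, y), 1)   # equivalent to x * y + 1 via ExprOp overloading
    return cast(expr, "float32")    # convert the int32 result to float32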
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/ir_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Developer API of IR node builder make function."""
import tvm
from tvm._ffi.base import string_types
from tvm.runtime import ObjectGeneric, convert, const
from tvm.ir import container as _container
from . import stmt as _stmt
from . import expr as _expr
from . import buffer as _buffer
from . import op
class WithScope(object):
"""Auxiliary scope with"""
def __init__(self, enter_value, exit_cb):
self._enter_value = enter_value
self._exit_cb = exit_cb
def __enter__(self):
return self._enter_value
def __exit__(self, ptype, value, trace):
self._exit_cb()
class BufferVar(ObjectGeneric):
"""Buffer variable with content type, makes load store easily.
    Do not create it directly; create it via IRBuilder.
Array access through a BufferVar must use the same number of
indices as the underlying buffer was declared to have.
Examples
--------
    In the following example, x is a BufferVar.
    :code:`x[0] = ...` directly emits a BufferStore to the IRBuilder,
    and :code:`x[10]` translates to a BufferLoad.
.. code-block:: python
        # The following code generates IR for x[0] = x[10] + 1
ib = tvm.tir.ir_builder.create()
x = ib.allocate("float32", 20)
x[0] = x[10] + 1
# Array access using a multidimensional index
y = ib.allocate("float32", (32, 32))
y[2, 31] = 0.
See Also
--------
IRBuilder.pointer
IRBuilder.allocate
"""
def __init__(self, builder, buffer, content_type):
self._builder = builder
self._buffer = buffer
self._content_type = content_type
def asobject(self):
return self._buffer
@property
def dtype(self):
return self._content_type
def _normalize_index(self, index):
try:
index = [*index]
except TypeError:
index = [index]
index = [x.var if isinstance(x, _expr.IterVar) else x for x in index]
# Workaround to support previous behavior of ir_builder
        # indexing by a single index, treating the buffer as if it were
# already flattened.
if len(index) == 1 and len(self._buffer.shape) != 1:
index = tvm.topi.utils.unravel_index(index[0], self._buffer.shape)
return index
def __getitem__(self, index):
index = self._normalize_index(index)
return _expr.BufferLoad(self._buffer, index)
def __setitem__(self, index, value):
index = self._normalize_index(index)
value = convert(value)
value_element = value.dtype.split("x", maxsplit=1)[0]
content_element = self._content_type.split("x", maxsplit=1)[0]
if value_element != content_element:
raise ValueError(
"data type does not match content type %s vs %s" % (value.dtype, self._content_type)
)
self._builder.emit(_stmt.BufferStore(self._buffer, value, index))
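# Illustrative sketch (not part of the original module): shows both indexing styles
# BufferVar accepts; the shape, names, and values are assumptions for the example.
def _example_buffer_var_indexing():  # pragma: no cover
    ib = create()
    y = ib.allocate("float32", (32, 32), name="y", scope="global")
    y[2, 31] = 0.0   # multidimensional index -> BufferStore with two indices
    y[95] = 1.0      # single flat index is unravelled to (2, 31) via tvm.topi.utils
    return ib.get()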
class IRBuilder(object):
"""Auxiliary builder to build IR for testing and dev.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
n = te.var("n")
A = ib.allocate("float32", n, name="A")
with ib.for_range(0, n, name="i") as i:
with ib.if_scope((i % 2) == 0):
A[i] = A[i] + 1
# The result stmt.
stmt = ib.get()
"""
def __init__(self):
self._seq_stack = [[]] # type: ignore
self.nidx = 0
def _pop_seq(self):
"""Pop sequence from stack"""
seq = self._seq_stack.pop()
if not seq or callable(seq[-1]):
seq.append(_stmt.Evaluate(0))
seqwrap = lambda x: x[0] if len(x) == 1 else _stmt.SeqStmt(list(reversed(x)))
ret_seq = [seq[-1]]
for s in reversed(seq[:-1]):
if callable(s):
ret_seq = [s(seqwrap(ret_seq))]
else:
assert isinstance(s, _stmt.Stmt)
ret_seq.append(s)
return seqwrap(ret_seq)
def emit(self, stmt):
"""Emit a statement to the end of current scope.
Parameters
----------
stmt : Stmt or callable.
The statement to be emitted or callable that build stmt given body.
"""
if isinstance(stmt, _expr.Call):
stmt = _stmt.Evaluate(stmt)
assert isinstance(stmt, _stmt.Stmt) or callable(stmt)
self._seq_stack[-1].append(stmt)
def scope_attr(self, node, attr_key, value):
"""Create an AttrStmt at current scope.
Parameters
----------
attr_key : str
The key of the attribute type.
node : Node
            The attribute node to annotate on.
value : Expr
Attribute value.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
ib.scope_attr(x, "storage_scope", "global")
x[i] = x[i - 1] + 1
"""
if isinstance(node, string_types):
node = _expr.StringImm(node)
if isinstance(value, string_types):
value = _expr.StringImm(value)
# thread_extent could be zero for dynamic workloads
if attr_key == "thread_extent":
value = op.max(1, value)
self.emit(lambda x: _stmt.AttrStmt(node, attr_key, value, x))
def for_range(self, begin, end, name="i", dtype=None, kind="serial"):
"""Create a for iteration scope.
Parameters
----------
begin : Expr
The min iteration scope.
end : Expr
The end iteration scope
name : str, optional
            The name of the iteration variable. When the default "i" is used,
            sequential names i, j, k are generated, followed by i_0, i_1, ...
dtype : str, optional
The data type of iteration variable.
kind : str, optional
The special tag on the for loop.
Returns
-------
loop_scope : With.Scope of Var
The for scope, when enters returns loop_var
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
x = ib.pointer("float32")
with ib.for_range(1, 10, name="i") as i:
x[i] = x[i - 1] + 1
"""
if name == "i":
name = chr(ord(name) + self.nidx) if self.nidx < 3 else name + "_" + str(self.nidx - 3)
self.nidx += 1
self._seq_stack.append([])
# auto infer dtype when it's not specified
def get_dtype(expr):
if isinstance(expr, _expr.PrimExpr):
if not expr.dtype.startswith("int"):
raise NotImplementedError(
f"Infer loop_var dtype failed:"
f" unsupported dtype in loop begin or end {expr.dtype}"
)
return expr.dtype
if isinstance(expr, int):
return "int32"
raise NotImplementedError(
f"Infer loop_var dtype failed:"
f" unsupported dtype in loop begin or end {expr.dtype}"
)
if dtype is None:
dtype = "int64" if "int64" in [get_dtype(begin), get_dtype(end)] else "int32"
loop_var = _expr.Var(name, dtype=dtype)
extent = end if begin == 0 else (end - begin)
def _exit_cb():
if kind == "serial":
kind_id = _stmt.ForKind.SERIAL
elif kind == "parallel":
kind_id = _stmt.ForKind.PARALLEL
elif kind == "vectorize":
kind_id = _stmt.ForKind.VECTORIZED
elif kind == "unroll":
kind_id = _stmt.ForKind.UNROLLED
else:
raise ValueError("Unknown kind")
self.emit(_stmt.For(loop_var, begin, extent, kind_id, self._pop_seq()))
return WithScope(loop_var, _exit_cb)
def while_loop(self, condition):
"""Create a while loop scope.
Parameters
----------
condition : Expr
The termination condition.
Returns
-------
loop_scope : With.Scope of Var
The while scope.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
with ib.while_loop(iterations[0] < 10):
iterations[0] += 1
"""
self._seq_stack.append([])
def _exit_cb():
self.emit(_stmt.While(condition, self._pop_seq()))
return WithScope(None, _exit_cb)
def if_scope(self, cond):
"""Create an if scope.
Parameters
----------
cond : Expr
The condition.
Returns
-------
if_scope : WithScope
The result if scope.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
x[i] = x[i - 1] + 1
"""
self._seq_stack.append([])
def _exit_cb():
self.emit(_stmt.IfThenElse(cond, self._pop_seq(), None))
return WithScope(None, _exit_cb)
def else_scope(self):
"""Create an else scope.
This can only be used right after an if scope.
Returns
-------
else_scope : WithScope
The result else scope.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
x[i] = x[i - 1] + 1
with ib.else_scope():
x[i] = x[i - 1] + 2
"""
if not self._seq_stack[-1]:
raise RuntimeError("else_scope can only follow an if_scope")
prev = self._seq_stack[-1][-1]
if not isinstance(prev, _stmt.IfThenElse) or prev.else_case:
raise RuntimeError("else_scope can only follow an if_scope")
self._seq_stack[-1].pop()
self._seq_stack.append([])
def _exit_cb():
self.emit(_stmt.IfThenElse(prev.condition, prev.then_case, self._pop_seq()))
return WithScope(None, _exit_cb)
def new_scope(self):
"""Create new scope,
this is useful to set boundary of attr and allocate.
Returns
-------
new_scope : WithScope
The result new scope.
"""
self._seq_stack.append([])
def _exit_cb():
self.emit(self._pop_seq())
return WithScope(None, _exit_cb)
def let(self, var_name, value):
"""Create a new let stmt binding.
Parameters
----------
var_name : str
The name of the variable
value : PrimExpr
The value to be bound
Returns
-------
var : tvm.tir.Var
            The var that can be used in future emits.
"""
var = _expr.Var(var_name, dtype=value.dtype)
self.emit(lambda x: _stmt.LetStmt(var, value, x))
return var
def allocate(self, dtype, shape, name="buf", axis_separators=None, scope=""):
"""Create a allocate statement.
Parameters
----------
dtype : str
The content data type.
shape : tuple of Expr
The shape of array to be allocated.
name : str, optional
The name of the buffer.
axis_separators : list of int, optional
If passed, a list of separators between groups of axes,
each of which is flattened to an output axis. For flat
memory spaces, should either be None, or an empty list.
scope : str, optional
The scope of the buffer.
Returns
-------
buffer : BufferVar
The buffer var representing the buffer.
"""
if not isinstance(shape, (list, tuple, _container.Array)):
shape = [shape]
buffer = _buffer.decl_buffer(
shape, dtype, name, scope=scope, axis_separators=axis_separators
)
buffer_var = buffer.data
self.emit(lambda x: _stmt.Allocate(buffer_var, dtype, shape, const(1, dtype="uint1"), x))
return BufferVar(self, buffer, dtype)
def pointer(self, content_type, name="ptr", scope=""):
"""Create pointer variable with content type.
Parameters
----------
content_type : str
The content data type.
name : str, optional
The name of the pointer.
scope : str, optional
The scope of the pointer.
Returns
-------
ptr : BufferVar
The buffer var representing the buffer.
"""
buffer = _buffer.decl_buffer(shape=[1], dtype=content_type, name=name, scope=scope)
return BufferVar(self, buffer, content_type)
def buffer_ptr(self, buf):
"""Create pointer variable corresponds to buffer ptr.
Parameters
----------
buf : Buffer
The buffer to be extracted.
Returns
-------
ptr : BufferVar
The buffer var representing the buffer.
"""
return BufferVar(self, buf, buf.dtype)
def likely(self, expr):
"""Add likely tag for expression.
Parameters
----------
expr : Expr
The expression. Usually a condition expression.
Returns
-------
expr : Expr
            The expression with the likely tag.
"""
return _expr.Call(expr.dtype, "tir.likely", [expr])
def get(self):
"""Return the builded IR.
Returns
-------
stmt : Stmt
The result statement.
"""
seq = self._pop_seq()
if self._seq_stack:
raise RuntimeError("cannot call get inside construction scope")
return seq
def create():
"""Create a new IRBuilder
Returns
-------
builder : IRBuilder
The created IRBuilder
"""
return IRBuilder()
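# Illustrative end-to-end sketch (not part of the original module): composes the
# scopes above into a small loop nest; names, extents, and values are assumptions.
def _example_ir_builder():  # pragma: no cover
    ib = create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 10, name="i") as i:
        with ib.if_scope(i > 0):
            A[i] = A[i - 1] + 1.0
        with ib.else_scope():
            A[i] = 0.0
    return ib.get()  # the assembled Stmt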
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, invalid-name
"""Operators used in TIR expression."""
import warnings
from typing import Any, Optional
import tvm._ffi
from tvm.ir import Array, Op, PrimExpr
from tvm.ir.base import Span
from tvm.runtime import const, convert
from . import _ffi_api
from .buffer import Buffer
from .expr import Call, CommReducer, IntImm, PrimExprWithOp, StringImm, Var
def _pack_buffer(buf, span=None):
"""Build intrinsics that packs the buffer."""
shape = Call("handle", "tir.tvm_stack_make_shape", buf.shape, span)
strides = Call("handle", "tir.tvm_stack_make_shape", buf.strides, span) if buf.strides else 0
pack_args = [
buf.data,
shape,
strides,
len(buf.shape),
const(0, dtype=buf.dtype),
buf.elem_offset,
]
return Call("handle", Op.get("tir.tvm_stack_make_array"), pack_args, span)
def call_packed_lowered(*args, span=None):
"""Lowered version of call packed.
The argument to packed function can be Expr or Buffer.
The argument is the corresponding POD type when Expr is presented.
When the argument is Buffer, the corresponding PackedFunc
    will receive a TVMArrayHandle whose content is valid during the callback period.
If the PackedFunc is a python callback, then the corresponding argument is NDArray.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
te.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call("int32", Op.get("tir.tvm_call_packed_lowered"), call_args, span)
def call_cpacked_lowered(*args, span=None):
"""Lowered version of call c-packed.
Same as call_packed, except that the first argument is the function name
(as in call_extern), and the last argument is the resource handle.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
te.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call("int32", Op.get("tir.tvm_call_cpacked_lowered"), call_args, span)
def call_packed(*args, span=None):
"""Build expression by call an external packed function.
The argument to packed function can be Expr or Buffer.
The argument is the corresponding POD type when Expr is presented.
When the argument is Buffer, the corresponding PackedFunc
    will receive a TVMArrayHandle whose content is valid during the callback period.
If the PackedFunc is a python callback, then the corresponding argument is NDArray.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
te.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call("int32", Op.get("tir.tvm_call_packed"), call_args, span)
def call_cpacked(*args, span=None):
"""Build expression by call an external packed function.
Same as call_packed, except that the first argument is the function name
(as in call_extern), and the last argument is the resource handle.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
te.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call("int32", Op.get("tir.tvm_call_cpacked"), call_args, span)
def call_intrin(dtype, func_name, *args, span=None):
"""Build expression by calling an intrinsic function.
Intrinsics can be overloaded with multiple data types via
the intrinsic translation rule.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The intrinsic function name.
args : list
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(dtype, func_name, convert(args), span)
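# Illustrative sketch (not part of the original module): "tir.exp" is a registered
# TIR intrinsic, while the extern name "my_activation" is a hypothetical placeholder.
def _example_calls():  # pragma: no cover
    x = Var("x", "float32")
    y = call_intrin("float32", "tir.exp", x)        # named TIR intrinsic
    z = call_extern("float32", "my_activation", x)  # external C symbol (hypothetical)
    return y + z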
def call_pure_extern(dtype, func_name, *args, span=None):
"""Build expression by calling a pure extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, Op.get("tir.call_pure_extern"), convert((StringImm(func_name),) + args), span
)
def call_extern(dtype, func_name, *args, span=None):
"""Build expression by calling a extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, Op.get("tir.call_extern"), convert((StringImm(func_name),) + args), span=span
)
def call_llvm_intrin(dtype, name, *args, span=None):
"""Build expression by calling a llvm intrinsic function
Parameters
----------
dtype : str
The data type of the result.
name : str
The name of the llvm intrinsic function.
args : list
        Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
# pylint: disable=import-outside-toplevel
from tvm.target import codegen
if isinstance(name, str):
llvm_id = codegen.llvm_lookup_intrinsic_id(name)
elif isinstance(name, IntImm):
llvm_id = name.value
else:
llvm_id = name
if llvm_id == 0:
warnings.warn(f"Unknown llvm intrinsic function {name}, falling back to 0")
return call_intrin(
dtype,
Op.get("tir.call_llvm_intrin"),
tvm.tir.const(llvm_id, "uint32"),
*args,
span=span,
)
def call_llvm_pure_intrin(dtype, name, *args, span=None):
"""Build expression by calling a pure llvm intrinsic function
Parameters
----------
dtype : str
The data type of the result.
name : str
The name of the llvm intrinsic function.
args : list
        Positional arguments.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
# pylint: disable=import-outside-toplevel
from tvm.target import codegen
if isinstance(name, str):
llvm_id = codegen.llvm_lookup_intrinsic_id(name)
elif isinstance(name, IntImm):
llvm_id = name.value
else:
llvm_id = name
if llvm_id == 0:
warnings.warn(f"Unknown llvm intrinsic function {name}, falling back to 0")
return call_intrin(
dtype,
Op.get("tir.call_llvm_pure_intrin"),
tvm.tir.const(llvm_id, "uint32"),
*args,
span=span,
)
def tvm_check_return(expected, return_unexpected, nested_call):
"""Return new on stack dtype[num]
Parameters
----------
expected : int
The expected return code.
return_unexpected : int
The unexpected return code.
nested_call : PrimExpr
The call expression to check return.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("int32", "tir.tvm_check_return", expected, return_unexpected, nested_call)
def tvm_stack_alloca(dtype_str, num):
"""Return new on stack dtype[num]
Parameters
----------
dtype_str : str
The data type of array.
num : int
The size of array.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_stack_alloca", dtype_str, num)
def tvm_stack_make_shape(*args):
"""Allocate a shape tuple on stack, return the handle
Parameters
----------
args : int
The tuple shape.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_stack_make_shape", *args)
def tvm_stack_make_array(data, shape, strides, ndim, arr_dtype, elem_offset):
"""Allocate a NDArray(DLTensor) on stack, return the handle
Parameters
----------
data : Expr
The data of array.
shape : Expr
The shape of array.
strides : Expr
The strides of array.
ndim : Expr
The dimensions of array.
arr_dtype : Expr
The data type of array.
    elem_offset : Expr
The element offset of array.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle", "tir.tvm_stack_make_array", data, shape, strides, ndim, arr_dtype, elem_offset
)
def assume(cond=None):
"""Provide a true statement that can be used for simplifications
Parameters
----------
cond : Expr
The constraint condition.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("bool", "tir.assume", cond)
def undef():
"""Returns an initialized but arbitrary value
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("int32", "tir.undef")
def start_profile_intrinsic(id):
"""Start profile intrinsic.
Parameters
----------
id : int
The intrinsic id.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.start_profile_intrinsic", id)
def end_profile_intrinsic(id):
"""End profile intrinsic.
Parameters
----------
id : int
The intrinsic id.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.end_profile_intrinsic", id)
def tvm_tuple(*value):
"""Create a tuple structure in value field of AttrStmt
Parameters
----------
value : Expr
The value in tuple.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_tuple", *value)
def tvm_struct_get(arr, index, field, dtype):
"""Get struct field value in array
Parameters
----------
dtype : str
        The data type of the result.
arr : StructType*
The array of struct.
index : int
The index of struct.
field : int
The field of struct.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(dtype, "tir.tvm_struct_get", arr, index, field)
def tvm_struct_set(arr, index, field, value):
"""Set value in struct field in array
Parameters
----------
arr : StructType*
The array of struct.
index : int
The index of struct.
field : int
The field of struct.
value : Expr
The value to be set in field.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_struct_set", arr, index, field, value)
def address_of(buffer_load, span=None):
"""Returns the address of an element in the buffer
Parameters
----------
buffer_load: BufferLoad
The buffer load.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.address_of", buffer_load, span=span)
def lookup_param(param_name, span=None):
"""Returns the param by name
Parameters
----------
param_name : str
The name of param.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.lookup_param", param_name, span=span)
def tvm_thread_allreduce(*freduce_args):
"""
Parameters
----------
freduce_args : Expr
The args.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_thread_allreduce", *freduce_args)
def type_annotation(dtype):
"""Create a type annotation expression
Parameters
----------
dtype : Expr
The data type.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(dtype, "tir.type_annotation")
def tvm_access_ptr(ptype, data, offset, extent, rw_mask):
"""Get head access address with memory access pattern info
Parameters
----------
ptype : Expr
The data type of pointer.
data : DType*
The data of pointer.
offset : int
The offset of pointer.
extent : int
The extent of pointer.
rw_mask : int
The read write mask.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("handle", "tir.tvm_access_ptr", ptype, data, offset, extent, rw_mask)
def tvm_throw_last_error():
"""Throw TVMGetLastError()
Returns
-------
ret : PrimExpr
The return expression
"""
return call_intrin("handle", "tir.tvm_throw_last_error")
def tvm_load_matrix_sync(fragment, m, n, k, index, buffer_ptr, stride, layout):
"""TVM intrinsic for tensor core load operators
Parameters
----------
fragment : Var
The wmma fragment.
m : UIntImm
The shape of wmma fragment.
n : UIntImm
The shape of wmma fragment.
k : UIntImm
The shape of wmma fragment.
index : Expr
The fragment index.
buffer_ptr : Expr
The fragment buffer pointer.
stride : Expr
The fragment stride.
layout : Literal["row_major", "column_major"]
The fragment layout.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
fragment,
m,
n,
k,
index,
buffer_ptr,
stride,
layout,
)
def tvm_mma_sync(
fragment_d, index_d, fragment_a, index_a, fragment_b, index_b, fragment_c, index_c
):
"""TVM intrinsic for tensor core mma_sync operators
Parameters
----------
fragment_d : Var
The wmma fragment_d.
index_d : Expr
The fragment_d index.
fragment_a : Var
The wmma fragment_a.
index_a : Expr
The fragment_a index.
fragment_b : Var
The wmma fragment_b.
index_b : Expr
The fragment_b index.
fragment_c : Var
The wmma fragment_c.
index_c : Expr
The fragment_c index.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.tvm_mma_sync",
fragment_d,
index_d,
fragment_a,
index_a,
fragment_b,
index_b,
fragment_c,
index_c,
)
def tvm_bmma_sync(
fragment_d, index_d, fragment_a, index_a, fragment_b, index_b, fragment_c, index_c
):
"""TVM intrinsic for tensor core bmma_sync operators
Parameters
----------
fragment_d : Var
The bwmma fragment_d.
index_d : Expr
The fragment_d index.
fragment_a : Var
The bwmma fragment_a.
index_a : Expr
The fragment_a index.
fragment_b : Var
The bwmma fragment_b.
index_b : Expr
The fragment_b index.
fragment_c : Var
The bwmma fragment_c.
index_c : Expr
The fragment_c index.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.tvm_bmma_sync",
fragment_d,
index_d,
fragment_a,
index_a,
fragment_b,
index_b,
fragment_c,
index_c,
)
def tvm_fill_fragment(fragment, m, n, k, index, value):
"""TVM intrinsic for tensor core fill_fragment operators
Parameters
----------
fragment : Var
The wmma fragment
m : UIntImm
The shape of wmma fragment.
n : UIntImm
The shape of wmma fragment.
k : UIntImm
The shape of wmma fragment.
index : Expr
The fragment index.
value : Expr
The value to be filled in fragment.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.tvm_fill_fragment",
fragment,
m,
n,
k,
index,
value,
)
def tvm_store_matrix_sync(fragment, m, n, k, index, buffer_ptr, stride, layout):
"""TVM intrinsic for tensor core store operators
Parameters
----------
fragment : Var
The wmma fragment.
m : UIntImm
The shape of wmma fragment.
n : UIntImm
The shape of wmma fragment.
k : UIntImm
The shape of wmma fragment.
index : Expr
The fragment index.
buffer_ptr : Expr
The fragment buffer pointer.
stride : Expr
The fragment stride.
layout : Literal["row_major", "column_major"]
The fragment layout.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
fragment,
m,
n,
k,
index,
buffer_ptr,
stride,
layout,
)
def ptx_mma(
dtype,
shape,
A_layout,
B_layout,
A_dtype,
B_dtype,
C_dtype,
multiplicand_a,
a_index,
multiplicand_b,
b_index,
accumulator,
c_index,
saturate,
operator=None,
):
"""TVM intrinsic for ptx tensor core mma instructions
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-for-mma
Parameters
----------
dtype : str
The data type of the result.
shape : str
The shape of mma fragment.
A_layout : Literal["row", "col"]
The layout of multiplicand fragment A.
B_layout : Literal["row", "col"]
The layout of multiplicand fragment B.
A_dtype : str
The data type of multiplicand fragment A.
B_dtype : str
The data type of multiplicand fragment B.
C_dtype : str
The data type of accumulator fragment C.
multiplicand_a : Var
The multiplicand fragment A variable.
a_index : Expr
The index of multiplicand fragment A.
multiplicand_b : Var
The multiplicand fragment B variable.
b_index : Expr
        The index of multiplicand fragment B.
accumulator : Var
The accumulator fragment C variable.
c_index : Expr
The index of accumulator fragment C.
saturate : bool
The optional saturation at the output.
operator : Optional[Literal["xor", "and"]]
The 1-bit operator.
Returns
-------
call : PrimExpr
The call expression.
"""
if operator is None:
return call_intrin(
dtype,
"tir.ptx_mma",
shape,
A_layout,
B_layout,
A_dtype,
B_dtype,
C_dtype,
multiplicand_a,
a_index,
multiplicand_b,
b_index,
accumulator,
c_index,
saturate,
)
return call_intrin(
dtype,
"tir.ptx_mma",
shape,
A_layout,
B_layout,
A_dtype,
B_dtype,
C_dtype,
multiplicand_a,
a_index,
multiplicand_b,
b_index,
accumulator,
c_index,
saturate,
operator,
)
def ptx_mma_sp(
dtype,
shape,
A_layout,
B_layout,
A_dtype,
B_dtype,
C_dtype,
multiplicand_a,
a_index,
multiplicand_b,
b_index,
accumulator,
c_index,
metadata,
meta_index,
sparse_selector,
saturate,
):
"""TVM intrinsic for sparse tensor core ptx instructions
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-for-sparse-mma
Parameters
----------
dtype : str
The data type of the result.
shape : str
The shape of mma fragment.
A_layout : Literal["row", "col"]
The layout of multiplicand fragment A.
B_layout : Literal["row", "col"]
The layout of multiplicand fragment B.
A_dtype : str
The data type of multiplicand fragment A.
B_dtype : str
The data type of multiplicand fragment B.
C_dtype : str
        The data type of accumulator fragment C.
multiplicand_a : Var
The multiplicand fragment A variable.
a_index : Expr
The index of multiplicand fragment A.
multiplicand_b : Var
The multiplicand fragment B variable.
b_index : Expr
The index of multiplicand fragment B.
accumulator : Var
The accumulator fragment C variable.
c_index : Expr
The index of accumulator fragment C.
metadata : Expr
The metadata of operand.
meta_index : Expr
The metadata index of operand.
sparse_selector : Expr
The sparse selector indicating the thread that stores the metadata.
saturate : bool
The optional saturation at the output.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
dtype,
"tir.ptx_mma_sp",
shape,
A_layout,
B_layout,
A_dtype,
B_dtype,
C_dtype,
multiplicand_a,
a_index,
multiplicand_b,
b_index,
accumulator,
c_index,
metadata,
meta_index,
sparse_selector,
saturate,
)
def mma_store(dtype, m, n, dst_ptr, src_ptr, src_offset, dst_stride):
"""TVM intrinsic for storing the result of PTX MMA into a destination pointer
Parameters
----------
dtype : str
The data type of the result.
m : IntImm
The shape of mma fragment.
n : IntImm
The shape of mma fragment.
dst_ptr : Var
The destination pointer variable.
src_ptr : Var
The source pointer variable.
src_offset : Expr
The source offset.
dst_stride : Var
The destination stride.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
dtype,
"tir.mma_store",
m,
n,
dst_ptr,
src_ptr,
src_offset,
dst_stride,
)
def mma_fill(dtype, local_size, local_ptr, offset):
"""TVM intrinsic for zero-initalizing an MMA accumulation registor
Parameters
----------
dtype : str
The data type of the result.
local_size : IntImm
The number of elements.
local_ptr : Var
The destination pointer variable.
offset : Expr
The destination offset.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
dtype,
"tir.mma_fill",
local_size,
local_ptr,
offset,
)
def ptx_ldmatrix(dtype, trans, num, type, local_ptr, local_offset, smem_ptr, smem_offset):
"""TVM intrinsic for ptx load matrix from shared memory
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix
Parameters
----------
dtype : str
The data type of the result.
trans : bool
The matrix is loaded in column-major format.
num : IntImm
The number of matrices.
type : Literal[".b16"]
The data type of the matrices.
local_ptr : Var
The local pointer variable.
local_offset : Expr
The offset of local pointer.
smem_ptr : Var
The shared memory pointer variable.
smem_offset : Expr
        The offset of the shared memory pointer.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
dtype,
"tir.ptx_ldmatrix",
trans,
num,
type,
local_ptr,
local_offset,
smem_ptr,
smem_offset,
)
def ptx_cp_async(dtype, shared_ptr, shared_offset, global_ptr, global_offset, bytes):
"""TVM intrinsic for ptx async copy from global to shared memory
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async
Parameters
----------
dtype : str
The data type of the result.
shared_ptr : Var
The shared memory pointer variable.
shared_offset : Expr
The offset of shared memory pointer.
global_ptr : Var
The global memory pointer variable.
global_offset : Expr
The offset of global memory pointer.
bytes : int
The data size to copy.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
dtype, "tir.ptx_cp_async", shared_ptr, shared_offset, global_ptr, global_offset, bytes
)
def ptx_commit_group():
"""TVM intrinsic for ptx async copy commit
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-commit-group
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("", "tir.ptx_commit_group")
def ptx_wait_group(num):
"""TVM intrinsic for ptx async copy wait
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-wait-group
Parameters
----------
num : int
The number of the most recent uncommitted pending cp.async groups to wait.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("", "tir.ptx_wait_group", num)
def vectorlow(dtype, vec):
"""Get the low level half of the vector
Parameters
----------
dtype : str
The data type of the result.
vec : list
The input vector.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(dtype, "tir.vectorlow", vec)
def vectorhigh(dtype, vec):
"""Get the high level half of the vector
Parameters
----------
dtype : str
The data type of the result.
vec : list
The input vector.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(dtype, "tir.vectorhigh", vec)
def vectorcombine(dtype, vec1, vec2):
"""Concat two vectors
Parameters
----------
vec1 : list
The input vector.
vec2 : list
The input vector.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(dtype, "tir.vectorcombine", vec1, vec2)
def ret(val):
"""Create a tir return expression
Parameters
----------
val : Expr
The returned tir expression, whose data type is int, float or void pointer.
Returns
-------
ret : PrimExpr
The return expression
"""
return call_intrin(val.dtype, "tir.ret", val)
def any(*args, span=None):
"""Create a new experssion of the union of all conditions in the arguments
Parameters
----------
args : list
List of symbolic boolean expressions
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
expr: Expr
Expression
"""
if not args:
raise ValueError("Any must take at least 1 argument")
if len(args) == 1:
return args[0]
val = _ffi_api._OpOr(args[0], args[1], span) # type: ignore
for i in range(2, len(args)):
val = _ffi_api._OpOr(val, args[i], span) # type: ignore
return val
def all(*args, span=None):
"""Create a new expression of the intersection of all conditions in the
arguments
Parameters
----------
args : list
List of symbolic boolean expressions
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
expr: Expr
Expression
"""
if not args:
raise ValueError("Any must take at least 1 argument")
if len(args) == 1:
return args[0]
val = _ffi_api._OpAnd(args[0], args[1], span) # type: ignore
for i in range(2, len(args)):
val = _ffi_api._OpAnd(val, args[i], span) # type: ignore
return val
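# Illustrative sketch (not part of the original module): combines boolean conditions
# with the helpers above; the variable names and bounds are assumptions.
def _example_any_all():  # pragma: no cover
    i = Var("i", "int32")
    j = Var("j", "int32")
    in_bounds = all(i >= 0, i < 16, j >= 0, j < 16)
    on_border = any(i == 0, i == 15, j == 0, j == 15)
    return in_bounds, on_border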
@tvm._ffi.register_func("tvm.default_trace_action")
def _tvm_default_trace_action(*args):
print(list(args))
def trace(args, trace_action="tvm.default_trace_action"):
"""Trace tensor data at the runtime.
The trace function allows to trace specific tensor at the
runtime. The tracing value should come as last argument.
The trace action should be specified, by default
tvm.default_trace_action is used.
Parameters
----------
args : list of Expr or Buffers.
Positional arguments.
trace_action : str.
The name of the trace action.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
tvm.tir.call_packed : Creates packed function.
"""
if not isinstance(args, list):
raise Exception("tvm.tir.trace consumes the args as list type")
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
call_args.insert(0, trace_action)
return tvm.tir.Call(args[-1].dtype, Op.get("tir.tvm_call_trace_packed"), call_args)
def min_value(dtype, span=None):
"""minimum value of dtype
Parameters
----------
dtype : str
The data type.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
value : tvm.Expr
The minimum value of dtype.
"""
return _ffi_api.min_value(dtype, span) # type: ignore
def max_value(dtype: str, span: Optional[Span] = None) -> Any:
"""maximum value of dtype
Parameters
----------
dtype : str
The data type.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
value : tvm.Expr
The maximum value of dtype.
"""
return _ffi_api.max_value(dtype, span) # type: ignore
def infinity(dtype: str, span: Optional[Span] = None) -> Any:
"""infinity value of dtype
Parameters
----------
dtype : str
The data type.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
value : tvm.Expr
The infinity value of dtype.
"""
return _ffi_api.infinity(dtype, span) # type: ignore
def reinterpret(dtype, value) -> Any:
"""infinity value of dtype
Parameters
----------
dtype : str
The data type.
value : PrimExpr
The input value.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
value : tvm.Expr
The reinterpret cast value of dtype.
"""
return call_intrin(dtype, "tir.reinterpret", value)
def exp(x):
"""Take exponential of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.exp", x)
def exp2(x):
"""Calculate 2**x
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.exp2", x)
def exp10(x):
"""Calculate 10**x
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.exp10", x)
def erf(x):
"""Take gauss error function of the input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.erf", x)
def tanh(x):
"""Take hyperbolic tanh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.tanh", x)
def sigmoid(x):
"""Quick function to get sigmoid
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.sigmoid", x)
def log(x):
"""Take log of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.log", x)
def log2(x):
"""Take log2 of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.log2", x)
def log10(x):
"""Take log10 of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.log10", x)
def log1p(x):
"""Take log(x + 1) with respect to input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.log1p", x)
def tan(x):
"""Take tan of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.tan", x)
def cos(x):
"""Take cos of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.cos", x)
def cosh(x):
"""Take cosh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.cosh", x)
def acos(x):
"""Take acos of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.acos", x)
def acosh(x):
"""Take acos of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.acosh", x)
def sin(x):
"""Take sin of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.sin", x)
def sinh(x):
"""Take sinh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.sinh", x)
def asin(x):
"""Take asin of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.asin", x)
def asinh(x):
"""Take asinh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.asinh", x)
def atan(x):
"""Take atan of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.atan", x)
def atanh(x):
"""Take atanh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.atanh", x)
def atan2(x1, x2):
"""Take arctan2(x1, x2).
Parameters
----------
x1 : PrimExpr
Input argument.
x2 : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x1.dtype, "tir.atan2", x1, x2)
def sqrt(x):
"""Take square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.sqrt", x)
def rsqrt(x):
"""Take reciprocal of square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.rsqrt", x)
def clz(x):
"""Count leading zero bits of an integer x.
Parameters
----------
x : PrimExpr
Input 32 or 64 bit integer.
The result is undefined if the input is 0.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin("int32", "tir.clz", x)
def floor(x: PrimExprWithOp, span=None):
"""Take floor of float input x.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.floor(x, span) # type: ignore
def ceil(x, span=None):
"""Take ceil of float input x.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.ceil(x, span) # type: ignore
def trunc(x, span=None):
"""Get truncated value of the input.
The truncated value of the scalar x is the
nearest integer i which is closer to zero than x is.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.trunc(x, span) # type: ignore
def abs(x, span=None):
"""Get absolute value of the input element-wise.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.abs(x, span) # type: ignore
def round(x, span=None):
"""Round elements of the array to the nearest integer.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.round(x, span) # type: ignore
def nearbyint(x, span=None):
"""Round elements of the array to the nearest integer.
    This intrinsic uses llvm.nearbyint instead of llvm.round,
    which is faster but may produce results different from te.round.
    Notably, nearbyint rounds according to the current rounding mode,
    whereas te.round (llvm.round) ignores it.
For differences between the two see:
https://en.cppreference.com/w/cpp/numeric/math/round
https://en.cppreference.com/w/cpp/numeric/math/nearbyint
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.nearbyint(x, span) # type: ignore
def nextafter(x1, x2):
"""Return the next floating-point value after x1 towards x2.
Parameters
----------
x1 : PrimExpr
Input argument.
x2 : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x1.dtype, "tir.nextafter", x1, x2) # type: ignore
def hypot(x1, x2):
"""Equivalent to sqrt(x1**2 + x2**2), element-wise.
Parameters
----------
x1 : PrimExpr
Input argument.
x2 : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x1.dtype, "tir.hypot", x1, x2) # type: ignore
def copysign(x1, x2):
"""Change the sign of x1 to that of x2, element-wise.
Parameters
----------
x1 : PrimExpr
Input argument.
x2 : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x1.dtype, "tir.copysign", x1, x2) # type: ignore
def ldexp(x1, x2):
"""Returns x1 * (2 ** x2).
Parameters
----------
x1 : PrimExpr
Input argument.
x2 : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x1.dtype, "tir.ldexp", x1, x2) # type: ignore
def likely(cond, span=None):
"""Mark condition as likely.
Parameters
----------
cond : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The marked expression.
"""
return _ffi_api.likely(cond, span) # type: ignore
def isnan(x, span=None):
"""Check if input value is Nan.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.isnan(x, span) # type: ignore
def isnullptr(x, span=None):
"""Check if input value is nullptr.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin("bool", "tir.isnullptr", x, span=span) # type: ignore
def isfinite(x, span=None):
"""Check if input value is finite.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.isfinite(x, span) # type: ignore
def isinf(x, span=None):
"""Check if input value is infinite.
Parameters
----------
x : PrimExpr
Input argument.
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.isinf(x, span) # type: ignore
def power(x, y, span=None):
"""x power y
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
The exponent
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
z : PrimExpr
The result.
"""
return _ffi_api._OpPow(convert(x), convert(y), span) # type: ignore
def popcount(x):
"""Count the number of set bits in input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.popcount", x)
def q_multiply_shift(x, y, q, s):
"""Execute a multiplication between two Q-numbers x and y
followed by a right shift s. The mathematical expression is:
out = round(x*y*2^-s)
More about Q-numbers here: https://en.wikipedia.org/wiki/Q_(number_format)
    The rounding rule is to the nearest value, rounding half up
    (i.e., round(x.1) = x and round(x.5) = x + 1).
Parameters
----------
x : PrimExpr
First Q-number
y : PrimExpr
Second Q-number
q : PrimExpr
Number of fractional bits in x and y. Needs to be > 0
s : PrimExpr
Integer shift
Returns
-------
y : PrimExpr
The result.
"""
return call_intrin("int32", "tir.q_multiply_shift", x, y, q, s)
def q_multiply_shift_per_axis(
x: PrimExpr,
y: PrimExpr,
ls: PrimExpr,
rs: PrimExpr,
q: IntImm,
is_lshift_required: IntImm,
is_rshift_required: IntImm,
):
"""Execute a multiplication between two Q-numbers x and y
Parameters
----------
x : PrimExpr
First Q-number.
y : PrimExpr
Second Q-number.
ls : PrimExpr
Integer left shift.
rs : PrimExpr
Integer right shift.
q : IntImm
Number of fractional bits in x and y. Needs to be > 0.
is_lshift_required : IntImm
Whether we need to do left shift or not.
is_rshift_required : IntImm
Whether we need to do right shift or not.
Returns
-------
z : PrimExpr
The result.
"""
return call_intrin(
"int32",
"tir.q_multiply_shift_per_axis",
x,
y,
ls,
rs,
q,
is_lshift_required,
is_rshift_required,
)
def shift_left(x, y, span=None):
"""Return the result of x left shifted by y bits.
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
Input argument.
Returns
-------
z : PrimExpr
The result.
"""
return _ffi_api.left_shift(x, y, span)
def shift_right(x, y, span=None):
"""Return the result of x right shifted by y bits.
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
Input argument.
Returns
-------
z : PrimExpr
The result.
"""
return _ffi_api.right_shift(x, y, span)
def fmod(x, y):
"""Return the remainder of x divided by y with the same sign as x.
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
Input argument.
Returns
-------
z : PrimExpr
The result.
"""
return call_intrin(x.dtype, "tir.fmod", x, y)
def if_then_else(cond, t, f, span=None):
"""Conditional selection expression.
Parameters
----------
cond : PrimExpr
The condition
t : PrimExpr
The result expression if cond is true.
f : PrimExpr
The result expression if cond is false.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
result : Node
The result of conditional expression.
Note
----
Unlike Select, if_then_else will not execute
the branch that does not satisfy the condition.
You can use it to guard against out of bound access.
Unlike Select, if_then_else cannot be vectorized
if some lanes in the vector have different conditions.
"""
return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f), span) # type: ignore
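# Usage sketch (illustrative; `i`, `limit` and `x` are hypothetical expressions):
# guard a value so the out-of-bounds branch is never evaluated when the
# condition is false.
#   i = tvm.tir.Var("i", "int32")
#   limit = tvm.tir.const(128, "int32")
#   x = tvm.tir.Var("x", "float32")
#   guarded = if_then_else(i < limit, x, tvm.tir.const(0.0, "float32"))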
def div(a, b, span=None):
"""Compute a / b as in C/C++ semantics.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
Note
----
When operands are integers, returns truncdiv(a, b, span).
"""
return _ffi_api._OpDiv(a, b, span) # type: ignore
def indexdiv(a, b, span=None):
"""Compute floor(a / b) where a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexDiv(a, b, span) # type: ignore
def indexmod(a, b, span=None):
"""Compute the remainder of indexdiv. a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexMod(a, b, span) # type: ignore
def truncdiv(a, b, span=None):
"""Compute the truncdiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the default integer division behavior in C.
"""
return _ffi_api._OpTruncDiv(a, b, span) # type: ignore
def truncmod(a, b, span=None):
"""Compute the truncmod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the remainder of the default (truncating) integer division in C.
"""
return _ffi_api._OpTruncMod(a, b, span) # type: ignore
def floordiv(a, b, span=None):
"""Compute the floordiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorDiv(a, b, span) # type: ignore
def floormod(a, b, span=None):
"""Compute the floormod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
span : Optional[Span]
The location of this operator in the source.
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorMod(a, b, span) # type: ignore
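# Worked example of the two division conventions above (values are what the
# expressions fold to; illustrative only):
#   truncdiv(-7, 4) -> -1    truncmod(-7, 4) -> -3    # truncate toward zero
#   floordiv(-7, 4) -> -2    floormod(-7, 4) ->  1    # round toward -infinity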
def ceildiv(lhs, rhs, span=None):
"""Generic ceildiv operator.
Parameters
----------
lhs : object
The left operand.
rhs : object
The right operand.
span : Optional[Span]
The location of this operator in the source.
Returns
-------
op : tvm.Expr
The result Expr of the ceildiv operation.
"""
return _ffi_api._OpCeilDiv(lhs, rhs, span) # type: ignore
def comm_reducer(fcombine, fidentity, name="reduce"):
"""Create a commutative reducer for reduction.
Parameters
----------
fcombine : function(Expr -> Expr -> Expr)
A binary function which takes two Exprs as input and returns an Expr.
fidentity : function(str -> Expr)
A function which takes a type string as input to return a const Expr.
Returns
-------
reducer : function
A function which creates a reduce expression over axis.
There are two ways to use it:
1. accept (expr, axis, where) to produce a Reduce Expr over the
specified axis;
2. simply use it with multiple Exprs.
Example
-------
.. code-block:: python
n = te.var("n")
m = te.var("m")
mysum = te.comm_reducer(lambda x, y: x+y,
lambda t: tvm.tir.const(0, dtype=t), name="mysum")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), name="k")
B = te.compute((n,), lambda i: mysum(A[i, k], axis=k), name="B")
"""
def _reduce_directly(*args):
num = len(args)
# handle the case where `where` is None
if num == 3 and args[2] is None:
num = 2
res = args[0]
for i in range(num - 1):
res = fcombine(res, args[i + 1])
return res
def _make_reduce(expr, axis, where=None, init=None):
code = fcombine.__code__
assert fcombine.__code__.co_argcount == 2
expr = convert(expr)
if init is not None:
init = convert(init)
if isinstance(expr, Array):
size = len(expr)
larr = []
rarr = []
dtypes = []
for i in range(size):
dtype = expr[i].dtype
dtypes.append(dtype)
lname = code.co_varnames[0] + "_" + str(i)
larr.append(Var(lname, dtype))
rname = code.co_varnames[1] + "_" + str(i)
rarr.append(Var(rname, dtype))
if init is not None:
init = convert(init)
assert isinstance(init, Array)
assert len(init) == size
for init_i in range(size):
init_i = convert(init[init_i])
assert isinstance(
init_i, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm)
)
else:
init = convert([])
lhs = convert(larr)
rhs = convert(rarr)
result = fcombine(lhs, rhs)
id_elem = fidentity(*dtypes)
else:
assert isinstance(expr, tvm.ir.PrimExpr)
size = 1
dtype = expr.dtype
lvar = Var(code.co_varnames[0], dtype)
rvar = Var(code.co_varnames[1], dtype)
result = [fcombine(lvar, rvar)]
id_elem = [fidentity(dtype)]
lhs = convert([lvar])
rhs = convert([rvar])
expr = convert([expr])
if init is not None:
assert isinstance(init, (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm))
init = convert([init])
result = convert(result)
id_elem = convert(id_elem)
combiner = CommReducer(lhs, rhs, result, id_elem)
axis = convert(axis if isinstance(axis, (list, tuple)) else [axis])
if where is None:
where = convert(True)
if init is None:
outputs = tuple(
tvm.tir.Reduce(combiner, expr, axis, where, i, convert([])) for i in range(size)
)
else:
outputs = tuple(
tvm.tir.Reduce(combiner, expr, axis, where, i, init) for i in range(size)
)
return outputs[0] if size == 1 else outputs
# pylint: disable=keyword-arg-before-vararg
def reducer(expr, axis, where=None, init=None, *args):
if isinstance(axis, (tvm.tir.IterVar, list, tuple)):
assert not args
return _make_reduce(expr, axis, where, init)
if where is None:
assert not args
return _reduce_directly(expr, axis)
return _reduce_directly(expr, axis, where, *args)
doc_str = """Create a {0} expression over axis.
Parameters
----------
expr : PrimExpr
The source expression.
axis : IterVar
The reduction IterVar axis
where : optional, Expr
Filtering predicate of the reduction.
Returns
-------
value : PrimExpr
The result value.
Example
-------
.. code-block:: python
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
k = te.reduce_axis((0, n), name="k")
# there are two ways to use this {0} reducer:
# mode 1, accept (expr, axis, where) to produce a Reduce Expr
# tvm.{0} represents tvm.te.{0} or tvm.tir.{0}.
B = te.compute((m,), lambda i: tvm.{0}(A[i, k], axis=k), name="B")
# mode 2, simply use it with multiple Exprs:
{0}_res = tvm.{0}(m, n)
"""
reducer.__doc__ = doc_str.format(name)
return reducer
def TVMBackendAllocWorkspace(device_type, device_id, nbytes, dtype_code_hint, dtype_bits_hint):
"""Backend function to allocate temporal workspace
Parameters
----------
device_type : int
The device type on which the space will be allocated.
device_id : int
The id of the device on which the space will be allocated.
nbytes : int
The size of the space requested.
dtype_code_hint : int
The type code of the array elements. Only used in certain backends such as OpenGL.
dtype_bits_hint : int
The type bits of the array elements. Only used in certain backends such as OpenGL.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin(
"handle",
"tir.TVMBackendAllocWorkspace",
device_type,
device_id,
nbytes,
dtype_code_hint,
dtype_bits_hint,
)
def TVMBackendFreeWorkspace(device_type, device_id, ptr):
"""Backend function to free temporal workspace.
Parameters
----------
device_type : int
The device type on which the space was allocated.
device_id : int
The id of the device on which the space was allocated.
ptr : Var
The pointer to the allocated workspace.
Returns
-------
call : PrimExpr
The call expression.
"""
return call_intrin("int32", "tir.TVMBackendFreeWorkspace", device_type, device_id, ptr)
# pylint: disable=unnecessary-lambda
sum = comm_reducer(lambda x, y: x + y, lambda t: const(0, dtype=t), name="sum")
min = comm_reducer(lambda x, y: _ffi_api._OpMin(x, y, None), max_value, name="min") # type: ignore
max = comm_reducer(lambda x, y: _ffi_api._OpMax(x, y, None), min_value, name="max") # type: ignore
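# Usage sketch for the reducers above (illustrative): they support both calling
# modes described in the generated docstring. Note `min`/`max`/`sum` here are
# this module's reducers, not Python's builtins.
#   a = tvm.tir.Var("a", "int32")
#   b = tvm.tir.Var("b", "int32")
#   smaller = min(a, b)              # mode 2: fold the given expressions directly
#   # mode 1 (reduction over an axis) is typically used through tvm.te, e.g.
#   #   k = te.reduce_axis((0, 16), "k")
#   #   B = te.compute((n,), lambda i: sum(A[i, k], axis=k), name="B")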
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Namespace for the TensorIR schedule API."""
from .block_scope import BlockScope, Dependency, DepKind, StmtSRef
from .instruction import Instruction, InstructionKind
from .schedule import BlockRV, ExprRV, LoopRV, Schedule, ScheduleError
from .state import ScheduleDebugMask, ScheduleState
from .trace import Trace
from . import analysis
from . import transform
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.schedule"""
import tvm._ffi
tvm._ffi._init_api("tir.schedule", __name__) # pylint: disable=protected-access
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/_type_checker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type checking functionality"""
import collections
import collections.abc
import functools
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
import typing
def _is_none_type(type_: Any) -> bool:
return type_ is None or type_ is type(None)
if hasattr(typing, "_GenericAlias"):
# For python versions 3.7 onward, check the __origin__ attribute.
class _Subtype:
@staticmethod
def _origin(type_: Any) -> Any:
if hasattr(typing, "_SpecialGenericAlias"):
if isinstance(type_, typing._SpecialGenericAlias): # type: ignore # pylint: disable=protected-access
return type_.__origin__
if isinstance(type_, typing._GenericAlias): # type: ignore # pylint: disable=protected-access
return type_.__origin__
return None
@staticmethod
def list_(type_: Any) -> Any:
if _Subtype._origin(type_) is list:
if hasattr(typing, "get_args"):
(subtype,) = typing.get_args(type_) # type: ignore
else:
(subtype,) = type_.__args__
return [subtype]
return None
@staticmethod
def dict_(type_: Any) -> Any:
if _Subtype._origin(type_) is dict:
if hasattr(typing, "get_args"):
(ktype, vtype) = typing.get_args(type_) # type: ignore
else:
(ktype, vtype) = type_.__args__
return [ktype, vtype]
return None
@staticmethod
def tuple_(type_: Any) -> Optional[List[type]]:
if _Subtype._origin(type_) is tuple:
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
return subtypes
return None
@staticmethod
def optional( # pylint: disable=missing-function-docstring
type_: Any,
) -> Optional[List[type]]:
if _Subtype._origin(type_) is Union:
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
if len(subtypes) == 2 and _is_none_type(subtypes[1]):
return [subtypes[0]]
return None
@staticmethod
def union(type_: Any) -> Optional[List[type]]: # pylint: disable=missing-function-docstring
if _Subtype._origin(type_) is Union:
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
return list(subtypes)
return None
@staticmethod
def callable(type_: Any) -> Optional[List[type]]:
if _Subtype._origin(type_) is collections.abc.Callable:
if hasattr(typing, "get_args"):
subtypes = typing.get_args(type_) # type: ignore
else:
subtypes = type_.__args__
return subtypes
return None
elif hasattr(typing, "_Union"):
# For python 3.6 and below, check the __name__ attribute, or CallableMeta.
class _Subtype: # type: ignore
@staticmethod
def list_(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
if type_.__name__ == "List":
(subtype,) = type_.__args__ # type: ignore # pylint: disable=no-member
return [subtype]
return None
@staticmethod
def dict_(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
if type_.__name__ == "Dict":
(ktype, vtype) = type_.__args__ # type: ignore # pylint: disable=no-member
return [ktype, vtype]
return None
@staticmethod
def tuple_(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.GenericMeta): # type: ignore # pylint: disable=no-member
if type_.__name__ == "Tuple":
subtypes = type_.__args__ # type: ignore # pylint: disable=no-member
return subtypes
return None
@staticmethod
def optional(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
subtypes = type_.__args__
if len(subtypes) == 2 and _is_none_type(subtypes[1]):
return [subtypes[0]]
return None
@staticmethod
def union(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing._Union): # type: ignore # pylint: disable=no-member,protected-access
subtypes = type_.__args__
if len(subtypes) != 2 or not _is_none_type(subtypes[1]):
return list(subtypes)
return None
@staticmethod
def callable(type_: Any) -> Optional[List[type]]:
if isinstance(type_, typing.CallableMeta): # type: ignore # pylint: disable=no-member,protected-access
subtypes = type_.__args__
return subtypes
return None
def _dispatcher(type_: Any) -> Tuple[str, List[type]]:
if _is_none_type(type_):
return "none", []
subtype = _Subtype.list_(type_)
if subtype is not None:
return "list", subtype
subtype = _Subtype.dict_(type_)
if subtype is not None:
return "dict", subtype
subtype = _Subtype.tuple_(type_)
if subtype is not None:
return "tuple", subtype
subtype = _Subtype.optional(type_)
if subtype is not None:
return "optional", subtype
subtype = _Subtype.union(type_)
if subtype is not None:
return "union", subtype
subtype = _Subtype.callable(type_)
if subtype is not None:
return "callable", subtype
return "atomic", [type_]
def callable_str(*subtypes):
if subtypes:
*arg_types, return_type = subtypes
arg_str = ", ".join(_type2str(arg_type) for arg_type in arg_types)
return_type_str = _type2str(return_type)
return f"Callable[[{arg_str}], {return_type_str}]"
else:
return "Callable"
_TYPE2STR: Dict[Any, Callable] = {
"none": lambda: "None",
"atomic": lambda t: str(t.__name__),
"callable": callable_str,
"list": lambda t: f"List[{_type2str(t)}]",
"dict": lambda k, v: f"Dict[{_type2str(k)}, {_type2str(v)}]",
"tuple": lambda *t: f"Tuple[{', '.join([_type2str(x) for x in t])}]",
"optional": lambda t: f"Optional[{_type2str(t)}]",
"union": lambda *t: f"Union[{', '.join([_type2str(x) for x in t])}]",
}
def _type2str(type_: Any) -> str:
key, subtypes = _dispatcher(type_)
return _TYPE2STR[key](*subtypes)
def _val2type(value: Any):
if isinstance(value, list):
types = set(_val2type(x) for x in value)
if len(types) == 1:
return List[types.pop()] # type: ignore
return List[Union[tuple(types)]] # type: ignore
if isinstance(value, tuple):
types = tuple(_val2type(x) for x in value) # type: ignore
return Tuple[types]
return type(value)
def _type_check_err(x: Any, name: str, expected: Any) -> str:
return (
f'"{name}" has wrong type. '
f'Expected "{_type2str(expected)}", '
f'but gets: "{_type2str(_val2type(x))}"'
)
def _type_check_vtable() -> Dict[str, Callable]:
def _type_check_none(v: Any, name: str) -> Optional[str]:
return None if v is None else _type_check_err(v, name, None)
def _type_check_atomic(v: Any, name: str, type_: Any) -> Optional[str]:
return None if isinstance(v, type_) else _type_check_err(v, name, type_)
def _type_check_callable(v: Any, name: str, *_subtypes: Any) -> Optional[str]:
# Current implementation only validates that the argument is
# callable, and doesn't validate the arguments accepted by the
# callable, if any.
return None if callable(v) else _type_check_err(v, name, Callable)
def _type_check_list(v: List[Any], name: str, type_: Any) -> Optional[str]:
if not isinstance(v, (list, tuple)):
return _type_check_err(v, name, list)
for i, x in enumerate(v):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_dict(dict_obj: Dict[Any, Any], name: str, *types: Any) -> Optional[str]:
ktype_, vtype_ = types
if not isinstance(dict_obj, dict):
return _type_check_err(dict_obj, name, dict)
for k, v in dict_obj.items():
error_msg = _type_check(k, f"{name}[{k}]", ktype_)
if error_msg is not None:
return error_msg
error_msg = _type_check(v, f"{name}[{k}]", vtype_)
if error_msg is not None:
return error_msg
return None
def _type_check_tuple(v: Any, name: str, *types: Any) -> Optional[str]:
if not isinstance(v, tuple):
return _type_check_err(v, name, Tuple[types])
if len(types) != len(v):
return _type_check_err(v, name, Tuple[types])
for i, (x, type_) in enumerate(zip(v, types)):
error_msg = _type_check(x, f"{name}[{i}]", type_)
if error_msg is not None:
return error_msg
return None
def _type_check_optional(v: Any, name: str, type_: Any) -> Optional[str]:
return None if v is None else _type_check(v, name, type_)
def _type_check_union(v: Any, name: str, *types: Any) -> Optional[str]:
for type_ in types:
error_msg = _type_check(v, name, type_)
if error_msg is None:
return None
return _type_check_err(v, name, Union[types])
return {
"none": _type_check_none,
"atomic": _type_check_atomic,
"callable": _type_check_callable,
"list": _type_check_list,
"dict": _type_check_dict,
"tuple": _type_check_tuple,
"optional": _type_check_optional,
"union": _type_check_union,
}
_TYPE_CHECK: Dict[Any, Callable] = _type_check_vtable()
def _type_check(v: Any, name: str, type_: Any) -> Optional[str]:
key, subtypes = _dispatcher(type_)
return _TYPE_CHECK[key](v, name, *subtypes)
FType = TypeVar("FType", bound=Callable[..., Any])
def type_checked(func: FType) -> FType:
"""Type check the input arguments of a function."""
sig = inspect.signature(func)
@functools.wraps(func)
def wrap(*args, **kwargs):
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
for param in sig.parameters.values():
if param.annotation != inspect.Signature.empty:
error_msg = _type_check(
bound_args.arguments[param.name],
param.name,
param.annotation,
)
if error_msg is not None:
error_msg = f'In "{func.__qualname__}", {error_msg}'
raise TypeError(error_msg)
return func(*args, **kwargs)
return wrap # type: ignore
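# Usage sketch (illustrative): decorating a function makes its annotated
# arguments checked at call time. The names below are hypothetical.
#
#   @type_checked
#   def scale(values: List[int], factor: Optional[int] = None) -> List[int]:
#       return [v * (factor or 1) for v in values]
#
#   scale([1, 2, 3], 2)      # passes
#   scale("oops", 2)         # raises TypeError: In "scale", "values" has wrong type ...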
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Analysis used in TensorIR scheduling"""
from typing import List, Optional
import tvm._ffi
from tvm.runtime import Object
from ..buffer import Buffer
from ..stmt import For
from ..expr import PrimExpr
from ..function import IndexMap, PrimFunc
from . import _ffi_api
from .schedule import Schedule, BlockRV
def suggest_index_map(
buffer: Buffer,
indices: List[PrimExpr],
loops: List[For],
predicate: PrimExpr,
) -> Optional[IndexMap]:
"""Provided the access pattern to a buffer, suggest one of the possible layout
transformation to maximize the locality of the access pattern.
Parameters
----------
buffer : Buffer
The buffer to be transformed.
indices : List[PrimExpr]
The access pattern to the buffer.
loops : List[For]
The loops above the buffer.
predicate : PrimExpr
The predicate of the access.
Returns
-------
index_map : Optional[IndexMap]
The suggested index map. None if no transformation is suggested.
"""
return _ffi_api.SuggestIndexMap( # type: ignore # pylint: disable=no-member
buffer,
indices,
loops,
predicate,
)
@tvm._ffi.register_object("tir.schedule.TensorizeInfo")
class TensorizeInfo(Object):
"""Necessary information used for tensorization."""
def get_tensorize_loop_mapping(
sch: Schedule, block: BlockRV, desc_func: PrimFunc, allow_padding: bool = False
) -> Optional[TensorizeInfo]:
"""Establish a mapping between loops in a target block and an intrinsic description
Parameters
----------
sch : Schedule
The schedule to be tensorized
block : BlockRV
The target block to match against
desc_func : PrimFunc
The prim func describing the computation to be tensorized
allow_padding : bool
Whether to allow padding the block iters to match the intrinsic description
Returns
-------
tensorize_info : Optional[TensorizeInfo]
TensorizeInfo structure if a valid mapping is found, None otherwise
"""
return _ffi_api.GetTensorizeLoopMapping(sch, block, desc_func, allow_padding) # type: ignore
@tvm._ffi.register_object("tir.schedule.AutoTensorizeMappingInfo")
class AutoTensorizeMappingInfo(Object):
"""Necessary information used to perform transformations for tensorization."""
def get_auto_tensorize_mapping_info(
sch: Schedule, block: BlockRV, desc_func: PrimFunc
) -> Optional[AutoTensorizeMappingInfo]:
"""Get mapping info between a target block and an intrinsic description including layout
transformations to apply.
Parameters
----------
sch : Schedule
The schedule to be tensorized
block : BlockRV
The compute block for auto tensorization
desc_func : PrimFunc
The prim func describing the computation to be tensorized
Returns
-------
auto_tensorize_mapping_info : Optional[AutoTensorizeMappingInfo]
AutoTensorizeMappingInfo structure if potential mappings found, None otherwise.
Note
----
Returning a valid AutoTensorizeMappingInfo doesn't guarantee the block can be tensorized.
We will need to apply the suggested layout transformations and then match against the tensor
intrinsics.
"""
return _ffi_api.GetAutoTensorizeMappingInfo(sch, block, desc_func) # type: ignore
def has_block(sch: Schedule, block_name: str) -> bool:
"""Query if the given block name exists in the module associated with the provided schedule.
Parameters
----------
sch : Schedule
The schedule
block_name : str
The name of the block to query
Returns
-------
yes/no: bool
True if the given block exists in the schedule.
"""
return _ffi_api.HasBlock(sch, block_name) # type: ignore
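# Usage sketch (illustrative; `mod` is any IRModule or PrimFunc that actually
# contains a block named "update"):
#   sch = Schedule(mod)
#   if has_block(sch, "update"):
#       block = sch.get_block("update")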
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/block_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of two pillar data structure for TensorIR scheduling: StmtSRef, BlockScope."""
from enum import IntEnum
from typing import List, Optional, Union
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir import Block, For
from . import _ffi_api
@register_object("tir.StmtSRef")
class StmtSRef(Object):
"""An object that refers to schedulable elements in the TensorIR, aka "sref".
Glossary
- Block sref: An StmtSRef that points to a TensorIR block.
- Loop sref: An StmtSRef that points to a TensorIR for loop.
- Parent sref: The parent sref of an sref is the block/loop sref that points to its
closest schedulable ancestor statement on the TensorIR AST.
- Root sref: Sref to the root block. Every sref has exactly one parent sref
except for root sref.
- Sref tree: The parent-children-relationship of srefs that forms a tree,
uniquely determined by the TensorIR AST.
"""
seq_index: int
@property
def stmt(self) -> Optional[Union[Block, For]]:
"""The block/for stmt the object refers to"""
return _ffi_api.StmtSRefStmt(self) # type: ignore # pylint: disable=no-member
@property
def parent(self) -> Optional["StmtSRef"]:
"""The parent sref"""
return _ffi_api.StmtSRefParent(self) # type: ignore # pylint: disable=no-member
@staticmethod
def inline_mark() -> "StmtSRef":
"""A special StmtSRef, which doesn't point to any stmt in the AST,
only serving as a "mark" to hint compute-at to do the work of compute-inline"""
return _ffi_api.StmtSRefInlineMark() # type: ignore # pylint: disable=no-member
@staticmethod
def root_mark() -> "StmtSRef":
"""A special StmtSRef, which doesn't point to any stmt in the AST,
only serving as a "mark" to hint compute-at to do nothing"""
return _ffi_api.StmtSRefRootMark() # type: ignore # pylint: disable=no-member
class DepKind(IntEnum):
"""Type of dependency.
Attributes
----------
RAW : int = 0
Read-after-write dependency
WAW : int = 1
Write-after-write dependency
WAR : int = 2
Write-after-read dependency. Not supported in TensorIR for now.
OPAQUE: int = 3
Opaque dependency
"""
RAW = 0
WAW = 1
WAR = 2
OPAQUE = 3
@register_object("tir.Dependency")
class Dependency(Object):
"""A tuple (src, dst, kind) representing certain types of dependency.
For example, (A, B, kRAW) means block B depends on block A, and the dependency kind is
read-after-write, which means block B reads the result written by block A.
Parameters
----------
src : StmtSRef
The source of the dependency relation
dst : StmtSRef
The destination of the dependency relation
kind : DepKind
The dependency kind
"""
src: StmtSRef
dst: StmtSRef
kind: DepKind
@register_object("tir.BlockScope")
class BlockScope(Object):
"""An object corresponds to each block sref in the sref tree, which
tracks the producer-consumer dependency between blocks.
Glossary:
- Block scope: A contiguous subtree of the sref tree, rooted at
each block sref, whose components are:
- scope root: a block sref
- internal srefs: loop srefs
- scope leaves: block srefs
- Child block: The scope leaf blocks under the scope root or a specific internal sref
"""
def get_deps_by_src(self, block: StmtSRef) -> List[Dependency]:
"""Get all dependencies whose `src` is the target`block`.
Parameters
----------
block: StmtSRef
The queried block
Returns
-------
blocks: List[Dependency]
The dependencies
"""
return _ffi_api.BlockScopeGetDepsBySrc(self, block) # type: ignore # pylint: disable=no-member
def get_deps_by_dst(self, block: StmtSRef) -> List[Dependency]:
"""Get all dependencies whose `dst` is the target `block`.
Parameters
----------
block: StmtSRef
The queried block
Returns
-------
blocks: List[Dependency]
The dependencies
"""
return _ffi_api.BlockScopeGetDepsByDst(self, block) # type: ignore # pylint: disable=no-member
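# Usage sketch (illustrative): given a tir.Schedule `sch` whose root scope has a
# producer block "A" and a consumer block "B", the scope's dependency edges can be
# inspected through ScheduleState.get_block_scope (block names here are hypothetical):
#   root = sch.get_sref(sch.get_block("root"))
#   scope = sch.state.get_block_scope(root)
#   for dep in scope.get_deps_by_src(sch.get_sref(sch.get_block("A"))):
#       print(dep.src, dep.dst, dep.kind)    # expect DepKind.RAW for A -> B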
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/instruction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Schedule instructions each corresponds to a schedule primitive"""
from typing import TYPE_CHECKING, Any, List, Union
from tvm._ffi import register_object as _register_object
from tvm.runtime import Object
from . import _ffi_api
if TYPE_CHECKING:
from .schedule import RAND_VAR_TYPE
INPUT_RV_TYPE = Union[RAND_VAR_TYPE, float, int, str, None] # pylint: disable=invalid-name
OUTPUT_RV_TYPE = Union[RAND_VAR_TYPE] # pylint: disable=invalid-name
ATTR_TYPE = Any
else:
INPUT_RV_TYPE = OUTPUT_RV_TYPE = ATTR_TYPE = Any
@_register_object("tir.InstructionKind")
class InstructionKind(Object):
"""Kind of an instruction, e.g. Split, Reorder, etc.
Besides the name, every kind of instruction has its own properties, including:
1) A boolean indicating if the instruction is pure, i.e. it changes nothing in the schedule state
2) A functor that applies the instruction to a TensorIR schedule
3) A functor that converts the instruction to a statement in python syntax
4) A functor that serializes its attributes to JSON
5) A functor that deserializes its attributes from JSON
Unlike `tvm.ir.op`, `InstructionKind` doesn't support unstructured properties,
mainly because there is no such use case yet for adding any other property.
Attributes
----------
name : str
The name of a kind of instructions
Note
----
The functor properties are not exposed on python side at the moment
"""
name: str
@property
def is_pure(self) -> bool:
"""Indicates if the instruction is pure, i.e. removing it alone doesn't mutate the schedule
state. For example, the instruction `GetBlock` is pure because it changes
nothing, while `ComputeInline` is not because removing it leads to a different resulting
schedule.
Returns
-------
pure : bool
The boolean flag indicating if the instruction is pure
"""
return bool(self._is_pure)
@staticmethod
def get(name: str) -> "InstructionKind":
"""Retrieve an InstructionKind using its name
Parameters
----------
name : str
The registered name of the InstructionKind
Returns
-------
kind : InstructionKind
The InstructionKind retrieved
"""
return _ffi_api.InstructionKindGet(name) # type: ignore # pylint: disable=no-member
@_register_object("tir.Instruction")
class Instruction(Object):
"""Schedule instructions each corresponds to a schedule primitive
Attributes
----------
kind : InstructionKind
The kind of the instruction
inputs : List[INPUT_RV_TYPE]
The input random variables of the instruction,
and the type of each element can be one of the following:
- BlockRV
- LoopRV
- ExprRV
- float
- int
- str
- None
attrs : List[ATTR_TYPE]
The attributes of the instruction. Similar to attributes of an operator,
attributes of an instruction are arbitrary constant metadata required by the instructions.
For example, the name of the block to be retrieved in `GetBlock`.
outputs : List[OUTPUT_RV_TYPE]
The output random variables of the instruction,
and the type of each element can be one of the following:
- BlockRV
- LoopRV
- ExprRV, atomic variables only, won't be constants or composite PrimExpr
"""
kind: InstructionKind
inputs: List[INPUT_RV_TYPE]
attrs: List[ATTR_TYPE]
outputs: List[OUTPUT_RV_TYPE]
def __init__(
self,
kind: InstructionKind,
inputs: List[INPUT_RV_TYPE],
attrs: List[ATTR_TYPE],
outputs: List[OUTPUT_RV_TYPE],
) -> None:
"""Constructor
Parameters
----------
kind : InstructionKind
The kind of the instruction
inputs : List[INPUT_RV_TYPE]
The input random variables of the instruction,
and the type of each element can be one of the following:
- BlockRV
- LoopRV
- ExprRV
- float
- int
- str
- None
attrs : List[ATTR_TYPE]
The attributes of the instruction. Similar to attributes of an operator,
attributes of an instruction are arbitrary constant metadata required by the
instructions. For example, the name of the block to be retrieved in `GetBlock`.
outputs : List[OUTPUT_RV_TYPE]
The output random variables of the instruction,
and the type of each element can be one of the following:
- BlockRV
- LoopRV
- ExprRV, atomic variables only, won't be constants or composite PrimExpr
"""
self.__init_handle_by_constructor__(
_ffi_api.Instruction, # type: ignore # pylint: disable=no-member
kind,
inputs,
attrs,
outputs,
)
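# Usage sketch (illustrative): kinds are looked up by their registered names, and
# Instruction objects are normally produced by a traced Schedule rather than built
# by hand (the kind name below assumes the mainline registration):
#   kind = InstructionKind.get("GetBlock")
#   print(kind.name, kind.is_pure)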
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The TensorIR schedule class"""
from typing import Callable, Dict, List, Optional, Tuple, Union
from tvm._ffi import register_object as _register_object
from tvm.error import TVMError, register_error
from tvm.ir import IRModule, PrimExpr
from tvm.runtime import Object, String
from tvm.tir import Block, Buffer, FloatImm, For, IntImm, PrimFunc
from ..function import IndexMap
from . import _ffi_api
from ._type_checker import type_checked
from .state import ScheduleState, StmtSRef, _parse_debug_mask, _parse_mod
from .trace import Trace
@register_error
class ScheduleError(TVMError):
"""Error that happens during TensorIR scheduling."""
@_register_object("tir.LoopRV")
class LoopRV(Object):
"""A random variable that refers to a loop"""
def __init__(self) -> None:
"""Construct a new LoopRV."""
self.__init_handle_by_constructor__(
_ffi_api.LoopRV # type: ignore # pylint: disable=no-member
)
@_register_object("tir.BlockRV")
class BlockRV(Object):
"""A random variable that refers to a block"""
def __init__(self) -> None:
"""Construct a new BlockRV."""
self.__init_handle_by_constructor__(
_ffi_api.BlockRV # type: ignore # pylint: disable=no-member
)
# It is a workaround for mypy: https://github.com/python/mypy/issues/7866#issuecomment-549454370
# This feature is not supported until python 3.10:
# https://docs.python.org/3.10/whatsnew/3.10.html#pep-613-typealias
ExprRV = Union[PrimExpr] # A random variable that evaluates to an integer
RAND_VAR_TYPE = Union[ExprRV, BlockRV, LoopRV] # pylint: disable=invalid-name
# Update to `Literal["detail", "fast", "none"]` once upgraded to python3.8
_ERROR_RENDER_LEVEL: Dict[str, int] = {
"detail": 0,
"fast": 1,
"none": 2,
}
def _parse_error_render_level(error_render_level: str) -> int:
if error_render_level not in _ERROR_RENDER_LEVEL:
raise ValueError(
'error_render_level can be "detail", "fast", or "none", but got: '
+ f"{error_render_level}"
)
return _ERROR_RENDER_LEVEL.get(error_render_level)
def _parse_seed(seed: Optional[int]) -> int:
if seed is None:
return -1
if not isinstance(seed, int):
raise TypeError(f"Expected `seed` to be int or None, but gets: {seed}")
if seed < 1 or seed > 2147483647:
raise ValueError(f"seed must be in the range [1, 2147483647], but gets: {seed}")
return seed
@_register_object("tir.Schedule")
class Schedule(Object):
"""The user-facing schedule class
A schedule is a set of transformations that change the order of computation but
preserve the semantics of the computation. Some examples of schedules:
1) Split a loop into two;
2) Reorder two loops;
3) Inline the computation of a specific buffer into its consumer
The schedule class stores auxiliary information to schedule correctly and efficiently.
Link to tutorial: https://tvm.apache.org/docs/tutorials/language/schedule_primitives.html
"""
@type_checked
def __init__(
self,
mod: Union[PrimFunc, IRModule],
*,
seed: Optional[int] = None,
debug_mask: Union[str, int] = "none",
error_render_level: str = "detail",
) -> None:
"""Construct a TensorIR schedule class from an IRModule
Parameters
----------
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to be scheduled
seed: Optional[int]
The seed value for schedule's random state
Note that None and -1 mean using device random; otherwise only integers between 1 and
2147483647 are allowed.
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
error_render_level : str = "detail"
The level of error rendering. Choices: "detail", "fast", "none".
- "detail": Render a detailed error message, with the TIR and error locations printed
- "fast: Show a simple error message without rendering or string manipulation
- "none": Do not show any error message.
Note
----
The checks performed include:
1) VerifySRefTree
2) VerifyCachedFlags
"""
# call the constructor
self.__init_handle_by_constructor__(
_ffi_api.TracedSchedule, # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_seed(seed),
_parse_debug_mask(debug_mask),
_parse_error_render_level(error_render_level),
)
@staticmethod
def _create_non_traced(
mod: Union[PrimFunc, IRModule],
*,
seed: Optional[int] = None,
debug_mask: Union[str, int] = "none",
error_render_level: str = "detail",
) -> "Schedule":
"""Construct a non-traced TensorIR schedule class from an IRModule."""
return _ffi_api.ConcreteSchedule( # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_seed(seed),
_parse_debug_mask(debug_mask),
_parse_error_render_level(error_render_level),
)
########## Utilities ##########
@property
def mod(self) -> IRModule:
"""Returns the AST of the module being scheduled"""
return _ffi_api.ScheduleGetMod(self) # type: ignore # pylint: disable=no-member
@property
def state(self) -> ScheduleState:
"""Returns the ScheduleState in the current schedule class"""
return _ffi_api.ScheduleGetState(self) # type: ignore # pylint: disable=no-member
@property
def trace(self) -> Optional[Trace]:
"""Returns the internally maintained trace of scheduling program execution"""
return _ffi_api.ScheduleGetTrace(self) # type: ignore # pylint: disable=no-member
def work_on(self, func_name: str) -> None:
"""Instruct the schedule to work on a function in the IRModule.
By default, the schedule works on the function with the name "main", or the only function in
the IRModule if there is only one. If there are multiple functions in the IRModule, and none
of their names are "main", users will have to call this method to explicitly specify which
function to work on.
This sugar function will guide the `GetBlock` method if its `func_name` is not specified.
Parameters
----------
func_name : str
The name of the function to work on.
"""
_ffi_api.ScheduleWorkOn(self, func_name) # type: ignore # pylint: disable=no-member
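# Usage sketch (illustrative; function and block names are hypothetical): if the
# IRModule holds both a "conv" and a "relu" PrimFunc and neither is named "main",
# select the one to schedule first:
#   sch.work_on("conv")
#   block = sch.get_block("compute")   # now searched inside "conv"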
def copy(self) -> "Schedule":
"""Returns a copy of the schedule, including both the state and the symbol table,
* guaranteeing that
* 1) SRef tree is completely reconstructed;
* 2) The IRModule being scheduled is untouched;
* 3) All the random variables are valid in the copy, pointing to the corresponding sref
* reconstructed
Returns
-------
copy : Schedule
A new copy of the schedule
"""
return _ffi_api.ScheduleCopy(self) # type: ignore # pylint: disable=no-member
@type_checked
def seed(self, seed: int) -> None:
"""Seed the randomness
Parameters
----------
seed : int
The new random seed; use -1 for device random, otherwise it must be non-negative
"""
return _ffi_api.ScheduleSeed(self, seed) # type: ignore # pylint: disable=no-member
def fork_seed(self) -> int:
"""Returns a forked random state as seed for new schedules
Returns
-------
seed : int
The forked random state, not the same as the current random state
"""
return _ffi_api.ScheduleForkSeed(self) # type: ignore # pylint: disable=no-member
@type_checked
def show(self, rand_var: RAND_VAR_TYPE) -> str:
"""Returns a string representation of the value that the random variable evaluates to
Parameters
----------
rand_var : Union[ExprRV, BlockRV, LoopRV]
The random variable to be evaluated
Returns
-------
str_repr : str
The string representation
"""
return str(self.get(rand_var))
########## Lookup ##########
@type_checked
def get(
self,
rand_var_or_sref: Union[RAND_VAR_TYPE, StmtSRef],
) -> Optional[Union[int, Block, For]]:
"""Returns:
- the corresponding Block that a BlockRV evaluates to;
- the corresponding For that a LoopRV evaluates to;
- the corresponding integer that an ExprRV evaluates to;
- the corresponding Block that a block sref points to;
- the corresponding For that a loop sref points to;
Parameters
----------
rand_var_or_sref : Union[ExprRV, BlockRV, LoopRV, StmtSRef]
The random variable / sref to be evaluated
Returns
-------
result : Optional[Union[int, Block, For]]
The corresponding result
"""
if isinstance(rand_var_or_sref, StmtSRef):
return rand_var_or_sref.stmt
result = _ffi_api.ScheduleGet(self, rand_var_or_sref) # type: ignore # pylint: disable=no-member
if isinstance(result, IntImm):
result = result.value
return result
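# Usage sketch (illustrative): `get` materializes whatever a random variable or
# sref stands for, e.g. the tir.Block behind a BlockRV or the int behind an ExprRV:
#   block_stmt = sch.get(sch.get_block("B"))                     # a tir.Block node
#   loop_stmt = sch.get(sch.get_loops(sch.get_block("B"))[0])    # a tir.For node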
@type_checked
def get_sref(self, rand_var_or_stmt: Union[BlockRV, LoopRV, Block, For]) -> Optional[StmtSRef]:
"""Returns the corresponding sref to the given
1) LoopRV
2) BlockRV
3) Block
4) For
Parameters
----------
rand_var_or_stmt : Union[BlockRV, LoopRV, Block, For]
The random variable / sref to be evaluated
Returns
-------
result : Optional[StmtSRef]
The corresponding result
"""
return _ffi_api.ScheduleGetSRef( # type: ignore # pylint: disable=no-member
self, rand_var_or_stmt
)
@type_checked
def remove_rv(self, rand_var: RAND_VAR_TYPE) -> None:
"""Remove a random variable from the symbol table
Parameters
----------
rand_var : Union[BlockRV, LoopRV, ExprRV]
The random variable to be removed
"""
return _ffi_api.ScheduleRemoveRV(self, rand_var) # type: ignore # pylint: disable=no-member
########## Schedule: Sampling ##########
@type_checked
def sample_categorical(
self,
candidates: List[int],
probs: List[float],
decision: Optional[int] = None,
) -> ExprRV:
"""Sample an integer given the probability distribution
Parameters
----------
candidates : List[int]
The candidates to be sampled from
probs : List[float]
The probability of each candidate
decision : Optional[int]
The sampling decision, if any
Returns
-------
result : ExprRV
The random variable sampled from candidates
"""
return _ffi_api.ScheduleSampleCategorical( # type: ignore # pylint: disable=no-member
self,
candidates,
probs,
decision,
)
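# Usage sketch (illustrative): draw one of {1, 2, 4, 8} with equal probability
# and turn the resulting ExprRV into a concrete integer:
#   rv = sch.sample_categorical([1, 2, 4, 8], [0.25, 0.25, 0.25, 0.25])
#   tile = sch.get(rv)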
@type_checked
def sample_perfect_tile(
self,
loop: LoopRV,
n: int,
max_innermost_factor: int = 16,
decision: Optional[List[int]] = None,
) -> List[ExprRV]:
"""Sample the factors to perfect tile a specific loop
Parameters
----------
loop : LoopRV
The loop to be tiled
n : int
The number of tiles to be sampled
max_innermost_factor : int
The maximum tile size allowed to be sampled in the innermost loop
decision: Optional[List[int]]
The sampling decision, if any
Returns
-------
result : List[ExprRV]
A list of length `n`, the random perfect tile sizes sampled
"""
return list(
_ffi_api.ScheduleSamplePerfectTile( # type: ignore # pylint: disable=no-member
self,
loop,
n,
max_innermost_factor,
decision,
)
)
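# Usage sketch (illustrative; `k` is a LoopRV obtained from get_loops): sample a
# 3-way perfect tiling and apply it immediately:
#   factors = sch.sample_perfect_tile(k, n=3, max_innermost_factor=4)
#   ko, km, ki = sch.split(k, factors=factors)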
@type_checked
def sample_compute_location(
self,
block: Union[BlockRV, str],
decision: Optional[int] = None,
) -> LoopRV:
"""Sample a compute-at location of the given block
Parameters
----------
block : Union[BlockRV, str]
The block whose compute-at location is to be sampled
decision : Optional[int]
The sampling decision
Returns
-------
result : LoopRV
The sampled loop where the input block is to be computed at
"""
block = self._normalize_block_arg(block)
return _ffi_api.ScheduleSampleComputeLocation( # type: ignore # pylint: disable=no-member
self,
block,
decision,
)
########## Schedule: Get blocks & loops ##########
@type_checked
def get_block(
self,
name: str,
func_name: Optional[str] = None,
) -> BlockRV:
"""Retrieve a block in a specific function with its name
By default, if `func_name` is not specified, the schedule will search for the block in the
function that is currently being "worked on". To switch the function to be worked on, use
`work_on` before calling this method.
Parameters
----------
name : str
The name of the block
func_name : Optional[str] = None
The name of the function
Returns
-------
block : BlockRV
The block retrieved
An IndexError is raised if zero or multiple blocks exist with the specified name.
"""
return _ffi_api.ScheduleGetBlock( # type: ignore # pylint: disable=no-member
self,
name,
func_name,
)
@type_checked
def get_loops(self, block: Union[BlockRV, str]) -> List[LoopRV]:
"""Get the parent loops of the block in its scope, from outer to inner
Parameters
----------
block : Union[BlockRV, str]
The query block
Returns
-------
loops : List[LoopRV]
A list of loops above the given block in its scope, from outer to inner
"""
block = self._normalize_block_arg(block)
return list(_ffi_api.ScheduleGetLoops(self, block)) # type: ignore # pylint: disable=no-member
@type_checked
def get_child_blocks(self, block_or_loop: Union[BlockRV, LoopRV]) -> List[BlockRV]:
"""Get the leaf blocks of a specific block/loop
Parameters
----------
block_or_loop : Union[BlockRV, LoopRV]
The query block/loop
Returns
-------
blocks : List[BlockRV]
A list of leaf blocks inside a specific block/loop
"""
return list(_ffi_api.ScheduleGetChildBlocks(self, block_or_loop)) # type: ignore # pylint: disable=no-member
@type_checked
def get_producers(self, block: Union[BlockRV, str]) -> List[BlockRV]:
"""Get the producers of a specific block
Parameters
----------
block : Union[BlockRV, str]
The block in the query
Returns
-------
producers : List[BlockRV]
A list of producers of the given block
"""
block = self._normalize_block_arg(block)
return list(_ffi_api.ScheduleGetProducers(self, block)) # type: ignore # pylint: disable=no-member
@type_checked
def get_consumers(self, block: Union[BlockRV, str]) -> List[BlockRV]:
"""Get the consumers of a specific block
Parameters
----------
block : Union[BlockRV, str]
The block in the query
Returns
-------
consumers : List[BlockRV]
A list of consumers of the given block
"""
block = self._normalize_block_arg(block)
return list(_ffi_api.ScheduleGetConsumers(self, block)) # type: ignore # pylint: disable=no-member
########## Schedule: Transform loops ##########
@type_checked
def fuse(
self,
*loops: List[LoopRV],
preserve_unit_iters: bool = True,
) -> LoopRV:
"""Fuse a list of consecutive loops into one. It requires:
1) The loops can't have annotations or thread bindings.
2) The (i+1)-th loop must be the only child of the i-th loop.
3) All loops must start with 0.
4) The domain of a loop to be fused cannot depend on another loop to be fused.
Parameters
----------
*loops : List[LoopRV]
The loops to be fused
preserve_unit_iters : bool
Whether or not to preserve unit iterators in block bindings
Returns
-------
fused_loop : LoopRV
The new loop after fusion
Examples
--------
Before applying fuse, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_fuse(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do fuse:
.. code-block:: python
sch = tir.Schedule(before_fuse)
i, j = sch.get_loops(sch.get_block("B"))
sch.fuse(i, j)
print(sch.mod["main"].script())
After applying fuse, the IR becomes:
.. code-block:: python
@T.prim_func
def after_fuse(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# the 2 loops are fused into 1
for i_j_fused in T.serial(0, 16384):
with T.block("B"):
vi = T.axis.S(128, T.floordiv(i_j_fused, 128))
vj = T.axis.S(128, T.floormod(i_j_fused, 128))
B[vi, vj] = A[vi, vj] * 2.0
"""
return _ffi_api.ScheduleFuse(self, loops, preserve_unit_iters) # type: ignore # pylint: disable=no-member
@type_checked
def split(
self,
loop: LoopRV,
factors: List[Union[int, ExprRV, None]],
preserve_unit_iters: bool = True,
) -> List[LoopRV]:
"""Split a loop into a list of consecutive loops. It requires:
1) The loop can't have annotation or thread binding.
2) The loop must start with 0.
Predicates may be added to ensure the total loop extent is kept unchanged.
In `factors`, at most one of the factors can be None,
which will be automatically inferred.
Parameters
----------
loop : LoopRV
The loop to be split
factors: List[Union[int, ExprRV, None]]
The splitting factors
Potential inputs are:
- None
- ExprRV
- Positive constant integers
preserve_unit_iters : bool
Whether or not to preserve unit iterators in block bindings
Returns
-------
split_loops : List[LoopRV]
The new loops after split
Examples
--------
Before split, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do split:
.. code-block:: python
sch = tir.Schedule(before_split)
i, j = sch.get_loops(sch.get_block("B"))
sch.split(i, factors=[2, 64])
print(sch.mod["main"].script())
After applying split, the IR becomes:
.. code-block:: python
@T.prim_func
def after_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# the original loop is split into 2 loops
for i0, i1, j in T.grid(2, 64, 128):
with T.block("B"):
vi = T.axis.S(128, i0 * 64 + i1)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
"""
# it will be checked later in C++ implementation
# that there is at most one None in `factors`
return list(
_ffi_api.ScheduleSplit( # type: ignore # pylint: disable=no-member
self,
loop,
factors,
preserve_unit_iters,
)
)
@type_checked
def reorder(self, *ordered_loops: List[LoopRV]) -> None:
"""
Reorder a list of loops. It doesn't require the loops to be consecutive.
It requires:
1) The loops are in the same chain. That means: the loops can be ordered to [l_1, l_2, ... ,
l_n] where l_i is an ancestor of l_{i+1} and there are only single-branch loops between
l_1 and l_n (which also indicates they are under the same scope).
2) After reordering, the domain of an outer loop cannot depend on any of the inner loops.
3) For every block under the loop nests, its block binding must be affine, and the block
variables must be either data parallel or reduction.
4) No duplicated loops are allowed in the arguments.
Parameters
----------
*ordered_loops : List[LoopRV]
The loops in the new order
Examples
--------
Before reorder, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_reorder(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do reorder:
.. code-block:: python
sch = tir.Schedule(before_reorder)
i, j = sch.get_loops(sch.get_block("B"))
sch.reorder(j, i)
print(sch.mod["main"].script())
After applying reorder, the IR becomes:
.. code-block:: python
@T.prim_func
def after_reorder(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
# Here j and i are reordered
for j, i in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleReorder(self, ordered_loops) # type: ignore # pylint: disable=no-member
@type_checked
def add_unit_loop(self, block_or_loop: Union[LoopRV, BlockRV]) -> LoopRV:
"""Create a new unit loop on top of the specific block or loop.
Parameters
----------
block_or_loop : Union[LoopRV, BlockRV]
The block above which the new loop is created
Returns
-------
new_loop : LoopRV
The new unit loop
Examples
--------
Before add_unit_loop, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_add_unit_loop(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
Create the schedule and do add-unit-loop:
.. code-block:: python
sch = tir.Schedule(before_add_unit_loop)
sch.add_unit_loop(sch.get_block("C"))
print(sch.mod["main"].script())
After applying add-unit-loop, the IR becomes:
.. code-block:: python
@T.prim_func
def after_add_unit_loop(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
for u in T.serial(1):
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
"""
return _ffi_api.ScheduleAddUnitLoop(self, block_or_loop) # type: ignore # pylint: disable=no-member
########## Schedule: Manipulate ForKind ##########
@type_checked
def parallel(self, loop: LoopRV) -> None:
"""Parallelize the input loop. It requires:
1) The scope block that the loop is in should have stage-pipeline property
2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
bindings
3) For each block under the loop, the loop can only be contained in data-parallel block
iters' bindings
Parameters
----------
loop : LoopRV
The loop to be parallelized
Examples
--------
Before parallel, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_parallel(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do parallel:
.. code-block:: python
sch = tir.Schedule(before_parallel)
i, j = sch.get_loops(sch.get_block("B"))
sch.parallel(i)
After applying parallel, the IR becomes:
.. code-block:: python
@T.prim_func
def after_parallel(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i in T.parallel(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleParallel(self, loop) # type: ignore # pylint: disable=no-member
@type_checked
def vectorize(self, loop: LoopRV) -> None:
"""Vectorize the input loop. It requires:
1) The scope block that the loop is in should have stage-pipeline property
2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
bindings
3) For each block under the loop, the loop can only be contained in data-parallel block
iters' bindings
Parameters
----------
loop : LoopRV
The loop to be vectorized
Examples
--------
Before vectorize, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_vectorize(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do vectorize:
.. code-block:: python
sch = tir.Schedule(before_vectorize)
i, j = sch.get_loops(sch.get_block("B"))
sch.vectorize(j)
After applying vectorize, the IR becomes:
.. code-block:: python
@T.prim_func
def after_vectorize(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i in T.serial(0, 128):
for j in T.vectorized(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleVectorize(self, loop) # type: ignore # pylint: disable=no-member
@type_checked
def bind(self, loop: LoopRV, thread_axis: str) -> None:
"""Bind the input loop to the given thread axis. It requires:
1) The scope block that the loop is in should have stage-pipeline property
2) All the blocks under the loop are complete blocks or reduction blocks, and have affine
bindings
3) For each block under the loop, if the thread axis starts with ``threadIdx``, the loop can
only be contained in data-parallel block iters' and reduction block iters' bindings. Otherwise
the loop can only be contained in data-parallel block iters' bindings
Parameters
----------
loop : LoopRV
The loop to be bound to the thread axis
thread_axis : str
The thread axis to be bound to the loop. Possible candidates:
- blockIdx.x/y/z
- threadIdx.x/y/z
- vthread.x/y/z
- vthread (It is a legacy behavior that will be deprecated. Please use `vthread.x/y/z`
instead.)
Examples
--------
Before bind, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_bind(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do bind:
.. code-block:: python
sch = tir.Schedule(before_bind)
i, j = sch.get_loops(sch.get_block("B"))
sch.bind(i, "blockIdx.x")
sch.bind(j, "threadIdx.x")
After applying bind, the IR becomes:
.. code-block:: python
@T.prim_func
def after_bind(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i in T.thread_binding(0, 128, thread = "blockIdx.x"):
for j in T.thread_binding(0, 128, thread = "threadIdx.x"):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
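In practice a loop is often split first and the resulting parts bound to block and thread
indices. A minimal sketch on the same function (the split factor 32 is an arbitrary,
illustrative choice):
.. code-block:: python
    sch = tir.Schedule(before_bind)
    i, j = sch.get_loops(sch.get_block("B"))
    i_outer, i_inner = sch.split(i, factors=[None, 32])
    sch.bind(i_outer, "blockIdx.x")
    sch.bind(i_inner, "threadIdx.x")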
"""
_ffi_api.ScheduleBind(self, loop, thread_axis) # type: ignore # pylint: disable=no-member
@type_checked
def unroll(self, loop: LoopRV) -> None:
"""Unroll the input loop. It requires nothing
Parameters
----------
loop : LoopRV
The loop to be unrolled
Examples
--------
Before unroll, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_unroll(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do unroll:
.. code-block:: python
sch = tir.Schedule(before_unroll)
i, j = sch.get_loops(sch.get_block("B"))
sch.unroll(i)
After applying unroll, the IR becomes:
.. code-block:: python
@T.prim_func
def after_unroll(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i in T.unroll(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleUnroll(self, loop) # type: ignore # pylint: disable=no-member
########## Schedule: Insert cache stages ##########
@type_checked
def cache_read(
self,
block: Union[BlockRV, str],
read_buffer_index: Union[int, str, Buffer],
storage_scope: str,
consumer_blocks: Optional[List[Union[BlockRV, str]]] = None,
) -> BlockRV:
"""Create a block that reads a buffer region into a read cache. It requires:
1) There is at most one block that writes the buffer in the scope.
2) The scope block has the stage-pipeline property.
Parameters
----------
block : Union[BlockRV, str]
The consumer block of the target buffer.
read_buffer_index: Union[int, str, Buffer]
The index of the buffer in the block's read region, the unique
name of a read buffer in the block, or a Buffer object
that is within the block's read region.
storage_scope: str
The target storage scope.
consumer_blocks: Optional[List[Union[BlockRV, str]]]
An optional list of consumers that should read from the cache. If not specified,
all consumers will use the cache.
Returns
-------
cached_block : BlockRV
The block of the cache stage
Examples
--------
Before cache_read, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_cache_read(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and cache_read:
.. code-block:: python
sch = tir.Schedule(before_cache_read)
block_b = sch.get_block("B")
sch.cache_read(block_b, 0, "local")
print(sch.mod["main"].script())
After applying cache_read, the IR becomes:
.. code-block:: python
@T.prim_func
def after_cache_read(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
A_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("A_local"):
vi, vj = T.axis.remap("SS", [i, j])
A_local[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A_local[vi, vj] * 2.0
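The ``consumer_blocks`` argument restricts which consumers read from the new cache stage.
A minimal sketch on the same function (with a single consumer this is equivalent to the
default behavior):
.. code-block:: python
    sch = tir.Schedule(before_cache_read)
    block_b = sch.get_block("B")
    # Only block "B" reads from the cache; any other consumer of A would
    # keep reading the original buffer.
    sch.cache_read(block_b, 0, "local", consumer_blocks=[block_b])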
"""
if consumer_blocks is None:
consumer_blocks = []
# Convert any string block names into Block RVs.
consumer_blocks = [self._normalize_block_arg(b) for b in consumer_blocks]
block = self._normalize_block_arg(block)
if not isinstance(read_buffer_index, int):
_, read_buffer_index, _ = self._normalize_buffer_arg(
block, read_buffer_index, required_buffer_type="read"
)
return _ffi_api.ScheduleCacheRead( # type: ignore # pylint: disable=no-member
self, block, read_buffer_index, storage_scope, consumer_blocks
)
@type_checked
def cache_write(
self,
block: Union[BlockRV, str],
write_buffer_index: Union[int, str, Buffer],
storage_scope: str,
) -> BlockRV:
"""Create a block that reads a buffer region into a write cache. It requires:
1) There is only one block that writes the buffer in the scope.
2) The scope block has the stage-pipeline property.
Parameters
----------
block : Union[BlockRV, str]
The producer block of the target buffer.
write_buffer_index: Union[int, str, Buffer]
The index of the buffer in block's write region, the unique
name of a write buffer in the block, or a Buffer object
that is within the block's write region.
storage_scope: str
The target storage scope.
Returns
-------
cached_block : BlockRV
The block of the cache stage
Examples
--------
Before cache_write, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_cache_write(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and cache_write:
.. code-block:: python
sch = tir.Schedule(before_cache_write)
block_b = sch.get_block("B")
sch.cache_write(block_b, 0, "local")
print(sch.mod["main"].script())
After applying cache_write, the IR becomes:
.. code-block:: python
@T.prim_func
def after_cache_write(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("A_local"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_local[vi, vj]
"""
block = self._normalize_block_arg(block)
if not isinstance(write_buffer_index, int):
_, write_buffer_index, _ = self._normalize_buffer_arg(
block, write_buffer_index, required_buffer_type="write"
)
return _ffi_api.ScheduleCacheWrite( # type: ignore # pylint: disable=no-member
self, block, write_buffer_index, storage_scope
)
@type_checked
def cache_inplace(
self,
block: Union[BlockRV, str],
read_buffer_index: Union[int, str, Buffer],
storage_scope: str,
) -> List[BlockRV]:
"""Create blocks that reads & write a buffer region into a cache block.
It requires that the target block both reads and writes the target buffer.
Mainly for in-place operations.
Parameters
----------
block : Union[BlockRV, str]
The target block operates on the target buffer.
read_buffer_index: Union[int, str, Buffer]
The index of the buffer in block's read region, the unique
name of a read buffer in the block, or a Buffer object
that is within the block's read region.
storage_scope: str
The target storage scope.
Returns
-------
cached_blocks : List[BlockRV]
The blocks of the cache stage, read cache first, write cache second
Examples
--------
Before cache_inplace, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_cache_inplace(data_io: T.Buffer[(64), "int32"]):
for i0 in T.serial(1):
with T.block("A"):
T.reads(data_io[:64])
T.writes(data_io[:64])
T.evaluate(T.call_extern("call_impl", data_io.data, dtype=""))
Create the schedule and cache_inplace:
.. code-block:: python
sch = tir.Schedule(before_cache_inplace)
block_a = sch.get_block("A")
sch.cache_inplace(block_a, 0, "local")
print(sch.mod["main"].script())
After applying cache_inplace, the IR becomes:
.. code-block:: python
@T.prim_func
def cache_inplace(data_io: T.Buffer[64, "int32"]) -> None:
data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
for i0 in T.serial(1):
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io[v0])
T.writes(data_io_local[v0])
data_io_local[v0] = data_io[v0]
with T.block("A"):
T.reads(data_io_local[0 : 64])
T.writes(data_io_local[0 : 64])
T.evaluate(T.call_extern("call_impl", data_io_local.data, dtype=""))
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_local[v0])
T.writes(data_io[v0])
data_io[v0] = data_io_local[v0]
"""
block = self._normalize_block_arg(block)
if not isinstance(read_buffer_index, int):
_, read_buffer_index, _ = self._normalize_buffer_arg(
block, read_buffer_index, required_buffer_type="read"
)
return _ffi_api.ScheduleCacheInplace( # type: ignore # pylint: disable=no-member
self, block, read_buffer_index, storage_scope
)
@type_checked
def cache_index(
self, block: Union[BlockRV, str], buffer_index: Union[int, str, Buffer]
) -> List[BlockRV]:
"""Create a block to cache precomputed index for later use.
if there is no index computation, keep unchanged.
Parameters
----------
block : Union[BlockRV, str]
The target block operates on the target buffer.
buffer_index: Union[int, str, Buffer]
The index of the target buffer in the block's read region, the unique
name of a read buffer in the block, or a Buffer object within the block's read region
Returns
-------
cached_blocks : List[BlockRV]
The blocks of the stage writing the cache buffers
Examples
--------
Before cache_index, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def resize(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (1, 3, 40, 40))
B = T.match_buffer(b, (1, 3, 80, 80))
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
B[n, c, vi, vj] = A[n, c, vi//4 + vj//4, vj//2]
Create the schedule and cache_index:
.. code-block:: python
sch = tir.Schedule(resize)
block_a = sch.get_block("A")
sch.cache_index(block_a, 0)
print(sch.mod["main"].script())
After applying cache_index, the IR becomes:
.. code-block:: python
@T.prim_func
def resize_cache_index(
A: T.Buffer[(1, 3, 40, 40), "float32"], B: T.Buffer[(1, 3, 80, 80), "float32"]
) -> None:
index_var_0 = T.alloc_buffer([80, 80], dtype="int32", strides=[1])
index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
for ax0, ax1 in T.grid(80, 80):
with T.block("index_0"):
v0 = T.axis.spatial(80, ax0)
v1 = T.axis.spatial(80, ax1)
T.reads()
T.writes(index_var_0[v0, v1])
index_var_0[v0, v1] = v0 // 4 + v1 // 4
for ax0 in T.serial(80):
with T.block("index_1"):
v0 = T.axis.spatial(80, ax0)
T.reads()
T.writes(index_var_1[v0])
index_var_1[v0] = v0 // 2
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(A[n, c, vi // 4 + vj // 4, vj // 2])
T.writes(B[n, c, vi, vj])
B[n, c, vi, vj] = A[n, c, index_var_0[vi, vj], index_var_1[vj]]
"""
block = self._normalize_block_arg(block)
if not isinstance(buffer_index, int):
_, buffer_index, _ = self._normalize_buffer_arg(
block, buffer_index, required_buffer_type="read"
)
return _ffi_api.ScheduleCacheIndex( # type: ignore # pylint: disable=no-member
self, block, buffer_index
)
@type_checked
def reindex(
self,
block: Union[BlockRV, str],
buffer: Union[Tuple[str, int], str, Buffer],
) -> BlockRV:
"""Create a block that read/write a buffer region into a read/write cache with reindexing.
The layout of the cache will be the same as by the iterators of the block that reads/writes
the buffer. It requires:
1) There is only one block who reads/writes the target buffer
2) There is only one buffer load/store of this buffer in the block
Parameters
----------
block : Union[BlockRV, str]
The block that accesses the target buffer. If a string,
this must uniquely identify a block.
buffer: Union[Tuple[str,int], Buffer, str]
The buffer to be transformed, or a specification of how to
identify the buffer to be transformed.
If `buffer` is a tuple of ``(str,int)``, the first item
should be either "read" or "write", and the second item is
an index into the block's read or write regions.
If `buffer` is a string, it is the name of the buffer,
which must exist within the reads/writes of the block. In
addition, the reads/writes of the block may not contain
more than one buffer with this name.
If `buffer` is a Buffer object, it must exist within the
reads/writes of the block.
Returns
-------
reindex_block : BlockRV
The block of the reindex stage
Examples
--------
Before reindex, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_reindex(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vj, vi] * 2.0
Create the schedule and do reindex:
.. code-block:: python
sch = tir.Schedule(before_reindex)
block = sch.get_block("B")
sch.reindex(block, ("read", 0))
After applying reindex, the IR becomes:
.. code-block:: python
@T.prim_func
def after_reindex(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"]
) -> None:
A_reindex = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("A_reindex"):
vi, vj = T.axis.remap("SS", [i, j])
A_reindex[vi, vj] = A[vj, vi]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A_reindex[vi, vj] * 2.0
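The buffer may equivalently be selected by its unique name or by the Buffer object itself;
for the function above, each of the following calls identifies the same read buffer:
.. code-block:: python
    sch.reindex(block, ("read", 0))  # by (region kind, index)
    sch.reindex(block, "A")          # by unique buffer name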
"""
block = self._normalize_block_arg(block)
buffer_index_type, buffer_index, _ = self._normalize_buffer_arg(block, buffer)
assert buffer_index_type in ["read", "write"], "Invalid buffer_index_type"
buffer_index_type_enum = 0 if buffer_index_type == "read" else 1
return _ffi_api.ScheduleReIndex( # type: ignore # pylint: disable=no-member
self, block, buffer_index, buffer_index_type_enum
)
########## Schedule: Compute location ##########
@type_checked
def compute_at(
self,
block: Union[BlockRV, str],
loop: LoopRV,
preserve_unit_loops: bool = False,
index: int = -1,
) -> None:
"""Compute-At. Move a producer block under the specific loop, and regenerate the
loops induced by the block so that the buffer region produced by the producer block could
cover those regions consumed by its consumer blocks under the given loop. It requires:
1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block`
2) The scope block has stage-pipeline property
3) The subtree of the scope block, where the given block is in, satisfies the compact
dataflow condition. i.e. all the blocks in the scope block's subtree must be either
complete block or reduction block
4) The block is not an output block with regard to the scope block, i.e. the buffers written
by the block are allocated under the scope block
5) All the consumers of the block are under the given loop
Parameters
----------
block : Union[BlockRV, str]
The block to be moved
loop: LoopRV
The loop under which the block is to be moved
preserve_unit_loops: bool
Whether to keep the trivial loops whose extents are 1
index: int
The block index of the loop body subtree blocks:
- `index = -1` means inserted into the last possible insertion point;
- `index = -2` means inserted into the first possible insertion point;
- Otherwise, `index` is a nonnegative number that indicates the insertion point
Examples
--------
Before compute-at, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do compute-at:
.. code-block:: python
sch = tir.Schedule(before_compute_at)
block = sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=False)
print(sch.mod["main"].script())
After applying compute-at, the IR becomes:
.. code-block:: python
@T.prim_func
def after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
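The ``index`` argument controls where the moved block is placed among the existing blocks
under the loop. For example, a sketch that requests the first possible insertion point
instead of the default last one:
.. code-block:: python
    sch.compute_at(block, loop, preserve_unit_loops=False, index=-2)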
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleComputeAt( # type: ignore # pylint: disable=no-member
self,
block,
loop,
preserve_unit_loops,
index,
)
@type_checked
def reverse_compute_at(
self,
block: Union[BlockRV, str],
loop: LoopRV,
preserve_unit_loops: bool = False,
index: int = -1,
) -> None:
"""Reverse-Compute-At. Move a consumer block under the specific loop, and regenerate the
loops induced by the block so that the buffer region consumed by the consumer block could
cover those regions produced by its producer blocks under the given loop. It requires:
1) `block` and `loop` are under the same scope, `loop` is not the ancestor of `block`
2) The scope block has stage-pipeline property
3) The subtree of the scope block, where the given block is in, satisfies the compact
dataflow condition. i.e. all the blocks in the scope block's subtree must be either
complete block or reduction block
4) All the producers of the block are under the given loop
Parameters
----------
block : Union[BlockRV, str]
The block to be moved
loop: LoopRV
The loop under which the block is to be moved
preserve_unit_loops: bool
Whether to keep the trivial loops whose extents are 1
index: int
The block index of the loop body subtree blocks:
- `index = -1` means inserted into the last possible insertion point;
- `index = -2` means inserted into the first possible insertion point;
- Otherwise, `index` is a nonnegative number that indicates the insertion point
Examples
--------
Before reverse-compute-at, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do reverse-compute-at:
.. code-block:: python
sch = tir.Schedule(before_reverse_compute_at)
block = sch.get_block("C")
loop, _ = sch.get_loops(sch.get_block("B"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
print(sch.mod["main"].script())
After applying reverse-compute-at, the IR becomes:
.. code-block:: python
@T.prim_func
def after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleReverseComputeAt( # type: ignore # pylint: disable=no-member
self,
block,
loop,
preserve_unit_loops,
index,
)
@type_checked
def compute_inline(self, block: Union[BlockRV, str]) -> None:
"""Inline a block into its consumer(s). It requires:
1) The block is a complete non-root block, which only produces one buffer
2) The block must not be the only leaf in the scope.
3) The body of the block must be a BufferStore statement in
the form of, ``A[i, j, k, ...] = ...`` where the indices of
the LHS are all distinct atomic variables, and no variables
other than those indexing variables are allowed in the
statement.
Parameters
----------
block : Union[BlockRV, str]
The block to be inlined to its consumer(s)
Examples
--------
Before compute-inline, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_inline(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do compute-inline:
.. code-block:: python
sch = tir.Schedule(before_inline)
sch.compute_inline(sch.get_block("B"))
print(sch.mod["main"].script())
After applying compute-inline, the IR becomes:
.. code-block:: python
@T.prim_func
def after_inline(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleComputeInline(self, block) # type: ignore # pylint: disable=no-member
@type_checked
def reverse_compute_inline(self, block: Union[BlockRV, str]) -> None:
"""Inline a block into its only producer. It requires:
1) The block is a complete non-root block, which only produces and consumes one buffer
2) The block must not be the only leaf in the scope.
3) The only producer of the block is a read-after-write producer and a
complete non-root block
4) The body of the block must be a BufferStore statement in the form of,
``B[f(i, j, k, ...)] = g(i, j, k, A[i, j, k, ...] ...)`` where the
indices of each `BufferLoad` on the RHS are all distinct atomic
variables, and no variables other than those indexing variables are
allowed in the statement.
Parameters
----------
block : Union[BlockRV, str]
The block to be inlined to its producer
Examples
--------
Before reverse-compute-inline, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_inline(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do reverse-compute-inline:
.. code-block:: python
sch = tir.Schedule(before_inline)
sch.reverse_compute_inline(sch.get_block("C"))
print(sch.mod["main"].script())
After applying reverse-compute-inline, the IR becomes:
.. code-block:: python
@T.prim_func
def after_inline(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleReverseComputeInline(self, block) # type: ignore # pylint: disable=no-member
########## Schedule: Reduction ##########
@type_checked
def decompose_reduction(self, block: Union[BlockRV, str], loop: LoopRV) -> BlockRV:
"""Decompose a reduction block into two separate blocks.
a) The init block, which is translated from the init statement of the reduction block;
b) The update block, which is the original block without init statement.
The init block is inserted right before the given loop.
The schedule primitive requires:
1) The input block is a reduction block.
2) The input loop is the ancestor of the block.
3) The input loop is not lower than any loop related to a reduction block var.
Parameters
----------
block : Union[BlockRV, str]
The reduction block to be decomposed
loop : LoopRV
The loop before which the init block is inserted.
Returns
-------
init_block : BlockRV
The init block
Examples
--------
Before decompose-reduction, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_decompose(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, [128, 128])
B = tir.match_buffer(b, [128, 128])
C = tir.match_buffer(c, [128, 128])
for i, j, k in tir.grid(128, 128, 128):
with tir.block([128, 128, tir.reduce_axis(0, 128)], "C") as [vi, vj, vk]:
with tir.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
Create the schedule and do decompose-reduction with specified loop:
.. code-block:: python
sch = tir.Schedule(before_decompose)
C = sch.get_block("C")
i, j, k = sch.get_loops(C)
sch.decompose_reduction(C, i)
print(sch.mod["main"].script())
After applying decompose-reduction, the IR becomes:
.. code-block:: python
@T.prim_func
def after_decompose(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, [128, 128])
B = tir.match_buffer(b, [128, 128])
C = tir.match_buffer(c, [128, 128])
for i in tir.serial(128):
for j in tir.serial(128):
with tir.block([128, 128]) as [vi, vj]:
C[vi, vj] = 0.0
for i, j, k in tir.grid(128, 128, 128):
with tir.block([128, 128, tir.reduce_axis(0, 128)], "C") as [vi, vj, vk]:
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
"""
block = self._normalize_block_arg(block)
return _ffi_api.ScheduleDecomposeReduction(self, block, loop) # type: ignore # pylint: disable=no-member
@type_checked
def rfactor(self, loop: LoopRV, factor_axis: int) -> BlockRV:
"""Factorize an associative reduction block by the specified loop.
An associative reduction cannot be parallelized directly,
because it leads to potential race condition during accumulation.
Alternatively, the reduction could be factorized on a loop with the following steps:
- Step 1: evenly slice the reduction into `n` separate chunks, where `n` is the loop extent
- Step 2: compute the chunks separately and write the result into `n` intermediate buffers;
- Step 3: accumulate the `n` separate buffer into the result buffer.
Note that the Step 2 above introduces opportunities for parallelization.
RFactor is a schedule primitive that implements the transformation described above:
Given a block that writes to buffer `B`, it factorizes a loop of extent `n`.
For example, the pseudocode below accumulates `B[i] = sum(A[i, : , : ])`:
.. code-block:: python
for i in range(128): # loop i is a data parallel loop
for j in range(128): # loop j is a reduction loop
for k in range(128): # loop k is a reduction loop
B[i] = B[i] + A[i, j, k]
Suppose RFactor is applied on the innermost loop `k` and `factor_axis = 1`.
RFactor then creates an intermediate buffer and two blocks.
1. The intermediate buffer, or "rf-buffer" is a buffer of rank `ndim(B) + 1` and
size `size(B) * n`, whose shape expands from `shape(B)` by adding an axis of `n`
at the position specified by `factor_axis`. For example,
* shape(B) = [1, 2, 3], factor_axis = 0 => shape(B_rf) = [n, 1, 2, 3]
* shape(B) = [1, 2, 3], factor_axis = 1 => shape(B_rf) = [1, n, 2, 3]
* shape(B) = [1, 2, 3], factor_axis = 2 => shape(B_rf) = [1, 2, n, 3]
* shape(B) = [1, 2, 3], factor_axis = 3 => shape(B_rf) = [1, 2, 3, n]
2. The rfactor block, or "rf-block", is a block that writes to the `rf-buffer` without
accumulating over the loop `k`, i.e. the loop `k` is converted from a reduction loop
to a data parallel loop. In our example, the rf-block is:
.. code-block:: python
B_rf = np.zeros((128, 128)) # the rf-buffer
for k in range(128): # loop k is converted to a data parallel loop
for i in range(128): # loop i is a data parallel loop (unchanged)
for j in range(128): # loop j is a reduction loop (unchanged)
B_rf[i, k] = B_rf[i, k] + A[i, j, k]
3. The write-back block, or `wb-block`, is a block that accumulates the rf-buffer into
the result buffer. All the reduction loops are removed except the loop `k` for accumulation.
In our example, the wb-block is:
.. code-block:: python
for i in range(128): # loop i is a data parallel loop (unchanged)
# loop j is removed because it is a reduction loop
for k in range(128): # loop k is a reduction loop (unchanged)
B[i] = B[i] + B_rf[i, k]
Parameters
----------
loop : LoopRV
The loop outside block for which we want to do rfactor
factor_axis : int
The position where the new dimension is placed in the new introduced rfactor buffer
Returns
-------
rf_block : BlockRV
The block that computes partial results over each slice (i.e., the first block
as described in the above illustration)
Examples
--------
Before rfactor, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_rfactor(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128,))
for ii, i, j in T.grid(128, 128, 128):
with T.block("B"):
vii, vi, vj = T.axis.remap("SRR", [ii, i, j])
with T.init():
B[vii] = 0.0
B[vii] = B[vii] + A[vii, vi, vj]
Create the schedule and do rfactor:
.. code-block:: python
sch = tir.Schedule(before_rfactor)
_, _, k = sch.get_loops(sch.get_block("B"))
sch.rfactor(k, 0)
print(sch.mod["main"].script())
After applying rfactor, the IR becomes:
.. code-block:: python
@T.prim_func
def after_rfactor(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128])
B = T.match_buffer(b, [128])
B_rf = T.alloc_buffer([128, 128])
for i2, ii, i in T.grid(128, 128, 128):
with T.block("B_rf"):
vi2, vii, vi = T.axis.remap("SSR", [i2, ii, i])
with T.init():
B_rf[vi2, vii] = 0.0
B_rf[vi2, vii] = (B_rf[vi2, vii] + A[vii, vi, vi2])
for ii, i2 in T.grid(128, 128):
with T.block("B"):
vii, vi2 = T.axis.remap("SR", [ii, i2])
with T.init():
B[vii] = 0.0
B[vii] = B[vii] + B_rf[vi2, vii]
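After rfactor, the loop that became data parallel in the rf-block can typically be
parallelized. A sketch, capturing the block returned by the call shown above (subject to
the legality checks documented for `parallel`):
.. code-block:: python
    rf_block = sch.rfactor(k, 0)
    k_rf, _, _ = sch.get_loops(rf_block)
    sch.parallel(k_rf)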
Note
----
Rfactor requires:
1) `loop` has only one child block, and it is a reduction block;
2) `loop` is a reduction loop, i.e. the loop variable is bound to only reduction variables
in the block binding;
3) `loop` is not parallelized, vectorized, unrolled or bound to any thread axis;
4) The block scope that `loop` is in is a staged-pipeline;
5) The outermost loop outside the reduction block should have the reduction block as its
first child block;
6) The outermost reduction loop should have only one child block;
7) A unit-extent loop that is not bound to any reduction or data parallel variables in
the block binding should not appear under any reduction loop;
8) The reduction block should write to only one buffer, and its init and body are both
simple `BufferStore`s, and the pattern is registered as an associative reducer.
The pre-defined patterns include: plus, multiplication, min and max;
9) Each of the loops on top of the block cannot be bound to a data parallel and a
reduction block binding at the same time;
10) `factor_axis` should be in range `[-ndim(B) - 1, ndim(B)]`,
where `B` is the buffer that the reduction block writes to.
Negative indexing is normalized according to numpy convention.
"""
return _ffi_api.ScheduleRFactor(self, loop, factor_axis) # type: ignore # pylint: disable=no-member
######## Schedule: Block annotation ########
@type_checked
def storage_align( # pylint: disable=too-many-arguments
self,
block: Union[BlockRV, str],
buffer_index: int,
axis: int,
factor: int,
offset: int,
) -> None:
"""Set alignment requirement for specific dimension such that
stride[axis] == k * factor + offset for some k. This is useful to set memory layout for more
friendly memory access pattern. For example, we can set alignment to be factor=2, offset=1
to avoid bank conflict for thread access on higher dimension in GPU shared memory.
Parameters
----------
block : Union[BlockRV, str]
The producer block of the buffer.
buffer_index : int
The index of the buffer in block's write region.
axis : int
The dimension to be specified for alignment.
factor : int
The factor multiple of alignment.
offset : int
The required offset factor.
Examples
--------
Before storage_align, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_storage_align(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do storage_align:
.. code-block:: python
sch = tir.Schedule(before_storage_align)
sch.storage_align(sch.get_block("B"), buffer_index=0, axis=0, factor=128, offset=1)
print(sch.mod["main"].script())
After applying storage_align, the IR becomes:
.. code-block:: python
@T.prim_func
def after_storage_align(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.block_attr({"buffer_dim_align": [[[0, 128, 1]]]})
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
After the lowering passes, buffer B will have strides [129, 1], since 129 = 1 * 128 + 1 satisfies the constraint on axis 0.
Note
----
Storage_align requires the buffer to be an intermediate buffer defined via `alloc_buffer`.
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleStorageAlign( # type: ignore # pylint: disable=no-member
self, block, buffer_index, axis, factor, offset
)
@type_checked
def set_scope(self, block: Union[BlockRV, str], buffer_index: int, storage_scope: str) -> None:
"""Set the storage scope of a buffer, where the buffer is
specified by a block and a write index
Parameters
----------
block : Union[BlockRV, str]
The producer block of the buffer
buffer_index : int
The index of the buffer in block's write region
storage_scope : str
The storage scope to be set
Examples
--------
Before set_scope, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_set_scope(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do set_scope:
.. code-block:: python
sch = tir.Schedule(before_set_scope)
sch.set_scope(sch.get_block("B"), buffer_index=0, storage_scope="shared")
print(sch.mod["main"].script())
After applying set_scope, the IR becomes:
.. code-block:: python
@T.prim_func
def after_set_scope(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_shared[vi, vj] + T.float32(1)
Note
----
Set_scope requires the buffer to be an intermediate buffer defined via `alloc_buffer`.
"""
block = self._normalize_block_arg(block)
_ffi_api.ScheduleSetScope( # type: ignore # pylint: disable=no-member
self, block, buffer_index, storage_scope
)
########## Schedule: Blockize & Tensorize ##########
@type_checked
def blockize(self, loop: LoopRV) -> BlockRV:
"""Convert the subtree rooted at a specific loop into a block.
Parameters
----------
loop : LoopRV
The root of the subtree.
Returns
-------
result : BlockRV
The new block.
Examples
--------
Before blockize, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_blockize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"]
) -> None:
for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
Create the schedule and do blockize:
.. code-block:: python
sch = tir.Schedule(before_blockize)
B = sch.get_block("B")
_, _, i1, _ = sch.get_loops(B)
sch.blockize(i1)
print(sch.mod["main"].script())
After applying blockize, the IR becomes:
.. code-block:: python
@T.prim_func
def after_blockize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"]
)-> None:
for i_0, j_0 in T.grid(8, 8):
with T.block("B_o"):
vio, vjo = T.axis.remap("SS", [i_0, j_0])
T.reads(A[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
T.writes(B[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i_1, j_1])
T.reads(A[vio * 16 + vi, vjo * 16 + vj])
T.writes(B[vio * 16 + vi, vjo * 16 + vj])
B[vio * 16 + vi, vjo * 16 + vj] = A[vio * 16 + vi, vjo * 16 + vj] \
* T.float32(2)
Note
----
blockize requires there is exactly one block under the given loop and the bindings of the
block are divisible by the subspace represented by the loops starting at the given loop.
"""
return _ffi_api.ScheduleBlockize(self, loop) # type: ignore # pylint: disable=no-member
@type_checked
def tensorize(self, block_or_loop: Union[BlockRV, LoopRV], tensor_intrin: str) -> None:
"""Tensorize the computation enclosed by loop with the tensor intrinsic.
Parameters
----------
block_or_loop : Union[BlockRV, LoopRV]
The loop to be tensorized.
tensor_intrin : str
The tensor intrin or the name of the tensor intrin.
Examples
--------
Before tensorize, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_tensorize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
# body
# with T.block("root")
for i_0, j_0, k_0, i_1, j_1, k_1 in T.grid(8, 8, 8, 16, 16, 16):
with T.block("update"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
vk = T.axis.reduce(128, k_0 * 16 + k_1)
T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
Declare and register the tensor intrinsic:
.. code-block:: python
@T.prim_func
def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=128, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=128, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=128, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.tvm_mma_sync(
C.data,
C.elem_offset // 256,
A.data,
A.elem_offset // 256,
B.data,
B.elem_offset // 256,
C.data,
C.elem_offset // 256,
dtype="handle",
)
)
tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
Create the schedule and do tensorize:
.. code-block:: python
sch = tir.Schedule(before_tensorize)
update = sch.get_block("update")
_, _, _, i1, _, _ = sch.get_loops(update)
sch.tensorize(i1, "test_mma_intrin")
print(sch.mod["main"].script())
After applying tensorize, the IR becomes:
.. code-block:: python
@T.prim_func
def after_tensorize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
# body
# with T.block("root")
for i_0, j_0, k_0 in T.grid(8, 8, 8):
with T.block("update_o"):
vio, vjo, vko = T.axis.remap("SSR", [i_0, j_0, k_0])
T.reads(
C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
A[vio * 16 : vio * 16 + 16, vko * 16 : vko * 16 + 16],
B[vjo * 16 : vjo * 16 + 16, vko * 16 : vko * 16 + 16],
)
T.writes(C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
A_1 = T.match_buffer(
A[vio * 16 : vio * 16 + 16, vko * 16 : vko * 16 + 16],
[16, 16],
dtype="float32",
offset_factor=1,
)
B_1 = T.match_buffer(
B[vjo * 16 : vjo * 16 + 16, vko * 16 : vko * 16 + 16],
[16, 16],
dtype="float32",
offset_factor=1,
)
C_1 = T.match_buffer(
C[vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
[16, 16],
dtype="float32",
offset_factor=1,
)
T.evaluate(
T.tvm_mma_sync(
C_1.data,
C_1.elem_offset // 256,
A_1.data,
A_1.elem_offset // 256,
B_1.data,
B_1.elem_offset // 256,
C_1.data,
C_1.elem_offset // 256,
dtype="handle",
)
)
"""
_ffi_api.ScheduleTensorize( # type: ignore # pylint: disable=no-member
self, block_or_loop, tensor_intrin
)
########## Schedule: Annotation ##########
PrimAnnotationValueT = Union[str, int, float, ExprRV]
AnnotationValueT = Union[
PrimAnnotationValueT,
List[PrimAnnotationValueT],
Dict[str, Union[PrimAnnotationValueT, List[PrimAnnotationValueT]]],
]
@type_checked
def annotate(
self,
block_or_loop: Union[BlockRV, LoopRV],
ann_key: str,
ann_val: AnnotationValueT,
) -> None:
"""Annotate a block/loop with a key value pair
Parameters
----------
block_or_loop: Union[BlockRV, LoopRV]
The block/loop to be annotated
ann_key : str
The annotation key
ann_val : AnnotationValueT
The annotation value
Examples
--------
Before annotate, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_annotate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do annotate:
.. code-block:: python
sch = tir.Schedule(before_annotate)
sch.annotate(sch.get_block("B"), "ann_key", "ann_value")
print(sch.mod["main"].script())
After applying annotate, the IR becomes:
.. code-block:: python
@T.prim_func
def after_annotate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.block_attr({"ann_key", "ann_value"})
B[vi, vj] = A[vi, vj] * 2.0
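Annotation values are not limited to strings; per ``AnnotationValueT``, integers, floats,
expressions, and lists or dicts of them are accepted as well. A sketch:
.. code-block:: python
    sch.annotate(sch.get_block("B"), "ann_int", 64)
    sch.annotate(sch.get_block("B"), "ann_list", [1, 2, 3])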
"""
if isinstance(ann_val, str):
ann_val = String(ann_val)
elif isinstance(ann_val, int):
ann_val = IntImm("int32", ann_val)
elif isinstance(ann_val, float):
ann_val = FloatImm("float32", ann_val)
_ffi_api.ScheduleAnnotate( # type: ignore # pylint: disable=no-member
self, block_or_loop, ann_key, ann_val
)
@type_checked
def unannotate(self, block_or_loop: Union[BlockRV, LoopRV], ann_key: str) -> None:
"""Unannotate a block/loop's annotation with key ann_key
Parameters
----------
block_or_loop: Union[BlockRV, LoopRV]
The block/loop to be unannotated
ann_key : str
The annotation key
Examples
--------
Before unannotate, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_unannotate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.block_attr({"ann_key", "ann_value"})
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do unannotate:
.. code-block:: python
sch = tir.Schedule(before_unannotate)
sch.unannotate(sch.get_block("B"), "ann_key")
print(sch.mod["main"].script())
After applying unannotate, the IR becomes:
.. code-block:: python
@T.prim_func
def after_unannotate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
"""
_ffi_api.ScheduleUnannotate( # type: ignore # pylint: disable=no-member
self, block_or_loop, ann_key
)
########## Schedule: Layout transformation ##########
def _normalize_block_arg(self, block: Union[BlockRV, str]) -> BlockRV:
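"""Resolve a block argument given either as a BlockRV or as a block name (via get_block)."""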
if isinstance(block, str):
return self.get_block(block)
return block
def _normalize_buffer_arg(
self,
block: BlockRV,
buffer: Union[Tuple[str, int], int, str, Buffer],
required_buffer_type=None,
) -> Tuple[str, int, Buffer]:
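"""Resolve a buffer specification to a (buffer_index_type, buffer_index, Buffer) triple.
`buffer` may be a ("read"/"write", index) tuple, a bare index (interpreted using
`required_buffer_type`), a unique buffer name, or a Buffer object appearing in the
block's read/write regions.
"""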
block_obj: Block = self.get(block)
block_name = block_obj.name_hint
def iter_buffers():
for i, read in enumerate(block_obj.reads):
yield "read", i, read.buffer
for i, write in enumerate(block_obj.writes):
yield "write", i, write.buffer
if isinstance(buffer, int):
buffer = (required_buffer_type, buffer)
if isinstance(buffer, str):
possible_buffers = {}
# String lookup requires ensuring that the name is unique
for buffer_index_type, buffer_index, buf in iter_buffers():
if buf.name == buffer:
possible_buffers[buf] = (buffer_index_type, buffer_index)
assert possible_buffers, f"Could not find buffer '{buffer}' in block '{block_name}'"
assert (
len(possible_buffers) == 1
), f"Multiple buffers named '{buffer}' in block '{block_name}'"
buffer_obj, (buffer_index_type, buffer_index) = next(iter(possible_buffers.items()))
elif isinstance(buffer, Buffer):
# Buffer lookup has unique id, can break out early
found = False
for buffer_index_type, buffer_index, buffer_obj in iter_buffers():
if buffer_obj.same_as(buffer):
found = True
break
assert found, "Could not find buffer '{buffer.name}' in block '{block_name}'"
elif isinstance(buffer, tuple):
buffer_index_type, buffer_index = buffer
assert buffer_index_type in ["read", "write",], (
f"Invalid buffer_index_type. "
f"Expected 'read' or 'write', "
f"but received {buffer_index_type}"
)
buffer_list = block_obj.reads if buffer_index_type == "read" else block_obj.writes
assert 0 <= buffer_index < len(buffer_list), (
f"Invalid buffer_index {buffer_index}. "
f"Block {block_name} has only "
f"{len(buffer_list)} {buffer_index_type} buffers."
)
buffer_obj = buffer_list[buffer_index].buffer
else:
raise TypeError(f"Invalid type for argument 'buffer': {type(buffer)}")
if required_buffer_type is not None:
assert buffer_index_type == required_buffer_type, (
f"Expected buffer to be read buffer, "
f"but {buffer_obj.name} was a {buffer_index_type} buffer "
f"in the specified block"
)
return (buffer_index_type, buffer_index, buffer_obj)
@type_checked
def transform_layout(
self,
block: Union[BlockRV, str],
buffer: Union[Tuple[str, int], str, Buffer],
index_map: Union[IndexMap, Callable],
pad_value: Optional[Union[int, float, PrimExpr, IndexMap, Callable]] = None,
) -> None:
"""Apply a transformation represented by IndexMap to buffer
Parameters
----------
block : Union[BlockRV, str]
The block that accesses the target buffer. If a string,
this must uniquely identify a block.
buffer: Union[Tuple[str,int], Buffer, str]
The buffer to be transformed, or a specification of how to
identify the buffer to be transformed.
If `buffer` is a tuple of ``(str,int)``, the first item
should be either "read" or "write", and the second item is
an index into the block's read or write regions.
If `buffer` is a string, it is the name of the buffer,
which must exist within the reads/writes of the block. In
addition, the reads/writes of the block may not contain
more than one buffer with this name.
If `buffer` is a Buffer object, it must exist within the
reads/writes of the block.
index_map : Union[IndexMap, Callable]
The transformation to apply.
If `index_map` is a callable, and the returned list
contains IndexMap.AXIS_SEPARATOR, the SetAxisSeparators
primitive will be called in addition to the
TransformLayout primitive.
pad_value: Optional[Union[int, float, PrimExpr, IndexMap, Callable]]
The value to be used for any padding introduced by the
transformation. If the schedule contains a producer block
for the specified buffer, the pad value will be written as
part of the producer block if possible, or after the producer
block otherwise. Otherwise, if the buffer is an input, an annotation
block will be inserted to state that the padding
contains the known value.
The pad value may not contain instances of BufferLoad,
except where it loads a value from the buffer being
transformed (e.g. to create a circular buffer with
padding that consists of repeated elements).
Note: If applied to an input buffer, the calling scope is
responsible for ensuring that the pad_value is present.
Algebraic simplifications, branch elimination, and other
optimizations may assume that this precondition is met, and
may return incorrect results if it is not.
If None, the transformation may not introduce padding.
If an int, float or PrimExpr, the transformation is the
specific value to be present in the padding.
If an IndexMap or Callable, the transformation is the
value to be present in the padding in terms of the
transformed index.
Examples
--------
Before transform_layout, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_transform_layout(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do transform_layout:
.. code-block:: python
sch = tir.Schedule(before_transform_layout)
sch.transform_layout(sch.get_block("B"), buffer=("write",0),
index_map=lambda m, n: (m // 16, n // 16, m % 16, n % 16))
print(sch.mod["main"].script())
After applying transform_layout, the IR becomes:
.. code-block:: python
@T.prim_func
def two_elementwise_transformed_intermediate_buffer(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((8, 8, 16, 16), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi // 16, vj // 16, vi % 16, vj % 16] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi // 16, vj // 16, vi % 16, vj % 16] + 1.0
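If the transformation introduces padding, ``pad_value`` supplies the value written into the
padded region. A sketch (the tile size 48 is an arbitrary, illustrative choice that does not
divide 128, so padding is required):
.. code-block:: python
    sch.transform_layout(
        sch.get_block("B"),
        buffer=("write", 0),
        index_map=lambda m, n: (m, n // 48, n % 48),
        pad_value=0.0,
    )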
"""
block = self._normalize_block_arg(block)
buffer_index_type, buffer_index, buffer_obj = self._normalize_buffer_arg(block, buffer)
ndim = len(buffer_obj.shape)
if callable(index_map):
index_map, axis_separators = IndexMap.from_func_with_separators(index_map, ndim=ndim)
else:
axis_separators = []
if pad_value is None:
pass
elif callable(pad_value):
pad_value = IndexMap.from_func(pad_value, ndim=len(index_map.final_indices))
elif not isinstance(pad_value, IndexMap):
# Explicitly convert python int/float arguments to the
# buffer's type. If the default `tvm.runtime.convert`
# behavior is applied, these would be converted to
# int32/float32, which may not match the buffer's type.
if isinstance(pad_value, int):
pad_value = IntImm(buffer_obj.dtype, pad_value)
elif isinstance(pad_value, float):
pad_value = FloatImm(buffer_obj.dtype, pad_value)
pad_value = IndexMap.from_func(
lambda *indices: pad_value, ndim=len(index_map.final_indices)
)
buffer_index_type_enum = 0 if buffer_index_type == "read" else 1
_ffi_api.ScheduleTransformLayout( # type: ignore # pylint: disable=no-member
self, block, buffer_index, buffer_index_type_enum, index_map, pad_value
)
if axis_separators:
_ffi_api.ScheduleSetAxisSeparator( # type: ignore # pylint: disable=no-member
self, block, buffer_index, buffer_index_type_enum, axis_separators
)
@type_checked
def transform_block_layout(
self,
block: Union[BlockRV, str],
index_map: Union[IndexMap, Callable],
) -> None:
"""Apply a transformation represented by IndexMap to block
Parameters
----------
block : Union[BlockRV, str]
The block to be transformed
index_map : Union[IndexMap, Callable]
The transformation to apply.
Examples
--------
Before transform_block_layout, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_transform_block_layout(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"]
) -> None:
for i, j in T.grid(16, 16):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
Create the schedule and do transform_block_layout:
.. code-block:: python
sch = tir.Schedule(before_transform_block_layout)
sch.transform_block_layout(sch.get_block("B"), lambda i, j: (i * 16 + j,))
print(sch.mod["main"].script())
After applying transform_block_layout, the IR becomes:
.. code-block:: python
@T.prim_func
def after_transform_block_layout(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"]
) -> None:
for i in range(256):
with T.block("B"):
vi, = T.axis.remap("S", [i])
B[vi // 16, vi % 16] = A[vi // 16, vi % 16] * 2.0
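An explicit IndexMap object may be passed instead of a callable; a sketch equivalent to the
call above:
.. code-block:: python
    from tvm.tir import IndexMap
    sch.transform_block_layout(sch.get_block("B"), IndexMap.from_func(lambda i, j: (i * 16 + j,)))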
"""
block = self._normalize_block_arg(block)
if callable(index_map):
index_map = IndexMap.from_func(index_map)
_ffi_api.ScheduleTransformBlockLayout( # type: ignore # pylint: disable=no-member
self, block, index_map
)
@type_checked
def set_axis_separator(
self,
block: Union[BlockRV, str],
buffer: Union[Tuple[str, int], str, Buffer],
axis_separators: Optional[List[int]],
) -> None:
"""Set the axis separator of a buffer, where the buffer is specified by a block and a read
or write index.
Parameters
----------
block : Union[BlockRV, str]
The block that accesses the target buffer. If a string,
this must uniquely identify a block.
buffer: Union[Tuple[str,int], Buffer, str]
The buffer to be transformed, or a specification of how to
identify the buffer to be transformed.
If `buffer` is a tuple of ``(str,int)``, the first item
should be either "read" or "write", and the second item is
an index into the block's read or write regions.
If `buffer` is a string, it is the name of the buffer,
which must exist within the reads/writes of the block. In
addition, the reads/writes of the block may not contain
more than one buffer with this name.
If `buffer` is a Buffer object, it must exist within the
reads/writes of the block.
axis_separators : Optional[List[int]]
The axis separators.
Examples
--------
Before set_axis_separator, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_set_axis_separator(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
Create the schedule and do set_axis_separator:
.. code-block:: python
sch = tir.Schedule(before_set_axis_separator)
sch.set_axis_separators(sch.get_block("B"), buffer_index=0, buffer_index_type="write",
axis_separators=[1])
print(sch.mod["main"].script())
After applying set_axis_separator, the IR becomes:
.. code-block:: python
@T.prim_func
def after_set_axis_separators(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
"""
axis_separators = axis_separators or []
block = self._normalize_block_arg(block)
buffer_index_type, buffer_index, _ = self._normalize_buffer_arg(block, buffer)
buffer_index_type_enum = 0 if buffer_index_type == "read" else 1
_ffi_api.ScheduleSetAxisSeparator( # type: ignore # pylint: disable=no-member
self, block, buffer_index, buffer_index_type_enum, axis_separators
)
########## Schedule: Padding decomposition #########
@type_checked
def decompose_padding(self, block: Union[BlockRV, str], loop: LoopRV) -> BlockRV:
"""Decompose a block of padding computation pattern into two separate blocks.
        a) The block which fills const pad values into the full write region;
        b) The block which fills in-bound values into the region where the pad predicate is true.
The pad value filling block is inserted right before the given loop.
The schedule primitive requires:
1) The input block is a complete block.
        2) The input loop is an ancestor of the block.
        3) The input block matches the padding pattern.
Parameters
----------
block : Union[BlockRV, str]
The padding block to be decomposed.
loop : LoopRV
            The loop before which the pad value filling block is inserted.
Returns
-------
pad_value_block : BlockRV
The block filling const pad values.
Examples
--------
Before decompose-padding, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in range(140):
with T.block("block"):
vi = T.axis.remap("S", [i])
y[vi] = T.if_then_else(vi >= 6 and vi < 134, x[vi - 6], 0, dtype="int32")
Create the schedule and do decompose-padding with specified loop:
.. code-block:: python
sch = tir.Schedule(before_decompose, debug_mask="all")
block = sch.get_block("block")
sch.decompose_padding(block, sch.get_loops(block)[0])
print(sch.mod["main].script())
After applying decompose-padding, the IR becomes:
.. code-block:: python
@T.prim_func
def after_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in T.serial(140):
with T.block("block_pad_const"):
vi = T.axis.spatial(140, i)
y[vi] = 0
for i in T.serial(128):
with T.block("block"):
vi = T.axis.spatial(128, i)
y[vi + 6] = x[vi]
"""
block = self._normalize_block_arg(block)
return _ffi_api.ScheduleDecomposePadding( # type: ignore # pylint: disable=no-member
self, block, loop
)
@type_checked
def can_decompose_padding(self, block: Union[BlockRV, str], loop: LoopRV) -> bool:
"""Check whether the block match padding pattern and can be decomposed."""
return _ffi_api.CanDecomposePadding(self, block, loop) # type: ignore # pylint: disable=no-member
@type_checked
def pad_einsum(self, block: Union[BlockRV, str], padding: List[int]) -> None:
"""Pad the computation of Einsum.
        This schedule primitive identifies the Einsum pattern in the block body and finds its
        producer blocks. It then pads the computation of the Einsum pattern and its producer blocks.
        The output buffer and the producer buffers are resized according to the padding size. It
        requires the output buffer and the producer buffers to be allocated inside the PrimFunc.
        The padding is a list of non-negative integers; each element corresponds to the padding for
        each block iter in the order of block iters. The block and its producer blocks should have
        trivial bindings, i.e. each block iter is bound to a single loop variable. After padding,
        the block iter extents and the corresponding outer loops are extended by the padding size.
        The size of the producer buffers is inferred from the padding size of the Einsum
computation. The producer buffers are padded by the initial value of the corresponding
reduction.
Parameters
----------
block : Union[BlockRV, str]
The block that matches the Einsum pattern.
padding : List[int]
The padding for each block iter.
Examples
--------
Before applying pad-einsum, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_pad_einsum(
A: T.Buffer[(128, 127), "float32"],
B: T.Buffer[(127, 127), "float32"],
C: T.Buffer[(128, 127), "float32"],
) -> None:
A_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
B_shared = T.alloc_buffer((127, 127), "float32", scope="shared")
C_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
for i0, i1 in T.grid(128, 127):
with T.block("A"):
i, j = T.axis.remap("SS", [i0, i1])
A_shared[i, j] = A[i, j]
for i0, i1 in T.grid(127, 127):
with T.block("B"):
i, j = T.axis.remap("SS", [i0, i1])
B_shared[i, j] = B[i, j]
for i0, i1, i2 in T.grid(128, 127, 127):
with T.block("C_shared"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
with T.init():
C_shared[i, j] = T.float32(0)
C_shared[i, j] = C_shared[i, j] + A_shared[i, k] * B_shared[k, j]
for i0, i1 in T.grid(128, 127):
with T.block("C"):
i, j = T.axis.remap("SS", [i0, i1])
C[i, j] = C_shared[i, j]
Create the schedule and do pad-einsum with specified block:
.. code-block:: python
sch = tir.Schedule(before_pad_einsum, debug_mask="all")
block = sch.get_block("C_shared")
sch.pad_einsum(block, [0, 1, 1])
print(sch.mod["main"].script())
        After applying pad-einsum, the IR becomes:
.. code-block:: python
@T.prim_func
def after_pad_einsum(
A: T.Buffer[(128, 127), "float32"],
B: T.Buffer[(127, 127), "float32"],
C: T.Buffer[(128, 127), "float32"],
) -> None:
A_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
B_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i0, i1 in T.grid(128, 128):
with T.block("A"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(A[i, j])
T.writes(A_shared_padded[i, j])
A_shared_padded[i, j] = T.if_then_else(
j < 127, A[i, j], T.float32(0), dtype="float32"
)
for i0, i1 in T.grid(128, 128):
with T.block("B"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(B[i, j])
T.writes(B_shared_padded[i, j])
B_shared_padded[i, j] = T.if_then_else(
i < 127 and j < 127, B[i, j], T.float32(0), dtype="float32"
)
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("C_shared"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(A_shared_padded[i, k], B_shared_padded[k, j])
T.writes(C_shared_padded[i, j])
with T.init():
C_shared_padded[i, j] = T.float32(0)
C_shared_padded[i, j] = (
C_shared_padded[i, j] + A_shared_padded[i, k] * B_shared_padded[k, j]
)
for i0, i1 in T.grid(128, 127):
with T.block("C"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(C_shared_padded[i, j])
T.writes(C[i, j])
C[i, j] = C_shared_padded[i, j]
"""
block = self._normalize_block_arg(block)
return _ffi_api.SchedulePadEinsum( # type: ignore # pylint: disable=no-member
self, block, padding
)
######## Schedule: Buffer transformation ########
@type_checked
def rolling_buffer(
self,
block: Union[BlockRV, str],
write_buffer_index: int,
) -> None:
"""Compute the target buffer via rolling buffering, select the outermost rollable
axis with a positive bound overlap that appears in the block's ancestor loops
as `rolling axis`, fold and circularize the buffer along the rolling dimension,
append block predicate to avoid recomputing overlapping elements. It requires:
1) The block is not an output block and has only RAW dependencies.
2) The buffer to be an intermediate buffer defined via `alloc_buffer`.
3) The LCA of the producer and consumer of the buffer is a for loop, typically,
the producer and consumer of the buffer are cascaded through compute_at.
4) The access region of the buffer has at least one dimension that contains
a positive bound overlap.
Parameters
----------
block : Union[BlockRV, str]
The producer block of the buffer.
write_buffer_index : int
The index of the buffer in block's write region.
Examples
--------
Before rolling_buffer, in TensorIR, the IR is:
.. code-block:: python
@T.prim_func
def before_rolling_buffer(
A: T.Buffer[(12, 12), "int8"], C: T.Buffer[(8, 8), "int8"]
) -> None:
# body
# with T.block("root")
B = T.alloc_buffer([10, 10], dtype="int8")
for i0, i1 in T.grid(2, 2):
for ax0, ax1, ax2, ax3 in T.grid(6, 6, 3, 3):
with T.block("B"):
ax0_1 = T.axis.spatial(10, i0 * 4 + ax0)
ax1_1 = T.axis.spatial(10, i1 * 4 + ax1)
rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
B[ax0_1, ax1_1] = T.max(
B[ax0_1, ax1_1], A[ax0_1 + rv0, ax1_1 + rv1]
)
for ax0, ax1, ax2, ax3 in T.grid(4, 4, 3, 3):
with T.block("C"):
ax0_1 = T.axis.spatial(8, i0 * 4 + ax0)
ax1_1 = T.axis.spatial(8, i1 * 4 + ax1)
rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
C[ax0_1, ax1_1] = T.max(
C[ax0_1, ax1_1], B[ax0_1 + rv0, ax1_1 + rv1]
)
Create the schedule and do rolling_buffer:
.. code-block:: python
sch = tir.Schedule(before_rolling_buffer)
sch.rolling_buffer(sch.get_block("B"), write_buffer_index=0)
print(sch.mod["main"].script())
After applying rolling_buffer, the IR becomes:
.. code-block:: python
@T.prim_func
def after_rolling_buffer(
A: T.Buffer[(12, 12), "int8"],
C: T.Buffer[(8, 8), "int8"]
) -> None:
# body
# with T.block("root")
B = T.alloc_buffer([6, 10], dtype="int8")
for i0, i1 in T.grid(2, 2):
for ax0, ax1, ax2, ax3 in T.grid(6, 6, 3, 3):
with T.block("B"):
T.where((i0 < 1 or 2 <= ax0) and (i1 < 1 or 2 <= ax1))
ax0_1 = T.axis.spatial(10, i0 * 4 + ax0)
ax1_1 = T.axis.spatial(10, i1 * 4 + ax1)
rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
B[ax0_1 % 6, ax1_1] = T.max(
B[ax0_1 % 6, ax1_1], A[ax0_1 + rv0, ax1_1 + rv1]
)
for ax0, ax1, ax2, ax3 in T.grid(4, 4, 3, 3):
with T.block("C"):
ax0_1 = T.axis.spatial(8, i0 * 4 + ax0)
ax1_1 = T.axis.spatial(8, i1 * 4 + ax1)
rv0, rv1 = T.axis.remap("RR", [ax2, ax3])
C[ax0_1, ax1_1] = T.max(
C[ax0_1, ax1_1], B[ax0_1 % 6 + rv0, ax1_1 + rv1]
)
Note
----
The region_cover property of the consumer block of the target buffer will become false.
"""
block = self._normalize_block_arg(block)
return _ffi_api.ScheduleRollingBuffer(self, block, write_buffer_index) # type: ignore # pylint: disable=no-member
########## Schedule: Misc ##########
@type_checked
def enter_postproc(self) -> None:
"""A no-op that marks the start of postprocessing phase of scheduling"""
_ffi_api.ScheduleEnterPostproc(self) # type: ignore # pylint: disable=no-member
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This file defines ScheduleState, the core data structure of TensorIR scheduling."""
from collections import namedtuple
from enum import IntEnum
from typing import Dict, Optional, Union
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.tir import Block, BlockRealize, For, PrimFunc
from . import _ffi_api
from .block_scope import BlockScope, StmtSRef
CachedFlags = namedtuple("CachedFlags", ["affine_binding", "region_cover", "stage_pipeline"])
class ScheduleDebugMask(IntEnum):
"""The bitmask of the `debug_mask` flag in the ScheduleState class.
    If the `debug_mask` flag has a certain bit on, then the corresponding
verification pass will be conducted. For example, if `(debug_mask & VERIFY_SREF_TREE) != 0`,
then the correctness of the sref tree will be verified after each schedule instruction.
Attributes
----------
VERIFY_SREF_TREE : int = 1
Verify the correctness of the sref tree
VERIFY_CACHED_FLAGS : int = 2
Verify the correctness of affine_binding, region_cover and stage_pipeline
"""
VERIFY_SREF_TREE = 1
VERIFY_CACHED_FLAGS = 2
def _parse_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
if isinstance(mod, PrimFunc):
mod = IRModule({"main": mod})
if not isinstance(mod, IRModule):
raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
return mod
def _parse_debug_mask(debug_mask: Union[str, int]) -> int:
if isinstance(debug_mask, str):
if debug_mask == "all":
debug_mask = ScheduleDebugMask.VERIFY_SREF_TREE | ScheduleDebugMask.VERIFY_CACHED_FLAGS
elif debug_mask == "none":
debug_mask = 0
else:
raise ValueError(f"Unrecognizable `debug_mask`: {debug_mask}")
if not isinstance(debug_mask, bool) and not isinstance(debug_mask, int):
raise TypeError(f"`debug_mask` should be integer or boolean, but gets: {debug_mask}")
return debug_mask
@register_object("tir.ScheduleState")
class ScheduleState(Object):
"""The state of scheduling, which exposes a `Replace` method as
the primary resort for all the scheduling primitives to manipulate the TensorIR.
The data structure contains the following information
1) The AST being scheduled (mod)
2) The sref tree of schedulable statements (indicated by the srefs)
3) The dependency information of each block scope (block_info)
4) A reverse mapping from the AST nodes to that in the sref tree (get_sref)
5) A debug flag, if set, extra checking is enabled (debug_mask)
Parameters
----------
mod : IRModule
The AST of the module being scheduled
debug_mask : int
Do extra correctness checking after the object construction
and each time after calling the Replace method.
"""
mod: IRModule
debug_mask: int
def __init__(
self,
mod: Union[PrimFunc, IRModule],
*,
debug_mask: Union[str, int] = "none",
) -> None:
"""Construct a schedule state from an IRModule or a PrimFunc
Parameters
----------
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to be scheduled
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
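        Examples
        --------
        A minimal construction sketch (illustrative; ``func`` may be any TensorIR PrimFunc):
        .. code-block:: python
            state = ScheduleState(func, debug_mask="all")
            # equivalently, combine the bitmask flags explicitly
            state = ScheduleState(
                func,
                debug_mask=ScheduleDebugMask.VERIFY_SREF_TREE | ScheduleDebugMask.VERIFY_CACHED_FLAGS,
            )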
"""
self.__init_handle_by_constructor__(
_ffi_api.ScheduleState, # type: ignore # pylint: disable=no-member
_parse_mod(mod),
_parse_debug_mask(debug_mask),
)
def get_sref(self, stmt: Union[Block, For]) -> Optional[StmtSRef]:
"""Return the corresponding sref that points to the stmt
Parameters
----------
stmt : Union[Block, For]
The schedulable statement in the TensorIR to be retrieved for its sref
Returns
-------
sref : StmtSRef
The corresponding sref
"""
return _ffi_api.ScheduleStateGetSRef(self, stmt) # type: ignore # pylint: disable=no-member
def get_block_scope(self, block_sref: StmtSRef) -> BlockScope:
"""Get the BlockScope correpsonding to the block sref
Parameters
----------
block_sref : StmtSRef
The block sref to be retrieved
Returns
-------
        scope : BlockScope
            The corresponding block scope
"""
return _ffi_api.ScheduleStateGetBlockScope( # type: ignore # pylint: disable=no-member
self, block_sref
)
def _get_cached_flags(self, block_sref: StmtSRef) -> CachedFlags:
"""Get the cached flags of the corresponding block
Parameters
----------
block_sref : StmtSRef
The block sref to be retrieved
Returns
-------
flags : CachedFlags
Three flags: affine_binding, region_cover, stage_pipeline
Note
----
It is an API intended for internal testing use.
"""
(
affine_binding,
region_cover,
stage_pipeline,
) = _ffi_api.ScheduleStateGetCachedFlags( # type: ignore # pylint: disable=no-member
self, block_sref
)
return CachedFlags(
affine_binding=bool(affine_binding.value),
region_cover=bool(region_cover.value),
stage_pipeline=bool(stage_pipeline.value),
)
def replace(
self,
src_sref: StmtSRef,
tgt_stmt: Union[Block, For, BlockRealize],
block_sref_reuse: Optional[Dict[Block, Block]] = None,
) -> None:
"""
Replace the part of the AST, as being pointed to by `src_sref`,
with a specific statement `tgt_stmt`, and maintain the sref tree accordingly.
Replace will try to perform copy on write as much as possible when the ScheduleState holds
the only copy to the IRModule and IR nodes.
Only 3 types of replacements are allowed: from `src_sref->stmt` to `tgt_stmt`.
1) Block -> Block
2) Loop -> Loop
3) Loop -> BlockRealize
Parameters
----------
src_sref : StmtSRef
The sref to the statement to be replaced in the TensorIR AST
tgt_stmt : Union[Block, For, BlockRealize]
The statement to be replaced to
block_sref_reuse : Optional[Dict[Block, Block]] = None
Maps an old block (to be replaced in the subtree under `src_sref->stmt`)
to a new block (replaced to, in the subtree under `tgt_stmt`), and enforces
            reuse of srefs between them (rather than creating new srefs), i.e. after the replacement,
the sref that points to the old block will point to the new one
Note
----
        The reuse of loop srefs is detected automatically according to the reuse of loop vars.
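        Examples
        --------
        A minimal sketch of a Loop -> Loop replacement (illustrative; ``old_loop`` and
        ``new_loop`` are hypothetical tir.For nodes sharing the same loop var):
        .. code-block:: python
            sref = state.get_sref(old_loop)
            state.replace(sref, new_loop)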
"""
if block_sref_reuse is None:
block_sref_reuse = {}
_ffi_api.ScheduleStateReplace( # type: ignore # pylint: disable=no-member
self,
src_sref,
tgt_stmt,
block_sref_reuse,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/testing.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities for the TensorIR schedule API"""
from typing import Sequence, Union
import tvm
from tvm.ir import IRModule, assert_structural_equal
from tvm.tir import PrimFunc
from tvm.tir.schedule import Schedule, Trace
def verify_trace_roundtrip(
sch: Schedule,
mod: Union[PrimFunc, IRModule],
*,
debug_mask: Union[str, int] = "all",
text_format: Union[str, Sequence[str]] = ["python", "json"],
) -> Schedule:
"""Serialize a traced schedule to JSON, then replay the JSON trace by applying to
a fresh new schedule, verifying the reproducibility of scheduling.
Parameters
----------
sch : tir.Schedule
The traced TensorIR schedule to be verified
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to construct the fresh new schedule
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
text_format: Union[str, Sequence[str]]
The text format or formats whose round-trip behavior should be
        validated. If a single string, the trace is round-tripped through that format
        only; if a sequence of strings, it is round-tripped through each format in turn.
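    Examples
    --------
    A typical usage sketch inside a schedule test (``workload`` and the block name "C"
    are illustrative):
    .. code-block:: python
        sch = Schedule(workload, debug_mask="all")
        loop = sch.get_loops(sch.get_block("C"))[0]
        sch.parallel(loop)
        verify_trace_roundtrip(sch, mod=workload)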
"""
if not isinstance(text_format, str):
for opt in text_format:
new_sch = verify_trace_roundtrip(sch, mod, debug_mask=debug_mask, text_format=opt)
return new_sch
trace = sch.trace
assert trace is not None
# Step 1. Perform a round-trip through the text-format
new_sch = Schedule(mod=mod, debug_mask=debug_mask)
if text_format == "json":
json_obj = trace.as_json()
Trace.apply_json_to_schedule(json_obj=json_obj, sch=new_sch)
elif text_format == "python":
py_trace = "\n".join(trace.as_python())
exec(py_trace, tvm.tir.__dict__, {"sch": new_sch}) # pylint: disable=exec-used
else:
assert text_format in ("json", "python"), f"Unknown text format: {text_format}"
# Step 2. Verify that the round-trip produced the same scheduling
assert_structural_equal(new_sch.mod, sch.mod)
# Step 3. Check the consistency of the text format between the old and new traces
py_repr = "\n".join(trace.as_python())
new_py_repr = "\n".join(new_sch.trace.as_python())
assert py_repr == new_py_repr
# Step 4. Return the new schedule in case it could be useful
return new_sch
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/trace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An execution trace of a scheduling program"""
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from tvm._ffi import register_object as _register_object
from tvm.runtime import Object
from ...ir import Array, Map
from ...runtime import String
from ..expr import FloatImm, IntImm
from . import _ffi_api
from .instruction import ATTR_TYPE, INPUT_RV_TYPE, Instruction
if TYPE_CHECKING:
from .schedule import Schedule
DECISION_TYPE = Any
JSON_TYPE = Any
def _json_from_tvm(obj):
if obj is None:
return None
if isinstance(obj, Array):
return [_json_from_tvm(i) for i in obj]
if isinstance(obj, Map):
return {_json_from_tvm(k): _json_from_tvm(v) for k, v in obj.items()}
if isinstance(obj, String):
return str(obj)
if isinstance(obj, (IntImm, FloatImm)):
return obj.value
raise TypeError("Not supported type: " + str(type(obj)))
@_register_object("tir.Trace")
class Trace(Object):
"""An execution trace of a scheduling program.
A trace has two parts:
1) The instructions invoked so far
2) The random decisions made upon those instructions, if any
A trace can be serialized to:
1) Roundtrippable JSON format: can be saved to file and loaded back
2) Python syntax: allows users to copy-paste the trace to reproduce the scheduling process
    A trace can be applied to a TensorIR schedule by re-applying all its instructions, possibly
    with their recorded decisions. Re-sampling is invoked if a sampling instruction doesn't have
    its corresponding decision; otherwise the existing decision will be reused accordingly.
Attributes
----------
insts : List[Instruction]
The instructions invoked so far in the program execution
decisions : Dict[Instruction, DECISION_TYPE]
The random decisions made upon those instructions
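    Examples
    --------
    A small serialization round-trip sketch (illustrative; ``sch`` is any traced tir.Schedule):
    .. code-block:: python
        trace = sch.trace
        py_stmts = trace.as_python()          # a list of python statements
        json_obj = trace.as_json()            # a JSON-style object, safe to save to disk
        new_sch = tvm.tir.Schedule(sch.mod)
        Trace.apply_json_to_schedule(json_obj, new_sch)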
"""
insts: List[Instruction]
decisions: Dict[Instruction, DECISION_TYPE]
def __init__(
self,
insts: List[Instruction],
decisions: Dict[Instruction, DECISION_TYPE],
) -> None:
"""Constructor
Parameters
----------
insts : List[Instruction]
The instructions invoked so far in the program execution
decisions : Dict[Instruction, DECISION_TYPE]
The random decisions made upon those instructions
"""
self.__init_handle_by_constructor__(
_ffi_api.Trace, # type: ignore # pylint: disable=no-member
insts,
decisions,
)
def get_decision(self, inst: Instruction) -> Optional[DECISION_TYPE]:
"""Retrieve the decision made on a specific instruction
Parameters
----------
        inst : Instruction
The instruction whose decision is to be retrieved
Returns
-------
decision : Optional[DECISION_TYPE]
The corresponding decision; None if there is no decision made on the instruction
"""
return _ffi_api.TraceGetDecision(self, inst) # type: ignore # pylint: disable=no-member
def append(
self,
inst: Instruction,
decision: Optional[DECISION_TYPE] = None,
) -> None:
"""Append a new instruction to the trace
Parameters
----------
        inst : Instruction
The new instruction to be appended
decision : Optional[DECISION_TYPE] = None
The random decision made on this instruction
"""
_ffi_api.TraceAppend(self, inst, decision) # type: ignore # pylint: disable=no-member
def pop(self) -> Optional[Instruction]:
"""Remove the last instruction, along with the decision made on that instruction, if any
Returns
-------
popped_inst : Instruction
            The instruction removed; None if the trace is empty
"""
return _ffi_api.TracePop(self) # type: ignore # pylint: disable=no-member
def apply_to_schedule(
self,
sch: "Schedule",
remove_postproc: bool,
decision_provider: Optional[
Callable[
[Instruction, List[INPUT_RV_TYPE], List[ATTR_TYPE], DECISION_TYPE], DECISION_TYPE
]
] = None,
) -> None:
"""Apply the trace to a TensorIR schedule
Parameters
----------
sch : Schedule
The schedule to be applied onto
remove_postproc : bool
If postprocessing instructions are removed
decision_provider: Optional[Callable] = None
A callback that allows users to mutate decisions on the fly when applying instructions.
The signature of the callback is:
- The 1st argument: The instruction
- The 2nd argument: The input random variables
- The 3rd argument: The attributes
- The 4th argument: The decision
- Return: A new decision
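            For example, a provider that keeps every decision unchanged (a minimal sketch):
            .. code-block:: python
                def keep_decision(inst, inputs, attrs, decision):
                    return decision
                trace.apply_to_schedule(sch, remove_postproc=False,
                                        decision_provider=keep_decision)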
"""
_ffi_api.TraceApplyToSchedule( # type: ignore # pylint: disable=no-member
self,
sch,
remove_postproc,
decision_provider,
)
def as_json(self, remove_postproc: bool = False) -> JSON_TYPE:
"""Serialize the trace as a JSON-style object
Parameters
----------
remove_postproc : bool = False
If postprocessing instructions are removed
Returns
-------
json: JSON_TYPE
The JSON-style object
"""
obj = _ffi_api.TraceAsJSON(self, remove_postproc) # type: ignore # pylint: disable=no-member
return _json_from_tvm(obj)
def as_python(self, remove_postproc: bool = False) -> List[str]:
"""Serialize the trace as a sequence of python statements
Parameters
----------
remove_postproc : bool = False
If postprocessing instructions are removed
Returns
-------
py_stmts: List[str]
A sequence of python statements
"""
return _ffi_api.TraceAsPython(self, remove_postproc) # type: ignore # pylint: disable=no-member
def with_decision(
self,
inst: Instruction,
decision: DECISION_TYPE,
remove_postproc: bool,
) -> "Trace":
"""Create a new trace with an instruction whose decision is changed,
assuming this instruction exists in the resulting trace
Parameters
----------
inst : Instruction
The instruction whose decision is to be changed
decision : DECISION_TYPE
The decision to be changed to
remove_postproc : bool
If postprocessing instructions are removed
Returns
-------
trace: Trace
The new trace with the decision changed
"""
return _ffi_api.TraceWithDecision( # type: ignore # pylint: disable=no-member
self,
inst,
decision,
remove_postproc,
)
def simplified(self, remove_postproc: bool) -> "Trace":
"""Simplify the trace with dead-code elimination
Parameters
----------
remove_postproc : bool
If postprocessing instructions are removed
Returns
-------
trace: Trace
A simplified trace
"""
return _ffi_api.TraceSimplified(self, remove_postproc) # type: ignore # pylint: disable=no-member
@staticmethod
def apply_json_to_schedule(json_obj: JSON_TYPE, sch: "Schedule") -> None:
"""Apply a JSON-serialized trace to a TensorIR schedule
Parameters
----------
json_obj : JSON_TYPE
The JSON-serialized trace
sch : Schedule
The TensorIR schedule
"""
_ffi_api.TraceApplyJSONToSchedule(json_obj, sch) # type: ignore # pylint: disable=no-member
def show(self, style: Optional[str] = None) -> None:
"""A sugar for print highlighted trace.
Parameters
----------
style : str, optional
Pygments styles extended by "light" (default) and "dark", by default "light"
"""
from tvm.script.highlight import ( # pylint: disable=import-outside-toplevel
cprint,
)
cprint(str(self), style=style)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/schedule/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Transformation on TIR schedule."""
from typing import Optional
from tvm.tir.schedule import Schedule, BlockRV, LoopRV
from . import _ffi_api
def tile_with_tensor_intrin(
sch: Schedule, block: BlockRV, intrin_name: str, allow_padding: bool = False
) -> Optional[LoopRV]:
"""Tile a subset of loops in the block according to the given tensor intrinsic.
Parameters
----------
sch : Schedule
The schedule to which tiling is applied
block : BlockRV
The block whose subset of loops will be tiled
intrin_name : str
        The name of a tensor intrinsic, must be registered via TensorIntrin.register(...) beforehand
allow_padding : bool
Whether to allow padding when tiling
Returns
-------
tiled_loop_rv : Optional[LoopRV]
LoopRV corresponding to the outermost loop of a block tiled according to the given intrin
        None if no valid loop mapping is found
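    Examples
    --------
    A minimal usage sketch (assuming a tensor intrinsic named ``"my_intrin"`` has been
    registered via ``TensorIntrin.register`` beforehand):
    .. code-block:: python
        tiled_loop = tile_with_tensor_intrin(sch, block, "my_intrin")
        if tiled_loop is not None:
            sch.tensorize(tiled_loop, "my_intrin")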
"""
return _ffi_api.TileWithTensorIntrin(sch, block, intrin_name, allow_padding) # type: ignore
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/stmt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Statement AST Node in TVM.
Each statement node has subfields that can be visited from the Python side.
.. code-block:: python
x = tvm.tir.Var("n", "int32")
a = tvm.tir.Var("array", "handle")
st = tvm.tir.stmt.Store(a, x + 1, 1)
assert isinstance(st, tvm.tir.stmt.Store)
assert(st.buffer_var == a)
"""
from enum import IntEnum
from typing import List, Mapping, Optional, Union
import tvm._ffi
from tvm.ir import PrimExpr, Range, Span
from tvm.runtime import Object, const
from . import _ffi_api
from .buffer import Buffer
from .expr import IterVar
class Stmt(Object):
"""Base class of all the statements."""
@tvm._ffi.register_object("tir.LetStmt")
class LetStmt(Stmt):
"""LetStmt node.
Parameters
----------
var : Var
The variable in the binding.
value : PrimExpr
        The value to be bound.
body : Stmt
The body statement.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, var, value, body, span=None):
self.__init_handle_by_constructor__(
_ffi_api.LetStmt, var, value, body, span # type: ignore
)
@tvm._ffi.register_object("tir.AssertStmt")
class AssertStmt(Stmt):
"""AssertStmt node.
Parameters
----------
condition : PrimExpr
The assert condition.
message : PrimExpr
The error message.
body : tvm.tir.Stmt
The body statement.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, condition, message, body, span=None):
self.__init_handle_by_constructor__(
_ffi_api.AssertStmt, condition, message, body, span # type: ignore
)
class ForKind(IntEnum):
"""The kind of the for loop.
    Note
----
ForKind can change the control flow semantics
of the loop and need to be considered in all TIR passes.
"""
SERIAL = 0
PARALLEL = 1
VECTORIZED = 2
UNROLLED = 3
THREAD_BINDING = 4
@tvm._ffi.register_object("tir.For")
class For(Stmt):
"""For node.
Parameters
----------
loop_var : Var
The loop variable.
min_val : PrimExpr
The beginning value.
extent : PrimExpr
The length of the loop.
kind : ForKind
The type of the for.
body : Stmt
The body statement.
thread_binding: Optional[tir.IterVar]
The thread this loop binds to. Only valid
if kind is ThreadBinding
annotations: tvm.ir.Map
Additional annotation hints.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(
self,
loop_var,
min_val,
extent,
kind,
body,
thread_binding=None,
annotations=None,
span=None,
):
self.__init_handle_by_constructor__(
_ffi_api.For, # type: ignore
loop_var,
min_val,
extent,
kind,
body,
thread_binding,
annotations,
span,
)
@tvm._ffi.register_object("tir.While")
class While(Stmt):
"""While node.
Parameters
----------
condition : PrimExpr
The termination condition.
body : Stmt
The body statement.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, condition, body, span=None):
self.__init_handle_by_constructor__(
_ffi_api.While, # type: ignore
condition,
body,
span,
)
@tvm._ffi.register_object("tir.Store")
class Store(Stmt):
"""Store node.
Parameters
----------
buffer_var : Var
The buffer Variable.
value : PrimExpr
The value we want to store.
index : PrimExpr
The index in the store expression.
predicate : PrimExpr
The store predicate.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer_var, value, index, predicate=None, span=None):
if predicate is None:
predicate = _ffi_api.const_true(value.dtype, span) # type: ignore
self.__init_handle_by_constructor__(
_ffi_api.Store, buffer_var, value, index, predicate, span # type: ignore
)
@tvm._ffi.register_object("tir.BufferStore")
class BufferStore(Stmt):
"""Buffer store node.
Parameters
----------
buffer : Buffer
The buffer.
value : PrimExpr
        The value to be stored.
    indices : List[PrimExpr]
        The indices of the location to be stored.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer, value, indices, span=None):
self.__init_handle_by_constructor__(
_ffi_api.BufferStore, buffer, value, indices, span # type: ignore
)
@tvm._ffi.register_object("tir.BufferRealize")
class BufferRealize(Stmt):
"""Buffer realize node.
Parameters
----------
buffer : Buffer
The buffer.
bounds : List[Range]
        The bounds of the realized region.
condition : PrimExpr
The realize condition.
body : Stmt
The body of the statement.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer, bounds, condition, body, span=None):
self.__init_handle_by_constructor__(
_ffi_api.BufferRealize, buffer, bounds, condition, body, span # type: ignore
)
@tvm._ffi.register_object("tir.ProducerStore")
class ProducerStore(Stmt):
"""ProducerStore node.
Parameters
----------
producer : DataProducer
The data producer.
value : PrimExpr
The value to be stored.
indices : list of Expr
The index arguments of the store.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, producer, value, indices, span=None):
self.__init_handle_by_constructor__(
_ffi_api.ProducerStore, producer, value, indices, span # type: ignore
)
@tvm._ffi.register_object("tir.Allocate")
class Allocate(Stmt):
"""Allocate node.
Parameters
----------
buffer_var : Var
The buffer variable.
dtype : str
The data type of the buffer.
extents : list of Expr
The extents of the allocate
condition : PrimExpr
The condition.
body : Stmt
The body statement.
annotations: Optional[Mapping[str, Object]]
Additional annotation hints
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer_var, dtype, extents, condition, body, annotations=None, span=None):
if annotations is None:
annotations = dict()
self.__init_handle_by_constructor__(
_ffi_api.Allocate, # type: ignore
buffer_var,
dtype,
extents,
condition,
body,
annotations,
span,
)
@tvm._ffi.register_object("tir.AllocateConst")
class AllocateConst(Stmt):
"""Allocate constant node.
Parameters
----------
buffer_var : Var
The buffer variable.
dtype : str
The data type of the buffer.
extents : list of Expr
The extents of the allocate
data_or_idx : Union[NDArray, int]
If an NDArray, this is the const data associated with the
constant. If an integer, this is the index into the
"constants" attribute of the `IRModule` that contains the
`AllocateConst`.
body : Stmt
The body statement.
annotations : Optional[Map]
Additional annotations about the allocation.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer_var, dtype, extents, data_or_idx, body, annotations=None, span=None):
self.__init_handle_by_constructor__(
_ffi_api.AllocateConst, buffer_var, dtype, extents, data_or_idx, body, annotations, span
)
@tvm._ffi.register_object("tir.DeclBuffer")
class DeclBuffer(Stmt):
"""DeclBuffer node.
Parameters
----------
buffer: Buffer
The buffer being declared.
body: Stmt
The body statement to be executed.
span: Optional[Span]
The location of this DeclBuffer in the source code.
"""
def __init__(self, buffer, body, span=None):
self.__init_handle_by_constructor__(_ffi_api.DeclBuffer, buffer, body, span)
@tvm._ffi.register_object("tir.AttrStmt")
class AttrStmt(Stmt):
"""AttrStmt node.
Parameters
----------
node : Node
The node to annotate the attribute
attr_key : str
Attribute type key.
value : PrimExpr
The value of the attribute
body : Stmt
The body statement.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, node, attr_key, value, body, span=None):
self.__init_handle_by_constructor__(
_ffi_api.AttrStmt, node, attr_key, value, body, span # type: ignore
)
@tvm._ffi.register_object("tir.ProducerRealize")
class ProducerRealize(Stmt):
"""ProducerRealize node.
Parameters
----------
producer : DataProducer
The data producer.
bounds : list of range
The bound of realize
condition : PrimExpr
The realize condition.
body : Stmt
The realize body
storage_scope : str
The storage scope associated with this realization
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, producer, bounds, condition, body, storage_scope="", span=None):
self.__init_handle_by_constructor__(
_ffi_api.ProducerRealize,
producer,
bounds,
condition,
body,
storage_scope,
span, # type: ignore
)
@tvm._ffi.register_object("tir.SeqStmt")
class SeqStmt(Stmt):
"""Sequence of statements.
Parameters
----------
seq : List[Stmt]
The statements
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, seq, span=None):
self.__init_handle_by_constructor__(_ffi_api.SeqStmt, seq, span) # type: ignore
def __getitem__(self, i):
return self.seq[i]
def __len__(self):
return len(self.seq)
@tvm._ffi.register_object("tir.IfThenElse")
class IfThenElse(Stmt):
"""IfThenElse node.
Parameters
----------
condition : PrimExpr
The expression
then_case : Stmt
The statement to execute if condition is true.
else_case : Stmt
The statement to execute if condition is false.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, condition, then_case, else_case, span=None):
self.__init_handle_by_constructor__(
_ffi_api.IfThenElse, condition, then_case, else_case, span # type: ignore
)
@tvm._ffi.register_object("tir.Evaluate")
class Evaluate(Stmt):
"""Evaluate node.
Parameters
----------
value : PrimExpr
        The expression to be evaluated.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, value, span=None):
self.__init_handle_by_constructor__(_ffi_api.Evaluate, value, span) # type: ignore
@tvm._ffi.register_object("tir.Prefetch")
class Prefetch(Stmt):
"""Prefetch node.
Parameters
----------
buffer : Buffer
The buffer to be prefetched.
bounds : list of Range
The bounds to be prefetched.
span : Optional[Span]
        The location of this statement in the source code.
"""
def __init__(self, buffer, bounds, span=None):
self.__init_handle_by_constructor__(_ffi_api.Prefetch, buffer, bounds, span) # type: ignore
@tvm._ffi.register_object("tir.BufferRegion")
class BufferRegion(Object):
"""BufferRegion node.
Parameters
----------
buffer : Buffer
The buffer of the buffer region
region : List[Range]
The region array of the buffer region
"""
buffer: Buffer
region: List[Range]
def __init__(self, buffer: Buffer, region: List[Range]):
self.__init_handle_by_constructor__(_ffi_api.BufferRegion, buffer, region) # type: ignore
@tvm._ffi.register_object("tir.MatchBufferRegion")
class MatchBufferRegion(Object):
"""MatchBufferRegion node.
Parameters
----------
buffer : Buffer
The target buffer
source : BufferRegion
The region of source buffer
"""
buffer: Buffer
source: BufferRegion
def __init__(self, buffer: Buffer, source: BufferRegion):
self.__init_handle_by_constructor__(
_ffi_api.MatchBufferRegion, buffer, source # type: ignore
)
@tvm._ffi.register_object("tir.Block")
class Block(Stmt):
"""Block node.
Parameters
----------
iter_vars : List[IterVar]
The block Variable.
reads : List[BufferRegion]
The read buffer regions of the block.
writes: List[BufferRegion]
The write buffer regions of the block.
name_hint: str
the name_hint of the block.
body: Stmt
The body of the block.
init: Optional[Stmt]
The init block of the reduction block
alloc_buffers: Optional[list[Buffer]]
The buffer allocations
match_buffers: Optional[List[MatchBufferRegion]]
The subregion buffer match
annotations: Optional[Mapping[str, Object]]
Additional annotation hints.
span : Optional[Span]
The location of this block in the source code.
"""
iter_vars: List[IterVar]
reads: List[BufferRegion]
writes: List[BufferRegion]
name_hint: str
body: Stmt
init: Optional[Stmt]
alloc_buffers: Optional[List[Buffer]]
match_buffers: Optional[List[MatchBufferRegion]]
annotations: Optional[Mapping[str, Object]]
span: Optional[Span]
def __init__(
self,
iter_vars: List[IterVar],
reads: List[BufferRegion],
writes: List[BufferRegion],
name_hint: str,
body: Stmt,
init: Optional[Stmt] = None,
alloc_buffers: Optional[List[Buffer]] = None,
match_buffers: Optional[List[MatchBufferRegion]] = None,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
if alloc_buffers is None:
alloc_buffers = []
if match_buffers is None:
match_buffers = []
if annotations is None:
annotations = {}
self.__init_handle_by_constructor__(
_ffi_api.Block, # type: ignore
iter_vars,
reads,
writes,
name_hint,
body,
init,
alloc_buffers,
match_buffers,
annotations,
span,
) # type: ignore
@tvm._ffi.register_object("tir.BlockRealize")
class BlockRealize(Stmt):
"""BlockRealize node.
Parameters
----------
iter_values : List[PrimExpr]
The binding values of the block var.
predicate : Union[PrimExpr, bool]
The predicate of the block.
block : Block
The block to realize
span : Optional[Span]
The location of this block_realize in the source code.
"""
iter_values: List[PrimExpr]
predicate: PrimExpr
block: Block
span: Optional[Span]
def __init__(
self,
iter_values: List[PrimExpr],
predicate: Union[PrimExpr, bool],
block: Block,
span: Optional[Span] = None,
):
if isinstance(predicate, bool):
predicate = const(predicate, "bool")
self.__init_handle_by_constructor__(
_ffi_api.BlockRealize, # type: ignore
iter_values,
predicate,
block,
span,
) # type: ignore
def stmt_seq(*args):
"""Make sequence of statements
Parameters
----------
args : list of Expr or Var
List of statements to be combined as sequence.
Returns
-------
stmt : Stmt
The combined statement.
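    Example
    -------
    A minimal sketch (non-Stmt arguments are wrapped in Evaluate automatically):
    .. code-block:: python
        x = tvm.tir.Var("x", "int32")
        body = stmt_seq(x, x + 1)    # a SeqStmt containing two Evaluate nodes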
"""
ret = []
for value in args:
if not isinstance(value, Stmt):
value = Evaluate(value)
ret.append(value)
if len(ret) == 1:
return ret[0]
return SeqStmt(ret)
def stmt_list(stmt):
"""Make list of stmt from blocks.
Parameters
----------
stmt : A block statement
Returns
-------
stmt_list : list of Stmt
The unpacked list of statements
"""
if isinstance(stmt, SeqStmt):
res = []
for x in stmt:
res += stmt_list(x)
return res
return [stmt]
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/stmt_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Statement functor utilities for IR transformations"""
from .function import PrimFunc
from . import _ffi_api
def ir_transform(stmt, preorder, postorder, only_enable=None):
"""Recursively visit and transform ir nodes in post DFS order.
Parameters
----------
stmt : tvm.tir.Stmt
The input to be transformed.
preorder: function
        The function called before recursive mutation.
        If preorder returns None, then the transform will proceed to the recursive call.
        If preorder returns a non-None tvm.tir.Stmt/Expr, the transformer will simply return it and
        won't do further recursion.
postorder : function
The function called after recursive mutation.
only_enable : Optional[List[str]]
List of types that we only enable.
Returns
-------
result : tvm.tir.Stmt
The result.
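    Example
    -------
    A small sketch that strips assert statements from a statement AST (the transform
    logic itself is illustrative):
    .. code-block:: python
        def preorder(op):
            return None               # always recurse into children
        def postorder(op):
            if isinstance(op, tvm.tir.AssertStmt):
                return op.body        # drop the assert, keep its body
            return None               # keep other nodes unchanged
        new_stmt = ir_transform(stmt, preorder, postorder, only_enable=["tir.AssertStmt"])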
"""
return _ffi_api.IRTransform(stmt, preorder, postorder, only_enable) # type: ignore
def post_order_visit(stmt, fvisit):
"""Recursively visit the ir in post DFS order node, apply fvisit
Each node is guaranteed to be visited only once.
Parameters
----------
fvisit: function
The visitor function.
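    Example
    -------
    A small sketch that collects the names of all buffers written to by BufferStore nodes:
    .. code-block:: python
        stored_buffers = []
        def fvisit(node):
            if isinstance(node, tvm.tir.BufferStore):
                stored_buffers.append(node.buffer.name)
        post_order_visit(stmt, fvisit)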
"""
return _ffi_api.PostOrderVisit(stmt, fvisit) # type: ignore
def pre_order_visit(stmt, fvisit):
"""Recursive pre-order visit on stmt AST, applying fvisit on each node.
If fvisit returns False, it won't visit the children of the node.
Parameters
----------
fvisit: function of the signature Object -> bool
The visitor function.
"""
return _ffi_api.PreOrderVisit(stmt, fvisit) # type: ignore
def substitute(node, vmap):
"""Substitute the var specified by vmap.
Parameters
----------
node: ObjectRef
The input.
vmap : Dict[Var, PrimExpr]
The variable mapping.
Returns
-------
result : tvm.tir.Stmt
The result.
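    Example
    -------
    A minimal sketch that substitutes a variable with a constant:
    .. code-block:: python
        i = tvm.tir.Var("i", "int32")
        body = tvm.tir.Evaluate(i * 2 + 1)
        new_body = substitute(body, {i: tvm.tir.IntImm("int32", 3)})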
"""
return _ffi_api.Substitute(node, vmap) # type: ignore
def renew_defs(func: PrimFunc):
"""Re-generate the definition nodes for a TIR, including VarDef, BufferDef.
This pass works as a simple DeepCopy to duplicate a function with different Vars and
Buffers but the same behavior
Parameters
----------
func: PrimFunc
The input function
Returns
-------
result : PrimFunc
The new generated func.
"""
return _ffi_api.RenewDefs(func) # type: ignore
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Intrinsics for tensorization."""
from . import arm_cpu, cuda, rocm, x86, hexagon
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/arm_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for ARM tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
from .dot_product_common import DP4A_INTRIN # pylint: disable=unused-import
# TODO(masahi): Parametrize the TVMScript description of dot product by
# shape and dtype, and share the common description with x86.
@T.prim_func
def dot_product_4x4_i8i8i32_desc(
A: T.Buffer((4,), "int8", offset_factor=1),
B: T.Buffer((4, 4), "int8", offset_factor=1),
C: T.Buffer((4,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:4], A[0:4], B[0:4, 0:4])
T.writes(C[0:4])
for i in T.serial(0, 4):
for k in T.serial(0, 4):
with T.block("update"):
vi, vk = T.axis.remap("SR", [i, k])
C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
@T.prim_func
def dot_product_4x4_i8i8i32_neon(
A: T.Buffer((4,), "int8", offset_factor=1),
B: T.Buffer((4, 4), "int8", offset_factor=1),
C: T.Buffer((4,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:4], A[0:4], B[0:4, 0:4])
T.writes(C[0:4])
A_int8 = A.vload([0], "int8x4")
re_int32 = T.reinterpret(A_int8, dtype="int32")
vec_ai32 = T.broadcast(re_int32, 2)
vec_a = T.reinterpret(vec_ai32, dtype="int8x8")
vec_b = B.vload([0, 0], dtype="int8x16")
# TODO(masahi): Remove duplication when inlined function call is supported
vec_b_low = T.vectorlow(vec_b, dtype="int8x8")
multiply_low = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.smull.v8i16"),
T.uint32(2),
vec_a,
vec_b_low,
dtype="int16x8",
)
pairwise_reduction_low = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.saddlp.v4i32.v8i16"),
T.uint32(1),
multiply_low,
dtype="int32x4",
)
vec_b_high = T.vectorhigh(vec_b, dtype="int8x8")
multiply_high = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.smull.v8i16"),
T.uint32(2),
vec_a,
vec_b_high,
dtype="int16x8",
)
pairwise_reduction_high = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.saddlp.v4i32.v8i16"),
T.uint32(1),
multiply_high,
dtype="int32x4",
)
C[T.ramp(T.int32(0), 1, 4)] += T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.addp.v4i32"),
T.uint32(2),
pairwise_reduction_low,
pairwise_reduction_high,
dtype="int32x4",
)
@T.prim_func
def dot_product_4x4_i8i8i32_sdot(
A: T.Buffer((4,), "int8", offset_factor=1),
B: T.Buffer((4, 4), "int8", offset_factor=1),
C: T.Buffer((4,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:4], A[0:4], B[0:4, 0:4])
T.writes(C[0:4])
A_i8x4 = A.vload([0], "int8x4")
A_i32 = T.reinterpret(A_i8x4, dtype="int32")
vec_ai32 = T.broadcast(A_i32, 4)
vec_a = T.reinterpret(vec_ai32, dtype="int8x16")
vec_b = B.vload([0, 0], dtype="int8x16")
vec_c = C.vload([0], dtype="int32x4")
C[T.ramp(T.int32(0), 1, 4)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.aarch64.neon.sdot.v4i32.v16i8"),
T.uint32(3),
vec_c,
vec_a,
vec_b,
dtype="int32x4",
)
ARM_DOT_4x4_i8_NEON_INTRIN = "dot_4x4_i8i8s32_neon"
ARM_DOT_4x4_i8_SDOT_INTRIN = "dot_4x4_i8i8s32_sdot"
TensorIntrin.register(
ARM_DOT_4x4_i8_NEON_INTRIN, dot_product_4x4_i8i8i32_desc, dot_product_4x4_i8i8i32_neon
)
TensorIntrin.register(
ARM_DOT_4x4_i8_SDOT_INTRIN, dot_product_4x4_i8i8i32_desc, dot_product_4x4_i8i8i32_sdot
)
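# Usage sketch (illustrative; ``inner_loop`` is a hypothetical LoopRV over a matching
# 4x4 dot-product tile): once registered, the intrinsic names above can be passed to
# Schedule.tensorize, e.g. ``sch.tensorize(inner_loop, ARM_DOT_4x4_i8_SDOT_INTRIN)``.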
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for tensorization on NVIDIA GPU."""
from typing import Dict, Tuple
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
from ..._ffi import register_func
from ...runtime import convert
from .. import Cast, IntImm, TensorIntrin
def shared_16x16_to_ldmatrix_32x8_layout(i, j):
thread_id = 4 * (i % 8) + (j % 8) // 2
return thread_id, 4 * (j // 8) + (i // 8) * 2 + (j % 2)
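# Worked example (illustrative): element (i, j) = (3, 5) of the 16x16 tile maps to
# thread_id = 4 * (3 % 8) + (5 % 8) // 2 = 14 and local_id = 4 * (5 // 8) + (3 // 8) * 2 + 5 % 2 = 1,
# i.e. it is held as local element 1 of lane 14 in the 32-thread warp.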
def shared_16x32_to_ldmatrix_32x16_layout(i, j):
thread_id = 4 * (i % 8) + (j % 16) // 4
return thread_id, 8 * (j // 16) + (i // 8) * 4 + j % 4
def shared_32x16_to_ldmatrix_32x16_layout(i, j):
thread_id = (i % 16) // 4 + 4 * (j % 8)
return thread_id, 8 * (j // 8) + (i // 16) * 4 + i % 4
@register_func("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout")
def index_map_shared_16x16_to_ldmatrix_32x8_layout(ind):
i, j = ind[0], ind[1]
thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(i, j)
return convert([thread_id, local_id])
lift = convert
M_DIM = 16
N_DIM = 16
WARP_SIZE = 32
HALF_WARP = WARP_SIZE // 2
HALF_WARP_expr = lift(HALF_WARP)
def get_ldmatrix_intrin(k_dim, dtype, is_b, transposed, shared_scope="shared"):
local_size = (M_DIM * k_dim) // WARP_SIZE
shared_offset = None
index_map = None
if transposed:
assert is_b, "Transposed A matrix not supported"
ldmatrix_col_major = is_b and not transposed
if k_dim == 16:
assert dtype == "float16"
index_map = shared_16x16_to_ldmatrix_32x8_layout
if transposed:
shared_offset = (
lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
+ stride * (tx % 8)
+ 8 * ((tx % HALF_WARP_expr) // 8)
)
else:
shared_offset = lambda tx, stride: stride * (tx % HALF_WARP_expr) + 8 * (
tx // HALF_WARP_expr
)
else:
assert (
k_dim == 32 and dtype == "int8"
), "Only k_dim == 16 (float16) or k_dim == 32 (int8) supported for now"
if ldmatrix_col_major:
index_map = shared_32x16_to_ldmatrix_32x16_layout
# A dummy offset, ldmatrix cannot be used for int8 + trans case.
# We still use the ldmatrix intrinsic, but lower it to a manual loop in the codegen.
# Only the stride information is required.
shared_offset = lambda _, stride: stride
elif is_b and transposed:
index_map = shared_16x32_to_ldmatrix_32x16_layout
shared_offset = (
lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr)
+ (tx % 8) * stride
+ 16 * ((tx % HALF_WARP_expr) // 8)
)
else:
index_map = shared_16x32_to_ldmatrix_32x16_layout
shared_offset = lambda tx, stride: stride * (tx % 16) + 16 * (tx // 16)
assert index_map and shared_offset
if is_b and not transposed:
row_dim = k_dim
col_dim = M_DIM
else:
row_dim = M_DIM
col_dim = k_dim
shmem_shape = (row_dim, col_dim)
@T.prim_func
def ldmatrix_desc(warp_handle: T.handle, shared_handle: T.handle) -> None:
shared = T.match_buffer(
shared_handle,
shmem_shape,
dtype,
align=64,
offset_factor=16,
scope=shared_scope,
)
warp = T.match_buffer(
warp_handle, (WARP_SIZE, local_size), dtype, align=64, offset_factor=16, scope="warp"
)
with T.block("root"):
T.reads(shared[0:row_dim, 0:col_dim])
T.writes(warp[0:WARP_SIZE, 0:local_size])
for ax0, ax1 in T.grid(row_dim, col_dim):
with T.block("shared_warp"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(shared[v0, v1])
thread_id, local_id = T.meta_var(index_map(v0, v1))
T.writes(warp[thread_id, local_id])
warp[thread_id, local_id] = shared[v0, v1]
@T.prim_func
def ldmatrix_impl(warp_handle: T.handle, shared_handle: T.handle) -> None:
s0 = T.var("int32")
s1 = T.var("int32")
shared = T.match_buffer(
shared_handle,
shmem_shape,
dtype,
align=64,
offset_factor=16,
scope=shared_scope,
strides=[s0, s1],
)
warp = T.match_buffer(
warp_handle, (WARP_SIZE, local_size), dtype, align=64, offset_factor=16, scope="warp"
)
with T.block("root"):
T.reads(shared[0:row_dim, 0:col_dim])
T.writes(warp[0:WARP_SIZE, 0:local_size])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(
T.ptx_ldmatrix(
ldmatrix_col_major,
4, # Always load 4 matrices
".b16",
warp.data,
warp.elem_offset + lift(local_size) * tx,
shared.access_ptr("r"),
shared_offset(tx, s0),
dtype=dtype,
)
)
return ldmatrix_desc, ldmatrix_impl
def get_mma_intrin(k_dim, out_dtype, b_transposed):
local_size = (M_DIM * k_dim) // WARP_SIZE
    local_size_out = (M_DIM * N_DIM) // WARP_SIZE
index_map_C = shared_16x16_to_ldmatrix_32x8_layout
if k_dim == 16:
index_map_A = shared_16x16_to_ldmatrix_32x8_layout
index_map_B = shared_16x16_to_ldmatrix_32x8_layout
mma_prefix = "m16n8k16"
elif k_dim == 32 and b_transposed:
index_map_A = index_map_B = shared_16x32_to_ldmatrix_32x16_layout
mma_prefix = "m16n8k32"
elif k_dim == 32 and not b_transposed:
index_map_A = shared_16x32_to_ldmatrix_32x16_layout
index_map_B = shared_32x16_to_ldmatrix_32x16_layout
mma_prefix = "m16n8k32"
else:
assert False
out_dtype_abbrv = {"float16": "fp16", "float32": "fp32", "int32": "int32"}[out_dtype]
if out_dtype in ["float16", "float32"]:
in_dtype = "float16"
in_dtype_abbrv = "fp16"
else:
in_dtype = "int8"
in_dtype_abbrv = "int8"
def maybe_cast(v):
if out_dtype in ["float32", "int32"]:
return Cast(out_dtype, v)
return v
def maybe_swap(i, j):
if b_transposed:
return j, i
return i, j
@T.prim_func
def mma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(
a, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=16, scope="warp"
)
B = T.match_buffer(
b, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=16, scope="warp"
)
C = T.match_buffer(
c, (WARP_SIZE, local_size_out), out_dtype, align=64, offset_factor=16, scope="warp"
)
with T.block("root"):
T.reads(
C[0:WARP_SIZE, 0:local_size_out],
A[0:WARP_SIZE, 0:local_size],
B[0:WARP_SIZE, 0:local_size],
)
T.writes(C[0:WARP_SIZE, 0:local_size_out])
for i, j, k in T.grid(M_DIM, N_DIM, k_dim):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i, j, k])
b_row_ind, b_col_ind = maybe_swap(k, j)
thread_id_C, local_id_C = T.meta_var(index_map_C(i, j))
thread_id_A, local_id_A = T.meta_var(index_map_A(i, k))
thread_id_B, local_id_B = T.meta_var(index_map_B(b_row_ind, b_col_ind))
T.reads(
C[thread_id_C, local_id_C],
A[thread_id_A, local_id_A],
B[thread_id_B, local_id_B],
)
T.writes(C[thread_id_C, local_id_C])
C[thread_id_C, local_id_C] += maybe_cast(
A[thread_id_A, local_id_A]
) * maybe_cast(B[thread_id_B, local_id_B])
@T.prim_func
def mma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(
a, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=16, scope="warp"
)
B = T.match_buffer(
b, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=16, scope="warp"
)
C = T.match_buffer(
c, (WARP_SIZE, local_size_out), out_dtype, align=64, offset_factor=16, scope="warp"
)
with T.block("root"):
T.reads(
C[0:WARP_SIZE, 0:local_size_out],
A[0:WARP_SIZE, 0:local_size],
B[0:WARP_SIZE, 0:local_size],
)
T.writes(C[0:WARP_SIZE, 0:local_size_out])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(
T.ptx_mma(
mma_prefix,
"row",
"col",
in_dtype_abbrv,
in_dtype_abbrv,
out_dtype_abbrv,
A.data,
A.elem_offset + tx * lift(local_size),
B.data,
B.elem_offset + tx * lift(local_size),
C.data,
C.elem_offset + tx * lift(local_size_out),
False,
dtype=out_dtype,
)
)
T.evaluate(
T.ptx_mma(
mma_prefix,
"row",
"col",
in_dtype_abbrv,
in_dtype_abbrv,
out_dtype_abbrv,
A.data,
A.elem_offset + tx * lift(local_size),
B.data,
B.elem_offset + tx * lift(local_size) + lift(local_size) // 2,
C.data,
C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2,
False,
dtype=out_dtype,
)
)
return mma_sync_desc, mma_sync_impl
def get_mma_fill_intrin(dtype, local_size):
zero = IntImm("int32", 0).astype(dtype)
# Assume M = N = 16
index_map = shared_16x16_to_ldmatrix_32x8_layout
@T.prim_func
def mma_fill_desc(a: T.handle) -> None:
C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
with T.block("root"):
T.reads()
T.writes(C_warp[0:WARP_SIZE, 0:local_size])
for i0, i1 in T.grid(M_DIM, N_DIM):
with T.block("C_warp"):
i, j = T.axis.remap("SS", [i0, i1])
thread_id, local_id = T.meta_var(index_map(i, j))
T.reads()
T.writes(C_warp[thread_id, local_id])
C_warp[thread_id, local_id] = zero
@T.prim_func
def mma_fill_impl(a: T.handle) -> None:
C_warp = T.match_buffer(
a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
)
with T.block("root"):
T.reads()
T.writes(C_warp[0:WARP_SIZE, 0:local_size])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype))
return mma_fill_desc, mma_fill_impl
def get_mma_store_intrin(dtype, local_size, scope="global"):
# Assume M = N = 16
index_map = shared_16x16_to_ldmatrix_32x8_layout
@T.prim_func
def mma_store_desc(a: T.handle, c: T.handle) -> None:
C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp")
C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope)
with T.block("root"):
T.reads(C_warp[0:WARP_SIZE, 0:local_size])
T.writes(C[0:M_DIM, 0:N_DIM])
for i0, i1 in T.grid(M_DIM, N_DIM):
with T.block("C_warp"):
v0, v1 = T.axis.remap("SS", [i0, i1])
thread_id, local_id = T.meta_var(index_map(v0, v1))
T.reads(C_warp[thread_id, local_id])
T.writes(C[v0, v1])
C[v0, v1] = C_warp[thread_id, local_id]
@T.prim_func
def mma_store_impl(a: T.handle, c: T.handle) -> None:
s0 = T.var("int32")
s1 = T.var("int32")
C_warp = T.match_buffer(
a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1
)
C = T.match_buffer(
c, [M_DIM, N_DIM], dtype=dtype, scope="global", offset_factor=1, strides=[s0, s1]
)
with T.block("root"):
T.reads(C_warp[0:WARP_SIZE, 0:local_size])
T.writes(C[0:M_DIM, 0:N_DIM])
tx = T.env_thread("threadIdx.x")
T.launch_thread(tx, WARP_SIZE)
T.evaluate(
T.mma_store(
M_DIM,
N_DIM,
C.access_ptr("w"),
C_warp.data,
C_warp.elem_offset,
s0,
dtype=dtype,
)
)
return mma_store_desc, mma_store_impl
LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a"
TensorIntrin.register(LDMATRIX_16x16_A_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False))
LDMATRIX_16x16_B_INTRIN = "mma.ldmatrix_16x16_b"
TensorIntrin.register(LDMATRIX_16x16_B_INTRIN, *get_ldmatrix_intrin(16, "float16", True, False))
LDMATRIX_16x16_A_DYN_INTRIN = "mma.ldmatrix_16x16_a_dyn"
TensorIntrin.register(
LDMATRIX_16x16_A_DYN_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False, "shared.dyn")
)
LDMATRIX_16x16_B_DYN_INTRIN = "mma.ldmatrix_16x16_b_dyn"
TensorIntrin.register(
LDMATRIX_16x16_B_DYN_INTRIN, *get_ldmatrix_intrin(16, "float16", True, False, "shared.dyn")
)
LDMATRIX_16x16_B_TRANS_INTRIN = "mma.ldmatrix_16x16_b_trans"
TensorIntrin.register(
LDMATRIX_16x16_B_TRANS_INTRIN, *get_ldmatrix_intrin(16, "float16", True, True)
)
LDMATRIX_16x32_A_INTRIN = "mma.ldmatrix_16x32_a"
TensorIntrin.register(LDMATRIX_16x32_A_INTRIN, *get_ldmatrix_intrin(32, "int8", False, False))
LDMATRIX_32x16_B_INTRIN = "mma.ldmatrix_32x16_b"
TensorIntrin.register(LDMATRIX_32x16_B_INTRIN, *get_ldmatrix_intrin(32, "int8", True, False))
LDMATRIX_16x32_B_TRANS_INTRIN = "mma.ldmatrix_16x32_b_trans"
TensorIntrin.register(LDMATRIX_16x32_B_TRANS_INTRIN, *get_ldmatrix_intrin(32, "int8", True, True))
MMA_f16f16f32_INTRIN = "mma_f16f16f32"
TensorIntrin.register(MMA_f16f16f32_INTRIN, *get_mma_intrin(16, "float32", False))
MMA_f16f16f32_TRANS_INTRIN = "mma_f16f16f32_trans"
TensorIntrin.register(MMA_f16f16f32_TRANS_INTRIN, *get_mma_intrin(16, "float32", True))
MMA_f16f16f16_INTRIN = "mma_f16f16f16"
TensorIntrin.register(MMA_f16f16f16_INTRIN, *get_mma_intrin(16, "float16", False))
MMA_f16f16f16_TRANS_INTRIN = "mma_f16f16f16_trans"
TensorIntrin.register(MMA_f16f16f16_TRANS_INTRIN, *get_mma_intrin(16, "float16", True))
MMA_i8i8i32_INTRIN = "mma_i8i8i32"
TensorIntrin.register(MMA_i8i8i32_INTRIN, *get_mma_intrin(32, "int32", False))
MMA_i8i8i32_TRANS_INTRIN = "mma_i8i8i32_trans"
TensorIntrin.register(MMA_i8i8i32_TRANS_INTRIN, *get_mma_intrin(32, "int32", True))
MMA_fill_16x16_f32_INTRIN = "mma_fill_16x16_f32"
TensorIntrin.register(MMA_fill_16x16_f32_INTRIN, *get_mma_fill_intrin("float32", 8))
MMA_fill_16x16_f16_INTRIN = "mma_fill_16x16_f16"
TensorIntrin.register(MMA_fill_16x16_f16_INTRIN, *get_mma_fill_intrin("float16", 8))
MMA_fill_16x16_i32_INTRIN = "mma_fill_16x16_i32"
TensorIntrin.register(MMA_fill_16x16_i32_INTRIN, *get_mma_fill_intrin("int32", 8))
MMA_store_16x16_f32_global_INTRIN = "mma_store_16x16_f32_global_"
TensorIntrin.register(
MMA_store_16x16_f32_global_INTRIN, *get_mma_store_intrin("float32", 8, "global")
)
MMA_store_16x16_f16_global_INTRIN = "mma_store_16x16_f16_global_"
TensorIntrin.register(
MMA_store_16x16_f16_global_INTRIN, *get_mma_store_intrin("float16", 8, "global")
)
MMA_store_16x16_i32_global_INTRIN = "mma_store_16x16_i32_global_"
TensorIntrin.register(
MMA_store_16x16_i32_global_INTRIN, *get_mma_store_intrin("int32", 8, "global")
)
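# Example (hypothetical usage; assumes a tir.Schedule `sch` blockized for
# 16x16x16 fp16 x fp16 -> fp32 MMA): the intrinsics registered above are
# typically combined as
#   sch.tensorize(a_warp_block, LDMATRIX_16x16_A_INTRIN)
#   sch.tensorize(b_warp_block, LDMATRIX_16x16_B_INTRIN)
#   sch.tensorize(init_block, MMA_fill_16x16_f32_INTRIN)
#   sch.tensorize(compute_block, MMA_f16f16f32_INTRIN)
#   sch.tensorize(store_block, MMA_store_16x16_f32_global_INTRIN)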
######## WMMA intrinsics ########
def get_wmma_fragment_index(buffer, stride, m_dim, n_dim):
"""Compute wmma fragment index using elem_offset of the buffer"""
frag_index_m = buffer.elem_offset // stride // m_dim
frag_index_n = buffer.elem_offset % stride // n_dim
num_fragments_per_row = stride // n_dim
return frag_index_m * num_fragments_per_row + frag_index_n
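# Worked example (illustrative): for a fragment buffer with elem_offset == 512,
# stride == 32 and m_dim == n_dim == 16, there are 32 // 16 == 2 fragments per
# row, frag_index_m == 512 // 32 // 16 == 1, frag_index_n == (512 % 32) // 16 == 0,
# so the returned fragment index is 1 * 2 + 0 == 2.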
def get_wmma_load_intrin(
m_dim: int,
n_dim: int,
k_dim: int,
dtype: str,
shared_scope: str,
is_b: bool,
is_col_major: bool,
) -> Tuple[PrimFunc, PrimFunc]:
"""Generator of wmma_load intrins"""
wmma_fragment_scope = "wmma.matrix_{}".format("b" if is_b else "a")
layout = "col_major" if is_col_major else "row_major"
@T.prim_func
def wmma_load_desc(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope=shared_scope)
C = T.match_buffer(
c, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope=wmma_fragment_scope
)
with T.block("root"):
T.reads(A[0:m_dim, 0:n_dim])
T.writes(C[0:m_dim, 0:n_dim])
for i, j in T.grid(m_dim, n_dim):
with T.block("load"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = A[vii, vjj]
@T.prim_func
def wmma_load_impl(a: T.handle, c: T.handle) -> None:
s1 = T.var("int32")
s0 = T.var("int32")
d1 = T.var("int32")
d0 = T.var("int32")
A = T.match_buffer(
a,
(m_dim, n_dim),
dtype,
align=64,
offset_factor=16,
scope=shared_scope,
strides=[s1, s0],
)
C = T.match_buffer(
c,
(m_dim, n_dim),
dtype,
align=64,
offset_factor=16,
scope=wmma_fragment_scope,
strides=[d1, d0],
)
with T.block("root"):
T.reads(A[0:m_dim, 0:n_dim])
T.writes(C[0:m_dim, 0:n_dim])
T.evaluate(
T.tvm_load_matrix_sync(
C.data,
m_dim,
n_dim,
k_dim,
get_wmma_fragment_index(C, d1, m_dim, n_dim),
A.access_ptr("r"),
s1,
layout,
dtype="handle",
)
)
return wmma_load_desc, wmma_load_impl
def get_wmma_fill_intrin(
m_dim: int, n_dim: int, k_dim: int, dtype: str
) -> Tuple[PrimFunc, PrimFunc]:
"""Generator of wmma_fill intrins"""
zero = IntImm("int32", 0).astype(dtype)
@T.prim_func
def wmma_fill_desc(c: T.handle) -> None:
C = T.match_buffer(
c, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope="wmma.accumulator"
)
with T.block("root"):
T.reads()
T.writes(C[0:m_dim, 0:n_dim])
for i, j in T.grid(m_dim, n_dim):
with T.block("init"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = zero
@T.prim_func
def wmma_fill_impl(c: T.handle) -> None:
d1 = T.var("int32")
d0 = T.var("int32")
C = T.match_buffer(
c,
(m_dim, n_dim),
dtype,
align=64,
offset_factor=16,
scope="wmma.accumulator",
strides=[d1, d0],
)
with T.block("root"):
T.reads()
T.writes(C[0:m_dim, 0:n_dim])
T.evaluate(
T.tvm_fill_fragment(
C.data,
m_dim,
n_dim,
k_dim,
get_wmma_fragment_index(C, d1, m_dim, n_dim),
T.float32(0),
dtype="handle",
)
)
return wmma_fill_desc, wmma_fill_impl
def get_wmma_store_intrin(
m_dim: int, n_dim: int, k_dim: int, dtype: str, scope: str
) -> Tuple[PrimFunc, PrimFunc]:
"""Generator of wmma_store intrins"""
@T.prim_func
def wmma_store_desc(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(
a, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope="wmma.accumulator"
)
C = T.match_buffer(c, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope=scope)
with T.block("root"):
T.reads(A[0:m_dim, 0:n_dim])
T.writes(C[0:m_dim, 0:n_dim])
for i, j in T.grid(m_dim, n_dim):
with T.block("store"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = A[vii, vjj]
@T.prim_func
def wmma_store_impl(a: T.handle, c: T.handle) -> None:
s1 = T.var("int32")
s0 = T.var("int32")
d1 = T.var("int32")
d0 = T.var("int32")
A = T.match_buffer(
a,
(m_dim, n_dim),
dtype,
align=64,
offset_factor=16,
scope="wmma.accumulator",
strides=[d1, d0],
)
C = T.match_buffer(
c, (m_dim, n_dim), dtype, align=64, offset_factor=16, scope=scope, strides=[s1, s0]
)
with T.block("root"):
T.reads(A[0:m_dim, 0:n_dim])
T.writes(C[0:m_dim, 0:n_dim])
T.evaluate(
T.tvm_store_matrix_sync(
A.data,
m_dim,
n_dim,
k_dim,
get_wmma_fragment_index(A, d1, m_dim, n_dim),
C.access_ptr("w"),
s1,
"row_major",
dtype="handle",
)
)
return wmma_store_desc, wmma_store_impl
def get_wmma_sync_intrin(
m_dim: int, n_dim: int, k_dim: int, in_dtype: str, out_dtype: str, b_transposed: bool
) -> Tuple[PrimFunc, PrimFunc]:
"""Generator of wmma_sync intrins"""
def maybe_cast(v):
if in_dtype != out_dtype:
return Cast(out_dtype, v)
return v
def maybe_swap(i, j):
if b_transposed:
return j, i
return i, j
b_shape_0, b_shape_1 = maybe_swap(k_dim, n_dim)
@T.prim_func
def wmma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(
a, (m_dim, k_dim), in_dtype, align=64, offset_factor=16, scope="wmma.matrix_a"
)
B = T.match_buffer(
b,
maybe_swap(k_dim, n_dim),
in_dtype,
align=64,
offset_factor=16,
scope="wmma.matrix_b",
)
C = T.match_buffer(
c, (m_dim, n_dim), out_dtype, align=64, offset_factor=16, scope="wmma.accumulator"
)
with T.block("root"):
T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:b_shape_0, 0:b_shape_1])
T.writes(C[0:m_dim, 0:n_dim])
for i, j, k in T.grid(m_dim, n_dim, k_dim):
with T.block(""):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
B_index_0, B_index_1 = maybe_swap(vkk, vjj)
C[vii, vjj] = C[vii, vjj] + maybe_cast(A[vii, vkk]) * maybe_cast(
B[B_index_0, B_index_1]
)
@T.prim_func
def wmma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None:
a1 = T.var("int32")
a0 = T.var("int32")
b1 = T.var("int32")
b0 = T.var("int32")
c1 = T.var("int32")
c0 = T.var("int32")
A = T.match_buffer(
a,
(m_dim, k_dim),
in_dtype,
align=64,
offset_factor=16,
scope="wmma.matrix_a",
strides=[a1, a0],
)
B = T.match_buffer(
b,
maybe_swap(k_dim, n_dim),
in_dtype,
align=64,
offset_factor=16,
scope="wmma.matrix_b",
strides=[b1, b0],
)
C = T.match_buffer(
c,
(m_dim, n_dim),
out_dtype,
align=64,
offset_factor=16,
scope="wmma.accumulator",
strides=[c1, c0],
)
with T.block("root"):
T.reads(C[0:m_dim, 0:n_dim], A[0:m_dim, 0:k_dim], B[0:b_shape_0, 0:b_shape_1])
T.writes(C[0:m_dim, 0:n_dim])
T.evaluate(
T.tvm_mma_sync(
C.data,
get_wmma_fragment_index(C, c1, m_dim, n_dim),
A.data,
get_wmma_fragment_index(A, a1, m_dim, k_dim),
B.data,
get_wmma_fragment_index(B, b1, b_shape_0, b_shape_1),
C.data,
get_wmma_fragment_index(C, c1, m_dim, n_dim),
dtype="handle",
)
)
return wmma_sync_desc, wmma_sync_impl
WMMA_SYNC_16x16x16_f16f16f32_INTRIN = "wmma_sync_16x16x16_f16f16f32"
TensorIntrin.register(
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "float16", "float32", False),
)
WMMA_SYNC_16x16x16_f16f16f32_TRANS_INTRIN = "wmma_sync_16x16x16_f16f16f32_trans"
TensorIntrin.register(
WMMA_SYNC_16x16x16_f16f16f32_TRANS_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "float16", "float32", True),
)
WMMA_SYNC_16x16x16_f16f16f16_INTRIN = "wmma_sync_16x16x16_f16f16f16"
TensorIntrin.register(
WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "float16", "float16", False),
)
WMMA_SYNC_16x16x16_f16f16f16_TRANS_INTRIN = "wmma_sync_16x16x16_f16f16f16_trans"
TensorIntrin.register(
WMMA_SYNC_16x16x16_f16f16f16_TRANS_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "float16", "float16", True),
)
WMMA_SYNC_16x16x16_s8s8s32_INTRIN = "wmma_sync_16x16x16_s8s8s32"
TensorIntrin.register(
WMMA_SYNC_16x16x16_s8s8s32_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "int8", "int32", False),
)
WMMA_SYNC_16x16x16_s8s8s32_TRANS_INTRIN = "wmma_sync_16x16x16_s8s8s32_trans"
TensorIntrin.register(
WMMA_SYNC_16x16x16_s8s8s32_TRANS_INTRIN,
*get_wmma_sync_intrin(16, 16, 16, "int8", "int32", True),
)
WMMA_LOAD_16x16x16_F16_A_INTRIN = "wmma_load_16x16x16_f16_a"
TensorIntrin.register(
WMMA_LOAD_16x16x16_F16_A_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "float16", "shared", False, False),
)
WMMA_LOAD_16x16x16_F16_B_INTRIN = "wmma_load_16x16x16_f16_b"
TensorIntrin.register(
WMMA_LOAD_16x16x16_F16_B_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "float16", "shared", True, False),
)
WMMA_LOAD_16x16x16_F16_A_TRANS_INTRIN = "wmma_load_16x16x16_f16_a_trans"
TensorIntrin.register(
WMMA_LOAD_16x16x16_F16_A_TRANS_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "float16", "shared", False, True),
)
WMMA_LOAD_16x16x16_F16_B_TRANS_INTRIN = "wmma_load_16x16x16_f16_b_trans"
TensorIntrin.register(
WMMA_LOAD_16x16x16_F16_B_TRANS_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "float16", "shared", True, True),
)
WMMA_LOAD_16x16x16_S8_A_INTRIN = "wmma_load_16x16x16_s8_a"
TensorIntrin.register(
WMMA_LOAD_16x16x16_S8_A_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "int8", "shared", False, False),
)
WMMA_LOAD_16x16x16_S8_B_INTRIN = "wmma_load_16x16x16_s8_b"
TensorIntrin.register(
WMMA_LOAD_16x16x16_S8_B_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "int8", "shared", True, False),
)
WMMA_LOAD_16x16x16_S8_A_TRANS_INTRIN = "wmma_load_16x16x16_s8_a_trans"
TensorIntrin.register(
WMMA_LOAD_16x16x16_S8_A_TRANS_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "int8", "shared", False, True),
)
WMMA_LOAD_16x16x16_S8_B_TRANS_INTRIN = "wmma_load_16x16x16_s8_b_trans"
TensorIntrin.register(
WMMA_LOAD_16x16x16_S8_B_TRANS_INTRIN,
*get_wmma_load_intrin(16, 16, 16, "int8", "shared", True, True),
)
WMMA_FILL_16x16x16_F32_INTRIN = "wmma_fill_16x16x16_f32"
TensorIntrin.register(WMMA_FILL_16x16x16_F32_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "float32"))
WMMA_FILL_16x16x16_F16_INTRIN = "wmma_fill_16x16x16_f16"
TensorIntrin.register(WMMA_FILL_16x16x16_F16_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "float16"))
WMMA_FILL_16x16x16_S32_INTRIN = "wmma_fill_16x16x16_s32"
TensorIntrin.register(WMMA_FILL_16x16x16_S32_INTRIN, *get_wmma_fill_intrin(16, 16, 16, "int32"))
WMMA_STORE_16x16x16_F32_SHARED_INTRIN = "wmma_store_16x16x16_f32_shared"
TensorIntrin.register(
WMMA_STORE_16x16x16_F32_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float32", "shared")
)
WMMA_STORE_16x16x16_F16_SHARED_INTRIN = "wmma_store_16x16x16_f16_shared"
TensorIntrin.register(
WMMA_STORE_16x16x16_F16_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float16", "shared")
)
WMMA_STORE_16x16x16_S32_SHARED_INTRIN = "wmma_store_16x16x16_s32_shared"
TensorIntrin.register(
WMMA_STORE_16x16x16_S32_SHARED_INTRIN, *get_wmma_store_intrin(16, 16, 16, "int32", "shared")
)
WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN = "wmma_store_16x16x16_f32_global"
TensorIntrin.register(
WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float32", "global")
)
WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN = "wmma_store_16x16x16_f16_global"
TensorIntrin.register(
WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "float16", "global")
)
WMMA_STORE_16x16x16_S32_GLOBAL_INTRIN = "wmma_store_16x16x16_s32_global"
TensorIntrin.register(
WMMA_STORE_16x16x16_S32_GLOBAL_INTRIN, *get_wmma_store_intrin(16, 16, 16, "int32", "global")
)
def get_wmma_intrin_group(
store_scope: str, in_dtype: str, out_dtype: str, trans_b: bool
) -> Dict[str, str]:
"""Get a group of intrinsics for wmma tensor core with the given configurations
Parameters
----------
store_scope : str
Must be one of ["global", "shared"]. The memory scope of the result buffer.
in_dtype : str
The input data type.
out_dtype : str
        The output data type.
trans_b : bool
Whether the input matrix B is transposed.
Returns
-------
ret : Dict[str, str]
A group of tensor intrinsics.
"""
assert store_scope in ["global", "shared"]
assert in_dtype in ["float16", "int8"]
assert out_dtype in ["float16", "float32", "int32"]
load_a_intrins = {
"float16": WMMA_LOAD_16x16x16_F16_A_INTRIN,
"int8": WMMA_LOAD_16x16x16_S8_A_INTRIN,
}
load_b_intrins = {
"float16": WMMA_LOAD_16x16x16_F16_B_TRANS_INTRIN
if trans_b
else WMMA_LOAD_16x16x16_F16_B_INTRIN,
"int8": WMMA_LOAD_16x16x16_S8_B_TRANS_INTRIN if trans_b else WMMA_LOAD_16x16x16_S8_B_INTRIN,
}
compute_intrins = {
"float16": WMMA_SYNC_16x16x16_f16f16f16_TRANS_INTRIN
if trans_b
else WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
"float32": WMMA_SYNC_16x16x16_f16f16f32_TRANS_INTRIN
if trans_b
else WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
"int32": WMMA_SYNC_16x16x16_s8s8s32_TRANS_INTRIN
if trans_b
else WMMA_SYNC_16x16x16_s8s8s32_INTRIN,
}
init_intrins = {
"float16": WMMA_FILL_16x16x16_F16_INTRIN,
"float32": WMMA_FILL_16x16x16_F32_INTRIN,
"int32": WMMA_FILL_16x16x16_S32_INTRIN,
}
store_intrins = {
"float16": WMMA_STORE_16x16x16_F16_SHARED_INTRIN
if store_scope == "shared"
else WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN,
"float32": WMMA_STORE_16x16x16_F32_SHARED_INTRIN
if store_scope == "shared"
else WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN,
"int32": WMMA_STORE_16x16x16_S32_SHARED_INTRIN
if store_scope == "shared"
else WMMA_STORE_16x16x16_S32_GLOBAL_INTRIN,
}
return {
"init": init_intrins[out_dtype],
"load_a": load_a_intrins[in_dtype],
"load_b": load_b_intrins[in_dtype],
"compute": compute_intrins[out_dtype],
"store": store_intrins[out_dtype],
}
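# Example (hypothetical usage; assumes a tir.Schedule `sch` whose blocks have
# already been tiled/blockized into 16x16x16 wmma fragments):
#   group = get_wmma_intrin_group("shared", "float16", "float32", trans_b=False)
#   sch.tensorize(init_block, group["init"])
#   sch.tensorize(load_a_block, group["load_a"])
#   sch.tensorize(load_b_block, group["load_b"])
#   sch.tensorize(compute_block, group["compute"])
#   sch.tensorize(store_block, group["store"])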
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/dot_product_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Dot product related intrinsics."""
from tvm.script import tir as T
from .. import TensorIntrin
@T.prim_func
def dp4a_desc(
A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
with T.block("root"):
T.reads(C[0], A[0:4], B[0:4])
T.writes(C[0])
for i in range(0, 4):
with T.block("update"):
vi = T.axis.remap("R", [i])
C[0] = C[0] + T.cast(A[vi], "int32") * T.cast(B[vi], "int32")
@T.prim_func
def dp4a_impl(
A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
with T.block("root"):
T.reads(C[0], A[0:4], B[0:4])
T.writes(C[0])
C[0] += T.call_pure_extern(
"__dp4a", A.vload([0], "int8x4"), B.vload([0], "int8x4"), T.int32(0), dtype="int32"
)
DP4A_INTRIN = "dp4a"
TensorIntrin.register(DP4A_INTRIN, dp4a_desc, dp4a_impl)
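# Example (hypothetical usage; assumes a tir.Schedule `sch` with a dot-product
# reduction loop of extent 4 over int8 operands in shared memory, accumulating
# into a local int32 buffer, matching the description above):
#   sch.tensorize(inner_reduction_loop, DP4A_INTRIN)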
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for Hexagon tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
@T.prim_func
def dot_product_32x4_u8u8i32_desc(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((32, 4), "uint8", offset_factor=1),
C: T.Buffer((32,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:32], A[0:4], B[0:32, 0:4])
T.writes(C[0:32])
for i in T.serial(0, 32):
for k in T.serial(0, 4):
with T.block("update"):
with T.init():
C[i] = T.int32(0)
vi, vk = T.axis.remap("SR", [i, k])
C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
@T.prim_func
def dot_product_32x4_u8u8i32_vrmpy(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((32, 4), "uint8", offset_factor=1),
C: T.Buffer((32,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:32], A[0:4], B[0:32, 0:4])
T.writes(C[0:32])
A_u8x4 = A.vload([0], "uint8x4")
A_i32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x128 = B.vload([0, 0], dtype="uint8x128")
B_i32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
C[T.ramp(T.int32(0), 1, 32)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpyub.acc.128B"),
T.uint32(3),
C[T.ramp(T.int32(0), 1, 32)],
B_i32x32,
A_i32,
dtype="int32x32",
)
@T.prim_func
def dot_product_32x4_u8i8i32_desc(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((32, 4), "int8", offset_factor=1),
C: T.Buffer((32,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:32], A[0:4], B[0:32, 0:4])
T.writes(C[0:32])
for i in T.serial(0, 32):
for k in T.serial(0, 4):
with T.block("update"):
with T.init():
C[i] = T.int32(0)
vi, vk = T.axis.remap("SR", [i, k])
C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
@T.prim_func
def dot_product_32x4_u8i8i32_vrmpy(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((32, 4), "int8", offset_factor=1),
C: T.Buffer((32,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:32], A[0:4], B[0:32, 0:4])
T.writes(C[0:32])
A_u8x4 = A.vload([0], "uint8x4")
A_i32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x128 = B.vload([0, 0], dtype="int8x128")
B_i32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
C[T.ramp(T.int32(0), 1, 32)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.hexagon.V6.vrmpybusv.acc.128B"),
T.uint32(3),
C[T.ramp(T.int32(0), 1, 32)],
T.broadcast(A_i32, 32),
B_i32x32,
dtype="int32x32",
)
VRMPY_u8u8i32_INTRIN = "dot_32x4_u8u8i32_vrmpy"
TensorIntrin.register(
VRMPY_u8u8i32_INTRIN, dot_product_32x4_u8u8i32_desc, dot_product_32x4_u8u8i32_vrmpy
)
VRMPY_u8i8i32_INTRIN = "dot_32x4_u8i8i32_vrmpy"
TensorIntrin.register(
VRMPY_u8i8i32_INTRIN, dot_product_32x4_u8i8i32_desc, dot_product_32x4_u8i8i32_vrmpy
)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for AMDGPU tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
from .dot_product_common import dp4a_desc
@T.prim_func
def sdot4(
A: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
B: T.Buffer((4,), "int8", offset_factor=1, align=4, scope="shared"),
C: T.Buffer((1,), "int32", offset_factor=1, align=4, scope="local"),
) -> None:
with T.block("root"):
T.reads(C[0], A[0:4], B[0:4])
T.writes(C[0])
C[0] += T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.amdgcn.sdot4"),
T.uint32(4),
T.reinterpret(A.vload([0], "int8x4"), dtype="int32"),
T.reinterpret(B.vload([0], "int8x4"), dtype="int32"),
T.int32(0),
T.bool(1),
dtype="int32",
)
AMDGPU_SDOT4_INTRIN = "sdot4"
TensorIntrin.register(AMDGPU_SDOT4_INTRIN, dp4a_desc, sdot4)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/tensor_intrin/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,missing-function-docstring
"""Intrinsics for x86 tensorization."""
from tvm.script import tir as T
from .. import TensorIntrin
# Tensorized intrinsic description and VNNI-specific implementation.
# Equivalent to the ones in topi/x86/tensor_intrin.py
@T.prim_func
def dot_product_16x4_u8i8i32_desc(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((16, 4), "int8", offset_factor=1),
C: T.Buffer((16,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:16], A[0:4], B[0:16, 0:4])
T.writes(C[0:16])
for i in T.serial(0, 16):
for k in T.serial(0, 4):
with T.block("update"):
vi, vk = T.axis.remap("SR", [i, k])
C[vi] = C[vi] + T.cast(A[vk], "int32") * T.cast(B[vi, vk], "int32")
@T.prim_func
def dot_product_16x4_u8i8i32_vnni(
A: T.Buffer((4,), "uint8", offset_factor=1),
B: T.Buffer((16, 4), "int8", offset_factor=1),
C: T.Buffer((16,), "int32", offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:16], A[0:4], B[0:16, 0:4])
T.writes(C[0:16])
A_u8x4 = A.vload([0], "uint8x4")
A_i32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x64 = B.vload([0, 0], dtype="int8x64")
B_i32x16 = T.reinterpret(B_i8x64, dtype="int32x16")
C_i32x16 = C.vload([0], dtype="int32x16")
C[T.ramp(T.int32(0), 1, 16)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512"),
T.uint32(0),
C_i32x16,
T.broadcast(A_i32, 16),
B_i32x16,
dtype="int32x16",
)
VNNI_DOT_16x4_INTRIN = "dot_16x4_vnni"
TensorIntrin.register(
VNNI_DOT_16x4_INTRIN, dot_product_16x4_u8i8i32_desc, dot_product_16x4_u8i8i32_vnni
)
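# Example (hypothetical usage; assumes a tir.Schedule `sch` with a 16x4
# u8 x i8 -> i32 dot-product block matching the description above, compiled for
# an x86 target with VNNI support such as "llvm -mcpu=cascadelake"):
#   sch.tensorize(outer_output_loop, VNNI_DOT_16x4_INTRIN)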
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of all TIR transformations"""
# pylint: disable=wildcard-import, invalid-name
from .function_pass import prim_func_pass, PrimFuncPass
from .transform import *
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.transform"""
import tvm._ffi
tvm._ffi._init_api("tir.transform", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/transform/function_pass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TIR specific function pass support."""
import inspect
import types
import functools
from typing import Callable, List, Optional, Union
import tvm._ffi
from tvm.ir.transform import Pass, PassInfo
from . import _ffi_api
@tvm._ffi.register_object("tir.PrimFuncPass")
class PrimFuncPass(Pass):
"""A pass that works on each :py:func:`tvm.tir.PrimFunc` in a module. A function
    pass class should be created through :py:func:`tvm.tir.transform.prim_func_pass`.
"""
def _wrap_class_function_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyFunctionPass(PrimFuncPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
            # initialize the handle in case pass_cls creation fails
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(
_ffi_api.CreatePrimFuncPass, _pass_func, pass_info # type: ignore
)
self._inst = inst
def __getattr__(self, name):
            # fall back to the instance attribute if this class doesn't have it
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def prim_func_pass(
pass_func=None,
opt_level: int = None,
name: Optional[str] = None,
required: Optional[List[str]] = None,
) -> Union[Callable, PrimFuncPass]:
"""Decorate a function pass.
This function returns a callback when pass_func
is provided. Otherwise, it returns the created function pass using the
given optimization function.
Parameters
----------
pass_func : Optional[Callable[(tvm.tir.PrimFunc, IRModule, PassContext) -> tvm.tir.PrimFunc]]
The transformation function or class.
opt_level : int
The optimization level of this module pass.
name : Optional[str]
The name of the function pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the function pass is dependent on.
Returns
-------
    create_function_pass : Union[Callable, PrimFuncPass]
A decorator will be returned if pass_func is not provided,
otherwise return the decorated result.
The returned decorator has two behaviors depending on the input:
        A new PrimFuncPass will be returned when we decorate a pass function.
        A new PrimFuncPass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a function pass class.
.. code-block:: python
@tvm.tir.transform.prim_func_pass(opt_level=1)
class TestReplaceFunc:
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
# just for demo purposes
# transform func to new_func
return self.new_func
The following code creates a function pass by decorating
a user defined transform function.
.. code-block:: python
@tvm.tir.transform.prim_func_pass(opt_level=2)
def transform(func, mod, ctx):
# my transformations here.
return func
function_pass = transform
        assert isinstance(function_pass, tvm.tir.transform.PrimFuncPass)
assert function_pass.info.opt_level == 2
# Given a module m, the optimization could be invoked as the following:
updated_mod = function_pass(m)
        # Now the transformation should have been applied to every function in
        # the provided module m, and the updated module will be returned.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the function pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_function_pass(pass_arg):
"""Internal function that creates a function pass"""
fname = name if name else pass_arg.__name__
info = PassInfo(opt_level, fname, required)
if inspect.isclass(pass_arg):
return _wrap_class_function_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Module pass")
return _ffi_api.CreatePrimFuncPass(pass_arg, info) # type: ignore
if pass_func:
return create_function_pass(pass_func)
return create_function_pass
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/transform/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping existing transformations."""
# pylint: disable=invalid-name
import enum
from typing import Callable, Optional
from . import _ffi_api
from . import function_pass as _fpass
def Apply(ftransform):
"""Apply ftransform to each function in the Module.
This function is a thin wrapper around tvm.tir.transform.prim_func_pass
Parameters
----------
ftransform: tvm.tir.PrimFunc -> tvm.tir.PrimFunc
The transformation pass.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
# pylint: disable=unused-argument
def _transform(func, mod, ctx):
return ftransform(func)
return _fpass.prim_func_pass(_transform, opt_level=0, name="Apply") # type: ignore
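# Example (hypothetical usage): wrap a plain PrimFunc -> PrimFunc function as a
# module-level pass, e.g. to attach an attribute to every function:
#   attach_attr = Apply(lambda f: f.with_attr("my_attr", 1))
#   mod = attach_attr(mod)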
def InjectPrefetch():
"""Inject prefetch instructions into stmt.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectPrefetch() # type: ignore
def ApplyLayoutTransforms():
"""Reshape buffers that appear in the "layout_transform_map"
    function attribute.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ApplyLayoutTransforms() # type: ignore
def StorageFlatten(cache_line_size, create_bound_attribute: bool = False):
"""Flatten the multi-dimensional read/write to 1D.
Parameters
----------
cache_line_size: int
The size of CPU cache line.
create_bound_attribute:
Whether to create bound attributes.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.StorageFlatten(cache_line_size, create_bound_attribute) # type: ignore
def TextureFlatten():
"""Flatten the multi-dimensional read/write to 2D.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.TextureFlatten() # type: ignore
def InjectCopyIntrin(pragma_key: str, fintrin):
"""Inject virtual thread loops.
Parameters
----------
pragma_key : str
The pragma key for hint of copy.
fintrin : function
The function with signature copyintrin(src, dst, pad_before, pad_after, pad_value)
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectCopyIntrin(pragma_key, fintrin) # type: ignore
def CoProcSync():
"""Detect and insert sync points to co-processor.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.CoProcSync() # type: ignore
def LiftAttrScope(attr_key: str):
"""Lift common attrs with attr_key to outer scope.
Parameters
----------
attr_key : str
The attribute key to be checked.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LiftAttrScope(attr_key) # type: ignore
def LoopPartition():
"""Inject virtual thread loops.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LoopPartition() # type: ignore
def VectorizeLoop(enable_vectorize: bool = True):
"""Lower vectorization loops.
Parameters
----------
enable_vectorize : bool
Whether vectorization is enabled.
Will lower to scalar loop when it is turned off.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.VectorizeLoop(enable_vectorize) # type: ignore
def InjectVirtualThread():
"""Inject virtual thread loops.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectVirtualThread() # type: ignore
def InjectDoubleBuffer():
"""Inject double buffer statements.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectDoubleBuffer() # type: ignore
def InjectRollingBuffer():
"""Inject rolling buffer statements.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectRollingBuffer() # type: ignore
def StorageRewrite():
"""Rewrite storage allocation pattern.
    Moves the allocation to the outermost possible scope and tries to share
    space between allocations to make a static allocation plan when possible.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.StorageRewrite() # type: ignore
def UnrollLoop():
"""Unroll the constant loop marked by unroll.
    This pass also automatically attaches the pragma unroll tag to loops that meet the criteria.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.UnrollLoop() # type: ignore
def RemoveNoOp():
"""Remove No Op from the Stmt.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RemoveNoOp() # type: ignore
def RemoveAssume():
"""Remove all instances of builtin::assume
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RemoveAssume() # type: ignore
def RemoveStoreUndef():
"""Remove stores of undefined values from the Stmt.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RemoveStoreUndef() # type: ignore
def BF16Legalize():
"""Legalize bf16 typed Ops.
Runs BF16Promote, BF16CastElimination and BF16TypeLowering
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.BF16Legalize() # type: ignore
def BF16Promote():
"""Promote bf16 to fp32. Add a cast to fp32
before Ops, then add a cast back to bf16.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.BF16Promote() # type: ignore
def BF16CastElimination():
"""Eliminate verbose casting between fp32 and bf16
Checks if the AST has the pattern:
castto32(castto16(some_fp32_op(...)))
The verbose casting is generated by BF16Promote for multiple
bf16 Ops in a row. e.g.:
X[i] + Y[i] + T[i] =>
bf16((float32(bf16((float32(X[i]) + float32(Y[i])))) + float32(T[i])))
After this pass:
bf16(float32(X[i]) + float32(Y[i]) + float32(T[i]))
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.BF16CastElimination() # type: ignore
def BF16TypeLowering():
"""Replace all bf16 type with uint16. Also lower the casting
between fp32 and bf16
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.BF16TypeLowering() # type: ignore
def CommonSubexprElimTIR(enable_cse_tir: bool = True, identify_equiv_terms: bool = False):
"""Replace redundant computations by new variables.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.CommonSubexprElimTIR(enable_cse_tir, identify_equiv_terms) # type: ignore
def RewriteUnsafeSelect():
"""Detect and rewrite unsafe select that contains memory access.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RewriteUnsafeSelect() # type: ignore
def Simplify():
"""Run arithmetic simplifications on the statements and expressions.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.Simplify() # type: ignore
def InstrumentBoundCheckers():
"""Instruments bound checkers.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InstrumentBoundCheckers() # type: ignore
def LowerCustomDatatypes():
"""Lower custom datatypes.
See tvm::datatypes::Registry for more information on adding custom datatypes.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerCustomDatatypes() # type: ignore
def MakePackedAPI():
"""Transform the PrimFuncs in the module to a packed func API.
Prior to this pass, the PrimFunc may have Buffer arguments defined
in the `PrimFuncNode::buffer_map`. This pass consumes the
`buffer_map`, using it to generate `TVMArgs` and `TVMRetValue*`
arguments that implement the `PackedFunc` API.
For static shapes, the `BufferNode::shape`, `BufferNode::strides`,
and `BufferNode::elem_offset` member variables are used to
generate runtime checks on the corresponding member variables in
the user-provided `DLTensor*` or `tvm.nd.array` argument. (e.g. A
PrimFunc that accepts a buffer of shape `[16,32]` validates that
the `DLTensor::shape` array is `[16,32]`.)
For dynamic Buffers, in which one or more of these `BufferNode` member
variables use `tir.Var` that are not defined by other PrimFunc
parameters, these are instead used to define the variables based on
the corresponding `DLTensor` members. (e.g. A PrimFunc that accepts a
buffer of shape `[tir.Var("n"), tir.Var("m")]`, when passed a
    `DLTensor` of shape `[16,32]`, will define `n = 16` and `m = 32`, based
    on the argument's shape.)
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.MakePackedAPI() # type: ignore
def MakeUnpackedAPI():
"""Transform the PrimFuncs in the module to a C API compatible with internal calls.
Prior to this pass, the PrimFunc may have Buffer arguments defined in
the `PrimFuncNode::buffer_map`. This pass consumes the `buffer_map`,
using it to generate `T*` arguments (e.g. `float32*`) that can be
directly called by a C API.
For static shapes, no runtime validation is performed to confirm that
the argument buffer's shape matches the expected shape. For dynamic
shapes, `MakeUnpackedAPI` requires that the dynamic parameters be
passed as separate `tir.Var` parameters.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.MakeUnpackedAPI() # type: ignore
def SplitHostDevice():
"""Split the function into a host function and device functions.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.SplitHostDevice() # type: ignore
def DecorateDeviceScope():
"""Decorate all the function's body as device function.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.DecorateDeviceScope() # type: ignore
def SkipAssert():
"""Skip assert stmt.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.SkipAssert() # type: ignore
def ThreadSync(storage_scope: str):
"""Insert sync between parallel read/write of shared buffers.
Parameters
----------
storage_scope: str
The target storage scope.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ThreadSync(storage_scope) # type: ignore
def LowerThreadAllreduce():
"""Lower cross thread alleduce.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerThreadAllreduce() # type: ignore
def InferFragment():
"""Infer the TensorCore fragment infomation using tensor intrinsics.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InferFragment() # type: ignore
def LowerWarpMemory():
"""Lower warp memory access to low-level device related function calls.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerWarpMemory() # type: ignore
def LowerTVMBuiltin():
"""Lower tvm builtin intrinsics.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerTVMBuiltin() # type: ignore
def LegalizePackedCalls():
"""Legalize packed calls to have its arguments wrapped in TVMValues
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LegalizePackedCalls() # type: ignore
def LowerIntrin():
"""Lower target specific intrinsic calls.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerIntrin() # type: ignore
def LowerDeviceStorageAccessInfo():
"""Lower attached storage access information on device.
Returns
-------
fpass : tvm.transform.Pass
The result pass
Note
----
Run this pass after all storage access analysis finish.
"""
return _ffi_api.LowerDeviceStorageAccessInfo() # type: ignore
def CombineContextCall():
"""Combine context calls in the host function.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.CombineContextCall() # type: ignore
def NarrowDataType(target_bits: int):
"""Narrow down PrimExpr datatype in stmt to target_bits.
Parameters
----------
target_bits : int
The target bit configuration.
Returns
-------
fpass : tvm.transform.Pass
The result pass
Note
----
Run this pass after StorageFlatten.
"""
return _ffi_api.NarrowDataType(target_bits) # type: ignore
def VerifyMemory():
"""Verify if func contains illegal host side direct memory access.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.VerifyMemory() # type: ignore
# pylint: disable=no-else-return,inconsistent-return-statements
def HoistIfThenElse(variant: Optional[str] = None):
"""Hoist loop-invariant IfThenElse nodes to outside the eligible loops.
Parameters
----------
variant : Optional[String]
The variant of the pass.
        variant can be any one of the following values: ["basic", None (default)].
        The basic variant supports basic hoisting scenarios where it expects
        the For and If nodes to be placed consecutively and does not involve
        global scope variables or more advanced scenarios.
        The default variant supports all hoisting scenarios, i.e. "basic" plus
        "advanced", and can be controlled with PassContext configs like below:
config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
if variant == "basic":
return _ffi_api.HoistIfThenElseBasic() # type: ignore
elif variant is None:
return _ffi_api.HoistIfThenElse() # type: ignore
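# Example (hypothetical usage): the default variant can be tuned through the
# PassContext config key shown in the docstring above, e.g.
#   with tvm.transform.PassContext(
#       config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
#   ):
#       mod = HoistIfThenElse()(mod)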
class HoistedConditionals(enum.Flag):
"""Flags for use in HoistExpressionConfig.conditional_types
Each bitflag represents a type of expression that should be
hoisted to the outermost loop possible.
"""
Never = 0
""" No hoisting of conditionals """
IfElseStmt = 1
""" If set, look for hoist candidates in IfElseStmt """
IfElseExpr = 2
""" If set, look for hoist candidates in tir.if_then_else """
BooleanExpression = 4
""" If set, look for hoist candidates in all boolean expressions """
UsingBlockVar = 8
""" If set, allow hoisting of conditionals that use a block variable (e.g. threadIdx.x) """
All = IfElseStmt | IfElseExpr | BooleanExpression | UsingBlockVar
""" Enable all hoisting of conditionals"""
class HoistedLetBindings(enum.Flag):
"""Flags for use in HoistExpressionConfig.let_binding_types
Each bitflag represents a type of let binding expression that should be
hoisted to the outermost loop possible.
"""
Never = 0
""" No hoisting of let bindings """
RequiredByConditional = 1
""" Bindings that are used by a hoisted conditional """
LetStmt = 2
""" Bindings occuring in LetStmt """
LetExpr = 4
""" Bindings occuring in Let expressions """
All = RequiredByConditional | LetStmt | LetExpr
""" Enable all hoisting of let bindings """
def HoistExpression():
"""Generalized verison of HoistIfThenElse.
Hoist loop-invariant expressions to outside the eligible loops.
Searches for expressions in:
* LetStmt bindings
* IfThenElse conditions
* Boolean operators
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.HoistExpression() # type: ignore
def LowerCrossThreadReduction():
"""Lower cross-thread reduction from thread bindings to
intrinsic function calls.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerCrossThreadReduction() # type: ignore
def LowerInitBlock():
"""Lower block init stmt into IfThenElse statements.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerInitBlock() # type: ignore
def PlanAndUpdateBufferAllocationLocation():
"""Locate the buffer allocation to the exact position (usually is
the lca of buffer access). This pass will inject opaque block
with alloc_buffers at the allocation site.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.PlanAndUpdateBufferAllocationLocation() # type: ignore
def ConvertBlocksToOpaque():
"""Substitute all the block vars with the PrimExprs they are bound to, indicated by
the corresponding iter_values in BlockRealize, and then convert the blocks into
opaque ones by removing all the iter_values in BlockRealize and iter_vars in Block.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ConvertBlocksToOpaque() # type: ignore
def CompactBufferAllocation():
"""Compact the buffer access region. by removing the buffer regions
that are not accessed, i.e. narrowing the buffer shape and adjust
the access region if necessary.
Example
-------
Before narrowing, ``B`` is a ``[16, 16]`` buffer, but only a
skinny vector ``B[i, 0:16]`` is accessed.
.. code-block:: python
for i in range(0, 16):
with T.block():
B = T.alloc_buffer(16, 16)
for j in range(0, 16):
B[i, j] = A[i, j] + 1
for j in range(0, 16):
C[i, j] = B[i, j] + 1
This pass narrows the buffer shape and adjust its accessed region
accordingly. In this particular case, because only a ``1 * 16``
vector of ``B`` is accessed, the pass narrows ``B`` to shape ``[1,
16]``, and changes the access to ``B[i, j]`` to ``B[0, j]``.
.. code-block:: python
for i in range(0, 16):
with T.block():
B = T.alloc_buffer(1, 16)
for j in range(0, 16):
B[0, j] = A[i, j] + 1
for j in range(0, 16):
C[i, j] = B[0, j] + 1
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.CompactBufferAllocation() # type: ignore
def LowerMatchBuffer():
"""Remove match buffers inside the block. Also, it will validate the binding.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerMatchBuffer() # type: ignore
def LowerOpaqueBlock():
"""Remove the block to ensure that the TIR can not be scheduled again.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.LowerOpaqueBlock() # type: ignore
def FlattenBuffer():
"""Flatten the multi-dimensional BufferLoad and BufferStore to single dimensional
BufferLoad/BufferStore for the TIR not contains opaque block.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.FlattenBuffer() # type: ignore
def UnifyThreadBinding():
"""Unify all the thread bindings for "blockIdx.x/y/z",
"threadIdx.x/y/z", and "vthread.x/y/z". Before the unification,
two vars that are bound to a thread axis (e.g., "threadIdx.x")
use different IterVars and variables in their AttrStmts. After
the unification, we use a consolidated IterVar and a variable
for them.
Returns
-------
fpass : tvm.transform.Pass
The result pass
Note
----
`vthread` is a legacy behavior that will be deprecated, though
thread bindings of `vthread` are still also unified in this
pass. Please use `vthread.x`, `vthread.y` and `vthread.z` instead.
"""
return _ffi_api.UnifyThreadBinding() # type: ignore
def MergeDynamicSharedMemoryAllocations():
"""This pass merges multiple TIR-level dynamic shared memory allocations
into one allocation.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.MergeDynamicSharedMemoryAllocations() # type: ignore
def ConvertForLoopsToSerial():
"""Convert Parallel For Loops to Serial For Loops.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ConvertForLoopsToSerial() # type: ignore
def InjectSoftwarePipeline():
"""Transform annotated loops into pipelined one that parallelize producers and consumers
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectSoftwarePipeline() # type: ignore
def ExtractPrimFuncConstants():
"""Collects and unificates tir non-scalar constants to module's attr 'Constants' array.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ExtractPrimFuncConstants() # type: ignore
def RenormalizeSplitPattern():
"""Renormalize the split pattern from floordiv(floormod()) to floormod(floordiv())
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RenormalizeSplitPattern() # type: ignore
def BindTarget(target):
"""Annotate a PrimFunc with a given target.
Parameters
    ----------
    target : tvm.target.Target
        The target to annotate the PrimFuncs with
Returns
-------
fpass : tvm.transform.Pass
The result pass
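    Example
    -------
    A minimal sketch, assuming ``mod`` is an existing IRModule (illustrative only):
    .. code-block:: python
        import tvm
        # Annotate the PrimFuncs in the module with an LLVM target.
        mod = tvm.tir.transform.BindTarget(tvm.target.Target("llvm"))(mod)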
"""
return _ffi_api.BindTarget(target) # type: ignore
def AnnotateEntryFunc():
"""Set a PrimFunc as the entry point if it is only function in IRModule.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.AnnotateEntryFunc() # type: ignore
def Filter(fcond: Callable):
"""Filter out PrimFuncs that does not satisfy the given condition.
`fcond` should be a function that takes a primfunc and returns boolean.
Returns
-------
fpass : tvm.transform.Pass
The result pass
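    Example
    -------
    A minimal sketch; the predicate and ``mod`` are illustrative only:
    .. code-block:: python
        import tvm
        # Keep only the PrimFuncs that take at least one parameter.
        mod = tvm.tir.transform.Filter(lambda func: len(func.params) > 0)(mod)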
"""
return _ffi_api.Filter(fcond) # type: ignore
def InjectPTXAsyncCopy():
"""Rewrite global to shared memory copy on CUDA with asyncronous copy.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InjectPTXAsyncCopy() # type: ignore
def RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=False):
"""Remove weight layout rewrite block before benchmarking during tuning stage.
Parameters
----------
skip_ndarray_rewrite : bool
If True, exact rewrite of NDArray, according to the given index map, will be skipped.
Only the shape of the NDArray is transformed correctly, and the content of the destination
array will be filled with random values.
When this pass is called many times during MetaSchedule tuning, the raw data of NDArray,
before and after rewrite, does not matter. Since NDArray layout rewrite, using IndexMap's
MapNDArray, is currently slow, skipping the exact rewrite is sometimes necessary.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite) # type: ignore
def ManifestSharedMemoryLocalStage():
"""Add the explicit local stage for the shared memory access on GPU.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.ManifestSharedMemoryLocalStage() # type: ignore
def InstrumentProfileIntrinsics():
"""Insert intrinsic calls to instrument function and loop level profiling.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.InstrumentProfileIntrinsics() # type: ignore
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from . import analysis
from . import transform
from .utils import BufferInfo
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp"""
import tvm._ffi
tvm._ffi._init_api("tir.usmp", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/analysis/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from .analysis import extract_buffer_info
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/analysis/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp.analysis"""
import tvm._ffi
tvm._ffi._init_api("tir.usmp.analysis", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/analysis/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Analysis Python API for passes"""
# pylint: disable=invalid-name
from . import _ffi_api
from ...function import PrimFunc
from ....ir.module import IRModule
def extract_buffer_info(main_func: PrimFunc, mod: IRModule):
"""Convert Parallel For Loop to Serial.
Parameters
----------
main_func: tvm.tir.PrimFunc
The main function containing calls to operator PrimFuncs.
mod : tvm.ir.IRModule
The full IRModule containing all PrimFuncs
Returns
-------
Map<tir::Stmt, BufferInfo>
extracted buffer info objects
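    Example
    -------
    A rough sketch, assuming ``mod`` is a lowered IRModule whose entry PrimFunc is
    ``mod["main"]`` (the function name is illustrative only):
    .. code-block:: python
        from tvm.tir import usmp
        buffer_info_map = usmp.analysis.extract_buffer_info(mod["main"], mod)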
"""
return _ffi_api.extract_buffer_info(main_func, mod)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
"""Namespace for Unified Static Memory Planner"""
from .transform import convert_pool_allocations_to_offsets
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.tir.usmp.analysis"""
import tvm._ffi
tvm._ffi._init_api("tir.usmp.transform", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/transform/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Transform Python API for passes"""
# pylint: disable=invalid-name
from typing import Dict
import tvm
from tvm.tir import Stmt
from tvm.tir.usmp.utils import PoolAllocation
from . import _ffi_api
def convert_pool_allocations_to_offsets(
pool_allocations: Dict[Stmt, PoolAllocation], emit_tvmscript_printable: bool = False
) -> tvm.transform.Pass:
"""Convert pool allocations to Load nodes with offsets from pools.
Parameters
----------
pool_allocations : Dict[Stmt, PoolAllocation]
Allocate or AllocateConst node to pool allocation mapping
    emit_tvmscript_printable : bool
        A toggle to emit a TVMScript-printable IRModule for unit tests,
        removing all attributes that should be attached for integration
Returns
-------
ret: tvm.transform.Pass
The registered pass that converts the allocations to offsets.
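    Example
    -------
    A rough sketch, assuming ``pool_allocations`` was produced by a USMP algorithm and
    ``mod`` is the lowered IRModule (names are illustrative only):
    .. code-block:: python
        from tvm.tir.usmp import transform as usmp_transform
        offset_pass = usmp_transform.convert_pool_allocations_to_offsets(pool_allocations)
        mod = offset_pass(mod)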
"""
return _ffi_api.ConvertPoolAllocationsToOffsets(pool_allocations, emit_tvmscript_printable)
| https://github.com/zk-ml/tachikoma |
python/tvm/tir/usmp/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Utilities and Data Structures"""
# pylint: disable=invalid-name
from typing import Optional, List
import tvm
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
from ...ir.memory_pools import PoolInfo
# The allocate node attribute to indicate candidate memory pools.
# This needs to be kept in sync with CANDIDATE_MEMORY_POOL_ATTR in
# include/tvm/tir/usmp/utils.h
CANDIDATE_MEMORY_POOL_ATTR = "candidate_memory_pools"
def use_workspace_io_is_enabled() -> bool:
"""
Check whether placing I/O tensors in the workspace is enabled.
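    Example
    -------
    A minimal sketch showing how the flag is controlled through the pass context:
    .. code-block:: python
        import tvm
        from tvm.tir.usmp import utils
        with tvm.transform.PassContext(config={"tir.usmp.use_workspace_io": True}):
            assert utils.use_workspace_io_is_enabled()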
"""
ctx = tvm.transform.PassContext.current()
return bool(ctx.config.get("tir.usmp.use_workspace_io", False))
@register_object("tir.usmp.BufferInfo")
class BufferInfo(Object):
"""BufferInfo object holds information related to buffers
that are associated with tir.allocates and tir.allocate_consts
that will be used with USMP
Parameters
----------
name_hint : str
The name associated with the buffer (derived from TIR)
size_bytes : int
The size in bytes
pool_candidates : List[PoolInfo]
        The list of candidate pools in which this buffer could be placed
alignment : Optional[int]
The byte alignment required in the workspace memory
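    Example
    -------
    A minimal construction sketch, assuming ``pool`` is a previously created
    :py:class:`tvm.ir.memory_pools.PoolInfo` (illustrative only):
    .. code-block:: python
        buf_info = BufferInfo("conv2d_out", size_bytes=1024, pool_candidates=[pool], alignment=16)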
"""
def __init__(
self,
name_hint: str,
size_bytes: int,
pool_candidates: List[PoolInfo],
alignment: Optional[int] = None,
):
self.__init_handle_by_constructor__(
_ffi_api.BufferInfo, # type: ignore # pylint: disable=no-member
name_hint,
size_bytes,
pool_candidates,
alignment,
)
def set_conflicts(self, conflicts: list):
"""Sets the conflicting array of buffer info objects"""
_ffi_api.BufferInfoSetConflicts(self, conflicts)
@register_object("tir.usmp.PoolAllocation")
class PoolAllocation(Object):
"""PoolAllocation object holds information related to an allocation
that indicates an offset in a pool
Parameters
----------
pool_info : PoolInfo
        The PoolInfo to which this allocation corresponds
byte_offset : int
The offset in the pool where the allocate node should be placed
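    Example
    -------
    A minimal sketch, assuming ``pool`` is an existing PoolInfo (illustrative only):
    .. code-block:: python
        alloc = PoolAllocation(pool_info=pool, byte_offset=128)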
"""
def __init__(self, pool_info: PoolInfo, byte_offset: int):
self.__init_handle_by_constructor__(
_ffi_api.PoolAllocation, # type: ignore # pylint: disable=no-member
pool_info,
byte_offset,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""TVM Operator Inventory.
TOPI is the operator collection library for TVM, providing sugar
for constructing compute declarations as well as optimized schedules.
Some of the schedule functions may have been specially optimized for a
specific workload.
"""
from tvm._ffi.libinfo import __version__
# Ensure C++ schedules get registered first, so python schedules can
# override them.
from . import cpp
from .math import *
from .tensor import *
from .generic_op_impl import *
from .reduction import *
from .transform import *
from .broadcast import *
from .sort import *
from .scatter import *
from .sparse_fill_empty_rows import *
from .sparse_reshape import *
from .scatter_add import *
from .argwhere import *
from .scan import *
from .einsum import *
from .unique import *
from .searchsorted import *
from .stft import *
from . import generic
from . import nn
from . import x86
from . import cuda
from . import gpu
from . import arm_cpu
from . import mali
from . import bifrost
from . import intel_graphics
from . import utils
from . import rocm
from . import vision
from . import image
from . import sparse
from . import hls
from . import random
from . import hexagon
from . import adreno
# error reporting
from .utils import InvalidShapeError
# not import testing by default
# because testing can have extra deps that are not necessary
# we can import them from test cases explicitly
# from . import testing
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""Qualcomm Adreno GPU specific declaration and schedules."""
from .conv2d_nchw import *
from .depthwise_conv2d_nchw import *
from .conv2d_nhwc import *
from .depthwise_conv2d_nhwc import *
from .pooling import *
from .conv2d_alter_op import *
from .conv2d_nchw_winograd import *
from .conv2d_nhwc_winograd import *
from .injective import schedule_injective
from .reduction import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op for Qualcomm Adreno GPU"""
import logging
import re
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from ..utils import get_const_tuple
from .utils import infer_tile_size
from ..nn import conv2d_alter_layout
logger = logging.getLogger("topi")
# Regex matchers for the supported blocked layouts to be transformed
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWo_matcher = re.compile("^OIHW[0-9]+o$")
_NHWCc_matcher = re.compile("^NHWC[0-9]+c$")
_HWIOo_matcher = re.compile("^HWIO[0-9]+o$")
_HWOIo_matcher = re.compile("^HWOI[0-9]+o$")
@conv2d_alter_layout.register("adreno")
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
"""
    Prepare the new conv2d with the proper target blocked layout attributes.
    OpenCL textures support 1d/2d/3d/4d textures, but a read always fetches only 4 elements
    in a line. For that reason we currently support only conversions blocked by 4 on the
    innermost dimension:
    NCHW -> NCHW4c & OIHW -> OIHW4o
    NHWC -> NHWC4c & HWIO -> HWIO4o & HWOI -> HWOI4o
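    Example
    -------
    A rough sketch of how this hook is usually triggered; the target string and ``mod``
    are illustrative, and the hook itself is invoked by the AlterOpLayout pass:
    .. code-block:: python
        import tvm
        from tvm import relay
        with tvm.target.Target("opencl -device=adreno"):
            mod = relay.transform.AlterOpLayout()(mod)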
"""
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
new_attrs = {k: attrs[k] for k in attrs.keys()}
# Parse the attributes.
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data_tensor, kernel_tensor = tinfos
data_dtype = data_tensor.dtype
out_dtype = out_type.dtype
if isinstance(dispatch_ctx, autotvm.task.ApplyGraphBest):
cfg = dispatch_ctx.query(target, None)
workload = cfg.workload
else:
impl, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
if impl.name.find("winograd") != -1:
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
assert (data_layout == "NCHW" and kernel_layout == "OIHW") or (
data_layout == "NHWC" and kernel_layout == "HWIO"
)
if data_layout == "NCHW":
N, CI, H, W = get_const_tuple(data_tensor.shape)
CO, _, KH, KW = get_const_tuple(kernel_tensor.shape)
weight = inputs[1]
else:
N, H, W, CI = get_const_tuple(data_tensor.shape)
KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)
weight = relay.layout_transform(inputs[1], "HWIO", "OIHW")
# Pre-compute weight transformation in winograd
tile_size = infer_tile_size(data_tensor, data_layout)
# alpha, alpha, CO, CI
weight = relay.nn.contrib_conv2d_winograd_weight_transform(
weight, tile_size=tile_size
)
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
return None
cfg = dispatch_ctx.query(target, workload)
topi_tmpl = workload[0]
if "conv2d_nchw_winograd" in topi_tmpl:
suffix = "_acc32" if "acc32" in topi_tmpl else ""
wkl_name = "conv2d_nchw_winograd_without_weight_transform" + suffix + ".image2d"
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
tile_size = infer_tile_size(data_tensor, data_layout)
if len(data_tensor.shape) == 5:
assert data_layout == "NCHW4c" and kernel_layout == "OIHW4o"
N, CI, H, W, CB = get_const_tuple(data_tensor.shape)
CO, _, KH, KW, COB = get_const_tuple(kernel_tensor.shape)
weight = relay.layout_transform(inputs[1], "OIHW4o", "OIHW")
weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
weight = relay.layout_transform(weight, "HWOI", "HWIO4o")
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO * COB
new_data = data_tensor
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI * CB, CO, COB),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data_tensor.shape)
CO, _, KH, KW = get_const_tuple(kernel_tensor.shape)
# pre-compute weight transformation in winograd
# alpha, alpha, CO, CI
weight = relay.nn.contrib_conv2d_winograd_weight_transform(inputs[1], tile_size=tile_size)
weight = relay.transpose(weight, axes=[2, 3, 0, 1]) # HWOI -> OIHW
# (oc, ic, h, w) -> (h, w, ic, oc)
new_attrs["kernel_layout"] = "HWIO"
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
# Store the same config for the altered operator (workload)
new_data = data_tensor
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel_tensor.dtype
)
in_channel_block = CI % 4
if in_channel_block == 0:
in_channel_block = 4
num_filter_block = CO % 4
if num_filter_block == 0:
num_filter_block = 4
if in_channel_block != 4 or num_filter_block != 4:
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
new_attrs["data_layout"] = "NCHW%dc" % in_channel_block
# (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4)
new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block
new_attrs["out_layout"] = "NCHW%dc" % num_filter_block
# Store altered operator's config
new_data = te.placeholder(
(N, CI // in_channel_block, H, W, in_channel_block), dtype=data_dtype
)
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO // num_filter_block, num_filter_block),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_weight,
strides,
padding,
dilation,
out_dtype,
],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
if "conv2d_nhwc_winograd" in topi_tmpl:
suffix = "_acc32" if "acc32" in topi_tmpl else ""
wkl_name = "conv2d_nhwc_winograd_without_weight_transform" + suffix + ".image2d"
if dilation != (1, 1):
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None
tile_size = infer_tile_size(data_tensor, data_layout)
if len(data_tensor.shape) == 5:
assert data_layout == "NHWC4c" and kernel_layout == "HWIO4o"
N, CI, H, W, CB = get_const_tuple(data_tensor.shape)
KH, KW, _, CO, COB = get_const_tuple(kernel_tensor.shape)
weight = relay.layout_transform(inputs[1], "HWIO4o", "OIHW")
weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
weight = relay.layout_transform(weight, "HWOI", "HWIO4o")
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO * COB
new_data = data_tensor
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI * CB, CO, COB),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
assert data_layout == "NHWC" and kernel_layout == "HWIO"
N, H, W, CI = get_const_tuple(data_tensor.shape)
KH, KW, _, CO = get_const_tuple(kernel_tensor.shape)
# pre-compute weight transformation in winograd
weight = relay.layout_transform(inputs[1], "HWIO", "OIHW")
weight = relay.nn.contrib_conv2d_winograd_weight_transform(weight, tile_size=tile_size)
weight = relay.transpose(weight, axes=[0, 1, 3, 2]) # HWOI -> HWIO
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
# Store the same config for the altered operator (workload)
new_data = data_tensor
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO), dtype=kernel_tensor.dtype
)
in_channel_block = CI % 4
if in_channel_block == 0:
in_channel_block = 4
num_filter_block = CO % 4
if num_filter_block == 0:
num_filter_block = 4
if in_channel_block != 4 or num_filter_block != 4:
new_workload = autotvm.task.args_to_workload(
[new_data, new_weight, strides, padding, dilation, out_dtype],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
new_attrs["data_layout"] = "NHWC%dc" % in_channel_block
# (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4)
new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block
new_attrs["out_layout"] = "NHWC%dc" % num_filter_block
# Store altered operator's config
new_data = te.placeholder(
(N, H, W, CI // in_channel_block, in_channel_block), dtype=data_dtype
)
new_weight = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, CI, CO // num_filter_block, num_filter_block),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_weight,
strides,
padding,
dilation,
out_dtype,
],
wkl_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight, **new_attrs
)
if "conv2d_nchwc" in topi_tmpl: # covers both conv2d_nchwc and depthwise_conv2d_nchwc
if data_layout == "NCHW" and kernel_layout == "OIHW":
batch, in_channels, in_height, in_width = data_tensor.shape
out_channles, _, kernel_h, kernel_w = kernel_tensor.shape
in_channel_block = in_channels % 4
if in_channel_block == 0:
in_channel_block = 4
num_filter_block = out_channles % 4
if num_filter_block == 0:
num_filter_block = 4
            # no support yet for tensors that are not divisible by a factor of 4
if num_filter_block != 4:
return None
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, in_filter_channel, kh, kw = get_const_tuple(kernel_tensor.shape)
# update new attrs
new_attrs["channels"] = out_channel
if in_channel_block == 4:
new_attrs["data_layout"] = "NCHW%dc" % in_channel_block
else:
new_attrs["data_layout"] = "NCHW"
# (oc, ic, h, w) -> (OC, ic, h, w, oc)
new_attrs["kernel_layout"] = "OIHW%do" % num_filter_block
new_attrs["out_layout"] = "NCHW%dc" % num_filter_block
# Store altered operator's config for applying of tuned AutoTVM statistics
if in_channel_block == 4:
new_data = te.placeholder(
(batch_size, in_channel // in_channel_block, height, width, in_channel_block),
dtype=data_dtype,
)
else:
new_data = data_tensor
new_kernel = te.placeholder(
(out_channel // num_filter_block, in_filter_channel, kh, kw, num_filter_block),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
out_dtype,
],
topi_tmpl, # "conv2d_nchwc.image2d",
)
dispatch_ctx.update(target, new_workload, cfg)
else:
assert _NCHWc_matcher.match(data_layout)
assert _OIHWo_matcher.match(kernel_layout)
return relay.nn.conv2d(*inputs, **new_attrs)
if "conv2d_nhwc" in topi_tmpl: # covers both conv2d_nhwcc and depthwise_conv2d_nhwcc
if (data_layout == "NHWC" and kernel_layout == "HWIO") or (
data_layout == "NHWC" and kernel_layout == "HWOI"
):
if kernel_layout == "HWIO":
batch_size, in_height, in_width, in_channels = data_tensor.shape
kernel_h, kernel_w, in_filter_channel, out_channles = kernel_tensor.shape
else:
batch_size, in_height, in_width, in_channels = data_tensor.shape
kernel_h, kernel_w, out_channles, in_filter_channel = kernel_tensor.shape
in_channel_block = in_channels % 4
if in_channel_block == 0:
in_channel_block = 4
num_filter_block = out_channles % 4
if num_filter_block == 0:
num_filter_block = 4
            # no support yet for tensors that are not divisible by a factor of 4
if num_filter_block != 4:
return None
# update new attrs
new_attrs["channels"] = out_channles
if in_channel_block == 4:
new_attrs["data_layout"] = "NHWC%dc" % in_channel_block
else:
new_attrs["data_layout"] = "NHWC"
# (h, w, ic, oc) -> (h, w, ic, OC, oc)
if kernel_layout == "HWIO":
new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block
else:
new_attrs["kernel_layout"] = "HWOI%do" % num_filter_block
new_attrs["out_layout"] = "NHWC%dc" % num_filter_block
# Store altered operator's config for applying of tuned AutoTVM statistics
if in_channel_block == 4:
new_data = te.placeholder(
(
batch_size,
in_height,
in_width,
in_channels // in_channel_block,
in_channel_block,
),
dtype=data_dtype,
)
else:
new_data = data_tensor
if kernel_layout == "HWIO":
new_kernel = te.placeholder(
(
kernel_h,
kernel_w,
in_filter_channel,
out_channles // num_filter_block,
num_filter_block,
),
dtype=kernel_tensor.dtype,
)
else:
new_kernel = te.placeholder(
(
kernel_h,
kernel_w,
out_channles // num_filter_block,
in_filter_channel,
num_filter_block,
),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
else:
assert _NHWCc_matcher.match(data_layout)
assert _HWIOo_matcher.match(kernel_layout) or _HWOIo_matcher.match(kernel_layout)
return relay.nn.conv2d(*inputs, **new_attrs)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d nchw schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_default_conv2d_config,
get_texture_storage,
)
@autotvm.register_topi_schedule("conv2d_nchwc.image2d")
def schedule_conv2d_nchwc(cfg, outs):
"""Create the schedule for conv2d_nchw"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "adreno_conv2d_latest_op":
schedule_conv2d_NCHWc_KCRSk(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nchwc.image2d")
def conv2d_nchwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""
Convolution operator in NCHWc layout.
Algo:
    1. Convert into the blocked format if the original tensor is 4d.
       In case of AutoTVM we replace the conversion with plain placeholder tensors, since such
       a conversion is absent for a real blocked convolution and there is no sense in tuning it
    2. Expand spatial dimensions so that width and height are divisible by a factor of 4.
       This leads to a slightly bigger amount of compute but allows utilizing the GPU much better
    3. Add paddings. This happens even if no pad is needed originally. This is useful
       to work around the gaps of texture annotation between Primary Functions
       and the limited support of textures in schedules. Later on this pad will be executed
       separately and will produce a texture
    4. 5d convolution compute with accumulation into out_dtype
    5. Cast to the original output data type
    6. For the case of 4d convolution: convert the output from 5d to 4d
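    Example
    -------
    An illustrative sketch of the shapes involved (the compute itself is normally reached
    through the TOPI op strategy rather than called directly; the numbers are examples only):
    .. code-block:: python
        from tvm import te
        # 4d inputs like these are repacked internally into the blocked 5d layouts
        # NCHW4c / OIHW4o, e.g. (1, 8, 56, 56, 4) for data and (16, 32, 3, 3, 4) for the filter.
        data = te.placeholder((1, 32, 56, 56), name="data", dtype="float16")
        kernel = te.placeholder((64, 32, 3, 3), name="kernel", dtype="float16")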
"""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
convert_from4d = False
if len(Input.shape) == 4:
batch, in_channels, in_height, in_width = Input.shape
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
dshape = (batch, in_channel_chunks, in_height, in_width, in_channel_block)
Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
else:
Input = pack_input(
Input,
"NCHW",
batch,
in_channel_chunks,
in_channel_block,
in_channel_tail,
in_height,
in_width,
)
else:
batch, in_channel_chunks, in_height, in_width, in_channel_block = Input.shape
if len(Filter.shape) == 4:
out_channles, in_filter_channels, kernel_h, kernel_w = Filter.shape
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
kshape = (out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block)
Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
else:
convert_from4d = True
Filter = pack_filter(
Filter,
"OIHW",
out_channel_chunks,
out_channel_block,
out_channel_tail,
in_filter_channels,
in_channel_chunks,
in_channel_block,
in_channel_tail,
kernel_h,
kernel_w,
)
else:
out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block = Filter.shape
out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
)
temp = add_pad(
Input,
"NCHW",
out_height_orig,
out_width_orig,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
padding,
stride_h,
stride_w,
)
rcc = te.reduce_axis((0, in_channel_chunks), name="rc")
rcb = te.reduce_axis((0, in_channel_block), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch, out_channel_chunks, out_height, out_width, out_channel_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
* Filter[ffc, rcc * in_channel_block + rcb, ry, rx, ffb]
).astype(out_dtype),
axis=[rcc, rcb, ry, rx],
),
tag="conv2d_nchwc",
)
if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
dummy_cast = te.compute(
(batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype(out_dtype),
tag="dummy_cast",
)
return te.compute(
(batch, out_channles, out_height_orig, out_width_orig),
lambda n, c, y, x: dummy_cast[n, c // out_channel_block, y, x, c % out_channel_block],
tag="adreno_conv2d_latest_op",
)
else:
return te.compute(
(batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype(out_dtype),
tag="adreno_conv2d_latest_op",
)
def schedule_conv2d_NCHWc_KCRSk(cfg, s, output):
"""
schedule optimized for batch size = 1
Algo:
    1. Split the output axes into three parts: global work size, vthread, local work size.
       The limitations for tuning include heuristics from some tuned networks to limit the
       search space and avoid spending much time on useless configurations.
    2. In case of 4d convolution, schedule the copying of the input (and filter) into
       5d tensors
    3. The pad should be scheduled separately to create an independent OpenCL kernel. If the
       pad is inlined into the convolution, this gives a 1.5x performance drop
    4. We are using cache_read for intermediate tensors to produce a texture and guarantee
       the best performance on the next stage.
       The weights are managed through the static texture planning mechanism and are guaranteed
       to come in the texture memory scope.
       This way we are calling cache_read only for the data tensor
    5. For 5d convolution we schedule the latest op with binding of the 5d axes and vectorize
       for textures
       For a 4d tensor we do the same for the latest blocked stage, i.e. the conversion
       of the data type
    6. In case of 4d conv we need to schedule the postops as well
"""
latest = s.outputs[0].output(0)
if len(latest.op.axis) == 4:
latest_blocked = dummy = output.op.input_tensors[0]
conv = dummy.op.input_tensors[0]
else:
conv = output.op.input_tensors[0]
latest_blocked = latest
pad_data, kernel = s[conv].op.input_tensors
filter_pack_rt = bool(
isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
)
if "pad_temp" in pad_data.op.name:
input_pad_temp = pad_data.op.input_tensors[0]
else:
input_pad_temp = pad_data
input_pack_rt = bool(
isinstance(input_pad_temp.op, tvm.te.ComputeOp) and "input_pack" in input_pad_temp.op.tag
)
##### space definition begin #####
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
if conv.shape[1] % 2 == 0:
min_threads_div = 2
else:
min_threads_div = 1
cfg.define_split(
"tile_fc",
fc,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8
and entity.size[2] >= min_threads_div
and entity.size[2] < 256,
)
cfg.define_split(
"tile_y",
y,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
)
cfg.define_split(
"tile_x",
x,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
cfg.multi_filter(
filter=lambda entity: ( # pylint: disable=chained-comparison
entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
)
<= 24
and 32
<= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
< 1024
)
if cfg.is_fallback:
get_default_conv2d_config(cfg, conv.shape[1], conv.shape[2], conv.shape[3])
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
# There are several conditions that have to be handled:
    # 1. If we are in tuning, we always add a cache read for the data to the main conv kernel
    #    to get a texture in the tuning OpenCL kernel
    # 2. If we are repacking the input at runtime, we should always explicitly schedule this extra
    #    stage of data copy from 4d to 5d (referred to as pack_data).
    # 3. If we have a pad (independently of whether we have a runtime repack or not), we should
    #    inline it into the cache_read("texture")
if autotvm.GLOBAL_SCOPE.in_tuning or input_pack_rt:
if autotvm.GLOBAL_SCOPE.in_tuning:
if "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
else:
if "pad_temp" in pad_data.op.name:
pack_data = pad_data.op.input_tensors[0]
bind_data_copy(s[pack_data])
s[pad_data].compute_inline()
else:
pack_data = pad_data
bind_data_copy(s[pack_data])
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
elif "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
# create cache stage
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
if autotvm.GLOBAL_SCOPE.in_tuning or filter_pack_rt:
if not autotvm.GLOBAL_SCOPE.in_tuning:
bind_data_copy(s[kernel])
WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
bind_data_copy(s[WT])
s[conv].set_scope("local")
if latest_blocked == latest and output != latest:
s[output].compute_inline()
# tile and bind spatial axes
n, fc, y, x, fb = s[latest_blocked].op.axis
kernel_scope, n = s[latest_blocked].split(n, nparts=1)
bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
bf = s[latest_blocked].fuse(n, bf)
s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
s[latest_blocked].bind(vf, te.thread_axis("vthread"))
s[latest_blocked].bind(vy, te.thread_axis("vthread"))
s[latest_blocked].bind(vx, te.thread_axis("vthread"))
s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
s[latest_blocked].vectorize(fb)
s[conv].compute_at(s[latest_blocked], tx)
# tile reduction axes
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, conv, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[conv].vectorize(fb)
s[conv].unroll(rcb)
# unroll
s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
if latest_blocked != latest:
s[latest].compute_root()
bind_data_copy(s[latest], 1)
if latest != output:
s[output].compute_inline()
N, OCC, OH, OW, OCB = get_const_tuple(latest_blocked.shape)
_, IC, KH, KW, _ = get_const_tuple(kernel.shape)
ICKHKW = IC * KH * KW
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_nchw_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd NCHW template for Adreno backend"""
import logging
from tvm import autotvm
from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
logger = logging.getLogger("conv2d_nchw_winograd")
@autotvm.register_topi_compute("conv2d_nchw_winograd.image2d")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
return conv2d_nchw_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.image2d")
def schedule_conv2d_nchw_winograd(cfg, outs):
return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at")
@autotvm.register_topi_compute("conv2d_nchw_winograd_without_weight_transform.image2d")
def conv2d_nchw_winograd_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
return conv2d_nchw_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
)
@autotvm.register_topi_schedule("conv2d_nchw_winograd_without_weight_transform.image2d")
def schedule_conv2d_nchw_winograd_without_weight_transform(cfg, outs):
return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at", pre_computed=True)
def conv2d_nchw_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
"""Compute declaration for winograd
Parameters
----------
cfg: ConfigEntity
The config for this template
data: tvm.te.Tensor
4-D or 5-D Data tensor with shape NCHW or NCHW4c
kernel: tvm.te.Tensor
4-D or 5-D tensor with shape OIHW or OIHW4o
strides: int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding: int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
pre_computed: bool
Flag if weights were pre computed if true or the weights should be
computed in runtime
Returns
-------
output: tvm.te.Tensor
4-D or 5-D with shape NCHW or NCHW4c
"""
return conv2d_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, "NCHW"
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d nhwc schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("conv2d_nhwc.image2d")
def schedule_conv2d_nhwc(cfg, outs):
"""Create the schedule for conv2d_nhwc"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "adreno_conv2d_latest_op":
schedule_conv2d_NHWC(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc.image2d")
def conv2d_nhwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""
Convolution operator in NHWC layout.
Algo:
    1. Convert into the blocked format if the original tensor is 4d.
       In case of AutoTVM we replace the conversion with plain placeholder tensors, since such
       a conversion is absent for a real blocked convolution and there is no sense in tuning it
    2. Expand spatial dimensions so that width and height are divisible by a factor of 4.
       This leads to a slightly bigger amount of compute but allows utilizing the GPU much better
    3. Add paddings. This happens even if no pad is needed originally. This is useful
       to work around the gaps of texture annotation between Primary Functions
       and the limited support of textures in schedules. Later on this pad will be executed
       separately and will produce a texture
    4. 5d convolution compute with accumulation into out_dtype
    5. Cast to the original output data type
    6. For the case of 4d convolution: convert the output from 5d to 4d
"""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
convert_from4d = False
if len(Input.shape) == 4:
batch, in_height, in_width, in_channels = Input.shape
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
dshape = (batch, in_height, in_width, in_channel_chunks, in_channel_block)
Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
else:
Input = pack_input(
Input,
"NHWC",
batch,
in_channel_chunks,
in_channel_block,
in_channel_tail,
in_height,
in_width,
)
else:
batch, in_height, in_width, in_channel_chunks, in_channel_block = Input.shape
if len(Filter.shape) == 4:
kernel_h, kernel_w, in_filter_channels, out_channles = Filter.shape
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
kshape = (kernel_h, kernel_w, in_filter_channels, out_channel_chunks, out_channel_block)
Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
else:
convert_from4d = True
Filter = pack_filter(
Filter,
"HWIO",
out_channel_chunks,
out_channel_block,
out_channel_tail,
in_filter_channels,
in_channel_chunks,
in_channel_block,
in_channel_tail,
kernel_h,
kernel_w,
)
else:
kernel_h, kernel_w, in_filter_channels, out_channel_chunks, out_channel_block = Filter.shape
out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
)
temp = add_pad(
Input,
"NHWC",
out_height_orig,
out_width_orig,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
padding,
stride_h,
stride_w,
)
rcc = te.reduce_axis((0, in_channel_chunks), name="rcc")
rcb = te.reduce_axis((0, in_channel_block), name="rcb")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch, out_height, out_width, out_channel_chunks, out_channel_block),
lambda nn, yy, xx, fc, fb: te.sum(
(
temp[nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcc, rcb]
* Filter[ry, rx, rcc * in_channel_block + rcb, fc, fb]
).astype(out_dtype),
axis=[ry, rx, rcc, rcb],
),
tag="conv2d_nhwc",
)
if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
dummy_cast = te.compute(
(batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
lambda n, y, x, fc, fb: conv[n, y, x, fc, fb].astype(out_dtype),
tag="dummy_cast",
)
return te.compute(
(batch, out_height_orig, out_width_orig, out_channles),
lambda n, y, x, c: dummy_cast[n, y, x, c // out_channel_block, c % out_channel_block],
tag="adreno_conv2d_latest_op",
)
else:
return te.compute(
(batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
lambda n, y, x, ffc, ffb: conv[n, y, x, ffc, ffb].astype(out_dtype),
tag="adreno_conv2d_latest_op",
)
def schedule_conv2d_NHWC(cfg, s, output):
"""
schedule optimized for batch size = 1
Algo:
    1. Split the output axes into three parts: global work size, vthread, local work size.
       The limitations for tuning include heuristics from some tuned networks to limit the
       search space and avoid spending much time on useless configurations.
    2. In case of 4d convolution, schedule the copying of the input (and filter) into
       5d tensors
    3. The pad should be scheduled separately to create an independent OpenCL kernel. If the
       pad is inlined into the convolution, this gives a 1.5x performance drop
    4. We are using cache_read for intermediate tensors to produce a texture and guarantee
       the best performance on the next stage.
       The weights are managed through the static texture planning mechanism and are guaranteed
       to come in the texture memory scope.
       This way we are calling cache_read only for the data tensor
    5. For 5d convolution we schedule the latest op with binding of the 5d axes and vectorize
       for textures
       For a 4d tensor we do the same for the latest blocked stage, i.e. the conversion
       of the data type
    6. In case of 4d conv we need to schedule the postops as well
"""
latest = s.outputs[0].output(0)
if len(latest.op.axis) == 4:
latest_blocked = dummy = output.op.input_tensors[0]
conv = dummy.op.input_tensors[0]
else:
conv = output.op.input_tensors[0]
latest_blocked = latest
pad_data, kernel = s[conv].op.input_tensors
filter_pack_rt = bool(
isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
)
if "pad_temp" in pad_data.op.name:
input_pad_temp = pad_data.op.input_tensors[0]
else:
input_pad_temp = pad_data
input_pack_rt = bool(
isinstance(input_pad_temp.op, tvm.te.ComputeOp) and "input_pack" in input_pad_temp.op.tag
)
##### space definition begin #####
n, y, x, fc, fb = s[conv].op.axis
ry, rx, rcc, rcb = s[conv].op.reduce_axis
if conv.shape[3] % 2 == 0:
min_threads_div = 2
else:
min_threads_div = 1
cfg.define_split(
"tile_fc",
fc,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8
and entity.size[2] >= min_threads_div
and entity.size[2] < 256,
)
cfg.define_split(
"tile_y",
y,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
)
cfg.define_split(
"tile_x",
x,
num_outputs=3,
filter=lambda entity: entity.size[1] <= 8 and entity.size[2] <= 16,
)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
cfg.multi_filter(
filter=lambda entity: ( # pylint: disable=chained-comparison
entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
)
<= 24
and 32
<= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
< 1024
)
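# The joint filter above keeps the total vthread count (the product of size[1] of the three
# splits) at or below 24 and the resulting thread block size (the product of size[2]) in the
# range [32, 1024). For example, tile_fc=(_, 2, 8), tile_y=(_, 2, 4), tile_x=(_, 1, 4) gives
# 2 * 2 * 1 = 4 vthreads and 8 * 4 * 4 = 128 threads and is accepted, while a configuration
# with 16 * 16 * 16 = 4096 threads would be rejected.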
if cfg.is_fallback:
get_default_conv2d_config(cfg, conv.shape[3], conv.shape[1], conv.shape[2])
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
# There are several conditions that have to be handled:
# 1. If we are tuning, we always add a cache read for the data to the main conv kernel
# to get a texture in the OpenCL kernel being tuned.
# 2. If we repack the input at runtime, we should always explicitly schedule this extra
# stage of data copy from 4d to 5d (referred to as pack_data).
# 3. If we have a pad (regardless of whether we have a runtime repack or not), we should
# inline it into the cache_read("texture").
if autotvm.GLOBAL_SCOPE.in_tuning or input_pack_rt:
if autotvm.GLOBAL_SCOPE.in_tuning:
if "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
else:
if "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
pack_data = pad_data.op.input_tensors[0]
bind_data_copy(s[pack_data])
else:
pack_data = pad_data
bind_data_copy(s[pack_data])
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
elif "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
# create cache stage
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
if autotvm.GLOBAL_SCOPE.in_tuning or filter_pack_rt:
if not autotvm.GLOBAL_SCOPE.in_tuning:
bind_data_copy(s[kernel])
WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
bind_data_copy(s[WT])
s[conv].set_scope("local")
if latest_blocked == latest and output != latest:
s[output].compute_inline()
# tile and bind spatial axes
n, y, x, fc, fb = s[latest_blocked].op.axis
kernel_scope, n = s[latest_blocked].split(n, nparts=1)
bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
by = s[latest_blocked].fuse(n, by)
s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
s[latest_blocked].bind(vf, te.thread_axis("vthread"))
s[latest_blocked].bind(vy, te.thread_axis("vthread"))
s[latest_blocked].bind(vx, te.thread_axis("vthread"))
s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
s[latest_blocked].vectorize(fb)
s[conv].compute_at(s[latest_blocked], tx)
# tile reduction axes
n, y, x, fc, fb = s[conv].op.axis
ry, rx, rcc, rcb = s[conv].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, conv, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[conv].vectorize(fb)
s[conv].unroll(rcb)
# unroll
s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
if latest_blocked != latest:
s[latest].compute_root()
bind_data_copy(s[latest], 1)
if latest != output:
s[output].compute_inline()
N, OH, OW, OCC, OCB = get_const_tuple(latest_blocked.shape)
KH, KW, IC, _, _ = get_const_tuple(kernel.shape)
ICKHKW = IC * KH * KW
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd NHWC template for Adreno backend"""
import logging
from tvm import autotvm
from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
logger = logging.getLogger("conv2d_nhwc_winograd")
@autotvm.register_topi_compute("conv2d_nhwc_winograd.image2d")
def conv2d_nhwc_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
return conv2d_nhwc_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=False
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd.image2d")
def schedule_conv2d_nhwc_winograd(cfg, outs):
return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at")
@autotvm.register_topi_compute("conv2d_nhwc_winograd_without_weight_transform.image2d")
def conv2d_nhwc_winograd_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
return conv2d_nhwc_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed=True
)
@autotvm.register_topi_schedule("conv2d_nhwc_winograd_without_weight_transform.image2d")
def schedule_conv2d_nhwc_winograd_without_weight_transform(cfg, outs):
return schedule_conv2d_winograd_impl(cfg, outs, tag="dummy_compute_at", pre_computed=True)
def conv2d_nhwc_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed
):
"""Compute declaration for winograd
Parameters
----------
cfg: ConfigEntity
The config for this template
data: tvm.te.Tensor
4-D or 5-D data tensor with shape NHWC or NHWC4c
kernel: tvm.te.Tensor
4-D or 5-D tensor with shape HWIO or HWIO4o
strides: int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding: int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
pre_computed: bool
True if the weights were already transformed (pre-computed); otherwise the weight
transform is computed at runtime
Returns
-------
output: tvm.te.Tensor
4-D or 5-D with shape NHWC or NHWC4c
"""
return conv2d_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, "NHWC"
)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/conv2d_winograd_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Common Winograd implementation for Adreno backend"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi import nn
from tvm.topi.utils import get_const_int, get_const_tuple, traverse_inline
from ..nn.winograd_util import winograd_transform_matrices
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
bind_data_copy,
get_texture_storage,
infer_tile_size,
)
def conv2d_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, pre_computed, layout
):
"""Compute declaration for winograd
Parameters
----------
cfg: ConfigEntity
The config for this template
data: tvm.te.Tensor
4-D or 5-D data tensor with shape NCHW, NHWC, NCHW4c or NHWC4c
kernel: tvm.te.Tensor
4-D or 5-D tensor with shape OIHW, HWIO, OIHW4o or HWIO4o
strides: int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding: int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
pre_computed: bool
True if the weights were already transformed (pre-computed); otherwise the weight
transform is computed at runtime
layout: str
NHWC or NCHW values are accepted
Returns
-------
output: tvm.te.Tensor
4-D or 5-D with shape NCHW, NHWC, NCHW4c or NHWC4c
"""
assert layout in ("NCHW", "NHWC")
tile_size = infer_tile_size(data, layout)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
convert_from4d = False
if len(data.shape) == 4:
convert_from4d = True
if layout == "NCHW":
N, DCI, H, W = get_const_tuple(data.shape)
else:
N, H, W, DCI = get_const_tuple(data.shape)
if not pre_computed:
if layout == "NCHW":
out_channels, CI, KH, KW = get_const_tuple(kernel.shape)
else:
KH, KW, CI, out_channels = get_const_tuple(kernel.shape)
else:
alpha, _, CI, out_channels = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(CI, 4)
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channels, 4)
if autotvm.GLOBAL_SCOPE.in_tuning is True:
if layout == "NCHW":
dshape = (N, in_channel_chunks, H, W, in_channel_block)
else:
dshape = (N, H, W, in_channel_chunks, in_channel_block)
if not pre_computed: # kernel tensor is raw tensor, do strict check
if layout == "NCHW":
kshape = (out_channel_chunks, CI, KH, KW, out_channel_block)
else:
kshape = (KH, KW, CI, out_channel_chunks, out_channel_block)
else:
kshape = (alpha, alpha, CI, out_channel_chunks, out_channel_block)
data = tvm.te.placeholder(dshape, data.dtype, name="data_placeholder")
kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel_placeholder")
else:
data = pack_input(
data, layout, N, in_channel_chunks, in_channel_block, in_channel_tail, H, W
)
kernel_layout = "OIHW" if layout == "NCHW" else "HWIO"
if not pre_computed: # kernel tensor is raw tensor, do strict check
kernel = pack_filter(
kernel,
kernel_layout,
out_channel_chunks,
out_channel_block,
out_channel_tail,
CI,
in_channel_chunks,
in_channel_block,
in_channel_tail,
KH,
KW,
)
else:
kernel = pack_filter(
kernel,
"HWIO",
out_channel_chunks,
out_channel_block,
out_channel_tail,
CI,
in_channel_chunks,
in_channel_block,
in_channel_tail,
alpha,
alpha,
)
if layout == "NCHW":
N, DCI, H, W, CB = get_const_tuple(data.shape)
else:
N, H, W, DCI, CB = get_const_tuple(data.shape)
if not pre_computed: # kernel tensor is raw tensor, do strict check
if layout == "NCHW":
CO, CI, KH, KW, COB = get_const_tuple(kernel.shape)
else:
KH, KW, CI, CO, COB = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert HSTR == 1 and WSTR == 1 and KH == KW
else:
alpha, _, CI, CO, COB = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(H, int) or not isinstance(W, int):
raise RuntimeError(
"adreno winograd conv2d doesn't support dynamic input\
height or width."
)
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
if layout == "NCHW":
data_pad = nn.pad(data, (0, 0, pt, pl, 0), (0, 0, pb, pr, 0), name="data_pad")
else:
data_pad = nn.pad(data, (0, pt, pl, 0, 0), (0, pb, pr, 0, 0), name="data_pad")
r = KW
m = tile_size
A, B, G = winograd_transform_matrices(m, r, data.dtype)
H = (H + pt + pb - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW if isinstance(N, int) else nH * nW
# transform kernel
if not pre_computed:
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
if layout == "NCHW":
kernel_pack = te.compute(
(alpha, alpha, CI, CO, COB),
lambda eps, nu, ci, co, cob: te.sum(
kernel[co][ci][r_kh][r_kw][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = te.compute(
(alpha, alpha, CI, CO, COB),
lambda eps, nu, ci, co, cob: te.sum(
kernel[r_kh][r_kw][ci][co][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
if layout == "NCHW":
N, CI, _, _, CB = get_const_tuple(data.shape)
else:
N, _, _, CI, CB = get_const_tuple(data.shape)
# pack input tile
if layout == "NCHW":
input_tile = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][c][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu][cb],
name="d",
)
else:
input_tile = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu][c][cb],
name="d",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
data_pack = te.compute(
(P, CI, alpha, alpha, CB),
lambda p, ci, eps, nu, cb: te.sum(
input_tile[r_a][r_b][ci][p][cb] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
)
# repack transformed data
data_pack_trans = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pack[p][c][eps][nu][cb],
name="data_pack_trans",
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
cb = te.reduce_axis((0, CB), name="cb")
bgemm = te.compute(
(alpha, alpha, CO, P, COB),
lambda eps, nu, co, p, cob: te.sum(
(
kernel_pack[eps][nu][ci * CB + cb][co][cob] * data_pack_trans[eps][nu][ci][p][cb]
).astype(out_dtype),
axis=[ci, cb],
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_b")
inverse = te.compute(
(CO, P, m, m, COB),
lambda co, p, vh, vw, cob: te.sum(
bgemm[r_a][r_b][co][p][cob] * (A[r_a][vh] * A[r_b][vw]).astype(out_dtype),
axis=[r_a, r_b],
),
name="inverse",
)
# output
if layout == "NCHW":
if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
output = te.compute(
(N, out_channels, H, W),
lambda n, c, h, w: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
idxmod(h, m)
][idxmod(w, m)][c % CB].astype(out_dtype),
name="output",
tag="dummy_compute_at",
)
else:
output = te.compute(
(N, CO, H, W, COB),
lambda n, co, h, w, cob: inverse[co][
n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
name="output",
tag="dummy_compute_at",
)
else:
if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
output = te.compute(
(N, H, W, out_channels),
lambda n, h, w, c: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
idxmod(h, m)
][idxmod(w, m)][c % CB].astype(out_dtype),
name="output",
tag="dummy_compute_at",
)
else:
output = te.compute(
(N, H, W, CO, COB),
lambda n, h, w, co, cob: inverse[co][
n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
name="output",
tag="dummy_compute_at",
)
if isinstance(N, int):
cfg.add_flop(2 * N * CO * COB * H * W * CI * CB * KH * KW)
return output
def schedule_conv2d_winograd_impl(cfg, outs, tag, pre_computed=False):
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == tag:
schedule_conv2d_winograd(cfg, s, op.output(0), pre_computed=pre_computed)
traverse_inline(s, outs[0].op, _callback)
return s
def schedule_conv2d_winograd(cfg, s, output, pre_computed):
"""Schedule winograd template"""
inverse = s[output].op.input_tensors[0]
bgemm, A = s[inverse].op.input_tensors
kernel_pack, data_pack_trans = s[bgemm].op.input_tensors
data_pack = s[data_pack_trans].op.input_tensors[0]
input_tile, B = s[data_pack].op.input_tensors
pad_data = s[input_tile].op.input_tensors[0]
# data transform
s[B].compute_inline()
s[A].compute_inline()
# probably will improve real topology execution
if autotvm.GLOBAL_SCOPE.in_tuning:
# Padding to texture
AA = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [input_tile])
bind_data_copy(s[AA])
s[input_tile].compute_inline()
OL = s.cache_write(data_pack, "local")
c, p, eps, nu, cb = s[data_pack].op.axis
fused = s[data_pack].fuse(c, p, eps, nu)
bx, tx = s[data_pack].split(fused, 128)
s[data_pack].vectorize(cb)
s[data_pack].bind(bx, te.thread_axis("blockIdx.x"))
s[data_pack].bind(tx, te.thread_axis("threadIdx.x"))
_, _, eps, nu, cb = s[OL].op.axis
r_a, r_b = s[OL].op.reduce_axis
s[OL].unroll(eps)
s[OL].unroll(nu)
s[OL].unroll(r_a)
s[OL].unroll(r_b)
s[OL].vectorize(cb)
s[OL].compute_at(s[data_pack], tx)
s[data_pack].set_scope(get_texture_storage(data_pack.shape))
s[data_pack_trans].compute_inline()
# transform kernel
if not pre_computed:
kernel, G = s[kernel_pack].op.input_tensors
eps, nu, ci, co, cob = s[kernel_pack].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
# skip this part during tuning to make records accurate
# this part will be pre-computed during pre-compute optimization pass
s[G].pragma(s[G].op.axis[0], "debug_skip_region")
s[kernel_pack].pragma(eps, "debug_skip_region")
else:
s[G].compute_inline()
r_a, r_b = s[kernel_pack].op.reduce_axis
for axis in [eps, nu, r_a, r_b]:
s[kernel_pack].unroll(axis)
fused = s[kernel_pack].fuse(ci, co)
bb, tt = s[kernel_pack].split(fused, 128)
s[kernel_pack].reorder(bb, tt, eps, nu, r_a, r_b, cob)
s[kernel_pack].vectorize(cob)
s[kernel_pack].bind(bb, te.thread_axis("blockIdx.x"))
s[kernel_pack].bind(tt, te.thread_axis("threadIdx.x"))
else:
kernel = kernel_pack
if isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag:
# manage scheduling of datacopy
pack_data = pad_data.op.input_tensors[0]
bind_data_copy(s[pack_data])
bind_data_copy(s[kernel])
elif isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
s[pad_data].compute_inline()
##### space definition begin #####
cfg.define_knob("auto_unroll_max_step", [0, 4, 16])
b1, b2, y, x, cb = s[bgemm].op.axis
rcc = s[bgemm].op.reduce_axis[0]
alpha = get_const_int(b1.dom.extent)
cfg.define_split(
"tile_y", y, num_outputs=3, filter=lambda entry: entry.size[2] <= 64 and entry.size[1] <= 16
)
min_x_div = 1
for bn in range(4, 0, -1):
if bgemm.shape[3] % bn == 0:
min_x_div = bn
break
cfg.define_split(
"tile_x",
x,
num_outputs=3,
filter=lambda entry: entry.size[2] <= 64
and entry.size[1] >= min_x_div
and entry.size[1] <= 16,
)
cfg.define_split("tile_rc", rcc, num_outputs=2)
cfg.multi_filter(
filter=lambda entity: 32 <= (entity["tile_y"].size[2] * entity["tile_x"].size[2]) < 1024
)
##### space definition end #####
# batch gemm
OL = s.cache_write(bgemm, "local")
if (
autotvm.GLOBAL_SCOPE.in_tuning
or isinstance(kernel.op, tvm.te.ComputeOp)
and "filter_pack" in kernel.op.tag
):
BB = s.cache_read(kernel_pack, get_texture_storage(kernel_pack.shape), [OL])
bind_data_copy(s[BB])
by = s[bgemm].fuse(b1, b2, y)
# tile and bind spatial axes
bgemm_scope, by = s[bgemm].split(by, nparts=1)
by, vy, ty = cfg["tile_y"].apply(s, bgemm, by)
bx, vx, tx = cfg["tile_x"].apply(s, bgemm, x)
s[bgemm].bind(by, te.thread_axis("blockIdx.y"))
s[bgemm].bind(bx, te.thread_axis("blockIdx.x"))
s[bgemm].bind(vy, te.thread_axis("vthread"))
s[bgemm].bind(vx, te.thread_axis("vthread"))
s[bgemm].bind(ty, te.thread_axis("threadIdx.y"))
s[bgemm].bind(tx, te.thread_axis("threadIdx.x"))
s[bgemm].reorder(bgemm_scope, by, bx, vy, vx, ty, tx, cb)
s[bgemm].vectorize(cb)
s[bgemm].set_scope(get_texture_storage(bgemm.shape))
# tile reduction axes
s[OL].compute_at(s[bgemm], tx)
b1, b2, y, x, cb = s[OL].op.axis
(rcc, rcb) = s[OL].op.reduce_axis
b = s[OL].fuse(b1, b2)
s[OL].reorder(b, y, x, rcc, rcb, cb)
# s[OL].unroll(rcb)
s[OL].pragma(rcb, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[OL].pragma(rcb, "unroll_explicit", True)
s[OL].vectorize(cb)
# schedule inverse, output and fusion
if output.op in s.outputs:
OL = None
else:
OL = output
s[OL].set_scope("local")
output = s.outputs[0]
if len(s[output].op.axis) == 4:
n, co, h, w = s[output].op.axis
cb = None
else:
n, co, h, w, cb = s[output].op.axis
inverse_scope, n = s[output].split(n, nparts=1)
fused = s[output].fuse(n, co, h, w)
bb, tt = s[output].split(fused, 128)
if cb is not None:
s[output].reorder(bb, tt, cb)
s[output].vectorize(cb)
s[output].bind(bb, te.thread_axis("blockIdx.x"))
s[output].bind(tt, te.thread_axis("threadIdx.x"))
if OL is not None:
s[OL].compute_at(s[output], tt)
co, p, vh, vw, cb = s[inverse].op.axis
r_a, r_b = s[inverse].op.reduce_axis
for axis in [vh, vw, r_a, r_b]:
s[inverse].unroll(axis)
s[inverse].vectorize(cb)
s[inverse].compute_at(s[output], tt)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/depthwise_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""depthwise_conv2d_nchw(c) schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("depthwise_conv2d_nchwc.image2d")
def schedule_depthwise_conv2d_nchwc(cfg, outs):
"""Create the schedule for depthwise conv2d_nchw4c_ohwi4o"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "adreno_dw_conv2d_latest_op":
schedule_depthwise_conv2d_NCHWc_KCRSk(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("depthwise_conv2d_nchwc.image2d")
def depthwise_conv2d_nchwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""
Depthwise convolution operator in NCHWc layout.
Algo:
1. Convert into the blocked format if the original tensor is 4d (see the shape sketch in
the comments below). During AutoTVM tuning we replace the conversion with plain placeholder
tensors, since no such conversion exists for a truly blocked convolution and there is no
point in including it in tuning.
2. Expand the spatial dimensions so that width and height are divisible by a factor of 4.
This leads to a slightly larger amount of compute but allows much better GPU utilization.
3. Add paddings. This happens even if we do not need a pad originally. This is useful for
working around the gaps of texture annotation between Primary Functions and the limited
support of textures in schedules. Later this pad will be executed separately and will
produce a texture.
4. 5d convolution compute accumulating into out_dtype.
5. Cast to the original output data type.
6. For the 4d convolution case: convert the output from 5d back to 4d.
"""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
convert_from4d = False
if len(Input.shape) == 4:
batch, in_channels, in_height, in_width = Input.shape
out_channles, in_filter_channels, kernel_h, kernel_w = Filter.shape
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
dshape = (batch, in_channel_chunks, in_height, in_width, in_channel_block)
Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
kshape = (out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block)
Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
else:
convert_from4d = True
Input = pack_input(
Input,
"NCHW",
batch,
in_channel_chunks,
in_channel_block,
in_channel_tail,
in_height,
in_width,
)
Filter = pack_filter(
Filter,
"OIHW",
out_channel_chunks,
out_channel_block,
out_channel_tail,
in_filter_channels,
in_channel_chunks,
in_channel_block,
in_channel_tail,
kernel_h,
kernel_w,
)
else:
batch, in_channel_chunks, in_height, in_width, in_channel_block = Input.shape
out_channel_chunks, in_filter_channels, kernel_h, kernel_w, out_channel_block = Filter.shape
out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
)
temp = add_pad(
Input,
"NCHW",
out_height_orig,
out_width_orig,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
padding,
stride_h,
stride_w,
)
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch, out_channel_chunks, out_height, out_width, out_channel_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[
nn,
ffc // in_filter_channels,
yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w,
ffb,
]
* Filter[ffc // in_filter_channels, ffc % in_filter_channels, ry, rx, ffb]
).astype(out_dtype),
axis=[ry, rx],
),
tag="depthwise_conv2d_nchwc",
)
if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
dummy_cast = te.compute(
(batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype(out_dtype),
tag="dummy_cast",
)
return te.compute(
(batch, out_channles, out_height_orig, out_width_orig),
lambda n, c, y, x: dummy_cast[n, c // out_channel_block, y, x, c % out_channel_block],
tag="adreno_dw_conv2d_latest_op",
)
else:
return te.compute(
(batch, out_channel_chunks, out_height_orig, out_width_orig, out_channel_block),
lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype(out_dtype),
tag="adreno_dw_conv2d_latest_op",
)
def schedule_depthwise_conv2d_NCHWc_KCRSk(cfg, s, output):
"""
schedule optimized for batch size = 1
Algo:
1. Split the output axes into three parts: global work size, vthread, local work size.
The tuning limitations include heuristics from some tuned networks to limit the search
space and avoid spending time on useless configurations.
2. For depthwise convolution it is better to inline the pad into the conv2d compute; the
divergence in the OpenCL kernel is not as significant as for regular conv2d.
3. For 5d convolution we schedule the latest op with binding of the 5d axes and vectorize
for textures.
For a 4d tensor we do the same for the latest blocked stage, i.e. the conversion of the
data type.
4. In case of 4d conv we need to schedule the post-ops as well.
"""
latest = s.outputs[0].output(0)
if len(latest.op.axis) == 4:
latest_blocked = dummy = output.op.input_tensors[0]
conv = dummy.op.input_tensors[0]
else:
conv = output.op.input_tensors[0]
latest_blocked = latest
##### space definition begin #####
n, fc, y, x, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=3)
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
cfg.multi_filter(
filter=lambda entity: ( # pylint: disable=chained-comparison
entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
)
<= 32
and 32
<= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
< 1024
)
if cfg.is_fallback:
get_default_conv2d_config(cfg, conv.shape[1], conv.shape[2], conv.shape[3])
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
if (
isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
): # len(latest.op.axis) == 4:
# manage scheduling of datacopy
pad_data, kernel = s[conv].op.input_tensors
if "pad_temp" in pad_data.op.name:
pack_data = pad_data.op.input_tensors[0]
bind_data_copy(s[pack_data])
else:
bind_data_copy(s[pad_data])
bind_data_copy(s[kernel])
pad_data, kernel = s[conv].op.input_tensors
if "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
s[conv].set_scope("local")
if latest_blocked == latest and output != latest:
s[output].compute_inline()
if autotvm.GLOBAL_SCOPE.in_tuning or len(latest.op.axis) == 4:
# create cache stage for tuning only or in case of 4d case
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
bind_data_copy(s[WT])
# tile and bind spatial axes
n, fc, y, x, fb = s[latest_blocked].op.axis
kernel_scope, n = s[latest_blocked].split(n, nparts=1)
bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
bf = s[latest_blocked].fuse(n, bf)
s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
s[latest_blocked].bind(vf, te.thread_axis("vthread"))
s[latest_blocked].bind(vy, te.thread_axis("vthread"))
s[latest_blocked].bind(vx, te.thread_axis("vthread"))
s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
s[latest_blocked].vectorize(fb)
s[conv].compute_at(s[latest_blocked], tx)
# tile reduction axes
n, fc, y, x, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
s[conv].vectorize(fb)
# unroll
s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
if latest_blocked != latest:
s[latest].compute_root()
bind_data_copy(s[latest], 1)
if latest != output:
s[output].compute_inline()
N, OCC, OH, OW, OCB = get_const_tuple(latest_blocked.shape)
_, _, KH, KW, ICB = get_const_tuple(kernel.shape)
KHKW = KH * KW
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW * ICB)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/depthwise_conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""depthwise_conv2d_nhwc(c) schedule on Qualcomm Adreno GPU"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple, traverse_inline
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
expand_spatial_dimensions,
add_pad,
bind_data_copy,
get_texture_storage,
get_default_conv2d_config,
)
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.image2d")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
"""Create the schedule for depthwise conv2d_nchw4c_ohwi4o"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "adreno_dw_conv2d_latest_op":
schedule_depthwise_conv2d_NHWC_HWOI(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.image2d")
def depthwise_conv2d_nhwc(cfg, Input, Filter, stride, padding, dilation, out_dtype):
"""
Depthwise convolution operator in NHWCc layout.
Algo:
1. Convert into the blocked format if the original tensor is 4d. During AutoTVM tuning we
replace the conversion with plain placeholder tensors, since no such conversion exists for
a truly blocked convolution and there is no point in including it in tuning.
2. Expand the spatial dimensions so that width and height are divisible by a factor of 4.
This leads to a slightly larger amount of compute but allows much better GPU utilization.
3. Add paddings. This happens even if we do not need a pad originally. This is useful for
working around the gaps of texture annotation between Primary Functions and the limited
support of textures in schedules. Later this pad will be executed separately and will
produce a texture.
4. 5d convolution compute accumulating into out_dtype.
5. Cast to the original output data type.
6. For the 4d convolution case: convert the output from 5d back to 4d.
"""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
convert_from4d = False
if len(Input.shape) == 4:
batch, in_height, in_width, in_channels = Input.shape
kernel_h, kernel_w, out_channles, in_filter_channels = Filter.shape
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(in_channels, 4)
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channles, 4)
if autotvm.GLOBAL_SCOPE.in_tuning:
dshape = (batch, in_height, in_width, in_channel_chunks, in_channel_block)
Input = tvm.te.placeholder(dshape, Input.dtype, name="data_placeholder")
kshape = (kernel_h, kernel_w, out_channel_block, in_filter_channels, out_channel_chunks)
Filter = tvm.te.placeholder(kshape, Filter.dtype, name="kernel_placeholder")
else:
convert_from4d = True
Input = pack_input(
Input,
"NHWC",
batch,
in_channel_chunks,
in_channel_block,
in_channel_tail,
in_height,
in_width,
)
Filter = pack_filter(
Filter,
"HWOI",
out_channel_chunks,
out_channel_block,
out_channel_tail,
in_filter_channels,
in_channel_chunks,
in_channel_block,
in_channel_tail,
kernel_h,
kernel_w,
)
else:
batch, in_height, in_width, in_channel_chunks, in_channel_block = Input.shape
kernel_h, kernel_w, out_channel_chunks, in_filter_channels, out_channel_block = Filter.shape
out_height_orig, out_height, out_width_orig, out_width = expand_spatial_dimensions(
in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
)
temp = add_pad(
Input,
"NHWC",
out_height_orig,
out_width_orig,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
padding,
stride_h,
stride_w,
)
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
conv = te.compute(
(batch, out_height, out_width, out_channel_chunks, out_channel_block),
lambda nn, yy, xx, ffc, ffb: te.sum(
(
temp[nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, ffc, ffb]
* Filter[ry, rx, ffc, 0, ffb]
).astype(out_dtype),
axis=[ry, rx],
),
tag="depthwise_conv2d_nhwc",
)
if convert_from4d and not autotvm.GLOBAL_SCOPE.in_tuning:
dummy_cast = te.compute(
(batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
lambda n, y, x, fc, fb: conv[n, y, x, fc, fb].astype(out_dtype),
tag="dummy_cast",
)
return te.compute(
(batch, out_height_orig, out_width_orig, out_channles),
lambda n, y, x, c: dummy_cast[n, y, x, c // out_channel_block, c % out_channel_block],
tag="adreno_dw_conv2d_latest_op",
)
else:
return te.compute(
(batch, out_height_orig, out_width_orig, out_channel_chunks, out_channel_block),
lambda n, y, x, ffc, ffb: conv[n, y, x, ffc, ffb].astype(out_dtype),
tag="adreno_dw_conv2d_latest_op",
)
def schedule_depthwise_conv2d_NHWC_HWOI(cfg, s, output):
"""
schedule optimized for batch size = 1
Algo:
1. Split the output axes into three parts: global work size, vthread, local work size.
The tuning limitations include heuristics from some tuned networks to limit the search
space and avoid spending time on useless configurations.
2. In case of 4d convolution, schedule the copying of the input (and filter) into 5d
tensors.
3. For depthwise convolution it is better to inline the pad into the conv2d compute; the
divergence in the OpenCL kernel is not as significant as for regular conv2d.
4. For 5d convolution we schedule the latest op with binding of the 5d axes and vectorize
for textures.
For a 4d tensor we do the same for the latest blocked stage, i.e. the conversion of the
data type.
5. In case of 4d conv we need to schedule the post-ops as well.
"""
latest = s.outputs[0].output(0)
if len(latest.op.axis) == 4:
latest_blocked = dummy = output.op.input_tensors[0]
conv = dummy.op.input_tensors[0]
else:
conv = output.op.input_tensors[0]
latest_blocked = latest
##### space definition begin #####
n, y, x, fc, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=3)
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
cfg.define_knob("unroll_explicit", [0, 1])
cfg.multi_filter(
filter=lambda entity: ( # pylint: disable=chained-comparison
entity["tile_fc"].size[1] * entity["tile_y"].size[1] * entity["tile_x"].size[1]
)
<= 32
and 32
<= (entity["tile_fc"].size[2] * entity["tile_y"].size[2] * entity["tile_x"].size[2])
< 1024
)
if cfg.is_fallback:
get_default_conv2d_config(cfg, conv.shape[3], conv.shape[1], conv.shape[2])
##### space definition end #####
pad_data, kernel = s[conv].op.input_tensors
if (
isinstance(kernel.op, tvm.te.ComputeOp) and "filter_pack" in kernel.op.tag
): # len(latest.op.axis) == 4:
# manage scheduling of datacopy
pad_data, kernel = s[conv].op.input_tensors
if "pad_temp" in pad_data.op.name:
pack_data = pad_data.op.input_tensors[0]
bind_data_copy(s[pack_data])
else:
bind_data_copy(s[pad_data])
bind_data_copy(s[kernel])
pad_data, kernel = s[conv].op.input_tensors
if "pad_temp" in pad_data.op.name:
s[pad_data].compute_inline()
s[conv].set_scope("local")
if latest_blocked == latest and output != latest:
s[output].compute_inline()
if autotvm.GLOBAL_SCOPE.in_tuning or len(latest.op.axis) == 4:
# create cache stage for tuning only or in case of 4d case
AT = s.cache_read(pad_data, get_texture_storage(pad_data.shape), [conv])
bind_data_copy(s[AT])
WT = s.cache_read(kernel, get_texture_storage(kernel.shape), [conv])
bind_data_copy(s[WT])
# tile and bind spatial axes
n, y, x, fc, fb = s[latest_blocked].op.axis
kernel_scope, n = s[latest_blocked].split(n, nparts=1)
bf, vf, tf = cfg["tile_fc"].apply(s, latest_blocked, fc)
by, vy, ty = cfg["tile_y"].apply(s, latest_blocked, y)
bx, vx, tx = cfg["tile_x"].apply(s, latest_blocked, x)
by = s[latest_blocked].fuse(n, by)
s[latest_blocked].bind(bf, te.thread_axis("blockIdx.z"))
s[latest_blocked].bind(by, te.thread_axis("blockIdx.y"))
s[latest_blocked].bind(bx, te.thread_axis("blockIdx.x"))
s[latest_blocked].bind(vf, te.thread_axis("vthread"))
s[latest_blocked].bind(vy, te.thread_axis("vthread"))
s[latest_blocked].bind(vx, te.thread_axis("vthread"))
s[latest_blocked].bind(tf, te.thread_axis("threadIdx.z"))
s[latest_blocked].bind(ty, te.thread_axis("threadIdx.y"))
s[latest_blocked].bind(tx, te.thread_axis("threadIdx.x"))
s[latest_blocked].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fb)
s[latest_blocked].vectorize(fb)
s[conv].compute_at(s[latest_blocked], tx)
# tile reduction axes
n, y, x, fc, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
ryo, ryi = cfg["tile_ry"].apply(s, conv, ry)
rxo, rxi = cfg["tile_rx"].apply(s, conv, rx)
s[conv].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
s[conv].vectorize(fb)
# unroll
s[latest_blocked].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[latest_blocked].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val)
if latest_blocked != latest:
s[latest].compute_root()
bind_data_copy(s[latest], 1)
if latest != output:
s[output].compute_inline()
N, OH, OW, OCC, OCB = get_const_tuple(latest_blocked.shape)
KH, KW, _, _, _ = get_const_tuple(kernel.shape)
KHKW = KH * KW
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/injective.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable,
"""Schedule for composition of injective operator"""
import tvm
from tvm import te
from .utils import bind_data_copy
from .. import utils
def schedule_injective_from_existing(sch, out):
"""Schedule for injective op from existing schedule.
Parameters
----------
sch: Schedule
The schedule to update.
out: Tensor
The tensor representing the injective op.
Returns
-------
sch: Schedule
The updated schedule.
"""
bind_data_copy(sch[out])
return sch
def schedule_injective(outs):
"""Schedule for injective op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
tvm.te.schedule.AutoInlineInjective(s)
for out in outs:
if not utils.is_empty_shape(out.shape):
schedule_injective_from_existing(s, out)
return s
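# Usage sketch (assumed entry point; in practice these schedules are selected through the
# Relay op strategy for the Adreno target). A hypothetical direct invocation could look like:
#
#   from tvm import te
#   from tvm.topi.adreno.injective import schedule_injective
#   x = te.placeholder((1, 32, 32, 8), name="x")
#   y = te.compute(x.shape, lambda *i: x(*i) + 1, name="y")
#   s = schedule_injective([y])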
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""pooling schedules for Qualcomm Adreno GPU"""
import tvm
from tvm import te
from .. import tag
def schedule_pool(outs, layout):
"""Schedule for various pooling operators.
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
s: Schedule
The computation schedule for pool.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(PaddedInput, Pool):
if isinstance(PaddedInput.op, tvm.te.ComputeOp):
s[PaddedInput].compute_inline()
num_thread = tvm.target.Target.current(allow_none=False).max_num_threads
num_thread = int(num_thread * 2)
if Pool.op in s.outputs:
Out = Pool
OL = s.cache_write(Pool, "local")
else:
Out = outs[0].op.output(0)
s[Pool].set_scope("local")
fused = s[Out].fuse(*s[Out].op.axis[:-1])
bx, tx = s[Out].split(fused, factor=num_thread)
s[Out].bind(bx, te.thread_axis("blockIdx.x"))
s[Out].bind(tx, te.thread_axis("threadIdx.x"))
s[Out].vectorize(s[Out].op.axis[-1])
if Pool.op in s.outputs:
s[OL].compute_at(s[Out], tx)
s[OL].vectorize(s[OL].op.axis[-1])
else:
s[Pool].compute_at(s[Out], tx)
s[Pool].vectorize(s[Pool].op.axis[-1])
scheduled_ops = []
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule pool
elif OP.tag.startswith("pool"):
PaddedInput = OP.input_tensors[0]
Pool = OP.output(0)
_schedule(PaddedInput, Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,too-many-locals,len-as-condition
"""Schedule for reduce operators"""
import numpy
from tvm import te
from ..utils import get_const_tuple
from .injective import schedule_injective_from_existing
from .utils import get_div
from ..cuda.reduction import schedule_reduce_impl
def _schedule_reduce_adreno(op, sch, is_idx_reduce=False):
if is_idx_reduce:
real_output = op.output(0)
temp_idx_input = op.input_tensors[0].op.output(0)
temp_val_input = op.input_tensors[0].op.output(1)
else:
real_output = op.output(0)
shape = get_const_tuple(real_output.shape)
latest4 = shape[-1] == 4
div4 = numpy.prod(shape) % 4 == 0
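# Vectorization strategy: if the innermost dimension is already 4 (e.g. a blocked
# (N, C//4, H, W, 4) output), it is vectorized directly; otherwise, when the total number
# of elements is divisible by 4 and this is not an argmax/argmin style reduction, the fused
# axis is split by a factor of 4 and the inner part is vectorized.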
# Fuse and split the axis
if latest4:
fused_outer = sch[real_output].fuse(
*[sch[real_output].op.axis[i] for i in range(len(sch[real_output].op.axis) - 1)]
)
else:
fused_outer = sch[real_output].fuse(
*[sch[real_output].op.axis[i] for i in range(len(sch[real_output].op.axis))]
)
ftc = numpy.prod(shape)
a = fused_outer
if latest4:
sch[real_output].vectorize(sch[real_output].op.axis[-1])
elif div4 and not is_idx_reduce:
a, b = sch[real_output].split(fused_outer, factor=4)
sch[real_output].vectorize(b)
ftc = ftc / 4
num_thread = get_div(ftc, 128)
bx, outer_in = sch[real_output].split(a, factor=num_thread)
sch[real_output].bind(bx, te.thread_axis("blockIdx.x"))
sch[real_output].bind(outer_in, te.thread_axis("threadIdx.y"))
if is_idx_reduce:
sch[temp_idx_input].compute_at(sch[real_output], outer_in)
sch[temp_val_input].compute_at(sch[real_output], outer_in)
def schedule_reduce(outs):
return schedule_reduce_impl(outs, _schedule_reduce_adreno, schedule_injective_from_existing)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/adreno/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""util functions to be reused in different compute/schedule on Qualcomm Adreno GPU"""
import tvm
import numpy
from tvm import te
from tvm._ffi.registry import register_func
from tvm.topi.utils import simplify
from tvm.topi import nn
from tvm.autotvm.task.space import SplitEntity
from ..utils import get_const_tuple
def get_div(value, start):
"""Returns the maximum divider for `value` starting from `start` value"""
div = 1
for d in range(start, 0, -1):
if (value % d) == 0:
div = d
break
return div
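# Illustrative self-checks (assumed values, not part of the original module):
assert get_div(200, 128) == 100  # largest divisor of 200 that does not exceed 128
assert get_div(96, 128) == 96    # 96 divides itself, so it is returned directly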
def split_to_chunks(extent, block):
"""
Splits the trip count value into chunks of the given block size and returns the tail as
well; the chunks and block cover or overlap the original value (see the worked examples
after this function).
If extent is divisible by block:
extent = chunks * block
else:
extent = (chunks - 1) * block + tail
Parameters
----------
extent: int
trip count of the original compute
block: int
size of the block
Returns
----------
out: tuple of (chunks, block, tail)
chunks = ceildiv(extent, block)
tail = number of original elements in the last chunk (equals block when extent is
divisible by block)
"""
tail = extent % block
chunks = extent // block
if tail == 0:
tail = block
else:
chunks += 1
return chunks, block, tail
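# Worked examples (illustrative, not part of the original module):
assert split_to_chunks(10, 4) == (3, 4, 2)  # 10 = 2 * 4 + 2 -> three chunks, tail of 2
assert split_to_chunks(8, 4) == (2, 4, 4)   # evenly divisible -> the tail equals the block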
def pack_input(Input, layout, batch, chunks, block, original_tail, in_height, in_width):
"""
Adds compute stages for packing of the data at runtime. Extends the channel dimension
to be divisible by a factor of 4 (see the worked example after this function).
This function should be substituted by Schedule.transform_layout() in the future: see
https://github.com/apache/tvm-rfcs/blob/main/rfcs/0039-buffer-physical-layout.md
Parameters
----------
Input: tvm.te.Tensor
Input tensor to be repacked at runtime
layout: string
Layout of the original 4d tensor
NCHW or NHWC are acceptable
batch: int
Batch size
chunks: int
Number of channel chunks in the final tensor
block: int
Size of the channel block
original_tail: int
Number of original channels in the last chunk, i.e. how the original channel count
differs from the blocked one:
original_channels = (chunks - 1) * block + original_tail
in_height: int
Height of the feature map
in_width: int
Width of the feature map
"""
pad_value = tvm.tir.const(0, Input.dtype)
def _reorder_data_nchw(*indices):
condition = []
condition.append(indices[1] == chunks - 1)
condition.append(indices[4] >= original_tail)
condition = tvm.tir.all(*condition)
return tvm.tir.if_then_else(
condition,
pad_value,
Input[indices[0], indices[1] * block + indices[4], indices[2], indices[3]],
)
def _reorder_data_nhwc(*indices):
condition = []
condition.append(indices[3] == chunks - 1)
condition.append(indices[4] >= original_tail)
condition = tvm.tir.all(*condition)
return tvm.tir.if_then_else(
condition,
pad_value,
Input[indices[0], indices[1], indices[2], indices[3] * block + indices[4]],
)
# compute:
if layout == "NCHW":
reordered_data = te.compute(
[batch, chunks, in_height, in_width, block],
_reorder_data_nchw,
name="input_pack",
tag="input_pack",
)
elif layout == "NHWC":
reordered_data = te.compute(
[batch, in_height, in_width, chunks, block],
_reorder_data_nhwc,
name="input_pack",
tag="input_pack",
)
else:
assert False, "Adreno util function pack_input does not accept unknown layout"
return reordered_data
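# Illustrative usage (hypothetical tensor, not part of the original module): repacking a
# 10-channel NCHW input into the blocked NCHW4c form; the two trailing lanes of the last
# chunk are zero-filled.
_pack_input_example = pack_input(
te.placeholder((1, 10, 56, 56), name="x"), "NCHW", 1, 3, 4, 2, 56, 56
)
assert get_const_tuple(_pack_input_example.shape) == (1, 3, 56, 56, 4)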
def pack_filter(
Filter,
layout,
out_chunks,
out_block,
out_original_tail,
in_filter_channels,
in_chunks,
in_block,
in_original_tail,
kernel_h,
kernel_w,
):
"""
Adds compute stages for packing of the filter at runtime. Extends the channel dimensions
to be divisible by a factor of 4 (see the worked example after this function).
This function should be substituted by Schedule.transform_layout() in the future: see
https://github.com/apache/tvm-rfcs/blob/main/rfcs/0039-buffer-physical-layout.md
Parameters
----------
Filter: tvm.te.Tensor
Filter tensor to be repacked at runtime
layout: string
Layout of the original 4d tensor
OIHW, HWOI and HWIO are acceptable
out_chunks: int
Number of chunks for the filters
out_block: int
Size of the block for output channels
out_original_tail: int
Original size of the last chunk of output filters
in_filter_channels: int
Number of filter channels. Might be different from the number of input channels in
the data due to the grouped/depthwise nature
in_chunks: int
Number of input data channel chunks
in_block: int
Size of the block for input data channels
in_original_tail: int
Original size of the last chunk for input data channels
kernel_h: int
Height of the conv2d kernel
kernel_w: int
Width of the conv2d kernel
"""
pad_value = tvm.tir.const(0, Filter.dtype)
def _reorder_weights_depthwise_oihw(*indices):
conditionA = []
conditionA.append(indices[0] == out_chunks - 1)
conditionA.append(indices[4] >= out_original_tail)
conditionAT = tvm.tir.all(*conditionA)
return tvm.tir.if_then_else(
conditionAT,
pad_value,
Filter[indices[0] * out_block + indices[4], indices[1], indices[2], indices[3]],
)
def _reorder_weights_depthwise_hwoi(*indices):
conditionA = []
conditionA.append(indices[2] == out_chunks - 1)
conditionA.append(indices[4] >= out_original_tail)
conditionAT = tvm.tir.all(*conditionA)
return tvm.tir.if_then_else(
conditionAT,
pad_value,
Filter[indices[0], indices[1], indices[2] * out_block + indices[4], indices[3]],
)
def _reorder_weights_oihw(*indices):
conditionA = []
conditionA.append(indices[0] == out_chunks - 1)
conditionA.append(indices[4] >= out_original_tail)
conditionAT = tvm.tir.all(*conditionA)
conditionO = []
conditionO.append(conditionAT)
conditionO.append(indices[1] >= in_chunks * in_block + in_original_tail)
conditionOT = tvm.tir.any(*conditionO)
return tvm.tir.if_then_else(
conditionOT,
pad_value,
Filter[indices[0] * out_block + indices[4], indices[1], indices[2], indices[3]],
)
def _reorder_weights_hwio(*indices):
conditionA = []
conditionA.append(indices[3] == out_chunks - 1)
conditionA.append(indices[4] >= out_original_tail)
conditionAT = tvm.tir.all(*conditionA)
conditionO = []
conditionO.append(conditionAT)
conditionO.append(indices[2] >= in_chunks * in_block + in_original_tail)
conditionOT = tvm.tir.any(*conditionO)
return tvm.tir.if_then_else(
conditionOT,
pad_value,
Filter[indices[0], indices[1], indices[2], indices[3] * out_block + indices[4]],
)
if in_filter_channels == 1:
if layout == "OIHW":
reordered_filter = te.compute(
[out_chunks, in_filter_channels, kernel_h, kernel_w, out_block],
_reorder_weights_depthwise_oihw,
name="filter_pack",
tag="filter_pack",
)
elif layout == "HWOI":
reordered_filter = te.compute(
[kernel_h, kernel_w, out_chunks, in_filter_channels, out_block],
_reorder_weights_depthwise_hwoi,
name="filter_pack",
tag="filter_pack",
)
else:
assert False, "Adreno util function def pack_filter does not accept unknown layout"
else:
if layout == "OIHW":
reordered_filter = te.compute(
[out_chunks, in_filter_channels, kernel_h, kernel_w, out_block],
_reorder_weights_oihw,
name="filter_pack",
tag="filter_pack",
)
elif layout == "HWIO":
reordered_filter = te.compute(
[kernel_h, kernel_w, in_filter_channels, out_chunks, out_block],
_reorder_weights_hwio,
name="filter_pack",
tag="filter_pack",
)
else:
assert False, "Adreno util function def pack_filter does not accept unknown layout"
return reordered_filter
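# Illustrative usage sketch (not part of the original module): packing a
# hypothetical OIHW kernel with 30 output channels, i.e. 8 chunks of block 4
# with an original tail of 2, and 16 input channels (4 chunks of block 4).
# All shapes and channel counts below are assumptions made up for the example.
def _example_pack_filter():
    kernel = te.placeholder((30, 16, 3, 3), name="kernel")  # OIHW
    # Resulting shape: [8, 16, 3, 3, 4], with the tail of the last chunk zero-padded.
    return pack_filter(kernel, "OIHW", 8, 4, 2, 16, 4, 4, 4, 3, 3)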
def expand_spatial_dimensions(
in_height, in_width, kernel_h, kernel_w, dilation_h, dilation_w, padding, stride_h, stride_w
):
"""
Expands spatial dimensions to be divisible by a factor of 4. This allows considerably
better parallel computation on the GPU. The drawback of this solution is a number of
useless computations. In practice the speed-up from the extra parallelism significantly
outweighs the slowdown of the extra compute, so this is a useful approach, at least on GPUs
Parameters
----------
in_height: int
Height of the feature map
in_width: int
Width of the feature map
kernel_h: int
Height of the conv2d kernel
kernel_w: int
Width of the conv2d kernel
dilation_h: int
Vertical dilation of the conv2d kernel
dilation_w: int
Horizontal dilation of the conv2d kernel
padding: tuple or list
Conv2d paddings
stride_h: int
Vertical stride of the conv2d kernel
stride_w: int
Horizontal stride of the conv2d kernel
"""
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height_orig = out_height = simplify(
(in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
)
out_width_orig = out_width = simplify(
(in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
)
# Can the output shape be divided by 2 or even 4?
# If it cannot be divided, it needs to be extended to help the later split.
# Theoretically there should be additional padding of the inputs, but that is optimized
# away by cache_read InferBound. We must pad here to produce exactly the tensor that is
# required for the calculation of the original output size, and not more! Otherwise the
# intermediate tensor might be allocated with smaller sizes while the compute tries to
# fill the expanded one - resulting in a data discrepancy.
# In the case of textures it is not a problem to provide a texture of smaller size because
# 1. It does not matter which values are used for the extra calculations - they are
#    needed only for better utilization of the GPU work groups
# 2. When we request a pixel out of bounds, the texture handles this correctly. As mentioned
#    above, the value itself is not important
if out_height % 2 != 0:
out_height += 1
if out_width % 2 != 0:
out_width += 1
if out_height % 4 != 0:
out_height += 2
if out_width % 4 != 0:
out_width += 2
return out_height_orig, out_height, out_width_orig, out_width
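# Illustrative sketch (not part of the original module): a hypothetical 1x1
# convolution over a 75x75 feature map with stride 1 and no padding. The
# original 75x75 output is expanded to 76x76 so it can later be split by 2 and 4.
def _example_expand_spatial_dimensions():
    return expand_spatial_dimensions(
        in_height=75,
        in_width=75,
        kernel_h=1,
        kernel_w=1,
        dilation_h=1,
        dilation_w=1,
        padding=(0, 0, 0, 0),
        stride_h=1,
        stride_w=1,
    )  # -> (75, 76, 75, 76)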
def add_pad(
data,
layout,
out_height,
out_width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
padding,
stride_h,
stride_w,
):
"""Computes required padding values by the parameters of conv2d and adds
compute for extending of original tensor
Parameters
----------
data: tvm.te.Tensor
5d tensor, the layout of the spatial dimensions is defined by a separate argument
layout: string
Layout of origin 4d tensor
out_height: int
Height of the output feature map
out_width: int
Width of the output feature map
kernel_h: int
Height of the conv2d kernel
kernel_w: int
Width of the conv2d kernel
dilation_h: int
Height dilation value from conv2d attributes
dilation_w: int
Width dilation value from conv2d attributes
padding: list / tuple of n ints
Padding values from conv2d attributes
stride_h: int
Height stride value from conv2d attributes
stride_w: int
Width stride value from conv2d attributes
Returns
-------
Output : tvm.te.Tensor
n-D, the same layout as Input.
"""
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
# compute graph
if layout == "NCHW":
y_axis = 2
x_axis = 3
if len(data.shape) == 4:
_, _, in_height, in_width = data.shape
else:
_, _, in_height, in_width, _ = data.shape
elif layout == "NHWC":
y_axis = 1
x_axis = 2
if len(data.shape) == 4:
_, in_height, in_width, _ = data.shape
else:
_, in_height, in_width, _, _ = data.shape
else:
assert False, "not supported layout in adreno util add_pad"
pad_before = [0, 0, 0, 0, 0]
pad_after = [0, 0, 0, 0, 0]
pad_before[y_axis] = pad_top
pad_before[x_axis] = pad_left
pad_after[y_axis] = pad_down
pad_after[x_axis] = pad_right
# calculation of the actually used input size:
input_latest_w = (out_width - 1) * stride_w + (kernel_w - 1) * dilation_w + 1
input_latest_h = (out_height - 1) * stride_h + (kernel_h - 1) * dilation_h + 1
if input_latest_w < in_width + pad_before[x_axis] + pad_after[x_axis]:
pad_after[x_axis] -= in_width + pad_before[x_axis] + pad_after[x_axis] - input_latest_w
if input_latest_h < in_height + pad_before[y_axis] + pad_after[y_axis]:
pad_after[y_axis] -= in_height + pad_before[y_axis] + pad_after[y_axis] - input_latest_h
if (
pad_before[0] == 0
and pad_before[1] == 0
and pad_before[2] == 0
and pad_before[3] == 0
and pad_after[0] == 0
and pad_after[1] == 0
and pad_after[2] == 0
and pad_after[3] == 0
):
return data
else:
return nn.pad(data, pad_before, pad_after, name="pad_temp")
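# Illustrative sketch (not part of the original module): padding a hypothetical
# NCHW4c tensor for a 3x3 stride-1 convolution with padding 1 on every side.
# With out_height = out_width = 56 the full (1, 1, 1, 1) padding is kept and the
# padded tensor has a 58x58 spatial extent.
def _example_add_pad():
    data = te.placeholder((1, 8, 56, 56, 4), name="data")
    return add_pad(data, "NCHW", 56, 56, 3, 3, 1, 1, (1, 1, 1, 1), 1, 1)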
def bind_data_copy(stage, axis_to_vectorize=None):
"""
Schedules elementwise stages such as data copies or post-ops
Parameters
----------
stage: tvm.te.schedule.Stage
The schedule stage to be processed
axis_to_vectorize: int, optional
Splits the given axis, moves the inner part to the end of the schedule
and enables vectorization over that axis
If the parameter is not given, the schedule is vectorized when the innermost
dim equals 4 (the size of the vector in the texture)
"""
shape = get_const_tuple(stage.op.output(0).shape)
if axis_to_vectorize and len(shape) == 4 and shape[axis_to_vectorize] % 4 == 0:
ax0, ax1, ax2, ax3 = stage.op.axis
if axis_to_vectorize == 1:
oax1, iax1 = stage.split(ax1, factor=4)
stage.reorder(ax0, oax1, ax2, ax3, iax1)
stage.vectorize(iax1)
fused = stage.fuse(ax0, oax1, ax2, ax3)
elif axis_to_vectorize == 3:
oax3, iax3 = stage.split(ax3, factor=4)
stage.reorder(ax0, ax1, ax2, oax3, iax3)
stage.vectorize(iax3)
fused = stage.fuse(ax0, ax1, ax2, oax3)
ftc = numpy.prod(shape) / 4
div = get_div(ftc, 128)
block, thread = stage.split(fused, factor=div)
stage.bind(block, te.thread_axis("blockIdx.z"))
stage.bind(thread, te.thread_axis("threadIdx.z"))
else:
if shape[-1] == 4:
axes = stage.op.axis
fused = stage.fuse(*axes[:-1])
ftc = numpy.prod(shape[:-1])
div = get_div(ftc, 64)
block, thread = stage.split(fused, factor=div)
stage.bind(block, te.thread_axis("blockIdx.x"))
stage.bind(thread, te.thread_axis("threadIdx.x"))
stage.vectorize(axes[-1])
else:
ftc = numpy.prod(shape)
vthread = get_div(ftc, 8)
fused = stage.fuse(*stage.op.axis)
ftc = ftc / vthread
# 1024 is the maximum work group size on most Adreno GPUs
num_thread = get_div(ftc, 1024 // vthread)
a, b = stage.split(fused, factor=num_thread)
a, c = stage.split(a, factor=vthread)
stage.bind(c, te.thread_axis("vthread"))
stage.bind(a, te.thread_axis("blockIdx.x"))
stage.bind(b, te.thread_axis("threadIdx.x"))
def get_texture_storage(shape):
"""
Returns the texture layout acceptable for the shape
Parameters
----------
shape: array
Shape of the tensor to be packed to texture
"""
# A limitation of Qualcomm devices. Ideally it should be determined for each device
# individually, but until we have access to the remote device during compilation, we have
# to define it uniformly for all target devices
# limit = 16384
limit = tvm.target.Target.current().attrs["texture_spatial_limit"]
if shape[0] * shape[1] * shape[2] < limit and shape[3] < limit:
return "global.texture"
elif shape[0] * shape[1] < limit and shape[2] * shape[3] < limit:
return "global.texture-nhwc"
else:
return "global.texture-weight"
@register_func("tvm.info.mem.global.texture")
@register_func("tvm.info.mem.global.texture-nhwc")
@register_func("tvm.info.mem.global.texture-weight")
def mem_info_global_texture_variants():
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=16,
max_num_bits=16384 * 16384 * 4 * 32,
max_simd_bits=4 * 32,
head_address=None,
)
def infer_tile_size(data, layout):
"""Compute the tile size for Winograd algorithm
Parameters
----------
data: tvm.te.Tensor
Data tensor
layout: string
Layout of the data tensor
NCHW, NCHW4c, NHWC or NHWC4c are acceptable
Returns
-------
tile_size : int
Calculated tile size
"""
assert layout in ("NCHW", "NCHW4c", "NHWC", "NHWC4c"), "Incompatible layout"
if layout in ("NCHW", "NCHW4c"):
H = get_const_tuple(data.shape)[2]
else:
H = get_const_tuple(data.shape)[1]
if H % 8 == 0:
return 4
return 2
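# Illustrative sketch (not part of the original module): for a hypothetical
# NCHW4c input whose height (56) is divisible by 8, the Winograd tile size
# resolves to 4.
def _example_infer_tile_size():
    data = te.placeholder((1, 32, 56, 56, 4), name="data")
    return infer_tile_size(data, "NCHW4c")  # -> 4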
def get_default_conv2d_config(cfg, fc, y, x):
"""Defines conv2d default parameters for split axis for Adreno conv2d and depthwise conv2d"""
# look for vthread params:
vy = 1
for n in range(5, 0, -1):
if y % n == 0:
vy = n
break
vx = 1
for n in range(5, 0, -1):
if x % n == 0 and vy * n < 9:
vx = n
break
y = y // vy
x = x // vx
tfc = 1
for n in range(64, 0, -1):
if fc % n == 0:
tfc = n
break
ty = 1
for n in range(16, 0, -1):
if y % n == 0 and tfc * n <= 512:
ty = n
break
tx = 1
for n in range(16, 0, -1):
if x % n == 0 and tfc * ty * n <= 512:
tx = n
break
fc = fc // tfc
y = y // ty
x = x // tx
cfg["tile_fc"] = SplitEntity([fc, 1, tfc])
cfg["tile_y"] = SplitEntity([y, vy, ty])
cfg["tile_x"] = SplitEntity([x, vx, tx])
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Argwhere operator"""
import tvm
from tvm.te import hybrid
@hybrid.script
def hybrid_argwhere_1d(output_shape, condition):
"""Find the indices of elements of a 1-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
1-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
valid_index = 0
for i1 in range(a1):
if condition[i1] != 0:
a[valid_index, 0] = i1
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_2d(output_shape, condition):
"""Find the indices of elements of a 2-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
2-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
if condition[i1, i2] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_3d(output_shape, condition):
"""Find the indices of elements of a 3-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
3-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
if condition[i1, i2, i3] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_4d(output_shape, condition):
"""Find the indices of elements of a 4-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
4-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
a4 = condition.shape[3]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
for i4 in range(a4):
if condition[i1, i2, i3, i4] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
a[valid_index, 3] = i4
valid_index += 1
return a
@hybrid.script
def hybrid_argwhere_5d(output_shape, condition):
"""Find the indices of elements of a 5-D tensor that are non-zero.
Parameters
----------
condition : tvm.te.Tensor
5-D tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
a = output_tensor(output_shape, "int32")
a1 = condition.shape[0]
a2 = condition.shape[1]
a3 = condition.shape[2]
a4 = condition.shape[3]
a5 = condition.shape[4]
valid_index = 0
for i1 in range(a1):
for i2 in range(a2):
for i3 in range(a3):
for i4 in range(a4):
for i5 in range(a5):
if condition[i1, i2, i3, i4, i5] != 0:
a[valid_index, 0] = i1
a[valid_index, 1] = i2
a[valid_index, 2] = i3
a[valid_index, 3] = i4
a[valid_index, 4] = i5
valid_index += 1
return a
@tvm.target.generic_func
def argwhere(output_shape, condition):
"""Find the indices of elements of a tensor that are non-zero.
Parameters
----------
output_shape : tvm.te.Tensor
Tensor whose shape defines the output buffer, i.e. [max number of non-zero elements, rank of condition].
condition : tvm.te.Tensor
Tensor with boolean values.
Returns
-------
out : tvm.te.Tensor
Indices of non-zero elements.
"""
if len(condition.shape) == 1:
return hybrid_argwhere_1d(output_shape.shape, condition)
if len(condition.shape) == 2:
return hybrid_argwhere_2d(output_shape.shape, condition)
if len(condition.shape) == 3:
return hybrid_argwhere_3d(output_shape.shape, condition)
if len(condition.shape) == 4:
return hybrid_argwhere_4d(output_shape.shape, condition)
if len(condition.shape) == 5:
return hybrid_argwhere_5d(output_shape.shape, condition)
raise ValueError("Does not support rank higher than 5 in argwhere")
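# Illustrative sketch (not part of the original module): building the generic
# argwhere compute for a hypothetical 3x4 condition tensor, assuming at most
# 12 non-zero elements. output_shape is a tensor whose shape describes the
# (num_nonzero, rank) result buffer.
def _example_argwhere():
    condition = tvm.te.placeholder((3, 4), dtype="int32", name="condition")
    out_buf = tvm.te.placeholder((12, 2), dtype="int32", name="out_buf")
    return argwhere(out_buf, condition)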
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Schedule for ARM CPU"""
from .conv1d import *
from .conv2d import *
from .depthwise_conv2d import *
from .conv2d_transpose import *
from .conv2d_int8 import *
from . import conv2d_alter_op
from .bitserial_conv2d import *
from .bitserial_dense import *
from .injective import *
from .group_conv2d import *
from .pooling import *
from .dense import *
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/arm_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Arm target utility functions"""
from tvm.target import Target
def get_tiling_B_interleaved_t(interleave_A):
"""Compute the tiling information for matrix B', where B'
is the transposed and interleaved version of matrix B in C=A*B.
The tiling information is chosen to maximize register usage during the
tile computation.
For more information, please refer to:
- https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long
- https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product
- https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction
- Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h
Parameters
----------
interleave_A: bool
determines if A is expected to be interleaved
Returns
----------
tile_rows_B: the output tile rows of B'
tile_cols_B: the output tile columns of B'
"""
target = Target.current(allow_none=False)
if target.features.has_matmul_i8:
# If smmla/ummla is available, A must be interleaved.
# Each load from B' will contain 8 elements
# and we are loading 12 rows of B' (i.e., 12 columns of B)
tile_rows_B = 12
tile_cols_B = 8
elif target.features.has_dotprod:
# The number of tile rows of B' vary depending on the
# strategy:
# * If we are interleaving A, then we select 12 columns from B'(i.e.,
# 12 rows from B).
# * If we are not interleaving A, then we select 16 columns from B'(i.e.,
# 16 rows from B).
tile_rows_B = 12 if interleave_A else 16
# The dot-product instruction groups 2 (u)int16x8 vectors in
# groups of 4 and computes the dot product among those groups.
# This means that the number of columns in a tile of B' (i.e., the
# rows of the original matrix B) needs to be 4.
tile_cols_B = 4
else:
# If no acceleration is available, A must be interleaved. In this case
# we load 4 rows of B' (i.e., 4 columns of B). Each of them will contain 16 elements
tile_rows_B = 4
tile_cols_B = 16
return tile_rows_B, tile_cols_B
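# Illustrative sketch (not part of the original module): querying the tiling
# under a hypothetical AArch64 target with the dot-product extension enabled.
# The target string below is an assumption chosen for the example.
def _example_get_tiling_B_interleaved_t():
    target = Target("llvm -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod")
    with target:
        # With dotprod available and a non-interleaved A, 16 columns of B'
        # are selected, each tile being 4 elements deep.
        return get_tiling_B_interleaved_t(interleave_A=False)  # -> (16, 4)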
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,invalid-name,unused-argument
"""Bitserial conv2d schedule on arm cpu"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm import relay
from ..nn.pad import pad
from ..nn.bitserial_conv2d import bitserial_conv2d_legalize
from ..nn.bitserial_util import bitpack, binary_op_multiplier
from ..nn.utils import get_pad_tuple
from ..utils import get_const_int, get_const_tuple, traverse_inline
def _kernel_vec_spatial_pack_nhwc(kernel, kernel_bits, VC, use_bitpack=True):
if use_bitpack:
kernel_q = bitpack(kernel, kernel_bits, pack_axis=2, bit_axis=2, pack_type="uint8")
else:
kernel_q = kernel
KH, KW, KB, CI, CO = kernel_q.shape
kvshape = (CO // VC, KH, KW, KB, VC, CI)
return te.compute(
kvshape,
lambda co, dh, dw, b, vc, ci: kernel_q[dh][dw][b][ci][co * VC + vc],
name="kernel_vec",
)
@autotvm.register_topi_compute("bitserial_conv2d_nhwc.arm_cpu")
def bitserial_conv2d_nhwc(
cfg,
data,
kernel,
stride,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
):
"""Compute convolution with pack on spatial axes."""
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
assert pack_dtype == "uint8", "only support packing into uint8 bits"
assert out_dtype == "int16", "only support output type of int16"
N, H, W, CI = get_const_tuple(data.shape)
if len(kernel.shape) == 4:
KH, KW, _, CO = get_const_tuple(kernel.shape)
CI_packed = CI // 8
else:
KH, KW, KB, CI_packed, CO = get_const_tuple(kernel.shape)
if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
else:
TPAD, LPAD, DPAD, RPAD = padding
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH - 1, KW - 1
PAD_H = H + (TPAD + DPAD)
PAD_W = W + (LPAD + RPAD)
OH = (PAD_H - KH) // HSTR + 1
OW = (PAD_W - KW) // WSTR + 1
oshape = (1, OH, OW, CO)
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
# Pad input channels of weights and data when it is not a multiple of 8
if CI_packed % 8 != 0:
CI_PAD = CI_packed % 8
CI_packed += CI_PAD
else:
CI_PAD = 0
# ==================== define configuration space ====================
n, oh, ow, co = cfg.axis(N), cfg.axis(OH), cfg.axis(OW), cfg.axis(CO)
ci, kh, kw = cfg.reduce_axis(CI_packed), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
ib, kb = cfg.reduce_axis(activation_bits), cfg.reduce_axis(weight_bits)
co, vc = cfg.define_split("tile_co", co, num_outputs=2, filter=lambda x: x.size[-1] == 8)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2, filter=lambda x: x.size[-1] >= 2)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda x: x.size[-1] >= 2)
ci_o, ci_i = cfg.define_split(
"tile_ci", ci, num_outputs=2, filter=lambda x: x.size[-1] == 8 or x.size[-1] == 16
)
re_axes = cfg.define_reorder(
"reorder_0",
[n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
policy="candidate",
candidate=[
[n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i],
[n, oh, ow, co, vh, vw, kw, kh, ci_o, kb, ib, vc, ci_i],
],
)
# binary ops
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW * binary_op_multiplier(pack_dtype))
# ====================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
data_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=3, pack_type="uint8")
kernel_vec = _kernel_vec_spatial_pack_nhwc(kernel, weight_bits, VC, len(kernel.shape) == 4)
idxm = tvm.tir.indexmod
if idxm(kernel_vec.shape[-1], 8) != 0 and CI_PAD != 0:
kernel_vec = pad(kernel_vec, [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, CI_PAD])
N, H, W, IB, CI = data_q.shape
OCO, KH, KW, KB, VC, CI = kernel_vec.shape
dvshape = (
N,
PAD_H // (VH * HSTR),
PAD_W // (VW * WSTR),
VH * HSTR + HCAT,
VW * WSTR + WCAT,
IB,
CI,
)
ovshape = (1, OH // VH, OW // VW, CO // VC, VH, VW, VC)
if TPAD != 0 and RPAD != 0:
data_pad = pad(data_q, (0, TPAD, LPAD, 0, 0), (0, DPAD, RPAD, 0, CI_PAD), name="data_pad")
elif CI_PAD != 0:
data_pad = pad(data_q, (0, 0, 0, 0, 0), (0, 0, 0, 0, CI_PAD), name="data_pad")
else:
data_pad = data_q
data_vec = te.compute(
dvshape,
lambda n, h, w, vh, vw, b, ci: data_pad[n][h * VH * HSTR + vh][w * VW * WSTR + vw][b][ci],
name="data_vec",
)
ci = te.reduce_axis((0, CI), name="ci")
dh = te.reduce_axis((0, KH), name="dh")
dw = te.reduce_axis((0, KW), name="dw")
ib = te.reduce_axis((0, IB), name="ib")
kb = te.reduce_axis((0, KB), name="kb")
def _bipolar_conv(n, h, w, co, vh, vw, vc):
return te.sum(
(
tvm.tir.popcount(
kernel_vec[co, dh, dw, kb, vc, ci].astype("uint16")
& data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci].astype("uint16")
)
<< (kb + ib).astype("uint16")
),
axis=[dh, dw, kb, ib, ci],
)
def _unipolar_conv(n, h, w, co, vh, vw, vc):
return te.sum(
(
(
tvm.tir.popcount(
kernel_vec[co, dh, dw, kb, vc, ci].astype("int16")
& data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci].astype("int16")
)
- tvm.tir.popcount(
~kernel_vec[co, dh, dw, kb, vc, ci].astype("int16")
& data_vec[n, h, w, vh * HSTR + dh, vw * WSTR + dw, ib, ci]
).astype("int16")
)
<< (kb + ib).astype("int16")
),
axis=[dh, dw, kb, ib, ci],
)
if unipolar:
conv_vec = te.compute(ovshape, _unipolar_conv, name="conv_vec", tag="unipolar")
else:
conv_vec = te.compute(ovshape, _bipolar_conv, name="conv_vec", tag="bipolar")
conv = te.compute(
oshape,
lambda n, h, w, co: conv_vec[
n, idxd(h, VH), idxd(w, VW), idxd(co, VC), idxm(h, VH), idxm(w, VW), idxm(co, VC)
].astype(out_dtype),
name="conv",
tag="spatial_bitserial_conv_nhwc",
)
return conv
def _intrin_popcount(m, k_i, w_b, x_b, unipolar):
pack_dtype = "uint8"
w = te.placeholder((w_b, m, k_i), dtype=pack_dtype, name="w")
x = te.placeholder(
(
x_b,
k_i,
),
dtype=pack_dtype,
name="x",
)
k = te.reduce_axis((0, k_i), name="k")
bw = te.reduce_axis((0, w_b), name="bw")
bx = te.reduce_axis((0, x_b), name="bx")
if unipolar:
dtype = "int16"
z = te.compute(
(m,),
lambda i: te.sum(
(
tvm.tir.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
- tvm.tir.popcount(~w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
)
<< (bw + bx).astype(dtype),
axis=[bw, bx, k],
),
name="z",
)
else:
dtype = "uint16"
z = te.compute(
(m,),
lambda i: te.sum(
tvm.tir.popcount(w[bw, i, k].astype(dtype) & x[bx, k].astype(dtype))
<< (bw + bx).astype(dtype),
axis=[bw, bx, k],
),
name="z",
)
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=k_i, strides=[te.var("ldw"), te.var("ldw"), 1]
) # stride can be inferred
Xb = tvm.tir.decl_buffer(
x.shape, x.dtype, name="X", offset_factor=k_i, strides=[te.var("ldw"), 1]
)
Zb = tvm.tir.decl_buffer(z.shape, z.dtype, name="Z", offset_factor=1, strides=[1])
def _intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
args_2 = tvm.tir.const(2, "uint32")
if unipolar:
vpadd = "llvm.arm.neon.vpadd.v8i8"
vpadalu = "llvm.arm.neon.vpadals.v16i8.v8i16"
full_dtype = "int8x16"
half_dtype = "int8x8"
return_dtype = "int16x8"
else:
vpadd = "llvm.arm.neon.vpadd.v8u8"
vpadalu = "llvm.arm.neon.vpadalu.v16u8.v8u16"
full_dtype = "uint8x16"
half_dtype = "uint8x8"
return_dtype = "uint16x8"
def _instr(index):
irb = tvm.tir.ir_builder.create()
if index == 1: # reduce reset
irb.emit(zz.vstore(0, tvm.tir.const(0, return_dtype)))
return irb.get()
# body and reduce update
cnts8 = [None] * 8
cnts4 = [None] * 4
cnts2 = [None] * 2
for bw in range(w_b):
for bx in range(x_b):
if k_i == 16:
for i in range(m):
w_ = ww.vload([bw, i, 0], "uint8x16").astype(full_dtype)
x_ = xx.vload([bx, 0], "uint8x16").astype(full_dtype)
if unipolar:
cnts = tvm.tir.popcount(w_ & x_) - tvm.tir.popcount(~w_ & x_)
else:
cnts = tvm.tir.popcount(w_ & x_)
upper_half = tvm.tir.call_intrin(half_dtype, "tir.vectorhigh", cnts)
lower_half = tvm.tir.call_intrin(half_dtype, "tir.vectorlow", cnts)
cnts8[i] = upper_half + lower_half
for i in range(m // 2):
cnts4[i] = tvm.tir.call_llvm_pure_intrin(
half_dtype, vpadd, args_2, cnts8[i * 2], cnts8[i * 2 + 1]
)
for i in range(m // 4):
cnts2[i] = tvm.tir.call_llvm_pure_intrin(
half_dtype, vpadd, args_2, cnts4[i * 2], cnts4[i * 2 + 1]
)
cnts = tvm.tir.call_intrin(
full_dtype, "tir.vectorcombine", cnts2[0], cnts2[1]
)
shifted_cnts = cnts << tvm.tir.const(bw + bx, pack_dtype)
out = tvm.tir.call_llvm_pure_intrin(
return_dtype, vpadalu, args_2, zz.vload(0, return_dtype), shifted_cnts
)
else: # ki == 8
for i in range(m):
w_ = ww.vload([bw, i, 0], "uint8x8").astype(half_dtype)
x_ = xx.vload([bx, 0], "uint8x8").astype(half_dtype)
if unipolar:
cnts8[i] = tvm.tir.popcount(w_ & x_) - tvm.tir.popcount(~w_ & x_)
else:
cnts8[i] = tvm.tir.popcount(w_ & x_)
for i in range(m // 2):
cnts4[i] = tvm.tir.call_llvm_pure_intrin(
half_dtype, vpadd, args_2, cnts8[i * 2], cnts8[i * 2 + 1]
)
for i in range(m // 4):
cnts2[i] = tvm.tir.call_llvm_pure_intrin(
half_dtype, vpadd, args_2, cnts4[i * 2], cnts4[i * 2 + 1]
)
cnts = tvm.tir.call_intrin(
full_dtype, "tir.vectorcombine", cnts2[0], cnts2[1]
)
shifted_cnts = cnts << tvm.tir.const(bw + bx, pack_dtype)
out = tvm.tir.call_llvm_pure_intrin(
return_dtype, vpadalu, args_2, zz.vload(0, return_dtype), shifted_cnts
)
irb.emit(zz.vstore(0, out))
return irb.get()
# body, reset, update
return _instr(0), _instr(1), _instr(2)
buffer_params = {"offset_factor": 1}
return te.decl_tensor_intrin(
z.op, _intrin_func, binds={w: Wb, x: Xb, z: Zb}, default_buffer_params=buffer_params
)
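# Illustrative sketch (not part of the original module): declaring the popcount
# microkernel as a standalone tensor intrinsic. The tile sizes below (8 output
# lanes, a 16-element reduction, single-bit weights and activations, unipolar
# mode) are assumptions chosen for the example; in the real schedules these
# values come from the AutoTVM config.
def _example_intrin_popcount():
    return _intrin_popcount(m=8, k_i=16, w_b=1, x_b=1, unipolar=True)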
# ARM-specific schedule that uses a custom microkernel
def _schedule_spatial_conv2d_nhwc(
cfg, s, data_pad, data_vec, kernel_vec, conv_out, output, last, unipolar
):
_, _, _, _, _, IB, CI = data_vec.shape
_, KH, KW, KB, _, _ = kernel_vec.shape
KB = get_const_int(KB)
IB = get_const_int(IB)
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
##### Schedule data padding and packing
if data_pad is not None:
s[data_pad].compute_inline()
_, h, _, _, _, _, _ = s[data_vec].op.axis
cfg.define_split("tile_ah", cfg.axis(h), num_outputs=2, max_factor=32)
oh, ih = cfg["tile_ah"].apply(s, data_vec, h)
s[data_vec].parallel(oh)
#### Schedule kernel packing
co, _, _, _, _, _ = s[kernel_vec].op.axis
cfg.define_split("tile_bco", cfg.axis(co), num_outputs=2, max_factor=32)
oco, ico = cfg["tile_bco"].apply(s, kernel_vec, co)
s[kernel_vec].parallel(oco)
##### Schedule Convolution
n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
kh, kw, kb, ib, ci = s[conv_out].op.reduce_axis
ci_o, ci_i = cfg["tile_ci"].apply(s, conv_out, ci)
re_axes = cfg["reorder_0"].apply(
s, conv_out, [n, oh, ow, co, vh, vw, kh, kw, ci_o, kb, ib, vc, ci_i]
)
# Use microkernel
kfactor = cfg["tile_ci"].size[1]
if kfactor % 8 == 0:
pc = _intrin_popcount(VC, kfactor, KB, IB, unipolar)
s[conv_out].tensorize(kb, pc)
n, h, w, co = s[last].op.axis
co, vc = cfg["tile_co"].apply(s, last, co)
oh, vh = cfg["tile_oh"].apply(s, last, h)
ow, vw = cfg["tile_ow"].apply(s, last, w)
s[last].reorder(n, oh, ow, co, vh, vw, vc)
s[last].vectorize(vc)
if last != output:
s[output].compute_inline()
s[conv_out].compute_at(s[last], co)
s[last].parallel(oh)
return s
@autotvm.register_topi_schedule("bitserial_conv2d_nhwc.arm_cpu")
def schedule_bitserial_conv2d_nhwc(cfg, outs):
"""Arm cpu schedule for bitserial conv2d"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "spatial_bitserial_conv_nhwc" in op.tag:
output = op.output(0)
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[0]
data_vec = conv_out.op.input_tensors[1]
data_q = data_vec.op.input_tensors[0]
data = data_q.op.input_tensors[0]
data_pad = None
if isinstance(data_q.op, te.tensor.ComputeOp) and "pad" in data_q.op.tag:
data_pad = data_q
data_q = data
data = data.op.input_tensors[0]
unipolar = "unipolar" in conv_out.op.tag
_schedule_spatial_conv2d_nhwc(
cfg, s, data_pad, data_vec, kernel_vec, conv_out, output, outs[0], unipolar
)
traverse_inline(s, outs[0].op, _callback)
return s
@bitserial_conv2d_legalize.register("arm_cpu")
def _bitserial_conv2d_legalize(attrs, inputs, arg_types):
"""Legalizes Bitserial Conv2D op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
arg_types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Fix different kernel layouts where possible.
if attrs["data_layout"] == "NHWC":
data, kernel = inputs
if len(kernel.data.shape) == 4:
# HWIO layout is expected for NHWC input.
if attrs["kernel_layout"] == "HWOI":
# Handle HWOI layout. This is common in TF depthwise conv2d graph.
kernel = relay.transpose(kernel, axes=(0, 1, 3, 2))
elif attrs["kernel_layout"] == "OIHW":
kernel = relay.transpose(kernel, axes=(2, 3, 1, 0))
## Set new attrs for the transposed conv.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["kernel_layout"] = "HWIO"
conv = relay.nn.bitserial_conv2d(data, kernel, **new_attrs)
return conv
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, invalid-name, too-many-locals, too-many-arguments
"""Schedule for bitserial dense operator."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.utils import get_const_tuple
from .. import tag
from .bitserial_conv2d import _intrin_popcount
from ..nn.pad import pad
from ..nn.bitserial_util import bitpack, binary_op_multiplier
@autotvm.register_topi_compute("bitserial_dense.arm_cpu")
def bitserial_dense(cfg, data, weight, data_bits, weight_bits, pack_dtype, out_dtype, unipolar):
"""The default implementation of bitserial dense in topi.
Parameters
----------
data : tvm.te.Tensor
2-D with shape [batch, in_dim]
weight : tvm.te.Tensor
2-D with shape [out_dim, in_dim]
Returns
-------
output : tvm.te.Tensor
2-D with shape [batch, out_dim]
"""
data_packed = bitpack(data, data_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
if len(weight.shape) == 2:
weight_packed = bitpack(weight, weight_bits, pack_axis=1, bit_axis=1, pack_type=pack_dtype)
else:
weight_packed = weight
batch, DB, in_dim = get_const_tuple(data_packed.shape)
out_dim, WB, in_dim = get_const_tuple(weight_packed.shape)
# Pad Inputs so that microkernel can be used
# out_dim and in_dim need to be multiples of 8
if out_dim % 8 != 0:
out_dim_pad = out_dim % 8
data_packed = pad(data_packed, [0, 0, 0], [out_dim_pad, 0, 0], name="PaddedInput")
out_dim += out_dim_pad
######## Search space
x, y = cfg.axis(batch), cfg.axis(out_dim)
db, wb, k = cfg.reduce_axis(DB), cfg.reduce_axis(WB), cfg.reduce_axis(in_dim)
ko, ki = cfg.define_split(
"tile_k", k, num_outputs=2, filter=lambda xx: xx.size[-1] == 8 or xx.size[-1] == 16
)
xo, xi = cfg.define_split("tile_x", x, num_outputs=2)
yo, yi = cfg.define_split("tile_y", y, num_outputs=2, filter=lambda xx: xx.size[-1] == 8)
cfg.define_reorder(
"reorder_0",
[yo, xo, ko, xi, wb, db, yi, ki],
policy="candidate",
candidate=[
[yo, xo, ko, xi, wb, db, yi, ki],
[yo, xo, xi, ko, wb, db, yi, ki],
[yo, xo, ko, xi, wb, db, yi, ki],
],
)
###### Compute rule
VY = cfg["tile_y"].size[-1]
VK = cfg["tile_k"].size[-1]
wvshape = (out_dim // VY, in_dim // VK, WB, VY, VK)
oshape = (batch, out_dim)
k = te.reduce_axis((0, in_dim), name="k")
db = te.reduce_axis((0, DB), name="db")
wb = te.reduce_axis((0, WB), name="wb")
# Tile data and weights
weight_vec = te.compute(
wvshape,
lambda yo, ko, wb, vy, vk: weight_packed[yo * VY + vy][wb][ko * VK + vk],
name="weight_vec",
)
matmul_unipolar = te.compute(
oshape,
lambda x, y: te.sum(
(
tvm.tir.popcount(
weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
- tvm.tir.popcount(
~weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
)
<< (wb + db).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense_unipolar",
)
matmul = te.compute(
oshape,
lambda x, y: te.sum(
tvm.tir.popcount(
weight_vec[y // VY, k // VK, wb, y % VY, k % VK].astype(out_dtype)
& data_packed[x, db, k].astype(out_dtype)
)
<< (wb + db).astype(out_dtype),
axis=[wb, db, k],
),
tag="bitserial_dense",
)
cfg.add_flop(batch * out_dim * in_dim * binary_op_multiplier(pack_dtype))
if unipolar:
return matmul_unipolar
return matmul
@autotvm.register_topi_schedule("bitserial_dense.arm_cpu")
def schedule_bitserial_dense(cfg, outs):
"""Schedule for binary_dense.
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial dense operator.
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for bitserial_dense.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data_vec, weight_vec, output, unipolar):
z, k, _, y, x = s[weight_vec].op.axis
s[weight_vec].parallel(z)
s[weight_vec].vectorize(x)
x, y = s[output].op.axis
wb, db, k = s[output].op.reduce_axis
_, DB, _ = get_const_tuple(data_vec.shape)
_, _, WB, _, _ = get_const_tuple(weight_vec.shape)
yo, yi = cfg["tile_y"].apply(s, output, y)
xo, xi = cfg["tile_x"].apply(s, output, x)
ko, ki = cfg["tile_k"].apply(s, output, k)
cfg["reorder_0"].apply(s, output, [yo, xo, ko, xi, wb, db, yi, ki])
fused = s[output].fuse(xo, yo)
s[output].parallel(fused)
nfactor = cfg["tile_y"].size[-1]
kfactor = cfg["tile_k"].size[-1]
if nfactor % 8 == 0:
pc = _intrin_popcount(nfactor, kfactor, WB, DB, unipolar)
s[output].tensorize(wb, pc)
return s
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag) or "elemwise" in op.tag:
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp):
traverse(tensor.op)
elif op.tag == "bitserial_dense" or op.tag == "bitserial_dense_unipolar":
output = op.output(0)
weight_vec = op.input_tensors[0]
data_vec = op.input_tensors[1]
data = data_vec.op.input_tensors[0]
if "QuantizeInput" in data.op.name:
data = data.op.input_tensors[0]
unipolar = output.op.tag == "bitserial_dense_unipolar"
_schedule(cfg, s, data_vec, weight_vec, output, unipolar)
else:
raise RuntimeError("Unsupported operator: %s" % op.tag)
traverse(outs[0].op)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Conv1D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
from tvm import autotvm
from .mprofile.dsp.conv1d import (
conv1d_nwc_dsp_compute,
conv1d_nwc_dsp_schedule,
)
@autotvm.register_topi_compute("conv1d_nwc_dsp.arm_cpu")
def conv1d_nwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv1d with v7e-m DSP instructions."""
return conv1d_nwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv1d_nwc_dsp.arm_cpu")
def schedule_conv1d_nwc_dsp(cfg, outs):
return conv1d_nwc_dsp_schedule(cfg, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Conv2D schedule for ARM CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
import tvm.contrib.nnpack
from ..utils import traverse_inline, get_const_tuple
from .. import nn
from ..nn.utils import get_const_int, get_pad_tuple
from ..nn.winograd_util import winograd_transform_matrices
from .conv2d_spatial_pack import (
conv2d_spatial_pack_nchw,
conv2d_spatial_pack_nhwc,
schedule_conv2d_spatial_pack_nchw,
schedule_conv2d_spatial_pack_nhwc,
)
from .mprofile.dsp.conv2d import (
conv2d_nhwc_dsp_compute,
conv2d_nhwc_dsp_schedule,
)
from .mprofile.dsp.tensordot_conv2ds import (
conv2d_nhwc_ohwi_dsp_compute,
tensordot_conv2ds_schedule,
)
@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.arm_cpu")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NCHW layout"""
return conv2d_spatial_pack_nchw(
cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2
)
@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.arm_cpu")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
"""Create schedule for conv2d_nchw"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
# schedule conv2d
if "spatial_conv2d_output" in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
s[data_pad].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == "kernel_vec":
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc_spatial_pack.arm_cpu")
def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NHWC layout"""
return conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_spatial_pack.arm_cpu")
def schedule_conv2d_nhwc_spatial_pack(cfg, outs):
"""Create schedule for conv2d_nhwc"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "spatial_conv_output_NHWC" in op.tag:
schedule_conv2d_spatial_pack_nhwc(cfg, s, op, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nchw_winograd.arm_cpu")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nchw layout using Winograd with weight transform"""
tile_size = 4
return _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size)
@autotvm.register_topi_schedule("conv2d_nchw_winograd.arm_cpu")
def schedule_conv2d_nchw_winograd(cfg, outs):
"""Create schedule for conv2d_nchw_winograd"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "winograd_conv2d_output" in op.tag:
output = op.output(0)
_schedule_winograd(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def _decl_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype, tile_size):
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(IH, int) or not isinstance(IW, int):
raise RuntimeError("ARM winograd conv2d doesn't support dynamic input height or width.")
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
if dilation_h != 1 or dilation_w != 1:
kernel = nn.dilate(kernel, (1, 1, dilation_h, dilation_w))
pre_computed = False
CO, _, KH, KW = get_const_tuple(kernel.shape)
else:
assert (dilation_h, dilation_w) == (1, 1), "Does not support dilation"
pre_computed = True
H_CAT, W_CAT, CO, CI, VC = get_const_tuple(kernel.shape)
CO *= VC
KH, KW = H_CAT - tile_size + 1, W_CAT - tile_size + 1
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
assert KH == 3 and KW == 3 and HSTR == 1 and WSTR == 1
data_pad = nn.pad(data, (0, 0, pt, pl), (0, 0, pb, pr), name="data_pad")
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
r = KW
m = tile_size
alpha = m + r - 1
A, B, G = winograd_transform_matrices(m, r, out_dtype)
K = CO
C = CI
H = (IH + pt + pb - 3) // HSTR + 1
W = (IW + pl + pr - 3) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW
# TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
tile_p = P if isinstance(N, int) else nH * nW
cfg.define_split("tile_p", cfg.axis(tile_p), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
cfg.define_split("tile_k", cfg.axis(K), num_outputs=2, filter=lambda x: x.size[-1] <= 16)
VP = cfg["tile_p"].size[-1]
VK = cfg["tile_k"].size[-1]
# pack input tile
input_tile = te.compute(
(C, idxd(P, VP), alpha, alpha, VP),
lambda c, b, eps, nu, bb: data_pad[
idxd(b * VP + bb, nH * nW),
c,
idxm(idxd(b * VP + bb, nW), nH) * m + eps,
idxm(b * VP + bb, nW) * m + nu,
],
name="d",
)
if autotvm.GLOBAL_SCOPE.in_tuning:
VC = cfg["tile_k"].size[-1]
kvshape = (KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), CI, VC)
U = tvm.te.placeholder(kvshape, kernel.dtype, name="U")
else:
# transform kernel
if pre_computed:
U = kernel
else:
r_kh = te.reduce_axis((0, KH), "r_kh")
r_kw = te.reduce_axis((0, KW), "r_kw")
U = te.compute(
(alpha, alpha, idxd(K, VK), C, VK),
lambda eps, nu, k, c, kk: te.sum(
kernel[k * VK + kk][c][r_kh][r_kw].astype(out_dtype)
* G[eps][r_kh]
* G[nu][r_kw],
axis=[r_kh, r_kw],
),
name="U",
)
# transform image
r_eps = te.reduce_axis((0, alpha), "r_eps")
r_nu = te.reduce_axis((0, alpha), "r_nu")
V = te.compute(
(alpha, alpha, idxd(P, VP), C, VP),
lambda eps, nu, b, c, bb: te.sum(
input_tile[c][b][r_eps][r_nu][bb].astype(out_dtype) * B[r_eps][eps] * B[r_nu][nu],
axis=[r_eps, r_nu],
),
name="V",
)
# batch gemm
c = te.reduce_axis((0, C), name="c")
M = te.compute(
(alpha, alpha, K, P),
lambda eps, nu, k, b: te.sum(
U[eps][nu][idxd(k, VK)][c][idxm(k, VK)] * V[eps][nu][idxd(b, VP)][c][idxm(b, VP)],
axis=c,
),
name="M",
)
# inverse transform
r_eps = te.reduce_axis((0, alpha), "r_eps")
r_nu = te.reduce_axis((0, alpha), "r_nu")
Y = te.compute(
(K, P, m, m),
lambda k, b, vh, vw: te.sum(
M[r_eps][r_nu][k][b] * A[r_eps][vh] * A[r_nu][vw], axis=[r_eps, r_nu]
),
name="Y",
)
# unpack output
output = te.compute(
(N, K, H, W),
lambda n, k, h, w: Y[k][n * nH * nW + idxd(h, m) * nW + idxd(w, m), idxm(h, m), idxm(w, m)],
name="output",
tag="winograd_conv2d_output",
)
# we have to manually assign effective GFLOP for winograd
if isinstance(N, int):
cfg.add_flop(2 * N * K * H * W * KH * KW * C)
return output
def _schedule_winograd(cfg, s, output, last):
Y = output.op.input_tensors[0]
M, A = Y.op.input_tensors
U, V = M.op.input_tensors
d, B = V.op.input_tensors
data_pad = d.op.input_tensors[0]
# padding
s[data_pad].compute_inline()
# pack input tiles
s[d].compute_inline()
# transform kernel
if isinstance(U.op, tvm.te.ComputeOp):
kernel, G = U.op.input_tensors
s[G].compute_inline()
(
eps,
nu,
k,
c,
kk,
) = s[U].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
# kernel transformation will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[U].pragma(eps, "debug_skip_region")
else:
r_kh, r_kw = s[U].op.reduce_axis
s[U].reorder(k, c, eps, nu, r_kh, r_kw, kk)
for axis in [eps, nu, r_kh, r_kw]:
s[U].unroll(axis)
s[U].vectorize(kk)
s[U].parallel(k)
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
# transform image
DD = s.cache_read(d, "global", [V])
s[B].compute_inline()
eps, nu, b, c, bb = s[V].op.axis
r_eps, r_nu = s[V].op.reduce_axis
s[V].reorder(b, c, eps, nu, r_eps, r_nu, bb)
for axis in [eps, nu, r_eps, r_nu]:
s[V].unroll(axis)
s[DD].compute_at(s[V], c)
s[V].vectorize(bb)
s[V].parallel(b)
# batch gemm
eps, nu, k, b = s[M].op.axis
c = s[M].op.reduce_axis[0]
cfg.define_split("tile_c", c, num_outputs=2, filter=lambda x: x.size[-1] <= 16)
co, ci = cfg["tile_c"].apply(s, M, c)
xo, xi = cfg["tile_p"].apply(s, M, b)
s[M].reorder(eps, nu, xo, co, k, ci, xi)
cfg.define_annotate("ann_reduce", [ci], policy="try_unroll")
cfg.define_annotate("ann_spatial", [k, xi], policy="try_unroll_vec")
cfg["ann_reduce"].apply(s, M, [ci], axis_lens=[cfg["tile_c"].size[-1]], max_unroll=16, cfg=cfg)
cfg["ann_spatial"].apply(s, M, [k, xi])
# inverse transform
s[A].compute_inline()
k, b, vh, vw = s[Y].op.axis
r_eps, r_nu = s[Y].op.reduce_axis
for axis in [vh, vw, r_eps, r_nu]:
s[Y].unroll(axis)
# output
n, co, h, w = s[last].op.axis
co, coi = cfg["tile_k"].apply(s, last, co)
p = s[last].fuse(n, co)
s[M].compute_at(s[last], p)
s[last].parallel(p)
MM = s.cache_read(M, "global", [Y])
m = get_const_int(V.shape[0]) + 1 - 3
ho, wo, hi, wi = s[last].tile(h, w, m, m)
s[Y].compute_at(s[last], wo)
s[MM].compute_at(s[last], wo)
if output != last:
s[output].compute_inline()
@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack.arm_cpu")
def conv2d_nchw_winograd_nnpack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nchw using nnpack Winograd implementation"""
dtype = data.dtype
if dtype == "float32":
return _conv2d_arm_cpu_winograd_nnpack(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8,
)
elif dtype == "float16":
return _conv2d_arm_cpu_winograd_nnpack(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16,
)
else:
raise ValueError("Unsupported data type {} for conv2d winograd nnpack".format(dtype))
@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack.arm_cpu")
def schedule_conv2d_nchw_winograd_nnpack(cfg, outs):
"""Create schedule for conv2d_nchw_winograd_nnpack"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "winograd_nnpack_conv2d_output" in op.tag:
output = op.output(0)
_schedule_winograd_nnpack(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def _conv2d_arm_cpu_winograd_nnpack(
cfg, data, kernel, strides, padding, dilation, out_dtype, convolution_algorithm
):
"""TOPI compute callback. Use winograd NNPACK template"""
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert (dilation_h, dilation_w) == (1, 1)
assert len(kernel.shape) == 4
CO, _, KH, KW = get_const_tuple(kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
assert (
KH == 3
and KW == 3
and pt == 1
and pb == 1
and pl == 1
and pr == 1
and HSTR == 1
and WSTR == 1
)
H = (IH + pt + pb - 3) // HSTR + 1
W = (IW + pl + pr - 3) // WSTR + 1
cfg.define_knob("winograd_nnpack_algorithm", [convolution_algorithm])
assert N == 1
with tvm.te.tag_scope("winograd_nnpack_conv2d_weight_transform"):
transformed_kernel = tvm.contrib.nnpack.convolution_inference_weight_transform(
kernel, algorithm=cfg["winograd_nnpack_algorithm"].val
)
if autotvm.GLOBAL_SCOPE.in_tuning:
transformed_kernel = te.compute(transformed_kernel.shape, lambda *args: 0.0)
with tvm.te.tag_scope("winograd_nnpack_conv2d_output"):
output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
data,
transformed_kernel,
bias=None,
padding=[pt, pb, pl, pr],
stride=[HSTR, WSTR],
algorithm=cfg["winograd_nnpack_algorithm"].val,
)
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
return output
def _schedule_winograd_nnpack(cfg, s, output, last):
# Could have bias.
(X, TK) = output.op.input_tensors[:2]
# transform kernel
assert isinstance(TK.op, (te.tensor.ComputeOp, te.tensor.ExternOp, te.tensor.PlaceholderOp))
if autotvm.GLOBAL_SCOPE.in_tuning and isinstance(TK.op, te.tensor.ComputeOp):
# kernel transformation will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[TK].pragma(s[TK].op.axis[0], "debug_skip_region")
@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu")
def conv2d_nchw_winograd_nnpack_without_weight_transform(
cfg, data, transformed_kernel, bias, strides, padding, dilation, out_dtype
):
"""Compute conv2d_nchw using NNPack winograd without weight transform"""
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
assert (dilation_h, dilation_w) == (1, 1)
assert len(transformed_kernel.shape) == 4
CO, _, _, _ = get_const_tuple(transformed_kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
KH, KW = 3, 3
pt, pl, pb, pr = get_pad_tuple(padding, (KH, KW))
assert (
KH == 3
and KW == 3
and pt == 1
and pb == 1
and pl == 1
and pr == 1
and HSTR == 1
and WSTR == 1
)
H = (IH + pt + pb - 3) // HSTR + 1
W = (IW + pl + pr - 3) // WSTR + 1
assert N == 1
with tvm.te.tag_scope("winograd_nnpack_conv2d_output"):
output = tvm.contrib.nnpack.convolution_inference_without_weight_transform(
data=data,
transformed_kernel=transformed_kernel,
bias=bias,
padding=[pt, pb, pl, pr],
stride=[HSTR, WSTR],
algorithm=cfg["winograd_nnpack_algorithm"].val,
)
# we have to manually assign effective GFLOP for winograd
cfg.add_flop(2 * N * CI * H * W * KH * KW * CO)
return output
@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu")
def schedule_conv2d_nchw_winograd_nnpack_without_weight_transform(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "winograd_nnpack_conv2d_output" in op.tag:
output = op.output(0)
_schedule_winograd_nnpack(cfg, s, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("conv2d_nhwc_dsp.arm_cpu")
def conv2d_nhwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nhwc with v7e-m DSP instructions."""
return conv2d_nhwc_dsp_compute(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_dsp.arm_cpu")
def schedule_conv2d_nhwc_dsp(cfg, outs):
"""Create schedule for conv2d_nhwc_dsp"""
return conv2d_nhwc_dsp_schedule(cfg, outs)
@autotvm.register_topi_compute("conv2d_nhwc_ohwi_dsp.arm_cpu")
def conv2d_nhwc_ohwi_dsp(cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype):
"""Compute conv2d_nhwc_ohwi with v7e-m DSP instructions and the tensordot kernel."""
return conv2d_nhwc_ohwi_dsp_compute(
cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype
)
@autotvm.register_topi_schedule("conv2d_nhwc_ohwi_dsp.arm_cpu")
def schedule_conv2d_nhwc_ohwi_dsp(cfg, outs):
"""Create schedule for conv2d_nhwc_ohwi."""
return tensordot_conv2ds_schedule(cfg, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d_alter_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D alter op and legalize functions for arm cpu"""
import logging
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from ..nn import conv2d_alter_layout, conv2d_legalize
from ..utils import get_const_tuple
from ..x86.conv2d import _get_default_config as _get_x86_default_config
from ..x86.conv2d_int8 import _get_default_config_int8
from .conv2d_int8 import is_int8_hw_support
from .arm_utils import get_tiling_B_interleaved_t
from ..generic.conv2d import conv2d_alter_int8_common
from .mprofile.dsp.micro_kernel.common import num_simd_lanes_per_word
logger = logging.getLogger("topi")
def interleave_transpose_weights(inputs, data, kernel, interleave_A):
"""Transform the weight matrix by reshaping, interleaving and transposing it
Parameters
----------
inputs : tvm.relay.Expr
Grouped input symbols
    data : tvm.te.Tensor
        Input tensor descriptor (shape and dtype)
    kernel : tvm.te.Tensor
        Kernel tensor descriptor (shape and dtype)
    interleave_A : bool
        Indicates if we expect matrix A to be interleaved
    Returns
    -------
new_kernel : tvm.te.placeholder
A placeholder with the new shape
new_kernel_expr : tvm.relay.Expr
The relay expression of the weights
"""
assert (
data.dtype == "int8"
and kernel.dtype == "int8"
or data.dtype == "uint8"
and kernel.dtype == "uint8"
)
KH, KW, IC, OC = get_const_tuple(kernel.shape)
K = KH * KW * IC
N = OC
# Get tiling information for the interleaved transposed version of B
tile_rows_B, tile_cols_B = get_tiling_B_interleaved_t(interleave_A)
pad_K = 0
pad_N = 0
if N % tile_rows_B != 0:
pad_N = tile_rows_B - (N % tile_rows_B)
if K % tile_cols_B != 0:
pad_K = tile_cols_B - (K % tile_cols_B)
N_padded = N + pad_N
K_padded = K + pad_K
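    # Worked example (hypothetical shapes, for illustration only): with tile_rows_B=4
    # and tile_cols_B=16, an HWIO kernel of shape (3, 3, 7, 10) gives K = 63 and N = 10,
    # so pad_K = 16 - 63 % 16 = 1 and pad_N = 4 - 10 % 4 = 2, and the transformed
    # weights below are declared as a (3, 4, 4, 16) placeholder.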
new_kernel_expr = relay.nn.contrib_conv2d_gemm_weight_transform(
inputs[1], tile_rows_B, tile_cols_B
)
new_kernel = te.placeholder(
(N_padded // tile_rows_B, K_padded // tile_cols_B, tile_rows_B, tile_cols_B), kernel.dtype
)
return new_kernel, new_kernel_expr
@conv2d_alter_layout.register(["arm_cpu"])
def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
target = tvm.target.Target.current(allow_none=False)
dispatch_ctx = autotvm.task.DispatchContext.current
_, outs = relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"), attrs, tinfos, out_type, target
)
workload = autotvm.task.get_workload(outs)
if workload is None:
# The best implementation is not an AutoTVM template,
# we then assume it's not necessary to alter this op.
return None
cfg = dispatch_ctx.query(target, workload)
topi_tmpl = workload[0]
new_attrs = {k: attrs[k] for k in attrs.keys()}
strides = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs["data_layout"]
kernel_layout = attrs["kernel_layout"]
data, kernel = tinfos
out_dtype = out_type.dtype
# Extract data types
data_tensor, kernel_tensor = tinfos
data_dtype = data_tensor.dtype
kernel_dtype = kernel_tensor.dtype
idxd = tvm.tir.indexdiv
if topi_tmpl == "depthwise_conv2d_nhwc_dsp.arm_cpu":
assert data_layout == "NHWC" and kernel_layout == "HWOI"
# We are not able to check if inputs[1] (the kernel) is a constant in the
# strategy function, so as a stopgap solution we use an assert here.
assert isinstance(
inputs[1], relay.Constant
), "depthwise_conv2d_nhwc_dsp.arm_cpu requires kernel be a relay Constant"
channels = get_const_tuple(data.shape)[3]
KH, KW, _, _ = get_const_tuple(kernel.shape)
simd_lanes = num_simd_lanes_per_word(data.dtype)
HWOI_kernel_np = inputs[1].data.numpy()
CHWc_kernel_np = np.zeros((channels // simd_lanes, KH, KW, simd_lanes), dtype=kernel.dtype)
for i in range(channels // simd_lanes):
CHWc_kernel_np[i] = HWOI_kernel_np[:, :, simd_lanes * i : simd_lanes * (i + 1), 0]
reshaped_new_kernel = CHWc_kernel_np.reshape((KH, KW, channels, 1))
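        # Illustrative example (hypothetical sizes): with channels=8 and simd_lanes=4,
        # the HWOI kernel is split into 2 chunks of 4 channels each, so CHWc_kernel_np
        # has shape (2, KH, KW, 4) and is then reshaped back to (KH, KW, 8, 1) so that
        # Relay still sees an HWOI depthwise kernel.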
# Store the same config for the altered operator (workload)
new_data = data
new_kernel = te.placeholder((KH, KW, channels, 1), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"depthwise_conv2d_nhwc_dsp.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(
inputs[0],
relay.Constant(tvm.nd.array(reshaped_new_kernel)),
**new_attrs,
)
# Only microTVM does layout alteration for NHWC layout with real data types
if data_layout == "NHWC" and data_dtype not in ["uint8", "int8"]:
return None
if topi_tmpl == "conv2d_nchw_spatial_pack.arm_cpu":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
VC = cfg["tile_co"].size[-1]
new_attrs["kernel_layout"] = "OIHW%do" % VC
new_data = data
new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nchw_spatial_pack.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
if topi_tmpl == "conv2d_nhwc_spatial_pack.arm_cpu":
assert (
data.dtype == "int8"
and kernel.dtype == "int8"
or data.dtype == "uint8"
and kernel.dtype == "uint8"
)
assert data_layout == "NHWC" and kernel_layout == "HWIO"
data_expr, kernel_expr = inputs
data_int16 = relay.cast(data_expr, dtype="int16")
kernel_int16 = relay.cast(kernel_expr, dtype="int16")
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_data = te.placeholder(data.shape, "int16")
new_kernel = te.placeholder(kernel.shape, "int16")
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nhwc_spatial_pack.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(data_int16, kernel_int16, **new_attrs)
if topi_tmpl == "conv2d_nchw_winograd.arm_cpu":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
VC = cfg["tile_k"].size[-1]
tile_size = 4
weight_expr = inputs[1]
weight_expr = relay.nn.contrib_conv2d_winograd_weight_transform(
weight_expr, tile_size=tile_size
)
weight_expr = relay.reshape(
weight_expr, newshape=(KH + tile_size - 1, KW + tile_size - 1, CO // VC, VC, CI)
)
weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3])
new_attrs["tile_size"] = tile_size
new_attrs["channels"] = CO
new_data = data
new_kernel = te.placeholder(
(KH + tile_size - 1, KW + tile_size - 1, idxd(CO, VC), CI, VC), kernel.dtype
)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"conv2d_nchw_winograd.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], weight_expr, **new_attrs
)
if topi_tmpl == "conv2d_nchw_winograd_nnpack.arm_cpu":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, _, KH, KW = get_const_tuple(kernel.shape)
new_attrs["channels"] = CO
# pre-compute winograd_nnpack transform
# for winograd_nnpack_fp16, the precompute prune pass must run on device,
# where float16 is supported
weight_dtype = "float32"
weight_expr = inputs[1]
transformed_weight = relay.nn.contrib_conv2d_winograd_nnpack_weight_transform(
weight_expr,
convolution_algorithm=cfg["winograd_nnpack_algorithm"].val,
out_dtype=weight_dtype,
)
new_data = data
new_kernel = te.placeholder((CO, CI, 8, 8), "float32")
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, None, strides, padding, dilation, out_dtype],
"conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_winograd_without_weight_transform(
inputs[0], transformed_weight, **new_attrs
)
if topi_tmpl == "depthwise_conv2d_nchw_spatial_pack.arm_cpu":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
N, CI, H, W = get_const_tuple(data.shape)
CO, M, KH, KW = get_const_tuple(kernel.shape)
VC = cfg["tile_co"].size[-1]
new_attrs["kernel_layout"] = "OIHW%do" % (cfg["tile_co"].size[-1])
# Store the same config for the altered operator (workload)
new_data = data
new_kernel = te.placeholder((idxd(CO, VC), M, KH, KW, VC), dtype=kernel.dtype)
new_workload = autotvm.task.args_to_workload(
[new_data, new_kernel, strides, padding, dilation, out_dtype],
"depthwise_conv2d_nchw_spatial_pack.arm_cpu",
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.conv2d(*inputs, **new_attrs)
if topi_tmpl == "conv2d_NCHWc.x86":
# Converting NCHW to NCHWc.
assert data_layout == "NCHW" and kernel_layout == "OIHW"
if cfg.is_fallback:
_get_x86_default_config(
cfg,
data_tensor,
kernel_tensor,
strides,
padding,
dilation,
out_dtype,
False,
data_layout,
)
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
# update new attrs
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
# (oc, ic, h, w) -> (OC, IC, h, w, ic, oc)
new_attrs["kernel_layout"] = "OIHW%di%do" % (ic_bn, oc_bn)
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder(
(out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn, oc_bn),
dtype=kernel_tensor.dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
if topi_tmpl == "depthwise_conv2d_NCHWc.x86":
# Converting NCHW to NCHWc.
assert data_layout == "NCHW" and kernel_layout == "OIHW"
if cfg.is_fallback:
_get_x86_default_config(
cfg, data_tensor, kernel_tensor, strides, padding, out_dtype, True, data_layout
)
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, channel_multiplier, kh, kw = get_const_tuple(kernel_tensor.shape)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
assert channel_multiplier == 1
# update new attrs
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
new_attrs["kernel_layout"] = "OIHW1i%do" % oc_bn
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config.
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder((out_channel // oc_bn, 1, kh, kw, 1, oc_bn), dtype=kernel_dtype)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_depthwise_conv2d_nchwc(*inputs, **new_attrs)
if topi_tmpl == "conv2d_NCHWc_int8.arm_cpu":
assert data_layout == "NCHW" and kernel_layout == "OIHW"
batch_size, in_channel, height, width = get_const_tuple(data_tensor.shape)
out_channel, _, kh, kw = get_const_tuple(kernel_tensor.shape)
n_elems = 4
if cfg.is_fallback:
_get_default_config_int8(
cfg,
data_tensor,
kernel_tensor,
strides,
padding,
dilation,
out_dtype,
False,
data_layout,
int32_lanes=4,
)
ic_bn, oc_bn = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1]
if cfg.is_fallback:
# ic_bn needs to be divided by n_elems below
ic_bn = max(ic_bn, n_elems)
# update new attrs
new_attrs["channels"] = out_channel
new_attrs["data_layout"] = "NCHW%dc" % ic_bn
new_attrs["kernel_layout"] = "OIHW{:n}i{:n}o{:n}i".format(ic_bn // n_elems, oc_bn, n_elems)
new_attrs["out_layout"] = "NCHW%dc" % oc_bn
# Store altered operator's config.
new_data = te.placeholder(
(batch_size, in_channel // ic_bn, height, width, ic_bn), dtype=data_dtype
)
new_kernel = te.placeholder(
(out_channel // oc_bn, in_channel // ic_bn, kh, kw, ic_bn // n_elems, oc_bn, n_elems),
dtype=kernel_dtype,
)
new_workload = autotvm.task.args_to_workload(
[
new_data,
new_kernel,
strides,
padding,
dilation,
new_attrs["data_layout"],
new_attrs["out_layout"],
out_dtype,
],
topi_tmpl,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_nchwc(*inputs, **new_attrs)
if topi_tmpl == "conv2d_NHWC_quantized_interleaved.arm_cpu":
# TODO(masahi): This schedule can easily result in a tensorization error
# if used in the fallback mode
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
return None
assert data_layout == "NHWC" and kernel_layout == "HWIO"
KH, KW, _, OC = get_const_tuple(kernel.shape)
new_workload_name = "conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu"
new_kernel, new_kernel_expr = interleave_transpose_weights(
inputs, data, kernel, interleave_A=True
)
new_workload = autotvm.task.args_to_workload(
[data, new_kernel, strides, padding, dilation, out_dtype, (KH, KW), OC],
new_workload_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_gemm_without_weight_transform(
inputs[0], new_kernel_expr, **new_attrs
)
if topi_tmpl == "conv2d_NHWC_quantized_native.arm_cpu":
# TODO(masahi): This schedule can easily result in a tensorization error
# if used in the fallback mode
if cfg.is_fallback: # if is fallback, clear query cache and return None
autotvm.task.clear_fallback_cache(target, workload)
return None
assert data_layout == "NHWC" and kernel_layout == "HWIO"
KH, KW, _, OC = get_const_tuple(kernel.shape)
new_workload_name = "conv2d_NHWC_quantized_native_without_transform.arm_cpu"
new_kernel, new_kernel_expr = interleave_transpose_weights(
inputs, data, kernel, interleave_A=False
)
new_workload = autotvm.task.args_to_workload(
[data, new_kernel, strides, padding, dilation, out_dtype, (KH, KW), OC],
new_workload_name,
)
dispatch_ctx.update(target, new_workload, cfg)
return relay.nn.contrib_conv2d_gemm_without_weight_transform(
inputs[0], new_kernel_expr, **new_attrs
)
return None
@conv2d_legalize.register("arm_cpu")
def _conv2d_legalize(attrs, inputs, arg_types):
"""Legalizes Conv2D op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
    arg_types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the input tensors.
data_tensor, kernel_tensor = arg_types[0], arg_types[1]
data_dtype = data_tensor.dtype
kernel_dtype = kernel_tensor.dtype
# Collect the output tensor.
output_tensor = arg_types[2]
# Collect the input exprs.
data, kernel = inputs
    # ARM vector instructions operate on the same dtype for data and kernel, so
    # we pass the kernel dtype for both here; conv2d_alter_int8_common will
    # convert the inputs to the correct datatype.
if is_int8_hw_support(kernel_dtype, kernel_dtype):
# ARM intrinsics need the datatypes of data and kernel to be the same
return conv2d_alter_int8_common(
data, data_tensor, kernel, kernel_tensor, output_tensor, attrs, kernel_dtype, 8, 8
)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d_gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, too-many-locals
# pylint: disable=unused-argument, redefined-builtin
"""GEMM Convolution schedule on ARM"""
import tvm
from tvm.target import Target
from tvm import te
from tvm.topi import nn
from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity
from ..utils import get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import (
gemm_4x4_int8_int8_int32,
gemm_acc_4x4_int8_int8_int32,
gemm_acc_nx16_int8_int8_int32,
gemm_acc_2x2_int8_int8_int32,
)
def configure_knobs(cfg, M, K, target):
"""Configure auto-tuning knobs for the interleaved strategy"""
x, y = cfg.axis(M // 4), cfg.axis(K // 16)
cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]])
outer_loop, inner_loop = cfg.axis(4), cfg.axis(16)
cfg.define_annotate(
"A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec"
)
# Fallback configuration
if cfg.is_fallback:
cfg["reorder_gemm"] = ReorderEntity([0, 1])
cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"])
if not target.features.has_dotprod:
cfg.define_knob("gemm_quantized_unroll", [True, False])
if cfg.is_fallback:
cfg["gemm_quantized_unroll"] = OtherOptionEntity(False)
# Compute function
def compute_conv2d_gemm_without_weight_transform(
cfg,
data,
B_interleaved_t,
strides,
padding,
dilation,
out_dtype,
kernel_size,
output_channels,
interleave_A,
):
"""Compute conv2d by transforming the input,
executing GEMM and transforming the output back"""
batches, IH, IW, IC = get_const_tuple(data.shape)
KH, KW = get_const_tuple(kernel_size)
OC = get_const_int(output_channels)
kernel_area = KH * KW
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = get_const_tuple(dilation)
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
if pad_top or pad_left:
data_pad = nn.pad(
data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
)
else:
data_pad = data
# Im2col
M = OH * OW
K = IC * kernel_area
N = OC
A_shape = (batches, M, K)
if kernel_area == 1:
A = tvm.topi.reshape(data_pad, A_shape)
else:
A = te.compute(
A_shape,
lambda n, x, y: data_pad[
n,
HSTR * (x // OW) + dilation_h * ((y // IC) // KW),
WSTR * (x % OW) + dilation_w * ((y // IC) % KW),
y % IC,
],
name="data_im2col",
)
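    # Illustrative index mapping (hypothetical sizes): with OW=4, IC=3, KW=3 and unit
    # strides/dilation, row x=5 is output pixel (oh, ow) = (1, 1) and column y=7 is
    # (kh, kw, ic) = (0, 2, 1), so the element read is data_pad[n, 1, 3, 1].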
# Pad if necessary
N_transformed = B_interleaved_t.shape[0]
tile_rows_B = B_interleaved_t.shape[2]
tile_cols_B = B_interleaved_t.shape[3]
# Select the tiling strategy for A.
# The tiling information is chosen to maximize register usage during
# the tile computation.
#
    # For more information, please refer to:
# - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long
# - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product
# - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction
# - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h
    #
target = Target.current(allow_none=False)
if target.features.has_matmul_i8:
# If smmla/ummla is enabled, we are loading 8 rows from A. Each row
# will contain 8 elements
tile_rows_A = 8
tile_cols_A = 8
elif target.features.has_dotprod and interleave_A:
# If dot product has been enabled, and we are interleaving A
# tile size should be 8x4
tile_rows_A = 8
tile_cols_A = 4
else:
# If either there is no dot product or if we are using a native strategy
# tile size should be 4x16
tile_rows_A = 4
tile_cols_A = 16
pad_M = 0
pad_K = 0
if M % tile_rows_A != 0:
pad_M = tile_rows_A - (M % tile_rows_A)
if K % tile_cols_A != 0:
pad_K = tile_cols_A - (K % tile_cols_A)
M_padded = M + pad_M
K_padded = K + pad_K
N_padded = N_transformed * tile_rows_B
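    # Worked example (illustrative numbers): with tile_rows_A=4, tile_cols_A=16, M=25
    # and K=63, we get pad_M = 3 and pad_K = 1, so the GEMM runs on a 28 x 64 A matrix
    # and the extra rows/columns are dropped again when the result is unpacked to
    # (batches, M, N).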
pad_before = (0, 0, 0)
pad_after = (0, pad_M, pad_K)
if pad_M != 0 or pad_K != 0:
A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded")
idxm = tvm.tir.indexmod
k = te.reduce_axis((0, K_padded), "k")
if interleave_A:
# Configuration space
configure_knobs(cfg, M_padded, K_padded, target)
# Pack the input data
A_interleaved = te.compute(
(batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A),
lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y],
name="A_interleaved",
)
target = Target.current(allow_none=False)
if target.features.has_matmul_i8:
# Execute GEMM. In the case of mmla, we need to enforce the tiling
# from the compute. This is because mmla is doing a tiled computation
# as well. So we have a big 8x12 tile, with small 2x2 sub-tiles
# generated by mmla. In theory we could make the tile 2x2 and
# fuse and split during scheduling, but this would not work
# because of possible padding
C_interleaved = te.compute(
(
batches,
M_padded // tile_rows_A,
N_transformed,
tile_rows_A // 2,
tile_rows_B // 2,
2,
2,
),
lambda b, x, y, w, z, s, t: te.sum(
A_interleaved[b, x, k // tile_cols_A, 2 * w + s, idxm(k, tile_cols_A)].astype(
"int32"
)
* B_interleaved_t[y, k // tile_cols_B, 2 * z + t, idxm(k, tile_cols_B)].astype(
"int32"
),
axis=k,
),
name="C_interleaved",
)
# Unpack the result
C = te.compute(
(batches, M, N),
lambda b, x, y: C_interleaved[
b,
x // tile_rows_A,
y // tile_rows_B,
idxm(x, tile_rows_A) // 2,
idxm(y, tile_rows_B) // 2,
idxm(idxm(x, tile_rows_A), 2),
idxm(idxm(y, tile_rows_B), 2),
].astype(out_dtype),
name="C",
)
else:
# Execute GEMM
C_interleaved = te.compute(
(batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B),
lambda b, x, y, w, z: te.sum(
A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32")
* B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"),
axis=k,
),
name="C_interleaved",
)
# Unpack the result
C = te.compute(
(batches, M, N),
lambda b, x, y: C_interleaved[
b,
x // tile_rows_A,
y // tile_rows_B,
idxm(x, tile_rows_A),
idxm(y, tile_rows_B),
].astype(out_dtype),
name="C",
)
zero = tvm.tir.const(0)
else:
# No need to pack/unpack, execute GEMM directly
C = te.compute(
(batches, M_padded, N_padded),
lambda b, x, y: te.sum(
A[b, x, k].astype("int32")
* B_interleaved_t[
y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B)
].astype("int32"),
axis=k,
),
name="C",
)
        # We need to ensure that the InferBound pass does not remove the
        # padding, which is necessary for the tensorizations to work. So we
        # add a dummy reference to the padding area of the result.
zero = (
tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
- tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1]
)
# Reshape the result into a convolution output
out_shape = (batches, OH, OW, OC)
out = te.compute(
out_shape,
lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype),
name="conv2d_gemm_output",
)
return out
def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out):
"""Schedule the conv2d_gemm interleaved strategy"""
C = out.op.input_tensors[0]
C_interleaved = C.op.input_tensors[0]
A_interleaved = C_interleaved.op.input_tensors[0]
# Input transform
A_interleaved_input = A_interleaved.op.input_tensors[0]
if A_interleaved_input.op.name == "A_padded":
s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3])
s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2])
s[A_interleaved_input].compute_inline()
data_im2col = A_interleaved_input.op.input_tensors[0]
else:
data_im2col = A_interleaved_input
b, m, n = data_im2col.op.axis
if data_im2col.op.name == "data_im2col":
n_outer, n_inner = s[data_im2col].split(n, 16)
s[data_im2col].unroll(n_outer)
s[data_im2col].vectorize(n_inner)
b_m_fused = s[data_im2col].fuse(b, m)
s[data_im2col].parallel(b_m_fused)
else:
s[data_im2col].compute_inline()
# Computation(through tensorize)
b, xo, yo, xi, yi = C_interleaved.op.axis[0:5]
outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo])
b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm)
s[C_interleaved].parallel(b_outer_gemm_fused)
s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused)
_, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis
cfg["A_interleaved_unroll_vec"].apply(
s, A_interleaved, [outer_A_interleaved, inner_A_interleaved]
)
in_type = A_interleaved.dtype
out_type = C.dtype
k = C_interleaved.op.reduce_axis[0]
_, M, N = C.shape
if in_type in ["int8", "uint8"]:
target = Target.current(allow_none=False)
if target.features.has_matmul_i8:
gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type)
xi_inner, yi_inner = C_interleaved.op.axis[-2:]
k_outer, k_inner = s[C_interleaved].split(k, 8)
s[C_interleaved].reorder(
b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner
)
s[C_interleaved].tensorize(xi_inner, gemm_acc)
s[C_interleaved].unroll(xi)
s[C_interleaved].unroll(yi)
elif target.features.has_dotprod:
gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type)
xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile(
xi, yi, x_factor=8, y_factor=4
)
k_outer, k_inner = s[C_interleaved].split(k, 4)
xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4)
s[C_interleaved].reorder(
b_outer_gemm_fused,
inner_gemm,
xi_outer,
yi_outer,
k_outer,
xi_inner_outer,
xi_inner_inner,
yi_inner,
k_inner,
)
s[C_interleaved].tensorize(xi_inner_inner, gemm_acc)
s[C_interleaved].unroll(xi_inner_outer)
elif target.features.has_asimd:
s[C_interleaved].reorder(yi, xi)
K = A_interleaved_input.shape[2]
assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported"
unroll = cfg["gemm_quantized_unroll"].val
gemm = gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type)
s[C_interleaved].tensorize(yi, gemm)
# Output transform
if out != final_out:
n, h, w, c = out.op.axis
_, inner = s[out].split(c, 4)
s[C].compute_at(s[out], inner)
s[out].vectorize(inner)
return s
def schedule_conv2d_gemm_native(cfg, s, out, final_out):
"""Schedule the conv2d_gemm hybrid strategy"""
C = out.op.input_tensors[0]
A = C.op.input_tensors[0]
in_type = A.dtype
# Computation
b, x, y = C.op.axis
(k,) = C.op.reduce_axis
k_outer, k_inner = s[C].split(k, 16)
x_outer, y_outer, x_inner, y_inner = s[C].tile(x, y, x_factor=4, y_factor=16)
s[C].reorder(b, x_outer, y_outer, k_outer, x_inner, y_inner, k_inner)
gemm_acc = gemm_acc_nx16_int8_int8_int32(in_type, rows=1)
s[C].unroll(x_inner)
s[C].tensorize(y_inner, gemm_acc)
s[C].parallel(x_outer)
# Input transform
if A.op.name == "A_padded":
padding_A = True
data_im2col = A.op.input_tensors[0]
else:
padding_A = False
data_im2col = A
b, m, n = data_im2col.op.axis
if data_im2col.op.name == "data_im2col":
n_outer, n_inner = s[data_im2col].split(n, 16)
s[data_im2col].unroll(n_outer)
s[data_im2col].vectorize(n_inner)
s[data_im2col].parallel(m)
elif padding_A:
s[data_im2col].compute_inline()
s[A].compute_at(s[C], x_inner)
else:
s[data_im2col].compute_at(s[C], x_inner)
# Output transform
if out != final_out:
n, h, w, c = out.op.axis
_, inner = s[out].split(c, 4)
s[out].vectorize(inner)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on ARM"""
from tvm import te, target, autotvm
from ..utils import traverse_inline, get_const_tuple
from ..generic import conv2d as conv2d_generic
from .. import nn
from ...target import codegen
from ..nn.conv2d import _get_workload as _get_conv2d_workload, unpack_NCHWc_to_nchw
from ..x86.conv2d_int8 import _pack_data
from ..nn.utils import get_pad_tuple
from .tensor_intrin import dot_int8_int8_int32_neon_82, dot_int8_int8_int32_neon
from .conv2d_gemm import (
compute_conv2d_gemm_without_weight_transform,
schedule_conv2d_gemm_interleaved,
schedule_conv2d_gemm_native,
)
from .arm_utils import get_tiling_B_interleaved_t
def _get_default_config(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""
Get default int8 schedule config for the workload
"""
wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype)
is_kernel_1x1 = wkl.kernel_h == 1 and wkl.kernel_w == 1
if is_kernel_1x1:
conv2d_generic.fallback_schedule_cpu_1x1_int8(cfg, wkl, int32_lanes=4, num_int8_elements=4)
else:
conv2d_generic.fallback_schedule_cpu_common_int8(
cfg, wkl, int32_lanes=4, num_int8_elements=4
)
@autotvm.register_topi_compute("conv2d_NCHWc_int8.arm_cpu")
def conv2d_NCHWc_int8(cfg, data, kernel, strides, padding, dilation, layout, out_layout, out_dtype):
"""Compute conv2d int8 with NCHWc layout"""
# layout and out_layout are not used here,
# we keep them for debug convenience when dumping autotvm workload
if len(data.shape) == 5: # data is in nchwc
n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
in_channel = ic_chunk * ic_bn
oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn, _ = get_const_tuple(kernel.shape)
num_filter = oc_chunk * oc_bn
else:
# data is nchw, implicitly treat it as nchw1c
n, in_channel, ih, iw = get_const_tuple(data.shape)
num_filter, _, kh, kw = get_const_tuple(kernel.shape)
# Define autotvm tuning space
is_kernel_1x1 = kh == 1 and kw == 1
pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
dh, dw = dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
dilated_kernel_h = (kh - 1) * dh + 1
dilated_kernel_w = (kw - 1) * dw + 1
oh = (ih - dilated_kernel_h + pt + pb) // sh + 1
ow = (iw - dilated_kernel_w + pl + pr) // sw + 1
# input and output should be a multiple of 8 (intrinsics are 8 lanes)
cfg.define_split(
"tile_ic", in_channel, num_outputs=2, filter=lambda y: y.size[-1] % min(8, in_channel) == 0
)
cfg.define_split(
"tile_oc", num_filter, num_outputs=2, filter=lambda y: y.size[-1] % min(8, num_filter) == 0
)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
# If no config was set, we can fallback to NCHW config.
if cfg.is_fallback:
_get_default_config(
cfg,
te.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
te.placeholder((num_filter, in_channel, kh, kw), dtype=kernel.dtype),
strides,
padding,
dilation,
out_dtype,
)
# Pack data if raw 4-D data is provided.
# This can only happen when autotuning.
if len(data.shape) == 4:
data, kernel = _pack_data(cfg, data, kernel)
n_elems = int(kernel.shape[-1])
return nn.conv2d_NCHWc_int8(
data, kernel, strides, padding, dilation, layout, out_layout, out_dtype, n_elems=n_elems
)
def is_int8_hw_support(data_dtype, kernel_dtype):
"""
    Checks to ensure that we can use int8 on Arm:
    1) The datatypes are correct.
    2) The LLVM version has support for the instructions.
    3) The target has Advanced SIMD (NEON) or dot-product support.
"""
# 1) Check datatypes
is_dtype_support = data_dtype == kernel_dtype and "int8" in data_dtype
# 2) Check LLVM support
llvm_version = codegen.llvm_version_major()
is_llvm_support = llvm_version >= 8
# 3) Check target
current_target = target.Target.current(allow_none=False)
is_target_support = bool(
current_target.features.has_asimd or current_target.features.has_dotprod
)
return is_dtype_support and is_llvm_support and is_target_support
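# Illustrative usage of is_int8_hw_support (the target string below is only an example
# of an AArch64 target with dot-product support, not a requirement):
#
#   with target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu "
#                      "-mattr=+neon,+v8.2a,+dotprod"):
#       assert is_int8_hw_support("int8", "int8")  # also needs LLVM >= 8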
@autotvm.register_topi_schedule("conv2d_NCHWc_int8.arm_cpu")
def schedule_conv2d_NCHWc_int8(cfg, outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def _callback(op):
if "conv2d_NCHWc_int8" in op.tag:
conv_out = op.output(0)
kernel_vec = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = (
data_vec.op.input_tensors[0]
if isinstance(data_vec.op, te.tensor.ComputeOp) and "pad" not in data_vec.op.tag
else data_vec
)
if isinstance(data.op, te.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]]
# int8 conv kernel is 7-dim
_, _, kh, kw, _, _, n_elems = get_const_tuple(kernel_vec.shape)
assert n_elems == 4
dtype = "uint" if data.dtype == "uint8" else "int"
current_target = target.Target.current(allow_none=False)
if current_target.features.has_dotprod:
intrin = dot_int8_int8_int32_neon_82(int32_lanes=4, dtype=dtype)
elif current_target.features.has_asimd:
assert dtype == "int", "uint8 not supported if dot product is not available"
intrin = dot_int8_int8_int32_neon()
else:
raise RuntimeError(
"Cannot schedule schedule_NCHWc_int8 without neon or arm v8.2 neon support"
)
            # On Raspberry Pi 4s, we see poor performance when the fused
            # operations are inlined into the main computation body: these
            # fused ops dominate the runtime on small convolutions and
            # repeatedly blow the cache. Using workloads from resnet50,
            # inceptionv3, and mobilenetv3, we empirically determined that
            # inlining is not worth it when kernel height * kernel width < 500.
            # These tests were only run on Raspberry Pi 4; other Arm CPUs may
            # have larger caches where inlining performs well.
if target.Target.current().mcpu == "cortex-a72" and kh * kw < 500:
inline_fused = False
else:
inline_fused = True
if kh == 1 and kw == 1:
conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(
*args, int32_lanes=4, int8_elems=4, intrin=intrin, inline_fused=inline_fused
)
else:
conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
*args, int32_lanes=4, int8_elems=4, intrin=intrin, inline_fused=inline_fused
)
traverse_inline(s, outs[0].op, _callback)
return s
def conv2d_nchw_int8(data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NCHW layout and int8 dtype"""
layout = "NCHW"
# pylint: disable=no-value-for-parameter
packed_out = conv2d_NCHWc_int8(
data, kernel, strides, padding, dilation, layout, layout, out_dtype
)
return unpack_NCHWc_to_nchw(packed_out, out_dtype)
def schedule_conv2d_nchw_int8(outs):
"""Create the schedule for conv2d_nchw_int8"""
# pylint: disable=no-value-for-parameter
return schedule_conv2d_NCHWc_int8(outs)
def _compute_conv2d_NHWC_quantized(
cfg, data, kernel, strides, padding, dilation, out_dtype, interleave_A
):
N, IH, IW, IC = get_const_tuple(data.shape)
KH, KW, _, OC = get_const_tuple(kernel.shape)
tile_rows_B, tile_cols_B = get_tiling_B_interleaved_t(interleave_A)
kernel = nn.conv2d_gemm_weight_transform(kernel, tile_rows_B, tile_cols_B)
return compute_conv2d_gemm_without_weight_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, (KH, KW), OC, interleave_A
)
def _compute_conv2d_NHWC_quantized_without_transform(
cfg,
data,
B,
strides,
padding,
dilation,
out_dtype,
kernel_size=None,
output_channels=None,
interleave_A=False,
):
return compute_conv2d_gemm_without_weight_transform(
cfg,
data,
B,
strides,
padding,
dilation,
out_dtype,
kernel_size,
output_channels,
interleave_A,
)
def _schedule_conv2d_NHWC_quantized(cfg, outs, interleave_A):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
# Vectorize the output and then inline all the rest
out = outs[0]
n, h, w, c = out.op.axis
n_h_fused = s[out].fuse(n, h)
outer, inner = s[out].split(c, 4)
s[out].vectorize(inner)
s[out].parallel(n_h_fused)
def _callback(op):
"""Traverse operators from computation graph"""
if op.name == "conv2d_gemm_output":
conv_out = op.output(0)
if interleave_A:
schedule_conv2d_gemm_interleaved(cfg, s, conv_out, out)
else:
schedule_conv2d_gemm_native(cfg, s, conv_out, out)
if out != conv_out:
s[conv_out].compute_at(s[out], inner)
else:
C = conv_out.op.input_tensors[0]
if interleave_A:
s[C].compute_at(s[out], inner)
traverse_inline(s, outs[0].op, _callback)
return s
# Interleaved schedules: these schedules will interleave the input data. The
# weights are interleaved and transposed.
@autotvm.register_topi_compute("conv2d_NHWC_quantized_interleaved.arm_cpu")
def compute_conv2d_NHWC_quantized_interleaved(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
"""Interface for interleaved compute_conv2d_NHWC_quantized_interleaved"""
return _compute_conv2d_NHWC_quantized(
cfg, data, kernel, strides, padding, dilation, out_dtype, True
)
@autotvm.register_topi_compute("conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu")
def compute_conv2d_NHWC_quantized_interleaved_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
"""Interface for interleaved compute_conv2d_NHWC_quantized_interleaved_without_transform"""
return _compute_conv2d_NHWC_quantized_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels, True
)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_interleaved.arm_cpu")
def schedule_conv2d_NHWC_quantized_interleaved(cfg, outs):
"""Interface for interleaved schedule_conv2d_NHWC_quantized_interleaved"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, True)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu")
def schedule_conv2d_NHWC_quantized_interleaved_without_transform(cfg, outs):
"""Interface for interleaved schedule_conv2d_NHWC_quantized_interleaved"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, True)
# Native schedules: these schedules won't interleave A (which is left in its native form).
# The weights are interleaved and transposed.
@autotvm.register_topi_compute("conv2d_NHWC_quantized_native.arm_cpu")
def compute_conv2d_NHWC_quantized_native(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Interface for native compute_conv2d_NHWC_quantized"""
return _compute_conv2d_NHWC_quantized(
cfg, data, kernel, strides, padding, dilation, out_dtype, False
)
@autotvm.register_topi_compute("conv2d_NHWC_quantized_native_without_transform.arm_cpu")
def compute_conv2d_NHWC_quantized_native_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
"""Interface for compute_conv2d_NHWC_quantized_native_without_transform"""
return _compute_conv2d_NHWC_quantized_without_transform(
cfg,
data,
kernel,
strides,
padding,
dilation,
out_dtype,
kernel_size,
output_channels,
False,
)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_native.arm_cpu")
def schedule_conv2d_NHWC_quantized_native(cfg, outs):
"""Interface for native schedule_conv2d_NHWC_quantized"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, False)
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_native_without_transform.arm_cpu")
def schedule_conv2d_NHWC_quantized_native_without_transform(cfg, outs):
"""Interface for native schedule_conv2d_NHWC_quantized"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, False)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d_spatial_pack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,no-else-return
"""Conv2D spatial pack implementation for ARM CPU"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from .. import nn
from ..utils import get_const_tuple
from ..nn.utils import get_const_int, get_pad_tuple
def conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile):
"""compute define for Conv2d Spatial Pack with NCHW layout"""
out_dtype = out_dtype or data.dtype
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(IH, int) or not isinstance(IW, int):
raise RuntimeError("ARM winograd conv2d doesn't support dynamic input height or width.")
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
pre_packed = False
CO, _, KH, KW = get_const_tuple(kernel.shape)
else: # kernel tensor is pre packed
pre_packed = True
CO, _, KH, KW, VC = get_const_tuple(kernel.shape)
CO = CO * VC
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_bottom - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
data_pad = nn.pad(data, [0, 0, pad_top, pad_left], [0, 0, pad_bottom, pad_right])
# ==================== define configuration space ====================
# TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
n_tuning_axis = N if isinstance(N, int) else 1
n, co, oh, ow = cfg.axis(n_tuning_axis), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split("tile_co", co, num_outputs=2)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
elif num_tile == 3: # for mali gpu
co, _, vc = cfg.define_split("tile_co", co, num_outputs=3)
oh, _, vh = cfg.define_split("tile_oh", oh, num_outputs=3)
ow, _, vw = cfg.define_split("tile_ow", ow, num_outputs=3)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder(
"reorder_0",
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
policy="candidate",
candidate=[
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
[n, co, oh, ow, ci, kh, kw, vc, vh, vw],
],
)
cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
# fallback support
if cfg.is_fallback:
if num_tile == 2: # arm cpu
ref_log = autotvm.tophub.load_reference_log(
"arm_cpu", "rk3399", "conv2d_nchw_spatial_pack.arm_cpu"
)
cfg.fallback_with_reference_log(ref_log)
elif num_tile == 3: # mali gpu
ref_log = autotvm.tophub.load_reference_log(
"mali", "rk3399", "conv2d_nchw_spatial_pack.mali"
)
cfg.fallback_with_reference_log(ref_log)
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
kvshape = (CO // VC, CI, KH, KW, VC)
ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, CO, OH, OW)
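    # Illustrative shapes (hypothetical tiling): with CO=32, OH=OW=28 and a chosen split
    # of VC=4, VH=VW=7, the packed kernel is (8, CI, KH, KW, 4), the packed output is
    # (N, 8, 4, 4, 7, 7, 4), and it is finally unpacked back to (N, 32, 28, 28).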
if dilation_h != 1 or dilation_w != 1:
# undilate input data
dvshape = (N, OH // VH, OW // VW, CI, KH, KW, VH, VW)
data_vec = te.compute(
dvshape,
lambda n, h, w, ci, kh, kw, vh, vw: data_pad[n][ci][
(h * VH + vh) * HSTR + kh * dilation_h
][(w * VW + vw) * WSTR + kw * dilation_w],
name="data_vec_undilated",
)
else:
dvshape = (N, OH // VH, OW // VW, CI, VH * HSTR + KH - 1, VW * WSTR + KW - 1)
data_vec = te.compute(
dvshape,
lambda n, h, w, ci, vh, vw: data_pad[n][ci][h * VH * HSTR + vh][w * VW * WSTR + vw],
name="data_vec",
)
if autotvm.GLOBAL_SCOPE.in_tuning:
# use "kernel_autotvm" instead of "kernel" to avoid naming conflict with OpenCL keyword
kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel_autotvm")
else:
if pre_packed:
kernel_vec = kernel
else:
kernel_vec = te.compute(
kvshape,
lambda co, ci, kh, kw, vc: kernel[co * VC + vc][ci][kh][kw],
name="kernel_vec",
)
ci = te.reduce_axis((0, CI), name="ci")
kh = te.reduce_axis((0, KH), name="kh")
kw = te.reduce_axis((0, KW), name="kw")
if dilation_h != 1 or dilation_w != 1:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, ci, kh, kw, vh, vw].astype(out_dtype)
* kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
axis=[ci, kh, kw],
),
name="conv",
)
else:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, ci, vh * HSTR + kh, vw * WSTR + kw].astype(out_dtype)
* kernel_vec[co, ci, kh, kw, vc].astype(out_dtype),
axis=[ci, kh, kw],
),
name="conv",
)
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
output = te.compute(
oshape,
lambda n, co, h, w: conv[
n,
idxdiv(co, VC),
idxdiv(h, VH),
idxdiv(w, VW),
idxmod(h, VH),
idxmod(w, VW),
idxmod(co, VC),
],
name="output_unpack",
tag="spatial_conv2d_output",
)
return output
def schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, last):
"""schedule implementation"""
n, co, oh, ow, vh, vw, vc = s[conv].op.axis
ci, kh, kw = s[conv].op.reduce_axis
# schedule conv
cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, ci, kh, kw, vh, vw, vc])
cfg["ann_reduce"].apply(
s,
conv,
[kh, kw],
axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
max_unroll=None,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s,
conv,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=None,
cfg=cfg,
)
# schedule fusion
n, co, h, w = s[last].op.axis
co, vc = cfg["tile_co"].apply(s, last, co)
oh, vh = cfg["tile_oh"].apply(s, last, h)
ow, vw = cfg["tile_ow"].apply(s, last, w)
s[last].reorder(n, co, oh, ow, vh, vw, vc)
if last != output:
s[output].compute_inline()
cfg["ann_spatial"].apply(
s,
last,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=16,
cfg=cfg,
)
s[conv].compute_at(s[last], ow)
# mark parallel
s[last].parallel(co)
if data_vec.op.name == "data_vec_undilated":
_, h, _, _, _, _, _, _ = s[data_vec].op.axis
else:
_, h, _, _, _, _ = s[data_vec].op.axis
s[data_vec].parallel(h)
if kernel_vec.op.name == "kernel_vec":
if not autotvm.GLOBAL_SCOPE.in_tuning:
co, _, _, _, _ = s[kernel_vec].op.axis
s[kernel_vec].parallel(co)
elif kernel_vec.op.name == "kernel_vec_conv2d_transpose": # for conv2d transpose
co, _, _, _, _ = s[kernel_vec].op.axis
s[kernel_vec].parallel(co)
return s
def conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2):
"""Spatial pack compute for Conv2d NHWC"""
out_dtype = out_dtype or data.dtype
N, IH, IW, IC = get_const_tuple(data.shape)
assert len(kernel.shape) == 4, "AlterOpLayout not enabled for NHWC yet"
KH, KW, _, OC = get_const_tuple(kernel.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
data_pad = nn.pad(data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0])
# ==================== define configuration space ====================
# If it has dynamic shape in batch, we fix the split factor to 1
n = cfg.axis(N) if isinstance(N, int) else cfg.axis(1)
oc, oh, ow = cfg.axis(OC), cfg.axis(OH), cfg.axis(OW)
ic, kh, kw = cfg.reduce_axis(IC), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
if num_tile == 2: # for arm cpu
oco, oci = cfg.define_split("tile_co", oc, num_outputs=2)
oho, ohi = cfg.define_split("tile_oh", oh, num_outputs=2)
owo, owi = cfg.define_split("tile_ow", ow, num_outputs=2)
elif num_tile == 3: # for mali gpu
oco, _, oci = cfg.define_split("tile_co", oc, num_outputs=3)
oho, _, ohi = cfg.define_split("tile_oh", oh, num_outputs=3)
owo, _, owi = cfg.define_split("tile_ow", ow, num_outputs=3)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder(
"reorder_conv",
[n, oho, owo, oco, kh, kw, ic, ohi, owi, oci],
policy="candidate",
candidate=[
[n, oho, owo, oco, kh, kw, ic, ohi, owi, oci],
[n, oho, owo, oco, ohi, kh, kw, ic, owi, oci],
[n, oho, owo, oco, ohi, kh, kw, owi, ic, oci],
[n, oho, owo, ohi, oco, kh, kw, owi, ic, oci],
],
)
cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
cfg.define_annotate("ann_spatial", [ohi, owi, oci], policy="try_unroll_vec")
# ====================================================================
OCI = cfg["tile_co"].size[-1]
OHI = cfg["tile_oh"].size[-1]
OWI = cfg["tile_ow"].size[-1]
OCO = OC // OCI
OHO = OH // OHI
OWO = OW // OWI
kvshape = (OCO, KH, KW, IC, OCI)
ovshape = (N, OHO, OWO, OCO, OHI, OWI, OCI)
oshape = (N, OH, OW, OC)
if dilation_h != 1 or dilation_w != 1:
# undilate input data
dvshape = (N, OHO, OWO, KH, KW, IC, OHI, OWI)
data_vec = te.compute(
dvshape,
lambda n, oho, owo, kh, kw, ic, ohi, owi: data_pad[n][
(oho * OHI + ohi) * HSTR + kh * dilation_h
][(owo * OWI + owi) * WSTR + kw * dilation_w][ic],
name="data_vec_undilated",
)
else:
dvshape = (N, OHO, OWO, KH + (OHI - 1) * HSTR, KW + (OWI - 1) * WSTR, IC)
data_vec = te.compute(
dvshape,
lambda n, oho, owo, ohi, owi, ic: data_pad[n][oho * OHI * HSTR + ohi][
owo * OWI * WSTR + owi
][ic],
name="data_vec",
)
if autotvm.GLOBAL_SCOPE.in_tuning:
kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel")
else:
kernel_vec = te.compute(
kvshape,
lambda oco, kh, kw, ic, oci: kernel[kh][kw][ic][oco * OCI + oci],
name="kernel_vec",
)
ic = te.reduce_axis((0, IC), name="ic")
kh = te.reduce_axis((0, KH), name="kh")
kw = te.reduce_axis((0, KW), name="kw")
if dilation_h != 1 or dilation_w != 1:
conv = te.compute(
ovshape,
lambda n, oho, owo, oco, ohi, owi, oci: te.sum(
data_vec[n, oho, owo, kh, kw, ic, ohi, owi].astype(out_dtype)
* kernel_vec[oco, kh, kw, ic, oci].astype(out_dtype),
axis=[ic, kh, kw],
),
name="conv",
)
else:
conv = te.compute(
ovshape,
lambda n, oho, owo, oco, ohi, owi, oci: te.sum(
data_vec[n, oho, owo, ohi * HSTR + kh, owi * WSTR + kw, ic].astype(out_dtype)
* kernel_vec[oco, kh, kw, ic, oci].astype(out_dtype),
axis=[ic, kh, kw],
),
name="conv",
)
idiv = tvm.tir.indexdiv
imod = tvm.tir.indexmod
output = te.compute(
oshape,
lambda n, oho, owo, oc: conv[n][idiv(oho, OHI)][idiv(owo, OWI)][idiv(oc, OCI)][
imod(oho, OHI)
][imod(owo, OWI)][imod(oc, OCI)],
name="output_unpack",
tag="spatial_conv_output_NHWC",
)
return output
def schedule_conv2d_spatial_pack_nhwc(cfg, s, op, output):
"""Spatial Pack schedule for Conv2d NHWC"""
unpack = op.output(0)
conv = unpack.op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
kernel_vec = conv.op.input_tensors[1]
data_pad = data_vec.op.input_tensors[0]
OHI = cfg["tile_oh"].size[-1]
OWI = cfg["tile_ow"].size[-1]
OCI = cfg["tile_co"].size[-1]
# schedule unpack/output
if output != unpack:
s[unpack].compute_inline()
n, oh, ow, oc = s[output].op.axis
oco, oci = cfg["tile_co"].apply(s, output, oc)
oho, ohi = cfg["tile_oh"].apply(s, output, oh)
owo, owi = cfg["tile_ow"].apply(s, output, ow)
s[output].reorder(n, oho, owo, oco, ohi, owi, oci)
cfg["ann_spatial"].apply(
s, output, [ohi, owi, oci], axis_lens=[OHI, OWI, OCI], max_unroll=16, cfg=cfg
)
cfg.define_knob("compat", [0, 1, 2])
if cfg["compat"].val < 2:
compat_axis = [owo, oco][cfg["compat"].val] # pylint: disable=R1706
s[conv].compute_at(s[output], compat_axis)
paxis = s[output].fuse(n, oho)
s[output].parallel(paxis)
# schedule conv
n, oho, owo, oco, ohi, owi, oci = s[conv].op.axis
ic, kh, kw = s[conv].op.reduce_axis
cfg["reorder_conv"].apply(s, conv, [n, oho, owo, oco, kh, kw, ohi, owi, ic, oci])
cfg["ann_reduce"].apply(
s,
conv,
[kh, kw],
axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
max_unroll=16,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s, conv, [ohi, owi, oci], axis_lens=[OHI, OWI, OCI], max_unroll=16, cfg=cfg
)
if cfg["compat"].val < 2:
compat_axis = [owo, oco][cfg["compat"].val] # pylint: disable=R1706
s[kernel_vec].compute_at(s[conv], compat_axis)
s[data_vec].compute_at(s[conv], compat_axis)
if not autotvm.GLOBAL_SCOPE.in_tuning:
# schedule kernel pack
oco, kh, kw, ic, oci = kernel_vec.op.axis
s[kernel_vec].vectorize(oci)
s[kernel_vec].unroll(ic)
if cfg["compat"].val == 2:
s[kernel_vec].parallel(oco)
# schedule data pack
if data_vec.op.name == "data_vec_undilated":
n, oho, owo, kh, kw, ic, ohi, owi = s[data_vec].op.axis
s[data_vec].vectorize(owi)
s[data_vec].unroll(ohi)
else:
n, oho, owo, ohi, owi, ic = s[data_vec].op.axis
s[data_vec].vectorize(ic)
s[data_vec].unroll(owi)
if cfg["compat"].val == 2:
paxis = s[data_vec].fuse(n, oho)
s[data_vec].parallel(paxis)
return s
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from ..nn import dilate, pad, get_pad_tuple
from ..utils import get_const_tuple, traverse_inline
from .conv2d_spatial_pack import schedule_conv2d_spatial_pack_nchw
@autotvm.register_topi_compute("conv2d_transpose_nchw.arm_cpu")
def conv2d_transpose_nchw(cfg, Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype: str
The output data type. This is used for mixed precision.
output_padding : tuple of int
Used to get the right output shape in gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return _decl_spatial_pack(
cfg, Input, Filter, strides, padding, "NCHW", out_dtype, 2, output_padding
)
def _decl_spatial_pack(
cfg, data, kernel, strides, padding, layout, out_dtype, num_tile, output_padding
):
assert layout == "NCHW", "Only support NCHW"
out_dtype = out_dtype or data.dtype
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(IH, int) or not isinstance(IW, int):
raise RuntimeError("ARM winograd conv2d doesn't support dynamic input height or width.")
_, CO, KH, KW = get_const_tuple(kernel.shape)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
opad_h, opad_w = output_padding
assert opad_h < HSTR and opad_w < WSTR
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (KH, KW))
bpad_top, bpad_bottom = KH - 1 - pad_top, KH - 1 - pad_bottom + opad_h
bpad_left, bpad_right = KW - 1 - pad_left, KW - 1 - pad_right + opad_w
OH = (IH - 1) * HSTR - pad_top - pad_bottom + KH + opad_h
OW = (IW - 1) * WSTR - pad_left - pad_right + KW + opad_w
dilated_input = dilate(data, [1, 1, HSTR, WSTR])
data_pad = pad(dilated_input, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right])
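    # Worked example (illustrative sizes): IH=IW=4, HSTR=WSTR=2, KH=KW=3, padding=(1, 1)
    # and output_padding=(1, 1) give OH = (4 - 1) * 2 - 1 - 1 + 3 + 1 = 8, with
    # bpad_top = 3 - 1 - 1 = 1 and bpad_bottom = 3 - 1 - 1 + 1 = 2 applied around the
    # stride-dilated input.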
# ==================== define configuration space ====================
# TODO(@kevinthesun): Support tuning/optimization for dynamic shape.
n_tuning_axis = N if isinstance(N, int) else 1
n, co, oh, ow = cfg.axis(n_tuning_axis), cfg.axis(CO), cfg.axis(OH), cfg.axis(OW)
ci, kh, kw = cfg.reduce_axis(CI), cfg.reduce_axis(KH), cfg.reduce_axis(KW)
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split("tile_co", co, num_outputs=2)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
elif num_tile == 3: # for mali gpu
co, _, vc = cfg.define_split("tile_co", co, num_outputs=3)
oh, _, vh = cfg.define_split("tile_oh", oh, num_outputs=3)
ow, _, vw = cfg.define_split("tile_ow", ow, num_outputs=3)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder(
"reorder_0",
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
policy="candidate",
candidate=[
[n, co, oh, ow, ci, kh, kw, vh, vw, vc],
[n, co, oh, ow, ci, kh, kw, vc, vh, vw],
],
)
cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
dvshape = (N, OH // VH, OW // VW, CI, VH + KH - 1, VW + KW - 1)
kvshape = (CO // VC, CI, KH, KW, VC)
ovshape = (N, CO // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, CO, OH, OW)
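    # For instance (editorial note), with CO = 32, OH = OW = 16, VC = 4 and
    # VH = VW = 2, the packed shapes become kvshape = (8, CI, KH, KW, 4),
    # ovshape = (N, 8, 8, 8, 2, 2, 4) and oshape = (N, 32, 16, 16); data_vec
    # additionally keeps a (VH + KH - 1) x (VW + KW - 1) input patch per tile.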
data_vec = te.compute(
dvshape,
lambda n, h, w, ci, vh, vw: data_pad[n][ci][h * VH + vh][w * VW + vw],
name="data_vec",
)
kernel_vec = te.compute(
kvshape,
lambda co, ci, kh, kw, vc: kernel[ci][co * VC + vc][kh][kw],
name="kernel_vec_conv2d_transpose",
)
ci = te.reduce_axis((0, CI), name="ci")
kh = te.reduce_axis((0, KH), name="kh")
kw = te.reduce_axis((0, KW), name="kw")
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, ci, vh + kh, vw + kw].astype(out_dtype)
* kernel_vec[co, ci, KH - 1 - kh, KW - 1 - kw, vc].astype(out_dtype),
axis=[ci, kh, kw],
),
name="conv",
)
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
output = te.compute(
oshape,
lambda n, co, h, w: conv[
n,
idxdiv(co, VC),
idxdiv(h, VH),
idxdiv(w, VW),
idxmod(h, VH),
idxmod(w, VW),
idxmod(co, VC),
],
name="output_unpack",
tag="spatial_conv2d_transpose_output",
)
return output
# register customized schedule for arm cpu.
@autotvm.register_topi_schedule("conv2d_transpose_nchw.arm_cpu")
def schedule_conv2d_transpose_nchw(cfg, outs):
"""Schedule conv2d transpose for arm cpu"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "spatial_conv2d_transpose_output" in op.tag:
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
dilated_input = data_pad.op.input_tensors[0]
s[data_pad].compute_inline()
s[dilated_input].compute_inline()
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == "kernel_vec":
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
schedule_conv2d_spatial_pack_nchw(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
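# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the upstream module).
# It only declares the compute above under an untuned fallback AutoTVM config
# and lowers it with a plain TE schedule; the shapes, strides, padding and the
# "llvm" target string are illustrative assumptions. Real builds apply the
# registered schedule via the relay op strategy or a tuned AutoTVM config.
def _example_conv2d_transpose_nchw():
    data = te.placeholder((1, 8, 8, 8), name="data")
    kernel = te.placeholder((8, 16, 4, 4), name="kernel")
    with tvm.target.Target("llvm"):
        # Arguments are passed positionally; the AutoTVM wrapper may not accept kwargs.
        out = conv2d_transpose_nchw(data, kernel, (2, 2), (1, 1), "float32", (0, 0))
    sched = te.create_schedule(out.op)
    return tvm.lower(sched, [data, kernel, out])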
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, no-else-return, unused-argument, import-outside-toplevel
"""Dense schedule for ARM CPU"""
from tvm import autotvm
from .mprofile.dsp.dense import dense_dsp_schedule, dense_dsp_compute
@autotvm.register_topi_compute("dense_dsp.arm_cpu")
def dense_dsp(cfg, data, weight, bias, out_dtype):
"""Compute conv2d_nhwc with v7e-m DSP instructions."""
return dense_dsp_compute(cfg, data, weight, bias=bias, out_dtype=out_dtype)
@autotvm.register_topi_schedule("dense_dsp.arm_cpu")
def schedule_dense_dsp(cfg, outs):
"""Create schedule for dense_dsp"""
return dense_dsp_schedule(cfg, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""Depthwise convolution schedule for ARM CPU"""
import tvm
from tvm.target import Target
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from ..utils import traverse_inline, get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import smlal_int16_int32
from .mprofile.dsp.depthwise_conv2d import (
depthwise_conv2d_nhwc_dsp_compute,
depthwise_conv2d_nhwc_dsp_schedule,
)
from .mprofile.dsp.tensordot_conv2ds import (
depthwise_conv2d_nchw_oihw_dsp_compute,
tensordot_conv2ds_schedule,
)
@autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu")
def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise_conv2d with NCHW layout"""
return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.arm_cpu")
def schedule_depthwise_conv2d_nchw(cfg, outs):
"""Schedule depthwise conv2d
Parameters
----------
cfg: ConfigEntity
The configuration of this template
outs: Array of Tensor
        The computation graph description of depthwise conv2d
        in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nchw.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data, data_pad, kernel, output):
A, B, C = data, kernel, output
s[data_pad].compute_inline()
##### space definition begin #####
n, c, h, w = s[output].op.axis
_, vc = cfg.define_split("tile_c", c, num_outputs=2)
_, vh = cfg.define_split("tile_h", h, num_outputs=2)
_, vw = cfg.define_split("tile_w", w, num_outputs=2)
cfg.define_annotate("ann", [vh, vw, vc], policy="try_unroll_vec")
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
"arm_cpu", "rk3399", "depthwise_conv2d_nchw.arm_cpu"
)
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
        # pack data to vector form [n, c, h, w] -> [n, C, h, w, VC]
A0 = s.cache_read(data_pad, "global", C)
n, c, h, w = s[A0].op.axis
c, vc = cfg["tile_c"].apply(s, A0, c)
s[A0].reorder(n, c, h, w, vc)
A1 = s.cache_write(A0, "global")
s[A0].compute_inline()
        # pack kernel to vector form [co, ci, kh, kw] -> [CO, ci, kh, kw, VC]
B0 = s.cache_read(B, "global", C)
c, m, h, w = s[B0].op.axis
        c, vc = cfg["tile_c"].apply(s, B0, c)
s[B0].reorder(c, m, h, w, vc)
B1 = s.cache_write(B0, "global")
s[B0].compute_inline()
n, c, h, w = s[C].op.axis
        c, vc = cfg["tile_c"].apply(s, C, c)
s[C].reorder(n, c, h, w, vc)
# depthwise conv
C0 = s.cache_write(C, "global")
_, c, h, w, vc = s[C0].op.axis
dh, dw = s[C0].op.reduce_axis
oh, ih = cfg["tile_h"].apply(s, C0, h)
ow, iw = cfg["tile_w"].apply(s, C0, w)
s[C0].reorder(c, oh, ow, dh, dw, ih, iw, vc)
s[A1].compute_at(s[C0], oh)
# try unroll and vectorization
cfg["ann"].apply(
s,
C0,
[ih, iw, vc],
axis_lens=[cfg["tile_h"].size[-1], cfg["tile_w"].size[-1], cfg["tile_c"].size[-1]],
max_unroll=16,
cfg=cfg,
)
# fusion
if C.op not in s.outputs:
s[C].compute_inline()
# mark parallel
last = outs[0]
n, c, h, w = s[last].op.axis
s[last].parallel(c)
n, c, h, w, vc = s[C0].op.axis
s[C0].parallel(c)
c, m, h, w, vc = s[B1].op.axis
s[B1].parallel(c)
return s
def _callback(op):
if op.tag == "depthwise_conv2d_nchw":
output = op.output(0)
kernel = op.input_tensors[1]
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
_schedule(cfg, s, data, data_pad, kernel, output)
traverse_inline(s, outs[0].op, _callback)
return s
# TODO:
# This schedule produces incorrect results on some hardware platforms (like NV Jetson TX2).
# It is kept here for reference rather than removed.
# See discussion:
# https://discuss.tvm.apache.org/t/autotuner-incorrect-result-after-tuning-mobilenetv2-on-arm-cpu
@autotvm.register_topi_compute("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for depthwise_conv2d nchw
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [num_filter, multiplier, filter_height, filter_width] or
pre-packed 5-D with shape [num_filter_chunk, multiplier, filter_height,
filter_width, num_filter_block]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2)
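# Editorial note: as an example of the pre-packed kernel layout described in the
# docstring above, a (32, 1, 3, 3) [num_filter, multiplier, kh, kw] depthwise
# kernel packed with num_filter_block VC = 8 becomes a 5-D tensor of shape
# (4, 1, 3, 3, 8) [num_filter_chunk, multiplier, kh, kw, num_filter_block].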
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.arm_cpu")
def compute_depthwise_conv2d_nhwc(_, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for depthwise_conv2d nhwc
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_height, in_width, in_channel]
kernel : tvm.te.Tensor
4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_height, out_width, out_channel]
"""
out_dtype = out_dtype or data.dtype
N, IH, IW, IC = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
KH, KW, IC, channel_multiplier = get_const_tuple(kernel.shape)
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
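    # Worked example of the output-size formula (editorial note): for a 32x32
    # input, a 3x3 kernel with dilation 1, padding of 1 on every side and
    # stride 2, OH = OW = (32 + 1 + 1 - 3) // 2 + 1 = 16.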
if pad_top or pad_left or pad_down or pad_right:
data_pad = nn.pad(
data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
)
else:
data_pad = data
output_shape = (N, OH, OW, IC * channel_multiplier)
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
reduce_h = te.reduce_axis((0, KH), name="reduce_h")
reduce_w = te.reduce_axis((0, KW), name="reduce_w")
out = te.compute(
output_shape,
lambda n, h, w, c: te.sum(
data_pad[
n,
HSTR * h + dilation_h * reduce_h,
w * WSTR + reduce_w * dilation_w,
idxdiv(c, channel_multiplier),
].astype(out_dtype)
* kernel[
reduce_h, reduce_w, idxdiv(c, channel_multiplier), idxmod(c, channel_multiplier)
].astype(out_dtype),
axis=[reduce_h, reduce_w],
),
name="depthwise_conv2d_nhwc_output",
)
return out
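# Minimal usage sketch (editorial addition, not part of the upstream module):
# it declares the NHWC depthwise compute above and lowers it with a plain TE
# schedule. The shapes, dtype and the "llvm" target string are illustrative
# assumptions; real builds go through the registered AutoTVM schedule below.
def _example_depthwise_conv2d_nhwc():
    data = te.placeholder((1, 32, 32, 16), name="data")
    kernel = te.placeholder((3, 3, 16, 1), name="kernel")
    with tvm.target.Target("llvm"):
        out = compute_depthwise_conv2d_nhwc(data, kernel, (1, 1), (1, 1), (1, 1), "float32")
    sched = te.create_schedule(out.op)
    return tvm.lower(sched, [data, kernel, out])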
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.arm_cpu")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
out = outs[0]
##### space definition begin #####
n, h, w, c = s[out].op.axis
# Split the number of input/output channels
cfg.define_split("tile_c", c, num_outputs=2)
# Split the height of the convolution
_, hi = cfg.define_split("tile_h", h, num_outputs=2)
# Split the width of the convolution
_, wi = cfg.define_split("tile_w", w, num_outputs=2)
    # Additional output stage (e.g., requantization, bias addition, etc.)
    # 0: locate the output on the second-to-last axis of the main computation
    # 1: locate the output closest to the main computation
cfg.define_knob("locate_output", [0, 1])
# Determine if we should unroll the computation of the inner tile
cfg.define_knob("unroll_tile", [True, False])
# fallback support
if cfg.is_fallback:
cfg["tile_c"] = SplitEntity([-1, 8])
cfg["tile_h"] = SplitEntity([-1, 2])
cfg["tile_w"] = SplitEntity([-1, 2])
cfg["locate_output"] = OtherOptionEntity(1)
cfg["unroll_tile"] = OtherOptionEntity(True)
##### space definition end #####
def schedule_conv(conv):
conv_data = conv.op.input_tensors[0]
kernel_data = conv.op.input_tensors[1]
in_type = conv_data.dtype
_, _, IC, channel_multiplier = get_const_tuple(kernel_data.shape)
n, w, h, c = conv.op.axis
r_h, r_w = conv.op.reduce_axis
ho, hi = cfg["tile_h"].apply(s, conv, h)
wo, wi = cfg["tile_w"].apply(s, conv, w)
co, ci = cfg["tile_c"].apply(s, conv, c)
split_val = cfg["tile_c"].size[-1]
target = Target.current(allow_none=False)
use_tensorization = (
(in_type == "int16")
and (split_val == 8)
and (IC % split_val == 0)
and (channel_multiplier == 1)
and target.features.has_asimd
)
data_pad_value = -1
if conv_data.name == "data_pad":
assert isinstance(conv_data.op, tvm.te.ComputeOp)
# Define a strategy for padding computation
cfg.define_knob("data_pad_strategy", [1, 2, 3])
if cfg.is_fallback:
# We cannot inline padding when tensorizing.
# So, if we can tensorize, let's compute_at the closest axis
cfg["data_pad_strategy"] = (
OtherOptionEntity(2) if use_tensorization else OtherOptionEntity(3)
)
# Compute padding on the third to last axis of the computation
if cfg["data_pad_strategy"].val == 1:
s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
s[conv_data].compute_at(s[conv], ho)
# Compute padding on the second to last axis of the computation
if cfg["data_pad_strategy"].val == 2:
s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
s[conv_data].compute_at(s[conv], wo)
# Inline padding during computation
if cfg["data_pad_strategy"].val == 3:
s[conv_data].compute_inline()
data_pad_value = cfg["data_pad_strategy"].val
if use_tensorization and data_pad_value != 3:
smlal = smlal_int16_int32()
s[conv].tensorize(ci, smlal)
else:
s[conv].vectorize(ci)
if cfg["unroll_tile"].val:
s[conv].unroll(r_h)
s[conv].unroll(r_w)
s[conv].unroll(wi)
s[conv].unroll(hi)
s[conv].reorder(n, ho, wo, co, hi, wi, r_h, r_w, ci)
fused_n_ho = s[conv].fuse(n, ho)
return fused_n_ho
def schedule_conv_out(out):
n, h, w, c = out.op.axis
co, ci = cfg["tile_c"].apply(s, out, c)
wo, wi = cfg["tile_w"].apply(s, out, w)
ho, hi = cfg["tile_h"].apply(s, out, h)
s[out].reorder(n, ho, wo, co, hi, wi, ci)
if cfg["unroll_tile"]:
s[out].unroll(wi)
s[out].unroll(hi)
if out.dtype in ["int8", "uint8"]:
# In case of quantized convolution further split the channel in batches of 4 elements
# so that we can use arm intrinsics to run fixed_point_multiplication
ci_outer, ci_inner = s[out].split(ci, 4)
s[out].vectorize(ci_inner)
s[out].unroll(ci_outer)
fused_n_ho = s[out].fuse(n, ho)
return hi, wi, fused_n_ho
def _callback(op):
if op.name == "depthwise_conv2d_nhwc_output":
conv = op.output(0)
if conv != out:
hi, wi, p_axis = schedule_conv_out(out)
schedule_conv(conv)
if cfg["locate_output"].val == 0:
s[conv].compute_at(s[out], hi)
if cfg["locate_output"].val == 1:
s[conv].compute_at(s[out], wi)
else:
p_axis = schedule_conv(out)
s[out].parallel(p_axis)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "spatial_depthwise_conv2d_nchw_output":
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == "kernel_vec":
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
_schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile):
out_dtype = out_dtype or data.dtype
N, C, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
pre_packed = False
C, M, KH, KW = get_const_tuple(kernel.shape)
else: # kernel tensor is pre packed
pre_packed = True
C, M, KH, KW, VC = get_const_tuple(kernel.shape)
C = C * VC
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
# pack data
HPAD = pad_top + pad_down
WPAD = pad_left + pad_right
DOPAD = HPAD != 0 or WPAD != 0
if DOPAD:
data_pad = nn.pad(
data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), name="data_pad"
)
else:
data_pad = data
# fallback support
# Currently, Mali schedule doesn't use it like conv2d.
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
"arm_cpu", "rk3399", "depthwise_conv2d_nchw_spatial_pack.arm_cpu"
)
cfg.fallback_with_reference_log(ref_log)
# ==================== define configuration space ====================
n, c, oh, ow = cfg.axis(N), cfg.axis(C), cfg.axis(OH), cfg.axis(OW)
kh, kw = cfg.reduce_axis(KH), cfg.reduce_axis(KW)
# Currently, Mali schedule doesn't use it like conv2d.
# Leave num_tile for possible future use of Mali schedule
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split("tile_co", c, num_outputs=2)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder(
"reorder_0",
[n, co, oh, ow, kh, kw, vh, vw, vc],
policy="candidate",
candidate=[[n, co, oh, ow, kh, kw, vh, vw, vc], [n, co, oh, ow, kh, kw, vc, vh, vw]],
)
cfg.define_reorder(
"reorder_1",
[n, co, oh, ow, vh, vw, vc],
policy="candidate",
candidate=[
[n, co, oh, ow, vh, vw, vc],
[n, co, oh, ow, vc, vh, vw],
[n, co, oh, ow, vh, vc, vw],
],
)
cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
kvshape = (C // VC, M, KH, KW, VC)
ovshape = (N, C * M // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, C * M, OH, OW)
if dilation_h != 1 or dilation_w != 1:
# undilate input data
dvshape = (N, OH // VH, OW // VW, C, KH, KW, VH, VW)
data_vec = te.compute(
dvshape,
lambda n, h, w, c, kh, kw, vh, vw: data_pad[n][c][
(h * VH + vh) * HSTR + kh * dilation_h
][(w * VW + vw) * WSTR + kw * dilation_w],
name="data_vec_undilated",
)
else:
dvshape = (N, OH // VH, OW // VW, C, VH * HSTR + KH - 1, VW * WSTR + KW - 1)
data_vec = te.compute(
dvshape,
lambda n, h, w, c, vh, vw: data_pad[n][c][h * VH * HSTR + vh][w * VW * WSTR + vw],
name="data_vec",
)
if pre_packed:
kernel_vec = kernel
else:
kernel_vec = te.compute(
kvshape, lambda co, m, kh, kw, vc: kernel[co * VC + vc][m][kh][kw], name="kernel_vec"
)
kh = te.reduce_axis((0, KH), name="kh")
kw = te.reduce_axis((0, KW), name="kw")
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
if dilation_h != 1 or dilation_w != 1:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, idxdiv(co * VC + vc, M), kh, kw, vh, vw].astype(out_dtype)
* kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
axis=[kh, kw],
),
name="depthwise_conv",
)
else:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, idxdiv((co * VC + vc), M), vh * HSTR + kh, vw * WSTR + kw].astype(
out_dtype
)
* kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
axis=[kh, kw],
),
name="depthwise_conv",
)
output = te.compute(
oshape,
lambda n, co, h, w: conv[
n,
idxdiv(co, VC),
idxdiv(h, VH),
idxdiv(w, VW),
idxmod(h, VH),
idxmod(w, VW),
idxmod(co, VC),
],
name="output_unpack",
tag="spatial_depthwise_conv2d_nchw_output",
)
return output
def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last):
"""schedule implementation"""
n, co, oh, ow, vh, vw, vc = s[conv].op.axis
kh, kw = s[conv].op.reduce_axis
if data_vec.op.name == "data_vec_undilated":
_, dv_oh, dv_ow, dv_c, _, _, dv_vh, dv_vw = s[data_vec].op.axis
else:
_, dv_oh, dv_ow, dv_c, dv_vh, dv_vw = s[data_vec].op.axis
data_pad = data_vec.op.input_tensors[0]
if data_pad.op.name == "data_pad":
assert isinstance(data_pad.op, tvm.te.ComputeOp)
has_padding = True
else:
assert isinstance(data_pad.op, tvm.te.PlaceholderOp)
has_padding = False
cfg.define_knob("data_pad_inline", [0, 1, 2, 3, 4])
if cfg["data_pad_inline"].val == 1 and has_padding:
s[data_pad].compute_inline()
if cfg["data_pad_inline"].val == 2 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
if cfg["data_pad_inline"].val == 3 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
s[data_pad].compute_at(s[data_vec], dv_oh)
if cfg["data_pad_inline"].val == 4 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
s[data_pad].compute_at(s[data_vec], dv_ow)
cfg.define_knob("data_vec_inline", [0, 1, 2, 3])
if cfg["data_vec_inline"].val == 1:
s[data_vec].compute_at(s[conv], oh)
if cfg["data_vec_inline"].val == 2:
s[data_vec].compute_at(s[conv], ow)
if cfg["data_vec_inline"].val == 3:
s[data_vec].compute_at(s[conv], co)
# schedule conv
cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, kh, kw, vh, vw, vc])
cfg["ann_reduce"].apply(
s,
conv,
[kh, kw],
axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
max_unroll=16,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s,
conv,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=16,
cfg=cfg,
)
# schedule fusion
n, co, h, w = s[last].op.axis
co, vc = cfg["tile_co"].apply(s, last, co)
oh, vh = cfg["tile_oh"].apply(s, last, h)
ow, vw = cfg["tile_ow"].apply(s, last, w)
cfg["reorder_1"].apply(s, last, [n, co, oh, ow, vh, vw, vc])
if last != output:
s[output].compute_inline()
cfg["ann_spatial"].apply(
s,
last,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=16,
cfg=cfg,
)
else:
s[last].vectorize(vw)
cfg.define_knob("conv_inline", [0, 1, 2, 3])
if cfg["conv_inline"].val == 1:
s[conv].compute_at(s[last], ow)
if cfg["conv_inline"].val == 2:
s[conv].compute_at(s[last], oh)
if cfg["conv_inline"].val == 3:
s[conv].compute_at(s[last], co)
# mark parallel
s[last].parallel(co)
if data_vec.op.name == "data_vec_undilated":
_, h, _, _, _, _, _, _ = s[data_vec].op.axis
else:
_, h, _, _, _, _ = s[data_vec].op.axis
s[data_vec].parallel(h)
if kernel_vec.op.name == "kernel_vec":
co, _, _, _, _ = s[kernel_vec].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
# kernel packing will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[kernel_vec].pragma(co, "debug_skip_region")
else:
s[kernel_vec].parallel(co)
return s
@autotvm.register_topi_compute("depthwise_conv2d_nhwc_dsp.arm_cpu")
def depthwise_conv2d_nhwc_dsp(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nhwc with v7e-m DSP instructions."""
return depthwise_conv2d_nhwc_dsp_compute(
cfg, data, kernel, strides, padding, dilation, out_dtype
)
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc_dsp.arm_cpu")
def schedule_depthwise_conv2d_nhwc_dsp(cfg, outs):
"""Create schedule for conv2d_nhwc_dsp"""
return depthwise_conv2d_nhwc_dsp_schedule(cfg, outs)
@autotvm.register_topi_compute("depthwise_conv2d_nchw_oihw_dsp.arm_cpu")
def depthwise_conv2d_nchw_oihw_dsp(
cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype
):
"""Compute depthwise_conv2d_nchw_oihw with v7e-m DSP instructions and the tensordot kernel."""
return depthwise_conv2d_nchw_oihw_dsp_compute(
cfg, data, kernel, strides, padding, dilation, out_layout, out_dtype
)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw_oihw_dsp.arm_cpu")
def schedule_depthwise_conv2d_nchw_oihw_dsp(cfg, outs):
"""Create schedule for depthwise_conv2d_nchw_oihw."""
return tensordot_conv2ds_schedule(cfg, outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/topi/arm_cpu/group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter,import-outside-toplevel
"""Grouped Spatial Pack Convolution (Group Conv2D) schedule on ARM"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from ..utils import get_const_tuple
from ..nn.pad import pad
from .. import tag
from ..nn.conv2d import _get_workload as _get_conv2d_workload
def group_conv2d_nchw(data, kernel, strides, padding, dilation, groups, out_dtype):
"""Compute group_conv2d with NCHW layout"""
return group_conv2d_nchw_spatial_pack(
data, kernel, strides, padding, dilation, groups, out_dtype
)
def schedule_group_conv2d_nchw(outs):
"""Compute group_conv2d with NCHW layout"""
return schedule_group_conv2d_nchwc(outs)
def _get_default_config(
cfg, data, kernel, strides, padding, dilation, groups, out_dtype, layout="NCHW"
):
"""
Get default schedule config for the workload
"""
static_data_shape = []
for dim in get_const_tuple(data.shape):
if isinstance(dim, tvm.tir.Var):
static_data_shape.append(1)
else:
static_data_shape.append(dim)
data = te.placeholder(static_data_shape, dtype=data.dtype)
wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
_fallback_schedule(cfg, wkl)
def _fallback_schedule(cfg, wkl):
simd_width = 4 # assume ARM SIMD Width is 4
pad_left, pad_right = wkl.padl, wkl.padr
stride_w = wkl.stride_w
out_width = (wkl.width + pad_left + pad_right - wkl.kernel_w) // stride_w + 1
groups = wkl.groups
kernels_per_group = wkl.out_filter // groups
kernel_depth = wkl.in_filter // groups
    oc_bn = 1
for bn in range(simd_width, 0, -1):
if kernels_per_group % bn == 0:
oc_bn = bn
break
if oc_bn > kernels_per_group:
oc_bn = kernels_per_group
ic_bn = 1
for bn in range(oc_bn, 0, -1):
if kernel_depth % bn == 0:
ic_bn = bn
break
if ic_bn > kernel_depth:
ic_bn = kernel_depth
reg_n = 1
for n in range(31, 0, -1):
if out_width % n == 0:
reg_n = n
break
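    # Worked example of the heuristic above (editorial note): for a workload
    # with out_filter = 32, in_filter = 16, groups = 2 and out_width = 28,
    # kernels_per_group = 16 and kernel_depth = 8, so oc_bn = ic_bn = 4 and
    # reg_n = 28 (the largest value <= 31 that divides the output width).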
cfg["tile_ic"] = SplitEntity([wkl.in_filter // ic_bn, ic_bn])
cfg["tile_oc"] = SplitEntity([wkl.out_filter // oc_bn, oc_bn])
cfg["tile_ow"] = SplitEntity([out_width // reg_n, reg_n])
cfg["unroll_kw"] = OtherOptionEntity(False)
@autotvm.register_topi_compute("group_conv2d_nchw.arm_cpu")
def group_conv2d_nchw_spatial_pack(
cfg, data, kernel, strides, padding, dilation, groups, out_dtype="float32"
):
"""
Compute group conv2d with NCHW layout, using GSPC algorithm.
https://arxiv.org/abs/2006.09791
"""
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(dilation, int):
dilation_h, dilation_w = dilation, dilation
else:
dilation_h, dilation_w = dilation
assert isinstance(padding, int) or len(padding) == 2 or len(padding) == 4
if isinstance(padding, int):
pad_top, pad_left, pad_bottom, pad_right = padding, padding, padding, padding
elif len(padding) == 2:
hpad, wpad = padding
pad_top, pad_bottom = hpad, hpad
pad_left, pad_right = wpad, wpad
else:
pad_top, pad_left, pad_bottom, pad_right = padding
hpad = pad_top + pad_bottom
wpad = pad_left + pad_right
assert isinstance(strides, int) or len(strides) == 2
if isinstance(strides, int):
stride_h, stride_w = strides, strides
else:
stride_h, stride_w = strides
batch_size, in_channel, in_height, in_width = get_const_tuple(data.shape)
out_channel, kernel_depth, k_height, k_width = get_const_tuple(kernel.shape)
pad_height = in_height + pad_top + pad_bottom
pad_width = in_width + pad_left + pad_right
dilated_kernel_h = (k_height - 1) * dilation_h + 1
dilated_kernel_w = (k_width - 1) * dilation_w + 1
out_height = (in_height + pad_top + pad_bottom - dilated_kernel_h) // stride_h + 1
out_width = (in_width + pad_left + pad_right - dilated_kernel_w) // stride_w + 1
kernels_per_group = out_channel // groups
cfg.define_split("tile_ic", in_channel, num_outputs=2)
cfg.define_split("tile_oc", out_channel, num_outputs=2)
cfg.define_split("tile_ow", out_width, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
cfg.define_knob("unroll_kw", [True, False])
# If no config was set, we can fallback to default config.
if cfg.is_fallback:
_get_default_config(
cfg,
te.placeholder((batch_size, in_channel, in_height, in_width), dtype=data.dtype),
te.placeholder(
(out_channel, in_channel // groups, k_height, k_width), dtype=kernel.dtype
),
strides,
padding,
dilation,
groups,
out_dtype,
)
oc_bn = cfg["tile_oc"].size[-1]
ic_bn = cfg["tile_ic"].size[-1]
# pack data
DOPAD = hpad != 0 or wpad != 0
if DOPAD:
data_pad = pad(
data, (0, 0, pad_top, pad_left), (0, 0, pad_bottom, pad_right), name="data_pad"
)
else:
data_pad = data
shape = (groups, batch_size, kernel_depth // ic_bn, pad_height, ic_bn, pad_width)
data_vec = te.compute(
shape,
lambda g, n, C, h, c, w: data_pad[n, C * ic_bn + c + kernel_depth * g, h, w],
name="data_vec",
)
# pack kernel
shape = (
groups,
kernels_per_group // oc_bn,
kernel_depth // ic_bn,
k_height,
k_width,
ic_bn,
oc_bn,
)
kernel_vec = te.compute(
shape,
lambda g, out_channel, in_channel, h, w, ci, co: kernel[
(out_channel * oc_bn + co + g * kernels_per_group), in_channel * ic_bn + ci, h, w
],
name="kernel_vec",
)
# convolution
oshape = (groups, batch_size, kernels_per_group // oc_bn, out_height, out_width, oc_bn)
unpack_shape = (batch_size, out_channel, out_height, out_width)
ic = te.reduce_axis((0, (kernel_depth)), name="ic")
kh = te.reduce_axis((0, k_height), name="kh")
kw = te.reduce_axis((0, k_width), name="kw")
idxmod = tvm.tir.indexmod
idxdiv = tvm.tir.indexdiv
conv = te.compute(
oshape,
lambda g, n, oc_chunk, oh, ow, oc_block: te.sum(
data_vec[
g,
n,
idxdiv(ic, ic_bn),
oh * stride_h + kh * dilation_h,
idxmod(ic, ic_bn),
ow * stride_w + kw * dilation_w,
].astype(out_dtype)
* kernel_vec[
g, oc_chunk, idxdiv(ic, ic_bn), kh, kw, idxmod(ic, ic_bn), oc_block
].astype(out_dtype),
axis=[ic, kh, kw],
),
name="conv",
)
unpack = te.compute(
unpack_shape,
lambda n, c, h, w: conv[
idxdiv(c, kernels_per_group),
n,
idxmod(idxdiv(c, oc_bn), (kernels_per_group // oc_bn)),
h,
w,
idxmod(idxmod(c, oc_bn), kernels_per_group),
].astype(out_dtype),
name="output_unpack",
tag="group_conv2d_nchw",
)
return unpack
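# Minimal usage sketch (editorial addition, not part of the upstream module):
# it declares the GSPC compute above under an untuned fallback config and
# lowers it with a plain TE schedule. The shapes, group count and the "llvm"
# target string are illustrative assumptions; real builds use the registered
# AutoTVM schedule below.
def _example_group_conv2d_nchw():
    data = te.placeholder((1, 16, 32, 32), name="data")
    kernel = te.placeholder((32, 8, 3, 3), name="kernel")  # 2 groups of 8 input channels
    with tvm.target.Target("llvm"):
        out = group_conv2d_nchw_spatial_pack(data, kernel, (1, 1), (1, 1), (1, 1), 2, "float32")
    sched = te.create_schedule(out.op)
    return tvm.lower(sched, [data, kernel, out])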
@autotvm.register_topi_schedule("group_conv2d_nchw.arm_cpu")
def schedule_group_conv2d_nchwc(cfg, outs):
"""Create schedule for tensors"""
s = te.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if "group_conv2d_nchw" in op.tag:
output = op.output(0)
if "tile_ic" not in cfg:
return
conv_out = op.input_tensors[0]
kernel_vec = conv_out.op.input_tensors[1]
kernel = kernel_vec.op.input_tensors[0]
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, outs[0]]
_schedule_gspc_nchw(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
def _schedule_gspc_nchw(s, cfg, data, data_pad, data_vec, kernel_vec, conv_out, output, last):
"""Schedule GSPC"""
ic_bn, oc_bn, reg_n, unroll_kw = (
cfg["tile_ic"].size[-1],
cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1],
cfg["unroll_kw"].val,
)
_, W = data, kernel_vec
A0, A1 = data_pad, data_vec
# schedule data
if (
data_pad is not None
and isinstance(data_pad.op, tvm.te.ComputeOp)
and "pad" in data_pad.op.tag
):
s[A0].compute_inline()
groups, batch, ic_chunk, ih, ic_block, _ = s[A1].op.axis
parallel_axis = s[A1].fuse(batch, ic_chunk, ih)
s[A1].parallel(parallel_axis)
# schedule kernel pack
groups, oc_chunk, ic_chunk, oh, ow, ic_block, oc_block = s[W].op.axis
s[W].reorder(oc_chunk, oh, ic_chunk, ow, ic_block, oc_block)
if oc_bn > 1:
s[W].vectorize(oc_block)
parallel_axis = s[W].fuse(groups, oc_chunk, oh)
s[W].parallel(parallel_axis)
# schedule conv
C, O0, O = conv_out, output, last
CC = s.cache_write(C, "global")
_, _, oc_chunk, oh, ow, oc_block = s[C].op.axis
ow_chunk, ow_block = s[C].split(ow, factor=reg_n)
s[C].reorder(oc_chunk, oh, ow_chunk, ow_block, oc_block)
s[C].fuse(oc_chunk, oh)
s[C].vectorize(oc_block)
groups, batch, oc_chunk, oh, ow, oc_block = s[CC].op.axis
ic, kh, kw = s[CC].op.reduce_axis
ow_chunk, ow_block = s[CC].split(ow, factor=reg_n)
ic_chunk, ic_block = s[CC].split(ic, factor=ic_bn)
if unroll_kw:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, ic_block, kw, ow_block, oc_block)
s[CC].unroll(kw)
else:
s[CC].reorder(oc_chunk, oh, ow_chunk, ic_chunk, kh, kw, ic_block, ow_block, oc_block)
parallel_axis = s[CC].fuse(groups, batch, oc_chunk, oh)
s[CC].parallel(parallel_axis)
s[CC].vectorize(oc_block)
s[CC].unroll(ow_block)
if O0 != O:
s[O0].compute_inline()
batch, oc, oh, ow = s[O].op.axis
ow_chunk, ow_block = s[O].split(ow, factor=reg_n)
oc_chunk, oc_block = s[O].split(oc, factor=oc_bn)
s[O].reorder(batch, oc_chunk, oh, ow_chunk, ow_block, oc_block)
parallel_axis = s[O].fuse(oc_chunk, oh)
s[O].vectorize(oc_block)
s[O].parallel(parallel_axis)
return s
| https://github.com/zk-ml/tachikoma |